/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/mdlsup.c
 * PURPOSE:         ARM Memory Manager Memory Descriptor List (MDL) Management
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */

/* INCLUDES *******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#line 15 "ARM³::MDLSUP"
#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"

BOOLEAN MmTrackPtes;
BOOLEAN MmTrackLockedPages;

/* PUBLIC FUNCTIONS ***********************************************************/

/*
 * @implemented
 */
PMDL
NTAPI
MmCreateMdl(IN PMDL Mdl,
            IN PVOID Base,
            IN ULONG Length)
{
    ULONG Size;

    //
    // Check if we don't have an MDL built
    //
    if (!Mdl)
    {
        //
        // Calculate the size we'll need and allocate the MDL
        //
        Size = MmSizeOfMdl(Base, Length);
        Mdl = ExAllocatePoolWithTag(NonPagedPool, Size, TAG_MDL);
        if (!Mdl) return NULL;
    }

    //
    // Initialize it
    //
    MmInitializeMdl(Mdl, Base, Length);
    return Mdl;
}

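/*
 * Illustrative usage sketch (not part of the original file): a caller may
 * pass a preallocated MDL, or pass NULL and let MmCreateMdl allocate one
 * from nonpaged pool. Buffer and Length below are hypothetical identifiers.
 *
 *     PMDL Mdl = MmCreateMdl(NULL, Buffer, Length);
 *     if (Mdl)
 *     {
 *         // ... build, lock, or map the described buffer ...
 *         ExFreePool(Mdl);
 *     }
 *
 * Note that newer DDKs document MmCreateMdl as obsolete in favor of
 * IoAllocateMdl.
 */
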
/*
 * @implemented
 */
ULONG
NTAPI
MmSizeOfMdl(IN PVOID Base,
            IN ULONG Length)
{
    //
    // Return the MDL size
    //
    return sizeof(MDL) +
           (ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Length) * sizeof(PFN_NUMBER));
}

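/*
 * Worked example (illustrative): with 4KB pages, a 2-byte buffer starting at
 * offset 0xFFF straddles a page boundary, so ADDRESS_AND_SIZE_TO_SPAN_PAGES
 * returns 2 and MmSizeOfMdl returns sizeof(MDL) + 2 * sizeof(PFN_NUMBER).
 * Callers that embed an MDL in a larger structure size it the same way
 * (Buffer and Length are hypothetical):
 *
 *     ULONG Size = MmSizeOfMdl(Buffer, Length);
 *     PMDL Mdl = ExAllocatePoolWithTag(NonPagedPool, Size, TAG_MDL);
 */
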
/*
 * @implemented
 */
VOID
NTAPI
MmBuildMdlForNonPagedPool(IN PMDL Mdl)
{
    PPFN_NUMBER MdlPages, EndPage;
    PFN_NUMBER Pfn, PageCount;
    PVOID Base;
    PMMPTE PointerPte;

    //
    // Sanity checks
    //
    ASSERT(Mdl->ByteCount != 0);
    ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED |
                             MDL_MAPPED_TO_SYSTEM_VA |
                             MDL_SOURCE_IS_NONPAGED_POOL |
                             MDL_PARTIAL)) == 0);

    //
    // We know the MDL isn't associated to a process now
    //
    Mdl->Process = NULL;

    //
    // Get page and VA information
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);
    Base = Mdl->StartVa;

    //
    // Set the system address and now get the page count
    //
    Mdl->MappedSystemVa = (PVOID)((ULONG_PTR)Base + Mdl->ByteOffset);
    PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Mdl->MappedSystemVa,
                                               Mdl->ByteCount);
    ASSERT(PageCount != 0);
    EndPage = MdlPages + PageCount;

    //
    // Loop the PTEs
    //
    PointerPte = MiAddressToPte(Base);
    do
    {
        //
        // Write the PFN
        //
        Pfn = PFN_FROM_PTE(PointerPte++);
        *MdlPages++ = Pfn;
    } while (MdlPages < EndPage);

    //
    // Set the nonpaged pool flag
    //
    Mdl->MdlFlags |= MDL_SOURCE_IS_NONPAGED_POOL;

    //
    // Check if this is an I/O mapping
    //
    if (!MiGetPfnEntry(Pfn)) Mdl->MdlFlags |= MDL_IO_SPACE;
}

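/*
 * Illustrative pairing (hypothetical identifiers; not part of the original
 * file): this routine is meant for MDLs describing nonpaged pool, where no
 * probing or locking is needed because the pages are already resident.
 *
 *     PVOID Buffer = ExAllocatePoolWithTag(NonPagedPool, Length, 'lkdM');
 *     PMDL Mdl = Buffer ? IoAllocateMdl(Buffer, Length, FALSE, FALSE, NULL)
 *                       : NULL;
 *     if (Mdl)
 *     {
 *         MmBuildMdlForNonPagedPool(Mdl);  // fills the PFN array in place
 *         // ... hand the MDL to the I/O stack; IoFreeMdl(Mdl) when done ...
 *     }
 */
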
/*
 * @implemented
 */
PMDL
NTAPI
MmAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress,
                      IN PHYSICAL_ADDRESS HighAddress,
                      IN PHYSICAL_ADDRESS SkipBytes,
                      IN SIZE_T TotalBytes)
{
    //
    // Call the internal routine
    //
    return MiAllocatePagesForMdl(LowAddress,
                                 HighAddress,
                                 SkipBytes,
                                 TotalBytes,
                                 MiNotMapped,
                                 0);
}

/*
 * @implemented
 */
PMDL
NTAPI
MmAllocatePagesForMdlEx(IN PHYSICAL_ADDRESS LowAddress,
                        IN PHYSICAL_ADDRESS HighAddress,
                        IN PHYSICAL_ADDRESS SkipBytes,
                        IN SIZE_T TotalBytes,
                        IN MEMORY_CACHING_TYPE CacheType,
                        IN ULONG Flags)
{
    MI_PFN_CACHE_ATTRIBUTE CacheAttribute;

    //
    // Check for invalid cache type
    //
    if (CacheType > MmWriteCombined)
    {
        //
        // Normalize to default
        //
        CacheAttribute = MiNotMapped;
    }
    else
    {
        //
        // Convert to the internal caching attribute
        //
        CacheAttribute = MiPlatformCacheAttributes[FALSE][CacheType];
    }

    //
    // Only these flags are allowed
    //
    if (Flags & ~(MM_DONT_ZERO_ALLOCATION | MM_ALLOCATE_FROM_LOCAL_NODE_ONLY))
    {
        //
        // Silently fail
        //
        return NULL;
    }

    //
    // Call the internal routine
    //
    return MiAllocatePagesForMdl(LowAddress,
                                 HighAddress,
                                 SkipBytes,
                                 TotalBytes,
                                 CacheAttribute,
                                 Flags);
}

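/*
 * Illustrative sketch (hypothetical values): requesting DMA-capable pages
 * below 4GB and skipping the zeroing pass. The routine may return an MDL
 * describing fewer bytes than requested, so Mdl->ByteCount must be checked,
 * and the MDL itself must still be freed after MmFreePagesFromMdl.
 *
 *     PHYSICAL_ADDRESS Low, High, Skip;
 *     PMDL Mdl;
 *
 *     Low.QuadPart = 0;
 *     High.QuadPart = 0x00000000FFFFFFFF;
 *     Skip.QuadPart = 0;
 *     Mdl = MmAllocatePagesForMdlEx(Low, High, Skip, Length,
 *                                   MmCached, MM_DONT_ZERO_ALLOCATION);
 *     if (Mdl)
 *     {
 *         // ... use the pages (Mdl->ByteCount may be < Length) ...
 *         MmFreePagesFromMdl(Mdl);
 *         ExFreePool(Mdl);
 *     }
 */
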
/*
 * @implemented
 */
VOID
NTAPI
MmFreePagesFromMdl(IN PMDL Mdl)
{
    PVOID Base;
    PPFN_NUMBER Pages;
    LONG NumberOfPages;
    PMMPFN Pfn1;
    KIRQL OldIrql;
    DPRINT("Freeing MDL: %p\n", Mdl);

    //
    // Sanity checks
    //
    ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
    ASSERT((Mdl->MdlFlags & MDL_IO_SPACE) == 0);
    ASSERT(((ULONG_PTR)Mdl->StartVa & (PAGE_SIZE - 1)) == 0);

    //
    // Get address and page information
    //
    Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
    NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);

    //
    // Acquire PFN lock
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    //
    // Loop all the MDL pages
    //
    Pages = (PPFN_NUMBER)(Mdl + 1);
    do
    {
        //
        // Reached the last page
        //
        if (*Pages == -1) break;

        //
        // Sanity check
        //
        ASSERT(*Pages <= MmHighestPhysicalPage);

        //
        // Get the page entry
        //
        Pfn1 = MiGetPfnEntry(*Pages);
        ASSERT(Pfn1->u3.ReferenceCount == 1);

        //
        // Clear it
        //
        Pfn1->u3.e1.StartOfAllocation = 0;
        Pfn1->u3.e1.EndOfAllocation = 0;

        //
        // Dereference it
        //
        MmDereferencePage(*Pages);

        //
        // Clear this page and move on
        //
        *Pages++ = -1;
    } while (--NumberOfPages != 0);

    //
    // Release the lock
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    //
    // Remove the pages locked flag
    //
    Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
}

/*
 * @implemented
 */
PVOID
NTAPI
MmMapLockedPagesSpecifyCache(IN PMDL Mdl,
                             IN KPROCESSOR_MODE AccessMode,
                             IN MEMORY_CACHING_TYPE CacheType,
                             IN PVOID BaseAddress,
                             IN ULONG BugCheckOnFailure,
                             IN MM_PAGE_PRIORITY Priority)
{
    PVOID Base;
    PPFN_NUMBER MdlPages, LastPage;
    PFN_NUMBER PageCount;
    BOOLEAN IsIoMapping;
    MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
    PMMPTE PointerPte;
    MMPTE TempPte;

    //
    // Sanity check
    //
    ASSERT(Mdl->ByteCount != 0);

    //
    // Get the base
    //
    Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);

    //
    // Handle kernel case first
    //
    if (AccessMode == KernelMode)
    {
        //
        // Get the list of pages and count
        //
        MdlPages = (PPFN_NUMBER)(Mdl + 1);
        PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);
        LastPage = MdlPages + PageCount;

        //
        // Sanity checks
        //
        ASSERT((Mdl->MdlFlags & (MDL_MAPPED_TO_SYSTEM_VA |
                                 MDL_SOURCE_IS_NONPAGED_POOL |
                                 MDL_PARTIAL_HAS_BEEN_MAPPED)) == 0);
        ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED | MDL_PARTIAL)) != 0);

        //
        // Get the correct cache type
        //
        IsIoMapping = (Mdl->MdlFlags & MDL_IO_SPACE) != 0;
        CacheAttribute = MiPlatformCacheAttributes[IsIoMapping][CacheType];

        //
        // Reserve the PTEs
        //
        PointerPte = MiReserveSystemPtes(PageCount, SystemPteSpace);
        if (!PointerPte)
        {
            //
            // If it can fail, return NULL
            //
            if (Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL) return NULL;

            //
            // Should we bugcheck?
            //
            if (!BugCheckOnFailure) return NULL;

            //
            // Yes, crash the system
            //
            KeBugCheckEx(NO_MORE_SYSTEM_PTES, 0, PageCount, 0, 0);
        }

        //
        // Get the mapped address
        //
        Base = (PVOID)((ULONG_PTR)MiPteToAddress(PointerPte) + Mdl->ByteOffset);

        //
        // Get the template
        //
        TempPte = ValidKernelPte;
        switch (CacheAttribute)
        {
            case MiNonCached:

                //
                // Disable caching
                //
                MI_PAGE_DISABLE_CACHE(&TempPte);
                MI_PAGE_WRITE_THROUGH(&TempPte);
                break;

            case MiWriteCombined:

                //
                // Enable write combining
                //
                MI_PAGE_DISABLE_CACHE(&TempPte);
                MI_PAGE_WRITE_COMBINED(&TempPte);
                break;

            default:
                //
                // Nothing to do
                //
                break;
        }

        //
        // Loop all PTEs
        //
        do
        {
            //
            // We're done here
            //
            if (*MdlPages == -1) break;

            //
            // Write the PTE
            //
            TempPte.u.Hard.PageFrameNumber = *MdlPages;
            MI_WRITE_VALID_PTE(PointerPte++, TempPte);
        } while (++MdlPages < LastPage);

        //
        // Mark it as mapped
        //
        ASSERT((Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) == 0);
        Mdl->MappedSystemVa = Base;
        Mdl->MdlFlags |= MDL_MAPPED_TO_SYSTEM_VA;

        //
        // Check if it was partial
        //
        if (Mdl->MdlFlags & MDL_PARTIAL)
        {
            //
            // Write the appropriate flag here too
            //
            Mdl->MdlFlags |= MDL_PARTIAL_HAS_BEEN_MAPPED;
        }

        //
        // Return the mapped address
        //
        return Base;
    }

    //
    // In user-mode, let ReactOS do it
    //
    return MiMapLockedPagesInUserSpace(Mdl, Base, CacheType, BaseAddress);
}

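/*
 * Illustrative sketch (assumed context: Mdl already has locked pages):
 * mapping into system space without risking the NO_MORE_SYSTEM_PTES
 * bugcheck, then unmapping once done.
 *
 *     PVOID Va = MmMapLockedPagesSpecifyCache(Mdl,
 *                                             KernelMode,
 *                                             MmCached,
 *                                             NULL,   // let Mm choose the VA
 *                                             FALSE,  // fail, don't bugcheck
 *                                             NormalPagePriority);
 *     if (Va)
 *     {
 *         // ... access the buffer through Va ...
 *         MmUnmapLockedPages(Va, Mdl);
 *     }
 */
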
/*
 * @implemented
 */
PVOID
NTAPI
MmMapLockedPages(IN PMDL Mdl,
                 IN KPROCESSOR_MODE AccessMode)
{
    //
    // Call the extended version
    //
    return MmMapLockedPagesSpecifyCache(Mdl,
                                        AccessMode,
                                        MmCached,
                                        NULL,
                                        TRUE,
                                        HighPagePriority);
}

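/*
 * Note (informational): drivers rarely call MmMapLockedPages directly; the
 * MmGetSystemAddressForMdlSafe macro is the usual entry point. It returns an
 * existing mapping when one is present and otherwise calls
 * MmMapLockedPagesSpecifyCache with failure allowed instead of a bugcheck:
 *
 *     PVOID Va = MmGetSystemAddressForMdlSafe(Mdl, NormalPagePriority);
 *     if (!Va) return STATUS_INSUFFICIENT_RESOURCES;
 */
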
/*
 * @implemented
 */
VOID
NTAPI
MmUnmapLockedPages(IN PVOID BaseAddress,
                   IN PMDL Mdl)
{
    PVOID Base;
    PFN_NUMBER PageCount;
    PPFN_NUMBER MdlPages;
    PMMPTE PointerPte;

    //
    // Sanity check
    //
    ASSERT(Mdl->ByteCount != 0);

    //
    // Check if this is a kernel request
    //
    if (BaseAddress > MM_HIGHEST_USER_ADDRESS)
    {
        //
        // Get base and count information
        //
        Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
        PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);

        //
        // Sanity checks
        //
        ASSERT((Mdl->MdlFlags & MDL_PARENT_MAPPED_SYSTEM_VA) == 0);
        ASSERT(PageCount != 0);
        ASSERT(Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA);

        //
        // Get the PTE
        //
        PointerPte = MiAddressToPte(BaseAddress);

        //
        // This should be a resident system PTE
        //
        ASSERT(PointerPte >= MmSystemPtesStart[SystemPteSpace]);
        ASSERT(PointerPte <= MmSystemPtesEnd[SystemPteSpace]);
        ASSERT(PointerPte->u.Hard.Valid == 1);

        //
        // Check if the caller wants us to free the extra PTEs
        //
        if (Mdl->MdlFlags & MDL_FREE_EXTRA_PTES)
        {
            //
            // Get the MDL page array
            //
            MdlPages = (PPFN_NUMBER)(Mdl + 1);
            MdlPages += PageCount;

            //
            // Do the math
            //
            PageCount += *MdlPages;
            PointerPte -= *MdlPages;
            ASSERT(PointerPte >= MmSystemPtesStart[SystemPteSpace]);
            ASSERT(PointerPte <= MmSystemPtesEnd[SystemPteSpace]);

            //
            // Get the new base address
            //
            BaseAddress = (PVOID)((ULONG_PTR)BaseAddress -
                                  ((*MdlPages) << PAGE_SHIFT));
        }

        //
        // Remove flags
        //
        Mdl->MdlFlags &= ~(MDL_MAPPED_TO_SYSTEM_VA |
                           MDL_PARTIAL_HAS_BEEN_MAPPED |
                           MDL_FREE_EXTRA_PTES);

        //
        // Release the system PTEs
        //
        MiReleaseSystemPtes(PointerPte, PageCount, SystemPteSpace);
    }
    else
    {
        //
        // Let ReactOS handle it
        //
        MiUnmapLockedPagesInUserSpace(BaseAddress, Mdl);
    }
}

/*
 * @implemented
 */
VOID
NTAPI
MmProbeAndLockPages(IN PMDL Mdl,
                    IN KPROCESSOR_MODE AccessMode,
                    IN LOCK_OPERATION Operation)
{
    PPFN_NUMBER MdlPages;
    PVOID Base, Address, LastAddress, StartAddress;
    ULONG LockPages, TotalPages;
    NTSTATUS Status = STATUS_SUCCESS;
    PEPROCESS CurrentProcess;
    PETHREAD Thread;
    PMMSUPPORT AddressSpace;
    NTSTATUS ProbeStatus;
    PMMPTE PointerPte, LastPte;
    PMMPDE PointerPde;
    PFN_NUMBER PageFrameIndex;
    PMMPFN Pfn1;
    BOOLEAN UsePfnLock;
    KIRQL OldIrql;
    DPRINT("Probing MDL: %p\n", Mdl);

    //
    // Sanity checks
    //
    ASSERT(Mdl->ByteCount != 0);
    ASSERT(((ULONG)Mdl->ByteOffset & ~(PAGE_SIZE - 1)) == 0);
    ASSERT(((ULONG_PTR)Mdl->StartVa & (PAGE_SIZE - 1)) == 0);
    ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED |
                             MDL_MAPPED_TO_SYSTEM_VA |
                             MDL_SOURCE_IS_NONPAGED_POOL |
                             MDL_PARTIAL |
                             MDL_IO_SPACE)) == 0);

    //
    // Get page and base information
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);
    Base = (PVOID)Mdl->StartVa;

    //
    // Get the addresses and how many pages we span (and need to lock)
    //
    Address = (PVOID)((ULONG_PTR)Base + Mdl->ByteOffset);
    LastAddress = (PVOID)((ULONG_PTR)Address + Mdl->ByteCount);
    LockPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Address, Mdl->ByteCount);
    ASSERT(LockPages != 0);

    //
    // Get the thread and process
    //
    Thread = PsGetCurrentThread();
    if (Address <= MM_HIGHEST_USER_ADDRESS)
    {
        //
        // Get the process
        //
        CurrentProcess = PsGetCurrentProcess();
    }
    else
    {
        //
        // No process
        //
        CurrentProcess = NULL;
    }

    //
    // Save the number of pages we'll have to lock, and the start address
    //
    TotalPages = LockPages;
    StartAddress = Address;

    //
    // Now probe them
    //
    ProbeStatus = STATUS_SUCCESS;
    _SEH2_TRY
    {
        //
        // Enter probe loop
        //
        do
        {
            //
            // Assume failure
            //
            *MdlPages = -1;

            //
            // Read
            //
            *(volatile CHAR*)Address;

            //
            // Check if this is write access (only probe for user-mode)
            //
            if ((Operation != IoReadAccess) &&
                (Address <= MM_HIGHEST_USER_ADDRESS))
            {
                //
                // Probe for write too
                //
                ProbeForWriteChar(Address);
            }

            //
            // Next address...
            //
            Address = (PVOID)((ULONG_PTR)Address + PAGE_SIZE);
            Address = PAGE_ALIGN(Address);

            //
            // Next page...
            //
            LockPages--;
            MdlPages++;
        } while (Address < LastAddress);

        //
        // Reset back to the original page
        //
        ASSERT(LockPages == 0);
        MdlPages = (PPFN_NUMBER)(Mdl + 1);
    }
    _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
    {
        //
        // Oops :(
        //
        ProbeStatus = _SEH2_GetExceptionCode();
    }
    _SEH2_END;

    //
    // So how did that go?
    //
    if (ProbeStatus != STATUS_SUCCESS)
    {
        //
        // Fail
        //
        DPRINT1("MDL PROBE FAILED!\n");
        Mdl->Process = NULL;
        ExRaiseStatus(ProbeStatus);
    }

    //
    // Get the PTE and PDE
    //
    PointerPte = MiAddressToPte(StartAddress);
    PointerPde = MiAddressToPde(StartAddress);

    //
    // Sanity check
    //
    ASSERT(MdlPages == (PPFN_NUMBER)(Mdl + 1));

    //
    // Check what kind of operation this is
    //
    if (Operation != IoReadAccess)
    {
        //
        // Set the write flag
        //
        Mdl->MdlFlags |= MDL_WRITE_OPERATION;
    }
    else
    {
        //
        // Remove the write flag
        //
        Mdl->MdlFlags &= ~(MDL_WRITE_OPERATION);
    }

    //
    // Mark the MDL as locked *now*
    //
    Mdl->MdlFlags |= MDL_PAGES_LOCKED;

    //
    // Check if this came from kernel mode
    //
    if (Base >= MM_HIGHEST_USER_ADDRESS)
    {
        //
        // We should not have a process
        //
        ASSERT(CurrentProcess == NULL);
        Mdl->Process = NULL;

        //
        // In kernel mode, we don't need to check for write access
        //
        Operation = IoReadAccess;

        //
        // Use the PFN lock
        //
        UsePfnLock = TRUE;
        OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
        AddressSpace = NULL; // Keep compiler happy
    }
    else
    {
        //
        // Sanity checks
        //
        ASSERT(TotalPages != 0);
        ASSERT(CurrentProcess == PsGetCurrentProcess());

        //
        // Track locked pages
        //
        InterlockedExchangeAddSizeT(&CurrentProcess->NumberOfLockedPages,
                                    TotalPages);

        //
        // Save the process
        //
        Mdl->Process = CurrentProcess;

        //
        // Use the process lock
        //
        UsePfnLock = FALSE;
        AddressSpace = &CurrentProcess->Vm;
        MmLockAddressSpace(AddressSpace);
        OldIrql = DISPATCH_LEVEL; // Keep compiler happy
    }

    //
    // Get the last PTE
    //
    LastPte = MiAddressToPte((PVOID)((ULONG_PTR)LastAddress - 1));

    //
    // Loop the pages
    //
    do
    {
        //
        // Assume failure and check for non-mapped pages
        //
        *MdlPages = -1;
        while ((PointerPde->u.Hard.Valid == 0) ||
               (PointerPte->u.Hard.Valid == 0))
        {
            //
            // Which kind of lock were we using?
            //
            if (UsePfnLock)
            {
                //
                // Release PFN lock
                //
                KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
            }
            else
            {
                //
                // Release process address space lock
                //
                MmUnlockAddressSpace(AddressSpace);
            }

            //
            // Access the page
            //
            Address = MiPteToAddress(PointerPte);
            Status = MmAccessFault(FALSE, Address, KernelMode, NULL);
            if (!NT_SUCCESS(Status))
            {
                //
                // Fail
                //
                DPRINT1("Access fault failed\n");
                goto Cleanup;
            }

            //
            // What lock should we use?
            //
            if (UsePfnLock)
            {
                //
                // Grab the PFN lock
                //
                OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
            }
            else
            {
                //
                // Use the address space lock
                //
                MmLockAddressSpace(AddressSpace);
            }
        }

        //
        // Check if this was a write or modify
        //
        if (Operation != IoReadAccess)
        {
            //
            // Check if the PTE is not writable
            //
            if (MI_IS_PAGE_WRITEABLE(PointerPte) == FALSE)
            {
                //
                // Check if it's copy on write
                //
                if (MI_IS_PAGE_COPY_ON_WRITE(PointerPte))
                {
                    //
                    // Get the base address and allow a change for user-mode
                    //
                    Address = MiPteToAddress(PointerPte);
                    if (Address <= MM_HIGHEST_USER_ADDRESS)
                    {
                        //
                        // Which kind of lock were we using?
                        //
                        if (UsePfnLock)
                        {
                            //
                            // Release PFN lock
                            //
                            KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
                        }
                        else
                        {
                            //
                            // Release process address space lock
                            //
                            MmUnlockAddressSpace(AddressSpace);
                        }

                        //
                        // Access the page
                        //
                        Status = MmAccessFault(TRUE, Address, KernelMode, NULL);
                        if (!NT_SUCCESS(Status))
                        {
                            //
                            // Fail
                            //
                            DPRINT1("Access fault failed\n");
                            goto Cleanup;
                        }

                        //
                        // Re-acquire the lock
                        //
                        if (UsePfnLock)
                        {
                            //
                            // Grab the PFN lock
                            //
                            OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
                        }
                        else
                        {
                            //
                            // Use the address space lock
                            //
                            MmLockAddressSpace(AddressSpace);
                        }

                        //
                        // Start over
                        //
                        continue;
                    }
                }

                //
                // Fail, since we won't allow this
                //
                Status = STATUS_ACCESS_VIOLATION;
                goto CleanupWithLock;
            }
        }

        //
        // Grab the PFN
        //
        PageFrameIndex = PFN_FROM_PTE(PointerPte);
        if (PageFrameIndex <= MmHighestPhysicalPage)
        {
            //
            // Get the PFN entry
            //
            Pfn1 = MiGetPfnEntry(PageFrameIndex);
            ASSERT((CurrentProcess == NULL) || (UsePfnLock == FALSE));

            //
            // Now lock the page
            //
            MmReferencePage(PageFrameIndex);
        }
        else
        {
            //
            // For I/O addresses, just remember this
            //
            Mdl->MdlFlags |= MDL_IO_SPACE;
        }

        //
        // Write the page and move on
        //
        *MdlPages++ = PageFrameIndex;
        if (!((ULONG_PTR)(++PointerPte) & (PAGE_SIZE - 1))) PointerPde++;
    } while (PointerPte <= LastPte);

    //
    // Which kind of lock were we using?
    //
    if (UsePfnLock)
    {
        //
        // Release PFN lock
        //
        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
    }
    else
    {
        //
        // Release process address space lock
        //
        MmUnlockAddressSpace(AddressSpace);
    }

    //
    // Sanity check
    //
    ASSERT((Mdl->MdlFlags & MDL_DESCRIBES_AWE) == 0);
    return;

CleanupWithLock:
    //
    // This is the failure path
    //
    ASSERT(!NT_SUCCESS(Status));

    //
    // Which kind of lock were we using?
    //
    if (UsePfnLock)
    {
        //
        // Release PFN lock
        //
        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
    }
    else
    {
        //
        // Release process address space lock
        //
        MmUnlockAddressSpace(AddressSpace);
    }
Cleanup:
    //
    // Pages must be locked so MmUnlock can work
    //
    ASSERT(Mdl->MdlFlags & MDL_PAGES_LOCKED);
    MmUnlockPages(Mdl);

    //
    // Raise the error
    //
    ExRaiseStatus(Status);
}

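/*
 * Illustrative sketch (hypothetical driver code): the canonical pattern
 * around MmProbeAndLockPages. The routine raises on failure rather than
 * returning a status, so callers wrap it in SEH, and every successful lock
 * is balanced by MmUnlockPages before the MDL is freed.
 *
 *     Mdl = IoAllocateMdl(UserBuffer, Length, FALSE, FALSE, NULL);
 *     if (!Mdl) return STATUS_INSUFFICIENT_RESOURCES;
 *     _SEH2_TRY
 *     {
 *         MmProbeAndLockPages(Mdl, UserMode, IoWriteAccess);
 *     }
 *     _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
 *     {
 *         IoFreeMdl(Mdl);
 *         _SEH2_YIELD(return _SEH2_GetExceptionCode());
 *     }
 *     _SEH2_END;
 *
 *     // ... access the pages, e.g. via MmGetSystemAddressForMdlSafe ...
 *
 *     MmUnlockPages(Mdl);
 *     IoFreeMdl(Mdl);
 */
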
/*
 * @implemented
 */
VOID
NTAPI
MmUnlockPages(IN PMDL Mdl)
{
    PPFN_NUMBER MdlPages, LastPage;
    PEPROCESS Process;
    PVOID Base;
    ULONG Flags, PageCount;
    KIRQL OldIrql;
    DPRINT("Unlocking MDL: %p\n", Mdl);

    //
    // Sanity checks
    //
    ASSERT((Mdl->MdlFlags & MDL_PAGES_LOCKED) != 0);
    ASSERT((Mdl->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL) == 0);
    ASSERT((Mdl->MdlFlags & MDL_PARTIAL) == 0);
    ASSERT(Mdl->ByteCount != 0);

    //
    // Get the associated process and capture the flags, which are volatile
    //
    Process = Mdl->Process;
    Flags = Mdl->MdlFlags;

    //
    // Automagically undo any calls to MmGetSystemAddressForMdl for this MDL
    //
    if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA)
    {
        //
        // Unmap the pages from system space
        //
        MmUnmapLockedPages(Mdl->MappedSystemVa, Mdl);
    }

    //
    // Get the page count
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);
    Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
    PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);
    ASSERT(PageCount != 0);

    //
    // We don't support AWE
    //
    if (Flags & MDL_DESCRIBES_AWE) ASSERT(FALSE);

    //
    // Check if the buffer is mapped I/O space
    //
    if (Flags & MDL_IO_SPACE)
    {
        //
        // Acquire PFN lock
        //
        OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

        //
        // Loop every page
        //
        LastPage = MdlPages + PageCount;
        do
        {
            //
            // Last page, break out
            //
            if (*MdlPages == -1) break;

            //
            // Check if this page is in the PFN database
            //
            if (*MdlPages <= MmHighestPhysicalPage)
            {
                //
                // Unlock and dereference
                //
                MmDereferencePage(*MdlPages);
            }
        } while (++MdlPages < LastPage);

        //
        // Release the lock
        //
        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

        //
        // Check if we have a process
        //
        if (Process)
        {
            //
            // Handle the accounting of locked pages
            //
            ASSERT(Process->NumberOfLockedPages > 0);
            InterlockedExchangeAddSizeT(&Process->NumberOfLockedPages,
                                        -PageCount);
        }

        //
        // We're done
        //
        Mdl->MdlFlags &= ~MDL_IO_SPACE;
        Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
        return;
    }

    //
    // Check if we have a process
    //
    if (Process)
    {
        //
        // Handle the accounting of locked pages
        //
        ASSERT(Process->NumberOfLockedPages > 0);
        InterlockedExchangeAddSizeT(&Process->NumberOfLockedPages,
                                    -PageCount);
    }

    //
    // Loop every page
    //
    LastPage = MdlPages + PageCount;
    do
    {
        //
        // Last page reached
        //
        if (*MdlPages == -1)
        {
            //
            // Were there no pages at all?
            //
            if (MdlPages == (PPFN_NUMBER)(Mdl + 1))
            {
                //
                // We're already done
                //
                Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
                return;
            }

            //
            // Otherwise, stop here
            //
            LastPage = MdlPages;
            break;
        }

        //
        // Sanity check
        //
        ASSERT(*MdlPages <= MmHighestPhysicalPage);
    } while (++MdlPages < LastPage);

    //
    // Reset pointer
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);

    //
    // Now grab the PFN lock for the actual unlock and dereference
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    do
    {
        //
        // Unlock and dereference
        //
        MmDereferencePage(*MdlPages);
    } while (++MdlPages < LastPage);

    //
    // Release the lock
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    //
    // We're done
    //
    Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
}

/*
 * @unimplemented
 */
NTSTATUS
NTAPI
MmAdvanceMdl(IN PMDL Mdl,
             IN ULONG NumberOfBytes)
{
    UNIMPLEMENTED;
    return STATUS_NOT_IMPLEMENTED;
}

/*
 * @unimplemented
 */
PVOID
NTAPI
MmMapLockedPagesWithReservedMapping(IN PVOID MappingAddress,
                                    IN ULONG PoolTag,
                                    IN PMDL MemoryDescriptorList,
                                    IN MEMORY_CACHING_TYPE CacheType)
{
    UNIMPLEMENTED;
    return NULL;
}

/*
 * @unimplemented
 */
VOID
NTAPI
MmUnmapReservedMapping(IN PVOID BaseAddress,
                       IN ULONG PoolTag,
                       IN PMDL MemoryDescriptorList)
{
    UNIMPLEMENTED;
}

/*
 * @unimplemented
 */
NTSTATUS
NTAPI
MmPrefetchPages(IN ULONG NumberOfLists,
                IN PREAD_LIST *ReadLists)
{
    UNIMPLEMENTED;
    return STATUS_NOT_IMPLEMENTED;
}

/*
 * @unimplemented
 */
NTSTATUS
NTAPI
MmProtectMdlSystemAddress(IN PMDL MemoryDescriptorList,
                          IN ULONG NewProtect)
{
    UNIMPLEMENTED;
    return STATUS_NOT_IMPLEMENTED;
}

/*
 * @unimplemented
 */
VOID
NTAPI
MmProbeAndLockProcessPages(IN OUT PMDL MemoryDescriptorList,
                           IN PEPROCESS Process,
                           IN KPROCESSOR_MODE AccessMode,
                           IN LOCK_OPERATION Operation)
{
    UNIMPLEMENTED;
}

/*
 * @unimplemented
 */
VOID
NTAPI
MmProbeAndLockSelectedPages(IN OUT PMDL MemoryDescriptorList,
                            IN LARGE_INTEGER PageList[],
                            IN KPROCESSOR_MODE AccessMode,
                            IN LOCK_OPERATION Operation)
{
    UNIMPLEMENTED;
}

/*
 * @unimplemented
 */
VOID
NTAPI
MmMapMemoryDumpMdl(IN PMDL Mdl)
{
    UNIMPLEMENTED;
}

/* EOF */