/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/mdlsup.c
 * PURPOSE:         ARM Memory Manager Memory Descriptor List (MDL) Management
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */
/* INCLUDES *******************************************************************/

#define MODULE_INVOLVED_IN_ARM3
#include <mm/ARM3/miarm.h>

/* GLOBALS ********************************************************************/
21 BOOLEAN MmTrackLockedPages
;
22 SIZE_T MmSystemLockPagesCount
;
/* PUBLIC FUNCTIONS ***********************************************************/
31 MmCreateMdl(IN PMDL Mdl
,
38 // Check if we don't have an MDL built
43 // Calculate the size we'll need and allocate the MDL
45 Size
= MmSizeOfMdl(Base
, Length
);
46 Mdl
= ExAllocatePoolWithTag(NonPagedPool
, Size
, TAG_MDL
);
47 if (!Mdl
) return NULL
;
53 MmInitializeMdl(Mdl
, Base
, Length
);
62 MmSizeOfMdl(IN PVOID Base
,
66 // Return the MDL size
69 (ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base
, Length
) * sizeof(PFN_NUMBER
));
77 MmBuildMdlForNonPagedPool(IN PMDL Mdl
)
79 PPFN_NUMBER MdlPages
, EndPage
;
80 PFN_NUMBER Pfn
, PageCount
;
87 ASSERT(Mdl
->ByteCount
!= 0);
88 ASSERT((Mdl
->MdlFlags
& (MDL_PAGES_LOCKED
|
89 MDL_MAPPED_TO_SYSTEM_VA
|
90 MDL_SOURCE_IS_NONPAGED_POOL
|
94 // We know the MDL isn't associated to a process now
99 // Get page and VA information
101 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
105 // Set the system address and now get the page count
107 Mdl
->MappedSystemVa
= (PVOID
)((ULONG_PTR
)Base
+ Mdl
->ByteOffset
);
108 PageCount
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Mdl
->MappedSystemVa
,
110 ASSERT(PageCount
!= 0);
111 EndPage
= MdlPages
+ PageCount
;
116 PointerPte
= MiAddressToPte(Base
);
122 Pfn
= PFN_FROM_PTE(PointerPte
++);
124 } while (MdlPages
< EndPage
);
127 // Set the nonpaged pool flag
129 Mdl
->MdlFlags
|= MDL_SOURCE_IS_NONPAGED_POOL
;
132 // Check if this is an I/O mapping
134 if (!MiGetPfnEntry(Pfn
)) Mdl
->MdlFlags
|= MDL_IO_SPACE
;
142 MmAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress
,
143 IN PHYSICAL_ADDRESS HighAddress
,
144 IN PHYSICAL_ADDRESS SkipBytes
,
145 IN SIZE_T TotalBytes
)
148 // Call the internal routine
150 return MiAllocatePagesForMdl(LowAddress
,
163 MmAllocatePagesForMdlEx(IN PHYSICAL_ADDRESS LowAddress
,
164 IN PHYSICAL_ADDRESS HighAddress
,
165 IN PHYSICAL_ADDRESS SkipBytes
,
166 IN SIZE_T TotalBytes
,
167 IN MEMORY_CACHING_TYPE CacheType
,
170 MI_PFN_CACHE_ATTRIBUTE CacheAttribute
;
173 // Check for invalid cache type
175 if (CacheType
> MmWriteCombined
)
178 // Normalize to default
180 CacheAttribute
= MiNotMapped
;
185 // Conver to internal caching attribute
187 CacheAttribute
= MiPlatformCacheAttributes
[FALSE
][CacheType
];
191 // Only these flags are allowed
193 if (Flags
& ~(MM_DONT_ZERO_ALLOCATION
| MM_ALLOCATE_FROM_LOCAL_NODE_ONLY
))
202 // Call the internal routine
204 return MiAllocatePagesForMdl(LowAddress
,
217 MmFreePagesFromMdl(IN PMDL Mdl
)
224 DPRINT("Freeing MDL: %p\n", Mdl
);
229 ASSERT(KeGetCurrentIrql() <= APC_LEVEL
);
230 ASSERT((Mdl
->MdlFlags
& MDL_IO_SPACE
) == 0);
231 ASSERT(((ULONG_PTR
)Mdl
->StartVa
& (PAGE_SIZE
- 1)) == 0);
234 // Get address and page information
236 Base
= (PVOID
)((ULONG_PTR
)Mdl
->StartVa
+ Mdl
->ByteOffset
);
237 NumberOfPages
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base
, Mdl
->ByteCount
);
242 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
245 // Loop all the MDL pages
247 Pages
= (PPFN_NUMBER
)(Mdl
+ 1);
251 // Reached the last page
253 if (*Pages
== LIST_HEAD
) break;
256 // Get the page entry
258 Pfn1
= MiGetPfnEntry(*Pages
);
260 ASSERT(Pfn1
->u2
.ShareCount
== 1);
261 ASSERT(MI_IS_PFN_DELETED(Pfn1
) == TRUE
);
262 if (Pfn1
->u4
.PteFrame
!= 0x1FFEDCB)
264 /* Corrupted PFN entry or invalid free */
265 KeBugCheckEx(MEMORY_MANAGEMENT
, 0x1236, (ULONG_PTR
)Mdl
, (ULONG_PTR
)Pages
, *Pages
);
271 Pfn1
->u3
.e1
.StartOfAllocation
= 0;
272 Pfn1
->u3
.e1
.EndOfAllocation
= 0;
273 Pfn1
->u3
.e1
.PageLocation
= StandbyPageList
;
274 Pfn1
->u2
.ShareCount
= 0;
279 ASSERT(Pfn1
->u3
.e2
.ReferenceCount
!= 0);
280 if (Pfn1
->u3
.e2
.ReferenceCount
!= 1)
282 /* Just take off one reference */
283 InterlockedDecrement16((PSHORT
)&Pfn1
->u3
.e2
.ReferenceCount
);
287 /* We'll be nuking the whole page */
288 MiDecrementReferenceCount(Pfn1
, *Pages
);
292 // Clear this page and move on
294 *Pages
++ = LIST_HEAD
;
295 } while (--NumberOfPages
!= 0);
300 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
303 // Remove the pages locked flag
305 Mdl
->MdlFlags
&= ~MDL_PAGES_LOCKED
;
313 MmMapLockedPagesSpecifyCache(IN PMDL Mdl
,
314 IN KPROCESSOR_MODE AccessMode
,
315 IN MEMORY_CACHING_TYPE CacheType
,
316 IN PVOID BaseAddress
,
317 IN ULONG BugCheckOnFailure
,
318 IN MM_PAGE_PRIORITY Priority
)
321 PPFN_NUMBER MdlPages
, LastPage
;
324 MI_PFN_CACHE_ATTRIBUTE CacheAttribute
;
331 ASSERT(Mdl
->ByteCount
!= 0);
336 Base
= (PVOID
)((ULONG_PTR
)Mdl
->StartVa
+ Mdl
->ByteOffset
);
339 // Handle kernel case first
341 if (AccessMode
== KernelMode
)
344 // Get the list of pages and count
346 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
347 PageCount
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base
, Mdl
->ByteCount
);
348 LastPage
= MdlPages
+ PageCount
;
353 ASSERT((Mdl
->MdlFlags
& (MDL_MAPPED_TO_SYSTEM_VA
|
354 MDL_SOURCE_IS_NONPAGED_POOL
|
355 MDL_PARTIAL_HAS_BEEN_MAPPED
)) == 0);
356 ASSERT((Mdl
->MdlFlags
& (MDL_PAGES_LOCKED
| MDL_PARTIAL
)) != 0);
359 // Get the correct cache type
361 IsIoMapping
= (Mdl
->MdlFlags
& MDL_IO_SPACE
) != 0;
362 CacheAttribute
= MiPlatformCacheAttributes
[IsIoMapping
][CacheType
];
367 PointerPte
= MiReserveSystemPtes(PageCount
, SystemPteSpace
);
371 // If it can fail, return NULL
373 if (Mdl
->MdlFlags
& MDL_MAPPING_CAN_FAIL
) return NULL
;
376 // Should we bugcheck?
378 if (!BugCheckOnFailure
) return NULL
;
381 // Yes, crash the system
383 KeBugCheckEx(NO_MORE_SYSTEM_PTES
, 0, PageCount
, 0, 0);
387 // Get the mapped address
389 Base
= (PVOID
)((ULONG_PTR
)MiPteToAddress(PointerPte
) + Mdl
->ByteOffset
);
394 TempPte
= ValidKernelPte
;
395 switch (CacheAttribute
)
402 MI_PAGE_DISABLE_CACHE(&TempPte
);
403 MI_PAGE_WRITE_THROUGH(&TempPte
);
406 case MiWriteCombined
:
409 // Enable write combining
411 MI_PAGE_DISABLE_CACHE(&TempPte
);
412 MI_PAGE_WRITE_COMBINED(&TempPte
);
430 if (*MdlPages
== LIST_HEAD
) break;
435 TempPte
.u
.Hard
.PageFrameNumber
= *MdlPages
;
436 MI_WRITE_VALID_PTE(PointerPte
++, TempPte
);
437 } while (++MdlPages
< LastPage
);
442 ASSERT((Mdl
->MdlFlags
& MDL_MAPPED_TO_SYSTEM_VA
) == 0);
443 Mdl
->MappedSystemVa
= Base
;
444 Mdl
->MdlFlags
|= MDL_MAPPED_TO_SYSTEM_VA
;
447 // Check if it was partial
449 if (Mdl
->MdlFlags
& MDL_PARTIAL
)
452 // Write the appropriate flag here too
454 Mdl
->MdlFlags
|= MDL_PARTIAL_HAS_BEEN_MAPPED
;
458 // Return the mapped address
472 MmMapLockedPages(IN PMDL Mdl
,
473 IN KPROCESSOR_MODE AccessMode
)
476 // Call the extended version
478 return MmMapLockedPagesSpecifyCache(Mdl
,
491 MmUnmapLockedPages(IN PVOID BaseAddress
,
495 PFN_COUNT PageCount
, ExtraPageCount
;
496 PPFN_NUMBER MdlPages
;
502 ASSERT(Mdl
->ByteCount
!= 0);
505 // Check if this is a kernel request
507 if (BaseAddress
> MM_HIGHEST_USER_ADDRESS
)
510 // Get base and count information
512 Base
= (PVOID
)((ULONG_PTR
)Mdl
->StartVa
+ Mdl
->ByteOffset
);
513 PageCount
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base
, Mdl
->ByteCount
);
518 ASSERT((Mdl
->MdlFlags
& MDL_PARENT_MAPPED_SYSTEM_VA
) == 0);
519 ASSERT(PageCount
!= 0);
520 ASSERT(Mdl
->MdlFlags
& MDL_MAPPED_TO_SYSTEM_VA
);
525 PointerPte
= MiAddressToPte(BaseAddress
);
528 // This should be a resident system PTE
530 ASSERT(PointerPte
>= MmSystemPtesStart
[SystemPteSpace
]);
531 ASSERT(PointerPte
<= MmSystemPtesEnd
[SystemPteSpace
]);
532 ASSERT(PointerPte
->u
.Hard
.Valid
== 1);
535 // Check if the caller wants us to free advanced pages
537 if (Mdl
->MdlFlags
& MDL_FREE_EXTRA_PTES
)
540 // Get the MDL page array
542 MdlPages
= MmGetMdlPfnArray(Mdl
);
544 /* Number of extra pages stored after the PFN array */
545 ExtraPageCount
= (PFN_COUNT
)*(MdlPages
+ PageCount
);
550 PageCount
+= ExtraPageCount
;
551 PointerPte
-= ExtraPageCount
;
552 ASSERT(PointerPte
>= MmSystemPtesStart
[SystemPteSpace
]);
553 ASSERT(PointerPte
<= MmSystemPtesEnd
[SystemPteSpace
]);
556 // Get the new base address
558 BaseAddress
= (PVOID
)((ULONG_PTR
)BaseAddress
-
559 (ExtraPageCount
<< PAGE_SHIFT
));
565 Mdl
->MdlFlags
&= ~(MDL_MAPPED_TO_SYSTEM_VA
|
566 MDL_PARTIAL_HAS_BEEN_MAPPED
|
567 MDL_FREE_EXTRA_PTES
);
570 // Release the system PTEs
572 MiReleaseSystemPtes(PointerPte
, PageCount
, SystemPteSpace
);
585 MmProbeAndLockPages(IN PMDL Mdl
,
586 IN KPROCESSOR_MODE AccessMode
,
587 IN LOCK_OPERATION Operation
)
589 PPFN_NUMBER MdlPages
;
590 PVOID Base
, Address
, LastAddress
, StartAddress
;
591 ULONG LockPages
, TotalPages
;
592 NTSTATUS Status
= STATUS_SUCCESS
;
593 PEPROCESS CurrentProcess
;
594 NTSTATUS ProbeStatus
;
595 PMMPTE PointerPte
, LastPte
;
597 #if (_MI_PAGING_LEVELS >= 3)
600 #if (_MI_PAGING_LEVELS == 4)
603 PFN_NUMBER PageFrameIndex
;
607 DPRINT("Probing MDL: %p\n", Mdl
);
612 ASSERT(Mdl
->ByteCount
!= 0);
613 ASSERT(((ULONG
)Mdl
->ByteOffset
& ~(PAGE_SIZE
- 1)) == 0);
614 ASSERT(((ULONG_PTR
)Mdl
->StartVa
& (PAGE_SIZE
- 1)) == 0);
615 ASSERT((Mdl
->MdlFlags
& (MDL_PAGES_LOCKED
|
616 MDL_MAPPED_TO_SYSTEM_VA
|
617 MDL_SOURCE_IS_NONPAGED_POOL
|
619 MDL_IO_SPACE
)) == 0);
622 // Get page and base information
624 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
628 // Get the addresses and how many pages we span (and need to lock)
630 Address
= (PVOID
)((ULONG_PTR
)Base
+ Mdl
->ByteOffset
);
631 LastAddress
= (PVOID
)((ULONG_PTR
)Address
+ Mdl
->ByteCount
);
632 LockPages
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Address
, Mdl
->ByteCount
);
633 ASSERT(LockPages
!= 0);
635 /* Block invalid access */
636 if ((AccessMode
!= KernelMode
) &&
637 ((LastAddress
> (PVOID
)MM_USER_PROBE_ADDRESS
) || (Address
>= LastAddress
)))
639 /* Caller should be in SEH, raise the error */
640 *MdlPages
= LIST_HEAD
;
641 ExRaiseStatus(STATUS_ACCESS_VIOLATION
);
647 if (Address
<= MM_HIGHEST_USER_ADDRESS
)
652 CurrentProcess
= PsGetCurrentProcess();
659 CurrentProcess
= NULL
;
663 // Save the number of pages we'll have to lock, and the start address
665 TotalPages
= LockPages
;
666 StartAddress
= Address
;
668 /* Large pages not supported */
669 ASSERT(!MI_IS_PHYSICAL_ADDRESS(Address
));
674 ProbeStatus
= STATUS_SUCCESS
;
685 *MdlPages
= LIST_HEAD
;
690 *(volatile CHAR
*)Address
;
693 // Check if this is write access (only probe for user-mode)
695 if ((Operation
!= IoReadAccess
) &&
696 (Address
<= MM_HIGHEST_USER_ADDRESS
))
699 // Probe for write too
701 ProbeForWriteChar(Address
);
707 Address
= PAGE_ALIGN((ULONG_PTR
)Address
+ PAGE_SIZE
);
714 } while (Address
< LastAddress
);
717 // Reset back to the original page
719 ASSERT(LockPages
== 0);
720 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
722 _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER
)
727 ProbeStatus
= _SEH2_GetExceptionCode();
732 // So how did that go?
734 if (ProbeStatus
!= STATUS_SUCCESS
)
739 DPRINT1("MDL PROBE FAILED!\n");
741 ExRaiseStatus(ProbeStatus
);
745 // Get the PTE and PDE
747 PointerPte
= MiAddressToPte(StartAddress
);
748 PointerPde
= MiAddressToPde(StartAddress
);
749 #if (_MI_PAGING_LEVELS >= 3)
750 PointerPpe
= MiAddressToPpe(StartAddress
);
752 #if (_MI_PAGING_LEVELS == 4)
753 PointerPxe
= MiAddressToPxe(StartAddress
);
759 ASSERT(MdlPages
== (PPFN_NUMBER
)(Mdl
+ 1));
762 // Check what kind of operation this is
764 if (Operation
!= IoReadAccess
)
767 // Set the write flag
769 Mdl
->MdlFlags
|= MDL_WRITE_OPERATION
;
774 // Remove the write flag
776 Mdl
->MdlFlags
&= ~(MDL_WRITE_OPERATION
);
780 // Mark the MDL as locked *now*
782 Mdl
->MdlFlags
|= MDL_PAGES_LOCKED
;
785 // Check if this came from kernel mode
787 if (Base
> MM_HIGHEST_USER_ADDRESS
)
790 // We should not have a process
792 ASSERT(CurrentProcess
== NULL
);
796 // In kernel mode, we don't need to check for write access
798 Operation
= IoReadAccess
;
804 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
811 ASSERT(TotalPages
!= 0);
812 ASSERT(CurrentProcess
== PsGetCurrentProcess());
815 // Track locked pages
817 InterlockedExchangeAddSizeT(&CurrentProcess
->NumberOfLockedPages
,
823 Mdl
->Process
= CurrentProcess
;
825 /* Lock the process working set */
826 MiLockProcessWorkingSet(CurrentProcess
, PsGetCurrentThread());
834 LastPte
= MiAddressToPte((PVOID
)((ULONG_PTR
)LastAddress
- 1));
842 // Assume failure and check for non-mapped pages
844 *MdlPages
= LIST_HEAD
;
846 #if (_MI_PAGING_LEVELS == 4)
847 (PointerPxe
->u
.Hard
.Valid
== 0) ||
849 #if (_MI_PAGING_LEVELS >= 3)
850 (PointerPpe
->u
.Hard
.Valid
== 0) ||
852 (PointerPde
->u
.Hard
.Valid
== 0) ||
853 (PointerPte
->u
.Hard
.Valid
== 0))
856 // What kind of lock were we using?
863 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
867 /* Release process working set */
868 MiUnlockProcessWorkingSet(CurrentProcess
, PsGetCurrentThread());
874 Address
= MiPteToAddress(PointerPte
);
876 //HACK: Pass a placeholder TrapInformation so the fault handler knows we're unlocked
877 Status
= MmAccessFault(FALSE
, Address
, KernelMode
, (PVOID
)0xBADBADA3);
878 if (!NT_SUCCESS(Status
))
883 DPRINT1("Access fault failed\n");
888 // What lock should we use?
895 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
899 /* Lock the process working set */
900 MiLockProcessWorkingSet(CurrentProcess
, PsGetCurrentThread());
905 // Check if this was a write or modify
907 if (Operation
!= IoReadAccess
)
910 // Check if the PTE is not writable
912 if (MI_IS_PAGE_WRITEABLE(PointerPte
) == FALSE
)
915 // Check if it's copy on write
917 if (MI_IS_PAGE_COPY_ON_WRITE(PointerPte
))
920 // Get the base address and allow a change for user-mode
922 Address
= MiPteToAddress(PointerPte
);
923 if (Address
<= MM_HIGHEST_USER_ADDRESS
)
926 // What kind of lock were we using?
933 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
937 /* Release process working set */
938 MiUnlockProcessWorkingSet(CurrentProcess
, PsGetCurrentThread());
945 //HACK: Pass a placeholder TrapInformation so the fault handler knows we're unlocked
946 Status
= MmAccessFault(TRUE
, Address
, KernelMode
, (PVOID
)0xBADBADA3);
947 if (!NT_SUCCESS(Status
))
952 DPRINT1("Access fault failed\n");
957 // Re-acquire the lock
964 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
968 /* Lock the process working set */
969 MiLockProcessWorkingSet(CurrentProcess
, PsGetCurrentThread());
980 // Fail, since we won't allow this
982 Status
= STATUS_ACCESS_VIOLATION
;
983 goto CleanupWithLock
;
990 PageFrameIndex
= PFN_FROM_PTE(PointerPte
);
991 Pfn1
= MiGetPfnEntry(PageFrameIndex
);
994 /* Either this is for kernel-mode, or the working set is held */
995 ASSERT((CurrentProcess
== NULL
) || (UsePfnLock
== FALSE
));
997 /* No Physical VADs supported yet */
998 if (CurrentProcess
) ASSERT(CurrentProcess
->PhysicalVadRoot
== NULL
);
1000 /* This address should already exist and be fully valid */
1001 MiReferenceProbedPageAndBumpLockCount(Pfn1
);
1006 // For I/O addresses, just remember this
1008 Mdl
->MdlFlags
|= MDL_IO_SPACE
;
1012 // Write the page and move on
1014 *MdlPages
++ = PageFrameIndex
;
1017 /* Check if we're on a PDE boundary */
1018 if (MiIsPteOnPdeBoundary(PointerPte
)) PointerPde
++;
1019 #if (_MI_PAGING_LEVELS >= 3)
1020 if (MiIsPteOnPpeBoundary(PointerPte
)) PointerPpe
++;
1022 #if (_MI_PAGING_LEVELS == 4)
1023 if (MiIsPteOnPxeBoundary(PointerPte
)) PointerPxe
++;
1026 } while (PointerPte
<= LastPte
);
1029 // What kind of lock were we using?
1036 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
1040 /* Release process working set */
1041 MiUnlockProcessWorkingSet(CurrentProcess
, PsGetCurrentThread());
1047 ASSERT((Mdl
->MdlFlags
& MDL_DESCRIBES_AWE
) == 0);
1052 // This is the failure path
1054 ASSERT(!NT_SUCCESS(Status
));
1057 // What kind of lock were we using?
1064 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
1068 /* Release process working set */
1069 MiUnlockProcessWorkingSet(CurrentProcess
, PsGetCurrentThread());
1073 // Pages must be locked so MmUnlock can work
1075 ASSERT(Mdl
->MdlFlags
& MDL_PAGES_LOCKED
);
1081 ExRaiseStatus(Status
);
1089 MmUnlockPages(IN PMDL Mdl
)
1091 PPFN_NUMBER MdlPages
, LastPage
;
1094 ULONG Flags
, PageCount
;
1097 DPRINT("Unlocking MDL: %p\n", Mdl
);
1102 ASSERT((Mdl
->MdlFlags
& MDL_PAGES_LOCKED
) != 0);
1103 ASSERT((Mdl
->MdlFlags
& MDL_SOURCE_IS_NONPAGED_POOL
) == 0);
1104 ASSERT((Mdl
->MdlFlags
& MDL_PARTIAL
) == 0);
1105 ASSERT(Mdl
->ByteCount
!= 0);
1108 // Get the process associated and capture the flags which are volatile
1110 Process
= Mdl
->Process
;
1111 Flags
= Mdl
->MdlFlags
;
1114 // Automagically undo any calls to MmGetSystemAddressForMdl's for this MDL
1116 if (Mdl
->MdlFlags
& MDL_MAPPED_TO_SYSTEM_VA
)
1119 // Unmap the pages from system space
1121 MmUnmapLockedPages(Mdl
->MappedSystemVa
, Mdl
);
1125 // Get the page count
1127 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
1128 Base
= (PVOID
)((ULONG_PTR
)Mdl
->StartVa
+ Mdl
->ByteOffset
);
1129 PageCount
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base
, Mdl
->ByteCount
);
1130 ASSERT(PageCount
!= 0);
1133 // We don't support AWE
1135 if (Flags
& MDL_DESCRIBES_AWE
) ASSERT(FALSE
);
1138 // Check if the buffer is mapped I/O space
1140 if (Flags
& MDL_IO_SPACE
)
1145 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
1150 LastPage
= MdlPages
+ PageCount
;
1154 // Last page, break out
1156 if (*MdlPages
== LIST_HEAD
) break;
1159 // Check if this page is in the PFN database
1161 Pfn1
= MiGetPfnEntry(*MdlPages
);
1162 if (Pfn1
) MiDereferencePfnAndDropLockCount(Pfn1
);
1163 } while (++MdlPages
< LastPage
);
1168 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
1171 // Check if we have a process
1176 // Handle the accounting of locked pages
1178 ASSERT(Process
->NumberOfLockedPages
> 0);
1179 InterlockedExchangeAddSizeT(&Process
->NumberOfLockedPages
,
1180 -(LONG_PTR
)PageCount
);
1186 Mdl
->MdlFlags
&= ~MDL_IO_SPACE
;
1187 Mdl
->MdlFlags
&= ~MDL_PAGES_LOCKED
;
1192 // Check if we have a process
1197 // Handle the accounting of locked pages
1199 ASSERT(Process
->NumberOfLockedPages
> 0);
1200 InterlockedExchangeAddSizeT(&Process
->NumberOfLockedPages
,
1201 -(LONG_PTR
)PageCount
);
1207 LastPage
= MdlPages
+ PageCount
;
1211 // Last page reached
1213 if (*MdlPages
== LIST_HEAD
)
1216 // Were there no pages at all?
1218 if (MdlPages
== (PPFN_NUMBER
)(Mdl
+ 1))
1221 // We're already done
1223 Mdl
->MdlFlags
&= ~MDL_PAGES_LOCKED
;
1228 // Otherwise, stop here
1230 LastPage
= MdlPages
;
1234 /* Save the PFN entry instead for the secondary loop */
1235 *MdlPages
= (PFN_NUMBER
)MiGetPfnEntry(*MdlPages
);
1236 ASSERT(*MdlPages
!= 0);
1237 } while (++MdlPages
< LastPage
);
1242 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
1245 // Now grab the PFN lock for the actual unlock and dereference
1247 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
1250 /* Get the current entry and reference count */
1251 Pfn1
= (PMMPFN
)*MdlPages
;
1252 MiDereferencePfnAndDropLockCount(Pfn1
);
1253 } while (++MdlPages
< LastPage
);
1258 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
1263 Mdl
->MdlFlags
&= ~MDL_PAGES_LOCKED
;
1271 MmAdvanceMdl(IN PMDL Mdl
,
1272 IN ULONG NumberOfBytes
)
1275 return STATUS_NOT_IMPLEMENTED
;
1283 MmMapLockedPagesWithReservedMapping(IN PVOID MappingAddress
,
1285 IN PMDL MemoryDescriptorList
,
1286 IN MEMORY_CACHING_TYPE CacheType
)
1297 MmUnmapReservedMapping(IN PVOID BaseAddress
,
1299 IN PMDL MemoryDescriptorList
)
1309 MmPrefetchPages(IN ULONG NumberOfLists
,
1310 IN PREAD_LIST
*ReadLists
)
1313 return STATUS_NOT_IMPLEMENTED
;
1321 MmProtectMdlSystemAddress(IN PMDL MemoryDescriptorList
,
1322 IN ULONG NewProtect
)
1325 return STATUS_NOT_IMPLEMENTED
;
1333 MmProbeAndLockProcessPages(IN OUT PMDL MemoryDescriptorList
,
1334 IN PEPROCESS Process
,
1335 IN KPROCESSOR_MODE AccessMode
,
1336 IN LOCK_OPERATION Operation
)
1347 MmProbeAndLockSelectedPages(IN OUT PMDL MemoryDescriptorList
,
1348 IN LARGE_INTEGER PageList
[],
1349 IN KPROCESSOR_MODE AccessMode
,
1350 IN LOCK_OPERATION Operation
)
1360 MmMapMemoryDumpMdl(IN PMDL Mdl
)