2 * PROJECT: ReactOS Kernel
3 * LICENSE: BSD - See COPYING.ARM in the top level directory
4 * FILE: ntoskrnl/mm/ARM3/mdlsup.c
5 * PURPOSE: ARM Memory Manager Memory Descriptor List (MDL) Management
6 * PROGRAMMERS: ReactOS Portable Systems Group
9 /* INCLUDES *******************************************************************/
15 #line 15 "ARM³::MDLSUP"
16 #define MODULE_INVOLVED_IN_ARM3
17 #include "../ARM3/miarm.h"
20 BOOLEAN MmTrackLockedPages
;
22 /* PUBLIC FUNCTIONS ***********************************************************/
29 MmCreateMdl(IN PMDL Mdl
,
36 // Check if we don't have an MDL built
41 // Calculate the size we'll need and allocate the MDL
43 Size
= MmSizeOfMdl(Base
, Length
);
44 Mdl
= ExAllocatePoolWithTag(NonPagedPool
, Size
, TAG_MDL
);
45 if (!Mdl
) return NULL
;
51 MmInitializeMdl(Mdl
, Base
, Length
);
60 MmSizeOfMdl(IN PVOID Base
,
64 // Return the MDL size
67 (ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base
, Length
) * sizeof(PFN_NUMBER
));
75 MmBuildMdlForNonPagedPool(IN PMDL Mdl
)
77 PPFN_NUMBER MdlPages
, EndPage
;
78 PFN_NUMBER Pfn
, PageCount
;
85 ASSERT(Mdl
->ByteCount
!= 0);
86 ASSERT((Mdl
->MdlFlags
& (MDL_PAGES_LOCKED
|
87 MDL_MAPPED_TO_SYSTEM_VA
|
88 MDL_SOURCE_IS_NONPAGED_POOL
|
92 // We know the MDL isn't associated to a process now
97 // Get page and VA information
99 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
103 // Set the system address and now get the page count
105 Mdl
->MappedSystemVa
= (PVOID
)((ULONG_PTR
)Base
+ Mdl
->ByteOffset
);
106 PageCount
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Mdl
->MappedSystemVa
,
108 ASSERT(PageCount
!= 0);
109 EndPage
= MdlPages
+ PageCount
;
114 PointerPte
= MiAddressToPte(Base
);
120 Pfn
= PFN_FROM_PTE(PointerPte
++);
122 } while (MdlPages
< EndPage
);
125 // Set the nonpaged pool flag
127 Mdl
->MdlFlags
|= MDL_SOURCE_IS_NONPAGED_POOL
;
130 // Check if this is an I/O mapping
132 if (!MiGetPfnEntry(Pfn
)) Mdl
->MdlFlags
|= MDL_IO_SPACE
;
140 MmAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress
,
141 IN PHYSICAL_ADDRESS HighAddress
,
142 IN PHYSICAL_ADDRESS SkipBytes
,
143 IN SIZE_T TotalBytes
)
146 // Call the internal routine
148 return MiAllocatePagesForMdl(LowAddress
,
161 MmAllocatePagesForMdlEx(IN PHYSICAL_ADDRESS LowAddress
,
162 IN PHYSICAL_ADDRESS HighAddress
,
163 IN PHYSICAL_ADDRESS SkipBytes
,
164 IN SIZE_T TotalBytes
,
165 IN MEMORY_CACHING_TYPE CacheType
,
168 MI_PFN_CACHE_ATTRIBUTE CacheAttribute
;
171 // Check for invalid cache type
173 if (CacheType
> MmWriteCombined
)
176 // Normalize to default
178 CacheAttribute
= MiNotMapped
;
183 // Convert to internal caching attribute
185 CacheAttribute
= MiPlatformCacheAttributes
[FALSE
][CacheType
];
189 // Only these flags are allowed
191 if (Flags
& ~(MM_DONT_ZERO_ALLOCATION
| MM_ALLOCATE_FROM_LOCAL_NODE_ONLY
))
200 // Call the internal routine
202 return MiAllocatePagesForMdl(LowAddress
,
215 MmFreePagesFromMdl(IN PMDL Mdl
)
222 DPRINT("Freeing MDL: %p\n", Mdl
);
227 ASSERT(KeGetCurrentIrql() <= APC_LEVEL
);
228 ASSERT((Mdl
->MdlFlags
& MDL_IO_SPACE
) == 0);
229 ASSERT(((ULONG_PTR
)Mdl
->StartVa
& (PAGE_SIZE
- 1)) == 0);
232 // Get address and page information
234 Base
= (PVOID
)((ULONG_PTR
)Mdl
->StartVa
+ Mdl
->ByteOffset
);
235 NumberOfPages
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base
, Mdl
->ByteCount
);
240 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
243 // Loop all the MDL pages
245 Pages
= (PPFN_NUMBER
)(Mdl
+ 1);
249 // Reached the last page
251 if (*Pages
== -1) break;
256 ASSERT(*Pages
<= MmHighestPhysicalPage
);
259 // Get the page entry
261 Pfn1
= MiGetPfnEntry(*Pages
);
262 ASSERT(Pfn1
->u3
.ReferenceCount
== 1);
267 Pfn1
->u3
.e1
.StartOfAllocation
= 0;
268 Pfn1
->u3
.e1
.EndOfAllocation
= 0;
273 MmDereferencePage(*Pages
);
276 // Clear this page and move on
279 } while (--NumberOfPages
!= 0);
284 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
287 // Remove the pages locked flag
289 Mdl
->MdlFlags
&= ~MDL_PAGES_LOCKED
;
297 MmMapLockedPagesSpecifyCache(IN PMDL Mdl
,
298 IN KPROCESSOR_MODE AccessMode
,
299 IN MEMORY_CACHING_TYPE CacheType
,
300 IN PVOID BaseAddress
,
301 IN ULONG BugCheckOnFailure
,
302 IN MM_PAGE_PRIORITY Priority
)
305 PPFN_NUMBER MdlPages
, LastPage
;
306 PFN_NUMBER PageCount
;
308 MI_PFN_CACHE_ATTRIBUTE CacheAttribute
;
315 ASSERT(Mdl
->ByteCount
!= 0);
320 Base
= (PVOID
)((ULONG_PTR
)Mdl
->StartVa
+ Mdl
->ByteOffset
);
323 // Handle kernel case first
325 if (AccessMode
== KernelMode
)
328 // Get the list of pages and count
330 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
331 PageCount
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base
, Mdl
->ByteCount
);
332 LastPage
= MdlPages
+ PageCount
;
337 ASSERT((Mdl
->MdlFlags
& (MDL_MAPPED_TO_SYSTEM_VA
|
338 MDL_SOURCE_IS_NONPAGED_POOL
|
339 MDL_PARTIAL_HAS_BEEN_MAPPED
)) == 0);
340 ASSERT((Mdl
->MdlFlags
& (MDL_PAGES_LOCKED
| MDL_PARTIAL
)) != 0);
343 // Get the correct cache type
345 IsIoMapping
= (Mdl
->MdlFlags
& MDL_IO_SPACE
) != 0;
346 CacheAttribute
= MiPlatformCacheAttributes
[IsIoMapping
][CacheType
];
351 PointerPte
= MiReserveSystemPtes(PageCount
, SystemPteSpace
);
355 // If it can fail, return NULL
357 if (Mdl
->MdlFlags
& MDL_MAPPING_CAN_FAIL
) return NULL
;
360 // Should we bugcheck?
362 if (!BugCheckOnFailure
) return NULL
;
365 // Yes, crash the system
367 KeBugCheckEx(NO_MORE_SYSTEM_PTES
, 0, PageCount
, 0, 0);
371 // Get the mapped address
373 Base
= (PVOID
)((ULONG_PTR
)MiPteToAddress(PointerPte
) + Mdl
->ByteOffset
);
378 TempPte
= ValidKernelPte
;
379 switch (CacheAttribute
)
386 MI_PAGE_DISABLE_CACHE(&TempPte
);
387 MI_PAGE_WRITE_THROUGH(&TempPte
);
390 case MiWriteCombined
:
393 // Enable write combining
395 MI_PAGE_DISABLE_CACHE(&TempPte
);
396 MI_PAGE_WRITE_COMBINED(&TempPte
);
414 if (*MdlPages
== -1) break;
419 TempPte
.u
.Hard
.PageFrameNumber
= *MdlPages
;
420 MI_WRITE_VALID_PTE(PointerPte
++, TempPte
);
421 } while (++MdlPages
< LastPage
);
426 ASSERT((Mdl
->MdlFlags
& MDL_MAPPED_TO_SYSTEM_VA
) == 0);
427 Mdl
->MappedSystemVa
= Base
;
428 Mdl
->MdlFlags
|= MDL_MAPPED_TO_SYSTEM_VA
;
431 // Check if it was partial
433 if (Mdl
->MdlFlags
& MDL_PARTIAL
)
436 // Write the appropriate flag here too
438 Mdl
->MdlFlags
|= MDL_PARTIAL_HAS_BEEN_MAPPED
;
442 // Return the mapped address
448 // In user-mode, let ReactOS do it
450 return MiMapLockedPagesInUserSpace(Mdl
, Base
, CacheType
, BaseAddress
);
458 MmMapLockedPages(IN PMDL Mdl
,
459 IN KPROCESSOR_MODE AccessMode
)
462 // Call the extended version
464 return MmMapLockedPagesSpecifyCache(Mdl
,
477 MmUnmapLockedPages(IN PVOID BaseAddress
,
481 PFN_NUMBER PageCount
;
482 PPFN_NUMBER MdlPages
;
488 ASSERT(Mdl
->ByteCount
!= 0);
491 // Check if this is a kernel request
493 if (BaseAddress
> MM_HIGHEST_USER_ADDRESS
)
496 // Get base and count information
498 Base
= (PVOID
)((ULONG_PTR
)Mdl
->StartVa
+ Mdl
->ByteOffset
);
499 PageCount
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base
, Mdl
->ByteCount
);
504 ASSERT((Mdl
->MdlFlags
& MDL_PARENT_MAPPED_SYSTEM_VA
) == 0);
505 ASSERT(PageCount
!= 0);
506 ASSERT(Mdl
->MdlFlags
& MDL_MAPPED_TO_SYSTEM_VA
);
511 PointerPte
= MiAddressToPte(BaseAddress
);
514 // This should be a resident system PTE
516 ASSERT(PointerPte
>= MmSystemPtesStart
[SystemPteSpace
]);
517 ASSERT(PointerPte
<= MmSystemPtesEnd
[SystemPteSpace
]);
518 ASSERT(PointerPte
->u
.Hard
.Valid
== 1);
521 // Check if the caller wants us to free advanced pages
523 if (Mdl
->MdlFlags
& MDL_FREE_EXTRA_PTES
)
526 // Get the MDL page array
528 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
529 MdlPages
+= PageCount
;
534 PageCount
+= *MdlPages
;
535 PointerPte
-= *MdlPages
;
536 ASSERT(PointerPte
>= MmSystemPtesStart
[SystemPteSpace
]);
537 ASSERT(PointerPte
<= MmSystemPtesEnd
[SystemPteSpace
]);
540 // Get the new base address
542 BaseAddress
= (PVOID
)((ULONG_PTR
)BaseAddress
-
543 ((*MdlPages
) << PAGE_SHIFT
));
549 Mdl
->MdlFlags
&= ~(MDL_MAPPED_TO_SYSTEM_VA
|
550 MDL_PARTIAL_HAS_BEEN_MAPPED
|
551 MDL_FREE_EXTRA_PTES
);
554 // Release the system PTEs
556 MiReleaseSystemPtes(PointerPte
, PageCount
, SystemPteSpace
);
561 // Let ReactOS handle it
563 MiUnmapLockedPagesInUserSpace(BaseAddress
, Mdl
);
572 MmProbeAndLockPages(IN PMDL Mdl
,
573 IN KPROCESSOR_MODE AccessMode
,
574 IN LOCK_OPERATION Operation
)
576 PPFN_NUMBER MdlPages
;
577 PVOID Base
, Address
, LastAddress
, StartAddress
;
578 ULONG LockPages
, TotalPages
;
579 NTSTATUS Status
= STATUS_SUCCESS
;
580 PEPROCESS CurrentProcess
;
582 PMMSUPPORT AddressSpace
;
583 NTSTATUS ProbeStatus
;
584 PMMPTE PointerPte
, LastPte
;
586 PFN_NUMBER PageFrameIndex
;
590 DPRINT("Probing MDL: %p\n", Mdl
);
595 ASSERT(Mdl
->ByteCount
!= 0);
596 ASSERT(((ULONG
)Mdl
->ByteOffset
& ~(PAGE_SIZE
- 1)) == 0);
597 ASSERT(((ULONG_PTR
)Mdl
->StartVa
& (PAGE_SIZE
- 1)) == 0);
598 ASSERT((Mdl
->MdlFlags
& (MDL_PAGES_LOCKED
|
599 MDL_MAPPED_TO_SYSTEM_VA
|
600 MDL_SOURCE_IS_NONPAGED_POOL
|
602 MDL_IO_SPACE
)) == 0);
605 // Get page and base information
607 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
608 Base
= (PVOID
)Mdl
->StartVa
;
611 // Get the addresses and how many pages we span (and need to lock)
613 Address
= (PVOID
)((ULONG_PTR
)Base
+ Mdl
->ByteOffset
);
614 LastAddress
= (PVOID
)((ULONG_PTR
)Address
+ Mdl
->ByteCount
);
615 LockPages
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Address
, Mdl
->ByteCount
);
616 ASSERT(LockPages
!= 0);
619 // Get the thread and process
621 Thread
= PsGetCurrentThread();
622 if (Address
<= MM_HIGHEST_USER_ADDRESS
)
627 CurrentProcess
= PsGetCurrentProcess();
634 CurrentProcess
= NULL
;
638 // Save the number of pages we'll have to lock, and the start address
640 TotalPages
= LockPages
;
641 StartAddress
= Address
;
646 ProbeStatus
= STATUS_SUCCESS
;
662 *(volatile CHAR
*)Address
;
665 // Check if this is write access (only probe for user-mode)
667 if ((Operation
!= IoReadAccess
) &&
668 (Address
<= MM_HIGHEST_USER_ADDRESS
))
671 // Probe for write too
673 ProbeForWriteChar(Address
);
679 Address
= (PVOID
)((ULONG_PTR
)Address
+ PAGE_SIZE
);
680 Address
= PAGE_ALIGN(Address
);
687 } while (Address
< LastAddress
);
690 // Reset back to the original page
692 ASSERT(LockPages
== 0);
693 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
695 _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER
)
700 ProbeStatus
= _SEH2_GetExceptionCode();
705 // So how did that go?
707 if (ProbeStatus
!= STATUS_SUCCESS
)
712 DPRINT1("MDL PROBE FAILED!\n");
714 ExRaiseStatus(ProbeStatus
);
718 // Get the PTE and PDE
720 PointerPte
= MiAddressToPte(StartAddress
);
721 PointerPde
= MiAddressToPde(StartAddress
);
726 ASSERT(MdlPages
== (PPFN_NUMBER
)(Mdl
+ 1));
729 // Check what kind of operation this is
731 if (Operation
!= IoReadAccess
)
734 // Set the write flag
736 Mdl
->MdlFlags
|= MDL_WRITE_OPERATION
;
741 // Remove the write flag
743 Mdl
->MdlFlags
&= ~(MDL_WRITE_OPERATION
);
747 // Mark the MDL as locked *now*
749 Mdl
->MdlFlags
|= MDL_PAGES_LOCKED
;
752 // Check if this came from kernel mode
754 if (Base
>= MM_HIGHEST_USER_ADDRESS
)
757 // We should not have a process
759 ASSERT(CurrentProcess
== NULL
);
763 // In kernel mode, we don't need to check for write access
765 Operation
= IoReadAccess
;
771 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
772 AddressSpace
= NULL
; // Keep compiler happy
779 ASSERT(TotalPages
!= 0);
780 ASSERT(CurrentProcess
== PsGetCurrentProcess());
783 // Track locked pages
785 InterlockedExchangeAddSizeT(&CurrentProcess
->NumberOfLockedPages
,
791 Mdl
->Process
= CurrentProcess
;
794 // Use the process lock
797 AddressSpace
= &CurrentProcess
->Vm
;
798 MmLockAddressSpace(AddressSpace
);
799 OldIrql
= DISPATCH_LEVEL
; // Keep compiler happy
805 LastPte
= MiAddressToPte((PVOID
)((ULONG_PTR
)LastAddress
- 1));
813 // Assume failure and check for non-mapped pages
816 #if (_MI_PAGING_LEVELS >= 3)
817 /* Should be checking the PPE and PXE */
820 while ((PointerPde
->u
.Hard
.Valid
== 0) ||
821 (PointerPte
->u
.Hard
.Valid
== 0))
824 // What kind of lock were we using?
831 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
836 // Release process address space lock
838 MmUnlockAddressSpace(AddressSpace
);
844 Address
= MiPteToAddress(PointerPte
);
845 Status
= MmAccessFault(FALSE
, Address
, KernelMode
, NULL
);
846 if (!NT_SUCCESS(Status
))
851 DPRINT1("Access fault failed\n");
856 // What lock should we use?
863 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
868 // Use the address space lock
870 MmLockAddressSpace(AddressSpace
);
875 // Check if this was a write or modify
877 if (Operation
!= IoReadAccess
)
880 // Check if the PTE is not writable
882 if (MI_IS_PAGE_WRITEABLE(PointerPte
) == FALSE
)
885 // Check if it's copy on write
887 if (MI_IS_PAGE_COPY_ON_WRITE(PointerPte
))
890 // Get the base address and allow a change for user-mode
892 Address
= MiPteToAddress(PointerPte
);
893 if (Address
<= MM_HIGHEST_USER_ADDRESS
)
896 // What kind of lock were we using?
903 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
908 // Release process address space lock
910 MmUnlockAddressSpace(AddressSpace
);
916 Status
= MmAccessFault(TRUE
, Address
, KernelMode
, NULL
);
917 if (!NT_SUCCESS(Status
))
922 DPRINT1("Access fault failed\n");
927 // Re-acquire the lock
934 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
939 // Use the address space lock
941 MmLockAddressSpace(AddressSpace
);
952 // Fail, since we won't allow this
954 Status
= STATUS_ACCESS_VIOLATION
;
955 goto CleanupWithLock
;
962 PageFrameIndex
= PFN_FROM_PTE(PointerPte
);
963 if (PageFrameIndex
<= MmHighestPhysicalPage
)
968 Pfn1
= MiGetPfnEntry(PageFrameIndex
);
969 ASSERT((CurrentProcess
== NULL
) || (UsePfnLock
== FALSE
));
974 MmReferencePage(PageFrameIndex
);
979 // For I/O addresses, just remember this
981 Mdl
->MdlFlags
|= MDL_IO_SPACE
;
985 // Write the page and move on
987 *MdlPages
++ = PageFrameIndex
;
988 if (!((ULONG_PTR
)(++PointerPte
) & (PAGE_SIZE
- 1))) PointerPde
++;
989 } while (PointerPte
<= LastPte
);
992 // What kind of lock were we using?
999 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
1004 // Release process address space lock
1006 MmUnlockAddressSpace(AddressSpace
);
1012 ASSERT((Mdl
->MdlFlags
& MDL_DESCRIBES_AWE
) == 0);
1017 // This is the failure path
1019 ASSERT(!NT_SUCCESS(Status
));
1022 // What kind of lock were we using?
1029 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
1034 // Release process address space lock
1036 MmUnlockAddressSpace(AddressSpace
);
1040 // Pages must be locked so MmUnlock can work
1042 ASSERT(Mdl
->MdlFlags
& MDL_PAGES_LOCKED
);
1048 ExRaiseStatus(Status
);
1056 MmUnlockPages(IN PMDL Mdl
)
1058 PPFN_NUMBER MdlPages
, LastPage
;
1061 ULONG Flags
, PageCount
;
1063 DPRINT("Unlocking MDL: %p\n", Mdl
);
1068 ASSERT((Mdl
->MdlFlags
& MDL_PAGES_LOCKED
) != 0);
1069 ASSERT((Mdl
->MdlFlags
& MDL_SOURCE_IS_NONPAGED_POOL
) == 0);
1070 ASSERT((Mdl
->MdlFlags
& MDL_PARTIAL
) == 0);
1071 ASSERT(Mdl
->ByteCount
!= 0);
1074 // Get the process associated and capture the flags which are volatile
1076 Process
= Mdl
->Process
;
1077 Flags
= Mdl
->MdlFlags
;
1080 // Automagically undo any calls to MmGetSystemAddressForMdl's for this MDL
1082 if (Mdl
->MdlFlags
& MDL_MAPPED_TO_SYSTEM_VA
)
1085 // Unmap the pages from system space
1087 MmUnmapLockedPages(Mdl
->MappedSystemVa
, Mdl
);
1091 // Get the page count
1093 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
1094 Base
= (PVOID
)((ULONG_PTR
)Mdl
->StartVa
+ Mdl
->ByteOffset
);
1095 PageCount
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base
, Mdl
->ByteCount
);
1096 ASSERT(PageCount
!= 0);
1099 // We don't support AWE
1101 if (Flags
& MDL_DESCRIBES_AWE
) ASSERT(FALSE
);
1104 // Check if the buffer is mapped I/O space
1106 if (Flags
& MDL_IO_SPACE
)
1111 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
1116 LastPage
= MdlPages
+ PageCount
;
1120 // Last page, break out
1122 if (*MdlPages
== -1) break;
1125 // Check if this page is in the PFN database
1127 if (*MdlPages
<= MmHighestPhysicalPage
)
1130 // Unlock and dereference
1132 MmDereferencePage(*MdlPages
);
1134 } while (++MdlPages
< LastPage
);
1139 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
1142 // Check if we have a process
1147 // Handle the accounting of locked pages
1149 ASSERT(Process
->NumberOfLockedPages
> 0);
1150 InterlockedExchangeAddSizeT(&Process
->NumberOfLockedPages
,
1157 Mdl
->MdlFlags
&= ~MDL_IO_SPACE
;
1158 Mdl
->MdlFlags
&= ~MDL_PAGES_LOCKED
;
1163 // Check if we have a process
1168 // Handle the accounting of locked pages
1170 ASSERT(Process
->NumberOfLockedPages
> 0);
1171 InterlockedExchangeAddSizeT(&Process
->NumberOfLockedPages
,
1178 LastPage
= MdlPages
+ PageCount
;
1182 // Last page reached
1184 if (*MdlPages
== -1)
1187 // Were there no pages at all?
1189 if (MdlPages
== (PPFN_NUMBER
)(Mdl
+ 1))
1192 // We're already done
1194 Mdl
->MdlFlags
&= ~MDL_PAGES_LOCKED
;
1199 // Otherwise, stop here
1201 LastPage
= MdlPages
;
1208 ASSERT(*MdlPages
<= MmHighestPhysicalPage
);
1209 } while (++MdlPages
< LastPage
);
1214 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
1217 // Now grab the PFN lock for the actual unlock and dereference
1219 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
1223 // Unlock and dereference
1225 MmDereferencePage(*MdlPages
);
1226 } while (++MdlPages
< LastPage
);
1231 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
1236 Mdl
->MdlFlags
&= ~MDL_PAGES_LOCKED
;
1244 MmAdvanceMdl(IN PMDL Mdl
,
1245 IN ULONG NumberOfBytes
)
1248 return STATUS_NOT_IMPLEMENTED
;
1256 MmMapLockedPagesWithReservedMapping(IN PVOID MappingAddress
,
1258 IN PMDL MemoryDescriptorList
,
1259 IN MEMORY_CACHING_TYPE CacheType
)
1270 MmUnmapReservedMapping(IN PVOID BaseAddress
,
1272 IN PMDL MemoryDescriptorList
)
1282 MmPrefetchPages(IN ULONG NumberOfLists
,
1283 IN PREAD_LIST
*ReadLists
)
1286 return STATUS_NOT_IMPLEMENTED
;
1294 MmProtectMdlSystemAddress(IN PMDL MemoryDescriptorList
,
1295 IN ULONG NewProtect
)
1298 return STATUS_NOT_IMPLEMENTED
;
1306 MmProbeAndLockProcessPages(IN OUT PMDL MemoryDescriptorList
,
1307 IN PEPROCESS Process
,
1308 IN KPROCESSOR_MODE AccessMode
,
1309 IN LOCK_OPERATION Operation
)
1320 MmProbeAndLockSelectedPages(IN OUT PMDL MemoryDescriptorList
,
1321 IN LARGE_INTEGER PageList
[],
1322 IN KPROCESSOR_MODE AccessMode
,
1323 IN LOCK_OPERATION Operation
)
1333 MmMapMemoryDumpMdl(IN PMDL Mdl
)