/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/mdlsup.c
 * PURPOSE:         ARM Memory Manager Memory Descriptor List (MDL) Management
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */
9 /* INCLUDES *******************************************************************/
#line 15 "ARM³::MDLSUP"
16 #define MODULE_INVOLVED_IN_ARM3
17 #include "../ARM3/miarm.h"
/* Debugging aid: when TRUE the memory manager tracks locked pages.
   NOTE(review): no reader or writer of this flag is visible in this chunk —
   confirm its exact semantics against miarm.h / the probe-and-lock paths. */
20 BOOLEAN MmTrackLockedPages
;
22 /* PUBLIC FUNCTIONS ***********************************************************/
29 MmCreateMdl(IN PMDL Mdl
,
36 // Check if we don't have an MDL built
41 // Calculate the size we'll need and allocate the MDL
43 Size
= MmSizeOfMdl(Base
, Length
);
44 Mdl
= ExAllocatePoolWithTag(NonPagedPool
, Size
, TAG_MDL
);
45 if (!Mdl
) return NULL
;
51 MmInitializeMdl(Mdl
, Base
, Length
);
60 MmSizeOfMdl(IN PVOID Base
,
64 // Return the MDL size
67 (ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base
, Length
) * sizeof(PFN_NUMBER
));
75 MmBuildMdlForNonPagedPool(IN PMDL Mdl
)
77 PPFN_NUMBER MdlPages
, EndPage
;
78 PFN_NUMBER Pfn
, PageCount
;
85 ASSERT(Mdl
->ByteCount
!= 0);
86 ASSERT((Mdl
->MdlFlags
& (MDL_PAGES_LOCKED
|
87 MDL_MAPPED_TO_SYSTEM_VA
|
88 MDL_SOURCE_IS_NONPAGED_POOL
|
92 // We know the MDL isn't associated to a process now
97 // Get page and VA information
99 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
103 // Set the system address and now get the page count
105 Mdl
->MappedSystemVa
= (PVOID
)((ULONG_PTR
)Base
+ Mdl
->ByteOffset
);
106 PageCount
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Mdl
->MappedSystemVa
,
108 ASSERT(PageCount
!= 0);
109 EndPage
= MdlPages
+ PageCount
;
114 PointerPte
= MiAddressToPte(Base
);
120 Pfn
= PFN_FROM_PTE(PointerPte
++);
122 } while (MdlPages
< EndPage
);
125 // Set the nonpaged pool flag
127 Mdl
->MdlFlags
|= MDL_SOURCE_IS_NONPAGED_POOL
;
130 // Check if this is an I/O mapping
132 if (Pfn
> MmHighestPhysicalPage
) Mdl
->MdlFlags
|= MDL_IO_SPACE
;
140 MmAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress
,
141 IN PHYSICAL_ADDRESS HighAddress
,
142 IN PHYSICAL_ADDRESS SkipBytes
,
143 IN SIZE_T TotalBytes
)
146 // Call the internal routine
148 return MiAllocatePagesForMdl(LowAddress
,
161 MmAllocatePagesForMdlEx(IN PHYSICAL_ADDRESS LowAddress
,
162 IN PHYSICAL_ADDRESS HighAddress
,
163 IN PHYSICAL_ADDRESS SkipBytes
,
164 IN SIZE_T TotalBytes
,
165 IN MEMORY_CACHING_TYPE CacheType
,
168 MI_PFN_CACHE_ATTRIBUTE CacheAttribute
;
171 // Check for invalid cache type
173 if (CacheType
> MmWriteCombined
)
176 // Normalize to default
178 CacheAttribute
= MiNotMapped
;
183 // Conver to internal caching attribute
185 CacheAttribute
= MiPlatformCacheAttributes
[FALSE
][CacheType
];
189 // Only these flags are allowed
191 if (Flags
& ~(MM_DONT_ZERO_ALLOCATION
| MM_ALLOCATE_FROM_LOCAL_NODE_ONLY
))
200 // Call the internal routine
202 return MiAllocatePagesForMdl(LowAddress
,
215 MmFreePagesFromMdl(IN PMDL Mdl
)
222 DPRINT("Freeing MDL: %p\n", Mdl
);
227 ASSERT(KeGetCurrentIrql() <= APC_LEVEL
);
228 ASSERT((Mdl
->MdlFlags
& MDL_IO_SPACE
) == 0);
229 ASSERT(((ULONG_PTR
)Mdl
->StartVa
& (PAGE_SIZE
- 1)) == 0);
232 // Get address and page information
234 Base
= (PVOID
)((ULONG_PTR
)Mdl
->StartVa
+ Mdl
->ByteOffset
);
235 NumberOfPages
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base
, Mdl
->ByteCount
);
240 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
243 // Loop all the MDL pages
245 Pages
= (PPFN_NUMBER
)(Mdl
+ 1);
249 // Reached the last page
251 if (*Pages
== -1) break;
256 ASSERT(*Pages
<= MmHighestPhysicalPage
);
259 // Get the page entry
261 Pfn1
= MiGetPfnEntry(*Pages
);
262 ASSERT(Pfn1
->u3
.ReferenceCount
== 1);
267 Pfn1
->u3
.e1
.StartOfAllocation
= 0;
268 Pfn1
->u3
.e1
.EndOfAllocation
= 0;
273 MmDereferencePage(*Pages
);
276 // Clear this page and move on
279 } while (--NumberOfPages
!= 0);
284 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
287 // Remove the pages locked flag
289 Mdl
->MdlFlags
&= ~MDL_PAGES_LOCKED
;
297 MmMapLockedPagesSpecifyCache(IN PMDL Mdl
,
298 IN KPROCESSOR_MODE AccessMode
,
299 IN MEMORY_CACHING_TYPE CacheType
,
300 IN PVOID BaseAddress
,
301 IN ULONG BugCheckOnFailure
,
302 IN MM_PAGE_PRIORITY Priority
)
305 PPFN_NUMBER MdlPages
, LastPage
;
306 PFN_NUMBER PageCount
;
308 MI_PFN_CACHE_ATTRIBUTE CacheAttribute
;
315 ASSERT(Mdl
->ByteCount
!= 0);
320 Base
= (PVOID
)((ULONG_PTR
)Mdl
->StartVa
+ Mdl
->ByteOffset
);
323 // Handle kernel case first
325 if (AccessMode
== KernelMode
)
328 // Get the list of pages and count
330 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
331 PageCount
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base
, Mdl
->ByteCount
);
332 LastPage
= MdlPages
+ PageCount
;
337 ASSERT((Mdl
->MdlFlags
& (MDL_MAPPED_TO_SYSTEM_VA
|
338 MDL_SOURCE_IS_NONPAGED_POOL
|
339 MDL_PARTIAL_HAS_BEEN_MAPPED
)) == 0);
340 ASSERT((Mdl
->MdlFlags
& (MDL_PAGES_LOCKED
| MDL_PARTIAL
)) != 0);
343 // Get the correct cache type
345 IsIoMapping
= (Mdl
->MdlFlags
& MDL_IO_SPACE
) != 0;
346 CacheAttribute
= MiPlatformCacheAttributes
[IsIoMapping
][CacheType
];
351 PointerPte
= MiReserveSystemPtes(PageCount
, SystemPteSpace
);
355 // If it can fail, return NULL
357 if (Mdl
->MdlFlags
& MDL_MAPPING_CAN_FAIL
) return NULL
;
360 // Should we bugcheck?
362 if (!BugCheckOnFailure
) return NULL
;
365 // Yes, crash the system
367 KeBugCheckEx(NO_MORE_SYSTEM_PTES
, 0, PageCount
, 0, 0);
371 // Get the mapped address
373 Base
= (PVOID
)((ULONG_PTR
)MiPteToAddress(PointerPte
) + Mdl
->ByteOffset
);
378 TempPte
= ValidKernelPte
;
379 switch (CacheAttribute
)
386 MI_PAGE_DISABLE_CACHE(&TempPte
);
387 MI_PAGE_WRITE_THROUGH(&TempPte
);
390 case MiWriteCombined
:
393 // Enable write combining
395 MI_PAGE_DISABLE_CACHE(&TempPte
);
396 MI_PAGE_WRITE_COMBINED(&TempPte
);
414 if (*MdlPages
== -1) break;
419 ASSERT(PointerPte
->u
.Hard
.Valid
== 0);
420 TempPte
.u
.Hard
.PageFrameNumber
= *MdlPages
;
421 *PointerPte
++ = TempPte
;
422 } while (++MdlPages
< LastPage
);
427 ASSERT((Mdl
->MdlFlags
& MDL_MAPPED_TO_SYSTEM_VA
) == 0);
428 Mdl
->MappedSystemVa
= Base
;
429 Mdl
->MdlFlags
|= MDL_MAPPED_TO_SYSTEM_VA
;
432 // Check if it was partial
434 if (Mdl
->MdlFlags
& MDL_PARTIAL
)
437 // Write the appropriate flag here too
439 Mdl
->MdlFlags
|= MDL_PARTIAL_HAS_BEEN_MAPPED
;
443 // Return the mapped address
449 // In user-mode, let ReactOS do it
451 return MiMapLockedPagesInUserSpace(Mdl
, Base
, CacheType
, BaseAddress
);
459 MmMapLockedPages(IN PMDL Mdl
,
460 IN KPROCESSOR_MODE AccessMode
)
463 // Call the extended version
465 return MmMapLockedPagesSpecifyCache(Mdl
,
478 MmUnmapLockedPages(IN PVOID BaseAddress
,
482 PFN_NUMBER PageCount
;
483 PPFN_NUMBER MdlPages
;
489 ASSERT(Mdl
->ByteCount
!= 0);
492 // Check if this is a kernel request
494 if (BaseAddress
> MM_HIGHEST_USER_ADDRESS
)
497 // Get base and count information
499 Base
= (PVOID
)((ULONG_PTR
)Mdl
->StartVa
+ Mdl
->ByteOffset
);
500 PageCount
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base
, Mdl
->ByteCount
);
505 ASSERT((Mdl
->MdlFlags
& MDL_PARENT_MAPPED_SYSTEM_VA
) == 0);
506 ASSERT(PageCount
!= 0);
507 ASSERT(Mdl
->MdlFlags
& MDL_MAPPED_TO_SYSTEM_VA
);
512 PointerPte
= MiAddressToPte(BaseAddress
);
515 // This should be a resident system PTE
517 ASSERT(PointerPte
>= MmSystemPtesStart
[SystemPteSpace
]);
518 ASSERT(PointerPte
<= MmSystemPtesEnd
[SystemPteSpace
]);
519 ASSERT(PointerPte
->u
.Hard
.Valid
== 1);
522 // Check if the caller wants us to free advanced pages
524 if (Mdl
->MdlFlags
& MDL_FREE_EXTRA_PTES
)
527 // Get the MDL page array
529 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
530 MdlPages
+= PageCount
;
535 PageCount
+= *MdlPages
;
536 PointerPte
-= *MdlPages
;
537 ASSERT(PointerPte
>= MmSystemPtesStart
[SystemPteSpace
]);
538 ASSERT(PointerPte
<= MmSystemPtesEnd
[SystemPteSpace
]);
541 // Get the new base address
543 BaseAddress
= (PVOID
)((ULONG_PTR
)BaseAddress
-
544 ((*MdlPages
) << PAGE_SHIFT
));
550 Mdl
->MdlFlags
&= ~(MDL_MAPPED_TO_SYSTEM_VA
|
551 MDL_PARTIAL_HAS_BEEN_MAPPED
|
552 MDL_FREE_EXTRA_PTES
);
555 // Release the system PTEs
557 MiReleaseSystemPtes(PointerPte
, PageCount
, SystemPteSpace
);
562 // Let ReactOS handle it
564 MiUnmapLockedPagesInUserSpace(BaseAddress
, Mdl
);
573 MmProbeAndLockPages(IN PMDL Mdl
,
574 IN KPROCESSOR_MODE AccessMode
,
575 IN LOCK_OPERATION Operation
)
578 PVOID Base
, Address
, LastAddress
, StartAddress
;
579 ULONG LockPages
, TotalPages
;
580 NTSTATUS Status
= STATUS_SUCCESS
;
581 PEPROCESS CurrentProcess
;
583 PMMSUPPORT AddressSpace
;
584 NTSTATUS ProbeStatus
;
585 PMMPTE PointerPte
, LastPte
;
587 PFN_NUMBER PageFrameIndex
;
591 DPRINT("Probing MDL: %p\n", Mdl
);
596 ASSERT(Mdl
->ByteCount
!= 0);
597 ASSERT(((ULONG
)Mdl
->ByteOffset
& ~(PAGE_SIZE
- 1)) == 0);
598 ASSERT(((ULONG_PTR
)Mdl
->StartVa
& (PAGE_SIZE
- 1)) == 0);
599 ASSERT((Mdl
->MdlFlags
& (MDL_PAGES_LOCKED
|
600 MDL_MAPPED_TO_SYSTEM_VA
|
601 MDL_SOURCE_IS_NONPAGED_POOL
|
603 MDL_IO_SPACE
)) == 0);
606 // Get page and base information
608 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
609 Base
= (PVOID
)Mdl
->StartVa
;
612 // Get the addresses and how many pages we span (and need to lock)
614 Address
= (PVOID
)((ULONG_PTR
)Base
+ Mdl
->ByteOffset
);
615 LastAddress
= (PVOID
)((ULONG_PTR
)Address
+ Mdl
->ByteCount
);
616 LockPages
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Address
, Mdl
->ByteCount
);
617 ASSERT(LockPages
!= 0);
620 // Get the thread and process
622 Thread
= PsGetCurrentThread();
623 if (Address
<= MM_HIGHEST_USER_ADDRESS
)
628 CurrentProcess
= PsGetCurrentProcess();
635 CurrentProcess
= NULL
;
639 // Save the number of pages we'll have to lock, and the start address
641 TotalPages
= LockPages
;
642 StartAddress
= Address
;
647 ProbeStatus
= STATUS_SUCCESS
;
663 *(volatile CHAR
*)Address
;
666 // Check if this is write access (only probe for user-mode)
668 if ((Operation
!= IoReadAccess
) &&
669 (Address
<= MM_HIGHEST_USER_ADDRESS
))
672 // Probe for write too
674 ProbeForWriteChar(Address
);
680 Address
= (PVOID
)((ULONG_PTR
)Address
+ PAGE_SIZE
);
681 Address
= PAGE_ALIGN(Address
);
688 } while (Address
< LastAddress
);
691 // Reset back to the original page
693 ASSERT(LockPages
== 0);
694 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
696 _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER
)
701 ProbeStatus
= _SEH2_GetExceptionCode();
706 // So how did that go?
708 if (ProbeStatus
!= STATUS_SUCCESS
)
713 DPRINT1("MDL PROBE FAILED!\n");
715 ExRaiseStatus(ProbeStatus
);
719 // Get the PTE and PDE
721 PointerPte
= MiAddressToPte(StartAddress
);
722 PointerPde
= MiAddressToPde(StartAddress
);
727 ASSERT(MdlPages
== (PPFN_NUMBER
)(Mdl
+ 1));
730 // Check what kind of operation this is
732 if (Operation
!= IoReadAccess
)
735 // Set the write flag
737 Mdl
->MdlFlags
|= MDL_WRITE_OPERATION
;
742 // Remove the write flag
744 Mdl
->MdlFlags
&= ~(MDL_WRITE_OPERATION
);
748 // Mark the MDL as locked *now*
750 Mdl
->MdlFlags
|= MDL_PAGES_LOCKED
;
753 // Check if this came from kernel mode
755 if (Base
>= MM_HIGHEST_USER_ADDRESS
)
758 // We should not have a process
760 ASSERT(CurrentProcess
== NULL
);
764 // In kernel mode, we don't need to check for write access
766 Operation
= IoReadAccess
;
772 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
773 AddressSpace
= NULL
; // Keep compiler happy
780 ASSERT(TotalPages
!= 0);
781 ASSERT(CurrentProcess
== PsGetCurrentProcess());
784 // Track locked pages
786 InterlockedExchangeAddSizeT(&CurrentProcess
->NumberOfLockedPages
,
792 Mdl
->Process
= CurrentProcess
;
795 // Use the process lock
798 AddressSpace
= &CurrentProcess
->Vm
;
799 MmLockAddressSpace(AddressSpace
);
800 OldIrql
= DISPATCH_LEVEL
; // Keep compiler happy
806 LastPte
= MiAddressToPte((PVOID
)((ULONG_PTR
)LastAddress
- 1));
814 // Assume failure and check for non-mapped pages
817 while ((PointerPde
->u
.Hard
.Valid
== 0) ||
818 (PointerPte
->u
.Hard
.Valid
== 0))
821 // What kind of lock where we using?
828 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
833 // Release process address space lock
835 MmUnlockAddressSpace(AddressSpace
);
841 Address
= MiPteToAddress(PointerPte
);
842 Status
= MmAccessFault(FALSE
, Address
, KernelMode
, NULL
);
843 if (!NT_SUCCESS(Status
))
848 DPRINT1("Access fault failed\n");
853 // Waht lock should we use?
860 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
865 // Use the address space lock
867 MmLockAddressSpace(AddressSpace
);
872 // Check if this was a write or modify
874 if (Operation
!= IoReadAccess
)
877 // Check if the PTE is not writable
879 if (MI_IS_PAGE_WRITEABLE(PointerPte
) == FALSE
)
882 // Check if it's copy on write
884 if (MI_IS_PAGE_COPY_ON_WRITE(PointerPte
))
887 // Get the base address and allow a change for user-mode
889 Address
= MiPteToAddress(PointerPte
);
890 if (Address
<= MM_HIGHEST_USER_ADDRESS
)
893 // What kind of lock where we using?
900 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
905 // Release process address space lock
907 MmUnlockAddressSpace(AddressSpace
);
913 Status
= MmAccessFault(TRUE
, Address
, KernelMode
, NULL
);
914 if (!NT_SUCCESS(Status
))
919 DPRINT1("Access fault failed\n");
924 // Re-acquire the lock
931 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
936 // Use the address space lock
938 MmLockAddressSpace(AddressSpace
);
949 // Fail, since we won't allow this
951 Status
= STATUS_ACCESS_VIOLATION
;
952 goto CleanupWithLock
;
959 PageFrameIndex
= PFN_FROM_PTE(PointerPte
);
960 if (PageFrameIndex
<= MmHighestPhysicalPage
)
965 Pfn1
= MiGetPfnEntry(PageFrameIndex
);
966 ASSERT((CurrentProcess
== NULL
) || (UsePfnLock
== FALSE
));
971 MmReferencePage(PageFrameIndex
);
976 // For I/O addresses, just remember this
978 Mdl
->MdlFlags
|= MDL_IO_SPACE
;
982 // Write the page and move on
984 *MdlPages
++ = PageFrameIndex
;
985 if (!((ULONG_PTR
)(++PointerPte
) & (PAGE_SIZE
- 1))) PointerPde
++;
986 } while (PointerPte
<= LastPte
);
989 // What kind of lock where we using?
996 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
1001 // Release process address space lock
1003 MmUnlockAddressSpace(AddressSpace
);
1009 ASSERT((Mdl
->MdlFlags
& MDL_DESCRIBES_AWE
) == 0);
1014 // This is the failure path
1016 ASSERT(!NT_SUCCESS(Status
));
1019 // What kind of lock where we using?
1026 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
1031 // Release process address space lock
1033 MmUnlockAddressSpace(AddressSpace
);
1037 // Pages must be locked so MmUnlock can work
1039 ASSERT(Mdl
->MdlFlags
& MDL_PAGES_LOCKED
);
1045 ExRaiseStatus(Status
);
1053 MmUnlockPages(IN PMDL Mdl
)
1055 PPFN_NUMBER MdlPages
, LastPage
;
1058 ULONG Flags
, PageCount
;
1060 DPRINT("Unlocking MDL: %p\n", Mdl
);
1065 ASSERT((Mdl
->MdlFlags
& MDL_PAGES_LOCKED
) != 0);
1066 ASSERT((Mdl
->MdlFlags
& MDL_SOURCE_IS_NONPAGED_POOL
) == 0);
1067 ASSERT((Mdl
->MdlFlags
& MDL_PARTIAL
) == 0);
1068 ASSERT(Mdl
->ByteCount
!= 0);
1071 // Get the process associated and capture the flags which are volatile
1073 Process
= Mdl
->Process
;
1074 Flags
= Mdl
->MdlFlags
;
1077 // Automagically undo any calls to MmGetSystemAddressForMdl's for this MDL
1079 if (Mdl
->MdlFlags
& MDL_MAPPED_TO_SYSTEM_VA
)
1082 // Unmap the pages from system space
1084 MmUnmapLockedPages(Mdl
->MappedSystemVa
, Mdl
);
1088 // Get the page count
1090 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
1091 Base
= (PVOID
)((ULONG_PTR
)Mdl
->StartVa
+ Mdl
->ByteOffset
);
1092 PageCount
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base
, Mdl
->ByteCount
);
1093 ASSERT(PageCount
!= 0);
1096 // We don't support AWE
1098 if (Flags
& MDL_DESCRIBES_AWE
) ASSERT(FALSE
);
1101 // Check if the buffer is mapped I/O space
1103 if (Flags
& MDL_IO_SPACE
)
1108 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
1113 LastPage
= MdlPages
+ PageCount
;
1117 // Last page, break out
1119 if (*MdlPages
== -1) break;
1122 // Check if this page is in the PFN database
1124 if (*MdlPages
<= MmHighestPhysicalPage
)
1127 // Unlock and dereference
1129 MmDereferencePage(*MdlPages
);
1131 } while (++MdlPages
< LastPage
);
1136 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
1139 // Check if we have a process
1144 // Handle the accounting of locked pages
1146 ASSERT(Process
->NumberOfLockedPages
> 0);
1147 InterlockedExchangeAddSizeT(&Process
->NumberOfLockedPages
,
1154 Mdl
->MdlFlags
&= ~MDL_IO_SPACE
;
1155 Mdl
->MdlFlags
&= ~MDL_PAGES_LOCKED
;
1160 // Check if we have a process
1165 // Handle the accounting of locked pages
1167 ASSERT(Process
->NumberOfLockedPages
> 0);
1168 InterlockedExchangeAddSizeT(&Process
->NumberOfLockedPages
,
1175 LastPage
= MdlPages
+ PageCount
;
1179 // Last page reached
1181 if (*MdlPages
== -1)
1184 // Were there no pages at all?
1186 if (MdlPages
== (PPFN_NUMBER
)(Mdl
+ 1))
1189 // We're already done
1191 Mdl
->MdlFlags
&= ~MDL_PAGES_LOCKED
;
1196 // Otherwise, stop here
1198 LastPage
= MdlPages
;
1205 ASSERT(*MdlPages
<= MmHighestPhysicalPage
);
1206 } while (++MdlPages
< LastPage
);
1211 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
1214 // Now grab the PFN lock for the actual unlock and dereference
1216 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
1220 // Unlock and dereference
1222 MmDereferencePage(*MdlPages
);
1223 } while (++MdlPages
< LastPage
);
1228 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
1233 Mdl
->MdlFlags
&= ~MDL_PAGES_LOCKED
;
1241 MmAdvanceMdl(IN PMDL Mdl
,
1242 IN ULONG NumberOfBytes
)
1245 return STATUS_NOT_IMPLEMENTED
;
1253 MmMapLockedPagesWithReservedMapping(IN PVOID MappingAddress
,
1255 IN PMDL MemoryDescriptorList
,
1256 IN MEMORY_CACHING_TYPE CacheType
)
1267 MmUnmapReservedMapping(IN PVOID BaseAddress
,
1269 IN PMDL MemoryDescriptorList
)
1279 MmPrefetchPages(IN ULONG NumberOfLists
,
1280 IN PREAD_LIST
*ReadLists
)
1283 return STATUS_NOT_IMPLEMENTED
;
1291 MmProtectMdlSystemAddress(IN PMDL MemoryDescriptorList
,
1292 IN ULONG NewProtect
)
1295 return STATUS_NOT_IMPLEMENTED
;
1303 MmProbeAndLockProcessPages(IN OUT PMDL MemoryDescriptorList
,
1304 IN PEPROCESS Process
,
1305 IN KPROCESSOR_MODE AccessMode
,
1306 IN LOCK_OPERATION Operation
)
1317 MmProbeAndLockSelectedPages(IN OUT PMDL MemoryDescriptorList
,
1318 IN LARGE_INTEGER PageList
[],
1319 IN KPROCESSOR_MODE AccessMode
,
1320 IN LOCK_OPERATION Operation
)
1330 MmMapMemoryDumpMdl(IN PMDL Mdl
)