2 * PROJECT: ReactOS Kernel
3 * LICENSE: BSD - See COPYING.ARM in the top level directory
4 * FILE: ntoskrnl/mm/ARM3/mdlsup.c
5 * PURPOSE: ARM Memory Manager Memory Descriptor List (MDL) Management
6 * PROGRAMMERS: ReactOS Portable Systems Group
9 /* INCLUDES *******************************************************************/
15 #line 15 "ARM³::MDLSUP"
16 #define MODULE_INVOLVED_IN_ARM3
17 #include "../ARM3/miarm.h"
19 /* PUBLIC FUNCTIONS ***********************************************************/
26 MmCreateMdl(IN PMDL Mdl
,
33 // Check if we don't have an MDL built
38 // Calculate the size we'll need and allocate the MDL
40 Size
= MmSizeOfMdl(Base
, Length
);
41 Mdl
= ExAllocatePoolWithTag(NonPagedPool
, Size
, TAG_MDL
);
42 if (!Mdl
) return NULL
;
48 MmInitializeMdl(Mdl
, Base
, Length
);
57 MmSizeOfMdl(IN PVOID Base
,
61 // Return the MDL size
64 (ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base
, Length
) * sizeof(PFN_NUMBER
));
72 MmBuildMdlForNonPagedPool(IN PMDL Mdl
)
74 PPFN_NUMBER MdlPages
, EndPage
;
75 PFN_NUMBER Pfn
, PageCount
;
82 ASSERT(Mdl
->ByteCount
!= 0);
83 ASSERT((Mdl
->MdlFlags
& (MDL_PAGES_LOCKED
|
84 MDL_MAPPED_TO_SYSTEM_VA
|
85 MDL_SOURCE_IS_NONPAGED_POOL
|
89 // We know the MDL isn't associated to a process now
94 // Get page and VA information
96 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
100 // Set the system address and now get the page count
102 Mdl
->MappedSystemVa
= (PVOID
)((ULONG_PTR
)Base
+ Mdl
->ByteOffset
);
103 PageCount
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Mdl
->MappedSystemVa
,
105 ASSERT(PageCount
!= 0);
106 EndPage
= MdlPages
+ PageCount
;
111 PointerPte
= MiAddressToPte(Base
);
117 Pfn
= PFN_FROM_PTE(PointerPte
++);
119 } while (MdlPages
< EndPage
);
122 // Set the nonpaged pool flag
124 Mdl
->MdlFlags
|= MDL_SOURCE_IS_NONPAGED_POOL
;
127 // Check if this is an I/O mapping
129 if (Pfn
> MmHighestPhysicalPage
) Mdl
->MdlFlags
|= MDL_IO_SPACE
;
137 MmAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress
,
138 IN PHYSICAL_ADDRESS HighAddress
,
139 IN PHYSICAL_ADDRESS SkipBytes
,
140 IN SIZE_T TotalBytes
)
143 // Call the internal routine
145 return MiAllocatePagesForMdl(LowAddress
,
158 MmAllocatePagesForMdlEx(IN PHYSICAL_ADDRESS LowAddress
,
159 IN PHYSICAL_ADDRESS HighAddress
,
160 IN PHYSICAL_ADDRESS SkipBytes
,
161 IN SIZE_T TotalBytes
,
162 IN MEMORY_CACHING_TYPE CacheType
,
165 MI_PFN_CACHE_ATTRIBUTE CacheAttribute
;
168 // Check for invalid cache type
170 if (CacheType
> MmWriteCombined
)
173 // Normalize to default
175 CacheAttribute
= MiNotMapped
;
180 // Conver to internal caching attribute
182 CacheAttribute
= MiPlatformCacheAttributes
[FALSE
][CacheType
];
186 // Only these flags are allowed
188 if (Flags
& ~(MM_DONT_ZERO_ALLOCATION
| MM_ALLOCATE_FROM_LOCAL_NODE_ONLY
))
197 // Call the internal routine
199 return MiAllocatePagesForMdl(LowAddress
,
212 MmFreePagesFromMdl(IN PMDL Mdl
)
219 DPRINT("Freeing MDL: %p\n", Mdl
);
224 ASSERT(KeGetCurrentIrql() <= APC_LEVEL
);
225 ASSERT((Mdl
->MdlFlags
& MDL_IO_SPACE
) == 0);
226 ASSERT(((ULONG_PTR
)Mdl
->StartVa
& (PAGE_SIZE
- 1)) == 0);
229 // Get address and page information
231 Base
= (PVOID
)((ULONG_PTR
)Mdl
->StartVa
+ Mdl
->ByteOffset
);
232 NumberOfPages
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base
, Mdl
->ByteCount
);
237 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
240 // Loop all the MDL pages
242 Pages
= (PPFN_NUMBER
)(Mdl
+ 1);
246 // Reached the last page
248 if (*Pages
== -1) break;
253 ASSERT(*Pages
<= MmHighestPhysicalPage
);
256 // Get the page entry
258 Pfn1
= MiGetPfnEntry(*Pages
);
259 ASSERT(Pfn1
->u3
.ReferenceCount
== 1);
264 Pfn1
->u3
.e1
.StartOfAllocation
= 0;
265 Pfn1
->u3
.e1
.EndOfAllocation
= 0;
270 MmDereferencePage(*Pages
);
273 // Clear this page and move on
276 } while (--NumberOfPages
!= 0);
281 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
284 // Remove the pages locked flag
286 Mdl
->MdlFlags
&= ~MDL_PAGES_LOCKED
;
294 MmMapLockedPagesSpecifyCache(IN PMDL Mdl
,
295 IN KPROCESSOR_MODE AccessMode
,
296 IN MEMORY_CACHING_TYPE CacheType
,
297 IN PVOID BaseAddress
,
298 IN ULONG BugCheckOnFailure
,
299 IN MM_PAGE_PRIORITY Priority
)
302 PPFN_NUMBER MdlPages
, LastPage
;
303 PFN_NUMBER PageCount
;
305 MI_PFN_CACHE_ATTRIBUTE CacheAttribute
;
312 ASSERT(Mdl
->ByteCount
!= 0);
317 Base
= (PVOID
)((ULONG_PTR
)Mdl
->StartVa
+ Mdl
->ByteOffset
);
320 // Handle kernel case first
322 if (AccessMode
== KernelMode
)
325 // Get the list of pages and count
327 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
328 PageCount
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base
, Mdl
->ByteCount
);
329 LastPage
= MdlPages
+ PageCount
;
334 ASSERT((Mdl
->MdlFlags
& (MDL_MAPPED_TO_SYSTEM_VA
|
335 MDL_SOURCE_IS_NONPAGED_POOL
|
336 MDL_PARTIAL_HAS_BEEN_MAPPED
)) == 0);
337 ASSERT((Mdl
->MdlFlags
& (MDL_PAGES_LOCKED
| MDL_PARTIAL
)) != 0);
340 // Get the correct cache type
342 IsIoMapping
= (Mdl
->MdlFlags
& MDL_IO_SPACE
) != 0;
343 CacheAttribute
= MiPlatformCacheAttributes
[IsIoMapping
][CacheType
];
348 PointerPte
= MiReserveSystemPtes(PageCount
, SystemPteSpace
);
352 // If it can fail, return NULL
354 if (Mdl
->MdlFlags
& MDL_MAPPING_CAN_FAIL
) return NULL
;
357 // Should we bugcheck?
359 if (!BugCheckOnFailure
) return NULL
;
362 // Yes, crash the system
364 KeBugCheckEx(NO_MORE_SYSTEM_PTES
, 0, PageCount
, 0, 0);
368 // Get the mapped address
370 Base
= (PVOID
)((ULONG_PTR
)MiPteToAddress(PointerPte
) + Mdl
->ByteOffset
);
375 TempPte
= ValidKernelPte
;
376 switch (CacheAttribute
)
383 MI_PAGE_DISABLE_CACHE(&TempPte
);
384 MI_PAGE_WRITE_THROUGH(&TempPte
);
387 case MiWriteCombined
:
390 // Enable write combining
392 MI_PAGE_DISABLE_CACHE(&TempPte
);
393 MI_PAGE_WRITE_COMBINED(&TempPte
);
411 if (*MdlPages
== -1) break;
416 ASSERT(PointerPte
->u
.Hard
.Valid
== 0);
417 TempPte
.u
.Hard
.PageFrameNumber
= *MdlPages
;
418 *PointerPte
++ = TempPte
;
419 } while (++MdlPages
< LastPage
);
424 ASSERT((Mdl
->MdlFlags
& MDL_MAPPED_TO_SYSTEM_VA
) == 0);
425 Mdl
->MappedSystemVa
= Base
;
426 Mdl
->MdlFlags
|= MDL_MAPPED_TO_SYSTEM_VA
;
429 // Check if it was partial
431 if (Mdl
->MdlFlags
& MDL_PARTIAL
)
434 // Write the appropriate flag here too
436 Mdl
->MdlFlags
|= MDL_PARTIAL_HAS_BEEN_MAPPED
;
440 // Return the mapped address
446 // In user-mode, let ReactOS do it
448 return MiMapLockedPagesInUserSpace(Mdl
, Base
, CacheType
, BaseAddress
);
456 MmMapLockedPages(IN PMDL Mdl
,
457 IN KPROCESSOR_MODE AccessMode
)
460 // Call the extended version
462 return MmMapLockedPagesSpecifyCache(Mdl
,
475 MmUnmapLockedPages(IN PVOID BaseAddress
,
479 PFN_NUMBER PageCount
;
480 PPFN_NUMBER MdlPages
;
486 ASSERT(Mdl
->ByteCount
!= 0);
489 // Check if this is a kernel request
491 if (BaseAddress
> MM_HIGHEST_USER_ADDRESS
)
494 // Get base and count information
496 Base
= (PVOID
)((ULONG_PTR
)Mdl
->StartVa
+ Mdl
->ByteOffset
);
497 PageCount
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base
, Mdl
->ByteCount
);
502 ASSERT((Mdl
->MdlFlags
& MDL_PARENT_MAPPED_SYSTEM_VA
) == 0);
503 ASSERT(PageCount
!= 0);
504 ASSERT(Mdl
->MdlFlags
& MDL_MAPPED_TO_SYSTEM_VA
);
509 PointerPte
= MiAddressToPte(BaseAddress
);
512 // This should be a resident system PTE
514 ASSERT(PointerPte
>= MmSystemPtesStart
[SystemPteSpace
]);
515 ASSERT(PointerPte
<= MmSystemPtesEnd
[SystemPteSpace
]);
516 ASSERT(PointerPte
->u
.Hard
.Valid
== 1);
519 // Check if the caller wants us to free advanced pages
521 if (Mdl
->MdlFlags
& MDL_FREE_EXTRA_PTES
)
524 // Get the MDL page array
526 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
527 MdlPages
+= PageCount
;
532 PageCount
+= *MdlPages
;
533 PointerPte
-= *MdlPages
;
534 ASSERT(PointerPte
>= MmSystemPtesStart
[SystemPteSpace
]);
535 ASSERT(PointerPte
<= MmSystemPtesEnd
[SystemPteSpace
]);
538 // Get the new base address
540 BaseAddress
= (PVOID
)((ULONG_PTR
)BaseAddress
-
541 ((*MdlPages
) << PAGE_SHIFT
));
547 Mdl
->MdlFlags
&= ~(MDL_MAPPED_TO_SYSTEM_VA
|
548 MDL_PARTIAL_HAS_BEEN_MAPPED
|
549 MDL_FREE_EXTRA_PTES
);
552 // Release the system PTEs
554 MiReleaseSystemPtes(PointerPte
, PageCount
, SystemPteSpace
);
559 // Let ReactOS handle it
561 MiUnmapLockedPagesInUserSpace(BaseAddress
, Mdl
);
570 MmProbeAndLockPages(IN PMDL Mdl
,
571 IN KPROCESSOR_MODE AccessMode
,
572 IN LOCK_OPERATION Operation
)
575 PVOID Base
, Address
, LastAddress
, StartAddress
;
576 ULONG LockPages
, TotalPages
;
577 NTSTATUS Status
= STATUS_SUCCESS
;
578 PEPROCESS CurrentProcess
;
580 PMMSUPPORT AddressSpace
;
581 NTSTATUS ProbeStatus
;
582 PMMPTE PointerPte
, LastPte
;
584 PFN_NUMBER PageFrameIndex
;
588 DPRINT("Probing MDL: %p\n", Mdl
);
593 ASSERT(Mdl
->ByteCount
!= 0);
594 ASSERT(((ULONG
)Mdl
->ByteOffset
& ~(PAGE_SIZE
- 1)) == 0);
595 ASSERT(((ULONG_PTR
)Mdl
->StartVa
& (PAGE_SIZE
- 1)) == 0);
596 ASSERT((Mdl
->MdlFlags
& (MDL_PAGES_LOCKED
|
597 MDL_MAPPED_TO_SYSTEM_VA
|
598 MDL_SOURCE_IS_NONPAGED_POOL
|
600 MDL_IO_SPACE
)) == 0);
603 // Get page and base information
605 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
606 Base
= (PVOID
)Mdl
->StartVa
;
609 // Get the addresses and how many pages we span (and need to lock)
611 Address
= (PVOID
)((ULONG_PTR
)Base
+ Mdl
->ByteOffset
);
612 LastAddress
= (PVOID
)((ULONG_PTR
)Address
+ Mdl
->ByteCount
);
613 LockPages
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Address
, Mdl
->ByteCount
);
614 ASSERT(LockPages
!= 0);
617 // Get the thread and process
619 Thread
= PsGetCurrentThread();
620 if (Address
<= MM_HIGHEST_USER_ADDRESS
)
625 CurrentProcess
= PsGetCurrentProcess();
632 CurrentProcess
= NULL
;
636 // Save the number of pages we'll have to lock, and the start address
638 TotalPages
= LockPages
;
639 StartAddress
= Address
;
644 ProbeStatus
= STATUS_SUCCESS
;
660 *(volatile CHAR
*)Address
;
663 // Check if this is write access (only probe for user-mode)
665 if ((Operation
!= IoReadAccess
) &&
666 (Address
<= MM_HIGHEST_USER_ADDRESS
))
669 // Probe for write too
671 ProbeForWriteChar(Address
);
677 Address
= (PVOID
)((ULONG_PTR
)Address
+ PAGE_SIZE
);
678 Address
= PAGE_ALIGN(Address
);
685 } while (Address
< LastAddress
);
688 // Reset back to the original page
690 ASSERT(LockPages
== 0);
691 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
693 _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER
)
698 ProbeStatus
= _SEH2_GetExceptionCode();
703 // So how did that go?
705 if (ProbeStatus
!= STATUS_SUCCESS
)
710 DPRINT1("MDL PROBE FAILED!\n");
712 ExRaiseStatus(ProbeStatus
);
716 // Get the PTE and PDE
718 PointerPte
= MiAddressToPte(StartAddress
);
719 PointerPde
= MiAddressToPde(StartAddress
);
724 ASSERT(MdlPages
== (PPFN_NUMBER
)(Mdl
+ 1));
727 // Check what kind of operation this is
729 if (Operation
!= IoReadAccess
)
732 // Set the write flag
734 Mdl
->MdlFlags
|= MDL_WRITE_OPERATION
;
739 // Remove the write flag
741 Mdl
->MdlFlags
&= ~(MDL_WRITE_OPERATION
);
745 // Mark the MDL as locked *now*
747 Mdl
->MdlFlags
|= MDL_PAGES_LOCKED
;
750 // Check if this came from kernel mode
752 if (Base
>= MM_HIGHEST_USER_ADDRESS
)
755 // We should not have a process
757 ASSERT(CurrentProcess
== NULL
);
761 // In kernel mode, we don't need to check for write access
763 Operation
= IoReadAccess
;
769 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
770 AddressSpace
= NULL
; // Keep compiler happy
777 ASSERT(TotalPages
!= 0);
778 ASSERT(CurrentProcess
== PsGetCurrentProcess());
781 // Track locked pages
783 InterlockedExchangeAddSizeT(&CurrentProcess
->NumberOfLockedPages
,
789 Mdl
->Process
= CurrentProcess
;
792 // Use the process lock
795 AddressSpace
= &CurrentProcess
->Vm
;
796 MmLockAddressSpace(AddressSpace
);
797 OldIrql
= DISPATCH_LEVEL
; // Keep compiler happy
803 LastPte
= MiAddressToPte((PVOID
)((ULONG_PTR
)LastAddress
- 1));
811 // Assume failure and check for non-mapped pages
814 while ((PointerPde
->u
.Hard
.Valid
== 0) ||
815 (PointerPte
->u
.Hard
.Valid
== 0))
818 // What kind of lock where we using?
825 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
830 // Release process address space lock
832 MmUnlockAddressSpace(AddressSpace
);
838 Address
= MiPteToAddress(PointerPte
);
839 Status
= MmAccessFault(FALSE
, Address
, KernelMode
, NULL
);
840 if (!NT_SUCCESS(Status
))
845 DPRINT1("Access fault failed\n");
850 // Waht lock should we use?
857 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
862 // Use the address space lock
864 MmLockAddressSpace(AddressSpace
);
869 // Check if this was a write or modify
871 if (Operation
!= IoReadAccess
)
874 // Check if the PTE is not writable
876 if (MI_IS_PAGE_WRITEABLE(PointerPte
) == FALSE
)
879 // Check if it's copy on write
881 if (MI_IS_PAGE_COPY_ON_WRITE(PointerPte
))
884 // Get the base address and allow a change for user-mode
886 Address
= MiPteToAddress(PointerPte
);
887 if (Address
<= MM_HIGHEST_USER_ADDRESS
)
890 // What kind of lock where we using?
897 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
902 // Release process address space lock
904 MmUnlockAddressSpace(AddressSpace
);
910 Status
= MmAccessFault(TRUE
, Address
, KernelMode
, NULL
);
911 if (!NT_SUCCESS(Status
))
916 DPRINT1("Access fault failed\n");
921 // Re-acquire the lock
928 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
933 // Use the address space lock
935 MmLockAddressSpace(AddressSpace
);
946 // Fail, since we won't allow this
948 Status
= STATUS_ACCESS_VIOLATION
;
949 goto CleanupWithLock
;
956 PageFrameIndex
= PFN_FROM_PTE(PointerPte
);
957 if (PageFrameIndex
<= MmHighestPhysicalPage
)
962 Pfn1
= MiGetPfnEntry(PageFrameIndex
);
963 ASSERT((CurrentProcess
== NULL
) || (UsePfnLock
== FALSE
));
968 MmReferencePage(PageFrameIndex
);
973 // For I/O addresses, just remember this
975 Mdl
->MdlFlags
|= MDL_IO_SPACE
;
979 // Write the page and move on
981 *MdlPages
++ = PageFrameIndex
;
982 if (!((ULONG_PTR
)(++PointerPte
) & (PAGE_SIZE
- 1))) PointerPde
++;
983 } while (PointerPte
<= LastPte
);
986 // What kind of lock where we using?
993 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
998 // Release process address space lock
1000 MmUnlockAddressSpace(AddressSpace
);
1006 ASSERT((Mdl
->MdlFlags
& MDL_DESCRIBES_AWE
) == 0);
1011 // This is the failure path
1013 ASSERT(!NT_SUCCESS(Status
));
1016 // What kind of lock where we using?
1023 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
1028 // Release process address space lock
1030 MmUnlockAddressSpace(AddressSpace
);
1034 // Pages must be locked so MmUnlock can work
1036 ASSERT(Mdl
->MdlFlags
& MDL_PAGES_LOCKED
);
1042 ExRaiseStatus(Status
);
1050 MmUnlockPages(IN PMDL Mdl
)
1052 PPFN_NUMBER MdlPages
, LastPage
;
1055 ULONG Flags
, PageCount
;
1057 DPRINT("Unlocking MDL: %p\n", Mdl
);
1062 ASSERT((Mdl
->MdlFlags
& MDL_PAGES_LOCKED
) != 0);
1063 ASSERT((Mdl
->MdlFlags
& MDL_SOURCE_IS_NONPAGED_POOL
) == 0);
1064 ASSERT((Mdl
->MdlFlags
& MDL_PARTIAL
) == 0);
1065 ASSERT(Mdl
->ByteCount
!= 0);
1068 // Get the process associated and capture the flags which are volatile
1070 Process
= Mdl
->Process
;
1071 Flags
= Mdl
->MdlFlags
;
1074 // Automagically undo any calls to MmGetSystemAddressForMdl's for this MDL
1076 if (Mdl
->MdlFlags
& MDL_MAPPED_TO_SYSTEM_VA
)
1079 // Unmap the pages from system space
1081 MmUnmapLockedPages(Mdl
->MappedSystemVa
, Mdl
);
1085 // Get the page count
1087 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
1088 Base
= (PVOID
)((ULONG_PTR
)Mdl
->StartVa
+ Mdl
->ByteOffset
);
1089 PageCount
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base
, Mdl
->ByteCount
);
1090 ASSERT(PageCount
!= 0);
1093 // We don't support AWE
1095 if (Flags
& MDL_DESCRIBES_AWE
) ASSERT(FALSE
);
1098 // Check if the buffer is mapped I/O space
1100 if (Flags
& MDL_IO_SPACE
)
1105 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
1110 LastPage
= MdlPages
+ PageCount
;
1114 // Last page, break out
1116 if (*MdlPages
== -1) break;
1119 // Check if this page is in the PFN database
1121 if (*MdlPages
<= MmHighestPhysicalPage
)
1124 // Unlock and dereference
1126 MmDereferencePage(*MdlPages
);
1128 } while (++MdlPages
< LastPage
);
1133 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
1136 // Check if we have a process
1141 // Handle the accounting of locked pages
1143 ASSERT(Process
->NumberOfLockedPages
> 0);
1144 InterlockedExchangeAddSizeT(&Process
->NumberOfLockedPages
,
1151 Mdl
->MdlFlags
&= ~MDL_IO_SPACE
;
1152 Mdl
->MdlFlags
&= ~MDL_PAGES_LOCKED
;
1157 // Check if we have a process
1162 // Handle the accounting of locked pages
1164 ASSERT(Process
->NumberOfLockedPages
> 0);
1165 InterlockedExchangeAddSizeT(&Process
->NumberOfLockedPages
,
1172 LastPage
= MdlPages
+ PageCount
;
1176 // Last page reached
1178 if (*MdlPages
== -1)
1181 // Were there no pages at all?
1183 if (MdlPages
== (PPFN_NUMBER
)(Mdl
+ 1))
1186 // We're already done
1188 Mdl
->MdlFlags
&= ~MDL_PAGES_LOCKED
;
1193 // Otherwise, stop here
1195 LastPage
= MdlPages
;
1202 ASSERT(*MdlPages
<= MmHighestPhysicalPage
);
1203 } while (++MdlPages
< LastPage
);
1208 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
1211 // Now grab the PFN lock for the actual unlock and dereference
1213 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
1217 // Unlock and dereference
1219 MmDereferencePage(*MdlPages
);
1220 } while (++MdlPages
< LastPage
);
1225 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
1230 Mdl
->MdlFlags
&= ~MDL_PAGES_LOCKED
;
1238 MmAdvanceMdl(IN PMDL Mdl
,
1239 IN ULONG NumberOfBytes
)
1242 return STATUS_NOT_IMPLEMENTED
;
1250 MmMapLockedPagesWithReservedMapping(IN PVOID MappingAddress
,
1252 IN PMDL MemoryDescriptorList
,
1253 IN MEMORY_CACHING_TYPE CacheType
)
1264 MmUnmapReservedMapping(IN PVOID BaseAddress
,
1266 IN PMDL MemoryDescriptorList
)
1276 MmPrefetchPages(IN ULONG NumberOfLists
,
1277 IN PREAD_LIST
*ReadLists
)
1280 return STATUS_NOT_IMPLEMENTED
;
1288 MmProtectMdlSystemAddress(IN PMDL MemoryDescriptorList
,
1289 IN ULONG NewProtect
)
1292 return STATUS_NOT_IMPLEMENTED
;
1300 MmProbeAndLockProcessPages(IN OUT PMDL MemoryDescriptorList
,
1301 IN PEPROCESS Process
,
1302 IN KPROCESSOR_MODE AccessMode
,
1303 IN LOCK_OPERATION Operation
)
1314 MmProbeAndLockSelectedPages(IN OUT PMDL MemoryDescriptorList
,
1315 IN LARGE_INTEGER PageList
[],
1316 IN KPROCESSOR_MODE AccessMode
,
1317 IN LOCK_OPERATION Operation
)
1327 MmMapMemoryDumpMdl(IN PMDL Mdl
)