/*
 * PROJECT:     ReactOS Kernel
 * LICENSE:     BSD - See COPYING.ARM in the top level directory
 * FILE:        ntoskrnl/mm/ARM3/mdlsup.c
 * PURPOSE:     ARM Memory Manager Memory Descriptor List (MDL) Management
 * PROGRAMMERS: ReactOS Portable Systems Group
 */
9 /* INCLUDES *******************************************************************/
#line 15 "ARM³::MDLSUP"
16 #define MODULE_INVOLVED_IN_ARM3
17 #include "../ARM3/miarm.h"
20 BOOLEAN MmTrackLockedPages
;
22 /* PUBLIC FUNCTIONS ***********************************************************/
29 MmCreateMdl(IN PMDL Mdl
,
36 // Check if we don't have an MDL built
41 // Calculate the size we'll need and allocate the MDL
43 Size
= MmSizeOfMdl(Base
, Length
);
44 Mdl
= ExAllocatePoolWithTag(NonPagedPool
, Size
, TAG_MDL
);
45 if (!Mdl
) return NULL
;
51 MmInitializeMdl(Mdl
, Base
, Length
);
60 MmSizeOfMdl(IN PVOID Base
,
64 // Return the MDL size
67 (ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base
, Length
) * sizeof(PFN_NUMBER
));
75 MmBuildMdlForNonPagedPool(IN PMDL Mdl
)
77 PPFN_NUMBER MdlPages
, EndPage
;
78 PFN_NUMBER Pfn
, PageCount
;
85 ASSERT(Mdl
->ByteCount
!= 0);
86 ASSERT((Mdl
->MdlFlags
& (MDL_PAGES_LOCKED
|
87 MDL_MAPPED_TO_SYSTEM_VA
|
88 MDL_SOURCE_IS_NONPAGED_POOL
|
92 // We know the MDL isn't associated to a process now
97 // Get page and VA information
99 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
103 // Set the system address and now get the page count
105 Mdl
->MappedSystemVa
= (PVOID
)((ULONG_PTR
)Base
+ Mdl
->ByteOffset
);
106 PageCount
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Mdl
->MappedSystemVa
,
108 ASSERT(PageCount
!= 0);
109 EndPage
= MdlPages
+ PageCount
;
114 PointerPte
= MiAddressToPte(Base
);
120 Pfn
= PFN_FROM_PTE(PointerPte
++);
122 } while (MdlPages
< EndPage
);
125 // Set the nonpaged pool flag
127 Mdl
->MdlFlags
|= MDL_SOURCE_IS_NONPAGED_POOL
;
130 // Check if this is an I/O mapping
132 if (!MiGetPfnEntry(Pfn
)) Mdl
->MdlFlags
|= MDL_IO_SPACE
;
140 MmAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress
,
141 IN PHYSICAL_ADDRESS HighAddress
,
142 IN PHYSICAL_ADDRESS SkipBytes
,
143 IN SIZE_T TotalBytes
)
146 // Call the internal routine
148 return MiAllocatePagesForMdl(LowAddress
,
161 MmAllocatePagesForMdlEx(IN PHYSICAL_ADDRESS LowAddress
,
162 IN PHYSICAL_ADDRESS HighAddress
,
163 IN PHYSICAL_ADDRESS SkipBytes
,
164 IN SIZE_T TotalBytes
,
165 IN MEMORY_CACHING_TYPE CacheType
,
168 MI_PFN_CACHE_ATTRIBUTE CacheAttribute
;
171 // Check for invalid cache type
173 if (CacheType
> MmWriteCombined
)
176 // Normalize to default
178 CacheAttribute
= MiNotMapped
;
183 // Conver to internal caching attribute
185 CacheAttribute
= MiPlatformCacheAttributes
[FALSE
][CacheType
];
189 // Only these flags are allowed
191 if (Flags
& ~(MM_DONT_ZERO_ALLOCATION
| MM_ALLOCATE_FROM_LOCAL_NODE_ONLY
))
200 // Call the internal routine
202 return MiAllocatePagesForMdl(LowAddress
,
215 MmFreePagesFromMdl(IN PMDL Mdl
)
222 DPRINT("Freeing MDL: %p\n", Mdl
);
227 ASSERT(KeGetCurrentIrql() <= APC_LEVEL
);
228 ASSERT((Mdl
->MdlFlags
& MDL_IO_SPACE
) == 0);
229 ASSERT(((ULONG_PTR
)Mdl
->StartVa
& (PAGE_SIZE
- 1)) == 0);
232 // Get address and page information
234 Base
= (PVOID
)((ULONG_PTR
)Mdl
->StartVa
+ Mdl
->ByteOffset
);
235 NumberOfPages
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base
, Mdl
->ByteCount
);
240 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
243 // Loop all the MDL pages
245 Pages
= (PPFN_NUMBER
)(Mdl
+ 1);
249 // Reached the last page
251 if (*Pages
== -1) break;
256 ASSERT(*Pages
<= MmHighestPhysicalPage
);
259 // Get the page entry
261 Pfn1
= MiGetPfnEntry(*Pages
);
262 ASSERT(Pfn1
->u3
.ReferenceCount
== 1);
267 Pfn1
->u3
.e1
.StartOfAllocation
= 0;
268 Pfn1
->u3
.e1
.EndOfAllocation
= 0;
273 MmDereferencePage(*Pages
);
276 // Clear this page and move on
279 } while (--NumberOfPages
!= 0);
284 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
287 // Remove the pages locked flag
289 Mdl
->MdlFlags
&= ~MDL_PAGES_LOCKED
;
297 MmMapLockedPagesSpecifyCache(IN PMDL Mdl
,
298 IN KPROCESSOR_MODE AccessMode
,
299 IN MEMORY_CACHING_TYPE CacheType
,
300 IN PVOID BaseAddress
,
301 IN ULONG BugCheckOnFailure
,
302 IN MM_PAGE_PRIORITY Priority
)
305 PPFN_NUMBER MdlPages
, LastPage
;
306 PFN_NUMBER PageCount
;
308 MI_PFN_CACHE_ATTRIBUTE CacheAttribute
;
315 ASSERT(Mdl
->ByteCount
!= 0);
320 Base
= (PVOID
)((ULONG_PTR
)Mdl
->StartVa
+ Mdl
->ByteOffset
);
323 // Handle kernel case first
325 if (AccessMode
== KernelMode
)
328 // Get the list of pages and count
330 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
331 PageCount
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base
, Mdl
->ByteCount
);
332 LastPage
= MdlPages
+ PageCount
;
337 ASSERT((Mdl
->MdlFlags
& (MDL_MAPPED_TO_SYSTEM_VA
|
338 MDL_SOURCE_IS_NONPAGED_POOL
|
339 MDL_PARTIAL_HAS_BEEN_MAPPED
)) == 0);
340 ASSERT((Mdl
->MdlFlags
& (MDL_PAGES_LOCKED
| MDL_PARTIAL
)) != 0);
343 // Get the correct cache type
345 IsIoMapping
= (Mdl
->MdlFlags
& MDL_IO_SPACE
) != 0;
346 CacheAttribute
= MiPlatformCacheAttributes
[IsIoMapping
][CacheType
];
351 PointerPte
= MiReserveSystemPtes(PageCount
, SystemPteSpace
);
355 // If it can fail, return NULL
357 if (Mdl
->MdlFlags
& MDL_MAPPING_CAN_FAIL
) return NULL
;
360 // Should we bugcheck?
362 if (!BugCheckOnFailure
) return NULL
;
365 // Yes, crash the system
367 KeBugCheckEx(NO_MORE_SYSTEM_PTES
, 0, PageCount
, 0, 0);
371 // Get the mapped address
373 Base
= (PVOID
)((ULONG_PTR
)MiPteToAddress(PointerPte
) + Mdl
->ByteOffset
);
378 TempPte
= ValidKernelPte
;
379 switch (CacheAttribute
)
386 MI_PAGE_DISABLE_CACHE(&TempPte
);
387 MI_PAGE_WRITE_THROUGH(&TempPte
);
390 case MiWriteCombined
:
393 // Enable write combining
395 MI_PAGE_DISABLE_CACHE(&TempPte
);
396 MI_PAGE_WRITE_COMBINED(&TempPte
);
414 if (*MdlPages
== -1) break;
419 TempPte
.u
.Hard
.PageFrameNumber
= *MdlPages
;
420 MI_WRITE_VALID_PTE(PointerPte
++, TempPte
);
421 } while (++MdlPages
< LastPage
);
426 ASSERT((Mdl
->MdlFlags
& MDL_MAPPED_TO_SYSTEM_VA
) == 0);
427 Mdl
->MappedSystemVa
= Base
;
428 Mdl
->MdlFlags
|= MDL_MAPPED_TO_SYSTEM_VA
;
431 // Check if it was partial
433 if (Mdl
->MdlFlags
& MDL_PARTIAL
)
436 // Write the appropriate flag here too
438 Mdl
->MdlFlags
|= MDL_PARTIAL_HAS_BEEN_MAPPED
;
442 // Return the mapped address
456 MmMapLockedPages(IN PMDL Mdl
,
457 IN KPROCESSOR_MODE AccessMode
)
460 // Call the extended version
462 return MmMapLockedPagesSpecifyCache(Mdl
,
475 MmUnmapLockedPages(IN PVOID BaseAddress
,
479 PFN_NUMBER PageCount
;
480 PPFN_NUMBER MdlPages
;
486 ASSERT(Mdl
->ByteCount
!= 0);
489 // Check if this is a kernel request
491 if (BaseAddress
> MM_HIGHEST_USER_ADDRESS
)
494 // Get base and count information
496 Base
= (PVOID
)((ULONG_PTR
)Mdl
->StartVa
+ Mdl
->ByteOffset
);
497 PageCount
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base
, Mdl
->ByteCount
);
502 ASSERT((Mdl
->MdlFlags
& MDL_PARENT_MAPPED_SYSTEM_VA
) == 0);
503 ASSERT(PageCount
!= 0);
504 ASSERT(Mdl
->MdlFlags
& MDL_MAPPED_TO_SYSTEM_VA
);
509 PointerPte
= MiAddressToPte(BaseAddress
);
512 // This should be a resident system PTE
514 ASSERT(PointerPte
>= MmSystemPtesStart
[SystemPteSpace
]);
515 ASSERT(PointerPte
<= MmSystemPtesEnd
[SystemPteSpace
]);
516 ASSERT(PointerPte
->u
.Hard
.Valid
== 1);
519 // Check if the caller wants us to free advanced pages
521 if (Mdl
->MdlFlags
& MDL_FREE_EXTRA_PTES
)
524 // Get the MDL page array
526 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
527 MdlPages
+= PageCount
;
532 PageCount
+= *MdlPages
;
533 PointerPte
-= *MdlPages
;
534 ASSERT(PointerPte
>= MmSystemPtesStart
[SystemPteSpace
]);
535 ASSERT(PointerPte
<= MmSystemPtesEnd
[SystemPteSpace
]);
538 // Get the new base address
540 BaseAddress
= (PVOID
)((ULONG_PTR
)BaseAddress
-
541 ((*MdlPages
) << PAGE_SHIFT
));
547 Mdl
->MdlFlags
&= ~(MDL_MAPPED_TO_SYSTEM_VA
|
548 MDL_PARTIAL_HAS_BEEN_MAPPED
|
549 MDL_FREE_EXTRA_PTES
);
552 // Release the system PTEs
554 MiReleaseSystemPtes(PointerPte
, PageCount
, SystemPteSpace
);
567 MmProbeAndLockPages(IN PMDL Mdl
,
568 IN KPROCESSOR_MODE AccessMode
,
569 IN LOCK_OPERATION Operation
)
571 PPFN_NUMBER MdlPages
;
572 PVOID Base
, Address
, LastAddress
, StartAddress
;
573 ULONG LockPages
, TotalPages
;
574 NTSTATUS Status
= STATUS_SUCCESS
;
575 PEPROCESS CurrentProcess
;
576 PMMSUPPORT AddressSpace
;
577 NTSTATUS ProbeStatus
;
578 PMMPTE PointerPte
, LastPte
;
580 PFN_NUMBER PageFrameIndex
;
583 DPRINT("Probing MDL: %p\n", Mdl
);
588 ASSERT(Mdl
->ByteCount
!= 0);
589 ASSERT(((ULONG
)Mdl
->ByteOffset
& ~(PAGE_SIZE
- 1)) == 0);
590 ASSERT(((ULONG_PTR
)Mdl
->StartVa
& (PAGE_SIZE
- 1)) == 0);
591 ASSERT((Mdl
->MdlFlags
& (MDL_PAGES_LOCKED
|
592 MDL_MAPPED_TO_SYSTEM_VA
|
593 MDL_SOURCE_IS_NONPAGED_POOL
|
595 MDL_IO_SPACE
)) == 0);
598 // Get page and base information
600 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
601 Base
= (PVOID
)Mdl
->StartVa
;
604 // Get the addresses and how many pages we span (and need to lock)
606 Address
= (PVOID
)((ULONG_PTR
)Base
+ Mdl
->ByteOffset
);
607 LastAddress
= (PVOID
)((ULONG_PTR
)Address
+ Mdl
->ByteCount
);
608 LockPages
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Address
, Mdl
->ByteCount
);
609 ASSERT(LockPages
!= 0);
614 if (Address
<= MM_HIGHEST_USER_ADDRESS
)
619 CurrentProcess
= PsGetCurrentProcess();
626 CurrentProcess
= NULL
;
630 // Save the number of pages we'll have to lock, and the start address
632 TotalPages
= LockPages
;
633 StartAddress
= Address
;
638 ProbeStatus
= STATUS_SUCCESS
;
654 *(volatile CHAR
*)Address
;
657 // Check if this is write access (only probe for user-mode)
659 if ((Operation
!= IoReadAccess
) &&
660 (Address
<= MM_HIGHEST_USER_ADDRESS
))
663 // Probe for write too
665 ProbeForWriteChar(Address
);
671 Address
= (PVOID
)((ULONG_PTR
)Address
+ PAGE_SIZE
);
672 Address
= PAGE_ALIGN(Address
);
679 } while (Address
< LastAddress
);
682 // Reset back to the original page
684 ASSERT(LockPages
== 0);
685 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
687 _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER
)
692 ProbeStatus
= _SEH2_GetExceptionCode();
697 // So how did that go?
699 if (ProbeStatus
!= STATUS_SUCCESS
)
704 DPRINT1("MDL PROBE FAILED!\n");
706 ExRaiseStatus(ProbeStatus
);
710 // Get the PTE and PDE
712 PointerPte
= MiAddressToPte(StartAddress
);
713 PointerPde
= MiAddressToPde(StartAddress
);
718 ASSERT(MdlPages
== (PPFN_NUMBER
)(Mdl
+ 1));
721 // Check what kind of operation this is
723 if (Operation
!= IoReadAccess
)
726 // Set the write flag
728 Mdl
->MdlFlags
|= MDL_WRITE_OPERATION
;
733 // Remove the write flag
735 Mdl
->MdlFlags
&= ~(MDL_WRITE_OPERATION
);
739 // Mark the MDL as locked *now*
741 Mdl
->MdlFlags
|= MDL_PAGES_LOCKED
;
744 // Check if this came from kernel mode
746 if (Base
>= MM_HIGHEST_USER_ADDRESS
)
749 // We should not have a process
751 ASSERT(CurrentProcess
== NULL
);
755 // In kernel mode, we don't need to check for write access
757 Operation
= IoReadAccess
;
763 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
764 AddressSpace
= NULL
; // Keep compiler happy
771 ASSERT(TotalPages
!= 0);
772 ASSERT(CurrentProcess
== PsGetCurrentProcess());
775 // Track locked pages
777 InterlockedExchangeAddSizeT(&CurrentProcess
->NumberOfLockedPages
,
783 Mdl
->Process
= CurrentProcess
;
786 // Use the process lock
789 AddressSpace
= &CurrentProcess
->Vm
;
790 MmLockAddressSpace(AddressSpace
);
791 OldIrql
= DISPATCH_LEVEL
; // Keep compiler happy
797 LastPte
= MiAddressToPte((PVOID
)((ULONG_PTR
)LastAddress
- 1));
805 // Assume failure and check for non-mapped pages
808 #if (_MI_PAGING_LEVELS >= 3)
809 /* Should be checking the PPE and PXE */
812 while ((PointerPde
->u
.Hard
.Valid
== 0) ||
813 (PointerPte
->u
.Hard
.Valid
== 0))
816 // What kind of lock where we using?
823 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
828 // Release process address space lock
830 MmUnlockAddressSpace(AddressSpace
);
836 Address
= MiPteToAddress(PointerPte
);
837 Status
= MmAccessFault(FALSE
, Address
, KernelMode
, NULL
);
838 if (!NT_SUCCESS(Status
))
843 DPRINT1("Access fault failed\n");
848 // Waht lock should we use?
855 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
860 // Use the address space lock
862 MmLockAddressSpace(AddressSpace
);
867 // Check if this was a write or modify
869 if (Operation
!= IoReadAccess
)
872 // Check if the PTE is not writable
874 if (MI_IS_PAGE_WRITEABLE(PointerPte
) == FALSE
)
877 // Check if it's copy on write
879 if (MI_IS_PAGE_COPY_ON_WRITE(PointerPte
))
882 // Get the base address and allow a change for user-mode
884 Address
= MiPteToAddress(PointerPte
);
885 if (Address
<= MM_HIGHEST_USER_ADDRESS
)
888 // What kind of lock where we using?
895 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
900 // Release process address space lock
902 MmUnlockAddressSpace(AddressSpace
);
908 Status
= MmAccessFault(TRUE
, Address
, KernelMode
, NULL
);
909 if (!NT_SUCCESS(Status
))
914 DPRINT1("Access fault failed\n");
919 // Re-acquire the lock
926 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
931 // Use the address space lock
933 MmLockAddressSpace(AddressSpace
);
944 // Fail, since we won't allow this
946 Status
= STATUS_ACCESS_VIOLATION
;
947 goto CleanupWithLock
;
954 PageFrameIndex
= PFN_FROM_PTE(PointerPte
);
955 if (PageFrameIndex
<= MmHighestPhysicalPage
)
957 ASSERT((CurrentProcess
== NULL
) || (UsePfnLock
== FALSE
));
962 MmReferencePage(PageFrameIndex
);
967 // For I/O addresses, just remember this
969 Mdl
->MdlFlags
|= MDL_IO_SPACE
;
973 // Write the page and move on
975 *MdlPages
++ = PageFrameIndex
;
976 if (!((ULONG_PTR
)(++PointerPte
) & (PAGE_SIZE
- 1))) PointerPde
++;
977 } while (PointerPte
<= LastPte
);
980 // What kind of lock where we using?
987 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
992 // Release process address space lock
994 MmUnlockAddressSpace(AddressSpace
);
1000 ASSERT((Mdl
->MdlFlags
& MDL_DESCRIBES_AWE
) == 0);
1005 // This is the failure path
1007 ASSERT(!NT_SUCCESS(Status
));
1010 // What kind of lock where we using?
1017 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
1022 // Release process address space lock
1024 MmUnlockAddressSpace(AddressSpace
);
1028 // Pages must be locked so MmUnlock can work
1030 ASSERT(Mdl
->MdlFlags
& MDL_PAGES_LOCKED
);
1036 ExRaiseStatus(Status
);
1044 MmUnlockPages(IN PMDL Mdl
)
1046 PPFN_NUMBER MdlPages
, LastPage
;
1049 ULONG Flags
, PageCount
;
1051 DPRINT("Unlocking MDL: %p\n", Mdl
);
1056 ASSERT((Mdl
->MdlFlags
& MDL_PAGES_LOCKED
) != 0);
1057 ASSERT((Mdl
->MdlFlags
& MDL_SOURCE_IS_NONPAGED_POOL
) == 0);
1058 ASSERT((Mdl
->MdlFlags
& MDL_PARTIAL
) == 0);
1059 ASSERT(Mdl
->ByteCount
!= 0);
1062 // Get the process associated and capture the flags which are volatile
1064 Process
= Mdl
->Process
;
1065 Flags
= Mdl
->MdlFlags
;
1068 // Automagically undo any calls to MmGetSystemAddressForMdl's for this MDL
1070 if (Mdl
->MdlFlags
& MDL_MAPPED_TO_SYSTEM_VA
)
1073 // Unmap the pages from system space
1075 MmUnmapLockedPages(Mdl
->MappedSystemVa
, Mdl
);
1079 // Get the page count
1081 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
1082 Base
= (PVOID
)((ULONG_PTR
)Mdl
->StartVa
+ Mdl
->ByteOffset
);
1083 PageCount
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base
, Mdl
->ByteCount
);
1084 ASSERT(PageCount
!= 0);
1087 // We don't support AWE
1089 if (Flags
& MDL_DESCRIBES_AWE
) ASSERT(FALSE
);
1092 // Check if the buffer is mapped I/O space
1094 if (Flags
& MDL_IO_SPACE
)
1099 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
1104 LastPage
= MdlPages
+ PageCount
;
1108 // Last page, break out
1110 if (*MdlPages
== -1) break;
1113 // Check if this page is in the PFN database
1115 if (*MdlPages
<= MmHighestPhysicalPage
)
1118 // Unlock and dereference
1120 MmDereferencePage(*MdlPages
);
1122 } while (++MdlPages
< LastPage
);
1127 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
1130 // Check if we have a process
1135 // Handle the accounting of locked pages
1137 ASSERT(Process
->NumberOfLockedPages
> 0);
1138 InterlockedExchangeAddSizeT(&Process
->NumberOfLockedPages
,
1145 Mdl
->MdlFlags
&= ~MDL_IO_SPACE
;
1146 Mdl
->MdlFlags
&= ~MDL_PAGES_LOCKED
;
1151 // Check if we have a process
1156 // Handle the accounting of locked pages
1158 ASSERT(Process
->NumberOfLockedPages
> 0);
1159 InterlockedExchangeAddSizeT(&Process
->NumberOfLockedPages
,
1166 LastPage
= MdlPages
+ PageCount
;
1170 // Last page reached
1172 if (*MdlPages
== -1)
1175 // Were there no pages at all?
1177 if (MdlPages
== (PPFN_NUMBER
)(Mdl
+ 1))
1180 // We're already done
1182 Mdl
->MdlFlags
&= ~MDL_PAGES_LOCKED
;
1187 // Otherwise, stop here
1189 LastPage
= MdlPages
;
1196 ASSERT(*MdlPages
<= MmHighestPhysicalPage
);
1197 } while (++MdlPages
< LastPage
);
1202 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
1205 // Now grab the PFN lock for the actual unlock and dereference
1207 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
1211 // Unlock and dereference
1213 MmDereferencePage(*MdlPages
);
1214 } while (++MdlPages
< LastPage
);
1219 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
1224 Mdl
->MdlFlags
&= ~MDL_PAGES_LOCKED
;
1232 MmAdvanceMdl(IN PMDL Mdl
,
1233 IN ULONG NumberOfBytes
)
1236 return STATUS_NOT_IMPLEMENTED
;
1244 MmMapLockedPagesWithReservedMapping(IN PVOID MappingAddress
,
1246 IN PMDL MemoryDescriptorList
,
1247 IN MEMORY_CACHING_TYPE CacheType
)
1258 MmUnmapReservedMapping(IN PVOID BaseAddress
,
1260 IN PMDL MemoryDescriptorList
)
1270 MmPrefetchPages(IN ULONG NumberOfLists
,
1271 IN PREAD_LIST
*ReadLists
)
1274 return STATUS_NOT_IMPLEMENTED
;
1282 MmProtectMdlSystemAddress(IN PMDL MemoryDescriptorList
,
1283 IN ULONG NewProtect
)
1286 return STATUS_NOT_IMPLEMENTED
;
1294 MmProbeAndLockProcessPages(IN OUT PMDL MemoryDescriptorList
,
1295 IN PEPROCESS Process
,
1296 IN KPROCESSOR_MODE AccessMode
,
1297 IN LOCK_OPERATION Operation
)
1308 MmProbeAndLockSelectedPages(IN OUT PMDL MemoryDescriptorList
,
1309 IN LARGE_INTEGER PageList
[],
1310 IN KPROCESSOR_MODE AccessMode
,
1311 IN LOCK_OPERATION Operation
)
1321 MmMapMemoryDumpMdl(IN PMDL Mdl
)