/*
 * PROJECT:     ReactOS Kernel
 * LICENSE:     BSD - See COPYING.ARM in the top level directory
 * FILE:        ntoskrnl/mm/ARM3/mdlsup.c
 * PURPOSE:     ARM Memory Manager Memory Descriptor List (MDL) Management
 * PROGRAMMERS: ReactOS Portable Systems Group
 */
9 /* INCLUDES *******************************************************************/
15 #define MODULE_INVOLVED_IN_ARM3
16 #include <mm/ARM3/miarm.h>
18 /* GLOBALS ********************************************************************/
21 BOOLEAN MmTrackLockedPages
;
22 SIZE_T MmSystemLockPagesCount
;
24 ULONG MiCacheOverride
[MiNotMapped
+ 1];
26 /* INTERNAL FUNCTIONS *********************************************************/
30 MiMapLockedPagesInUserSpace(
33 _In_ MEMORY_CACHING_TYPE CacheType
,
34 _In_opt_ PVOID BaseAddress
)
37 PEPROCESS Process
= PsGetCurrentProcess();
38 PETHREAD Thread
= PsGetCurrentThread();
39 TABLE_SEARCH_RESULT Result
;
40 MI_PFN_CACHE_ATTRIBUTE CacheAttribute
;
41 MI_PFN_CACHE_ATTRIBUTE EffectiveCacheAttribute
;
46 PMMADDRESS_NODE Parent
;
55 BOOLEAN AddressSpaceLocked
= FALSE
;
59 DPRINT("MiMapLockedPagesInUserSpace(%p, %p, 0x%x, %p)\n",
60 Mdl
, StartVa
, CacheType
, BaseAddress
);
62 NumberOfPages
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(StartVa
,
63 MmGetMdlByteCount(Mdl
));
64 MdlPages
= MmGetMdlPfnArray(Mdl
);
66 ASSERT(CacheType
<= MmWriteCombined
);
68 IsIoMapping
= (Mdl
->MdlFlags
& MDL_IO_SPACE
) != 0;
69 CacheAttribute
= MiPlatformCacheAttributes
[IsIoMapping
][CacheType
];
71 /* Large pages are always cached, make sure we're not asking for those */
72 if (CacheAttribute
!= MiCached
)
74 DPRINT1("FIXME: Need to check for large pages\n");
77 /* Allocate a VAD for our mapped region */
78 Vad
= ExAllocatePoolWithTag(NonPagedPool
, sizeof(MMVAD_LONG
), 'ldaV');
81 Status
= STATUS_INSUFFICIENT_RESOURCES
;
85 /* Initialize PhysicalMemory VAD */
86 RtlZeroMemory(Vad
, sizeof(*Vad
));
87 Vad
->u2
.VadFlags2
.LongVad
= 1;
88 Vad
->u
.VadFlags
.VadType
= VadDevicePhysicalMemory
;
89 Vad
->u
.VadFlags
.Protection
= MM_READWRITE
;
90 Vad
->u
.VadFlags
.PrivateMemory
= 1;
92 /* Did the caller specify an address? */
93 if (BaseAddress
== NULL
)
95 /* We get to pick the address */
96 MmLockAddressSpace(&Process
->Vm
);
97 AddressSpaceLocked
= TRUE
;
98 if (Process
->VmDeleted
)
100 Status
= STATUS_PROCESS_IS_TERMINATING
;
104 Result
= MiFindEmptyAddressRangeInTree(NumberOfPages
<< PAGE_SHIFT
,
105 MM_VIRTMEM_GRANULARITY
,
109 if (Result
== TableFoundNode
)
111 Status
= STATUS_NO_MEMORY
;
114 EndingVa
= StartingVa
+ NumberOfPages
* PAGE_SIZE
- 1;
115 BaseAddress
= (PVOID
)StartingVa
;
119 /* Caller specified a base address */
120 StartingVa
= (ULONG_PTR
)BaseAddress
;
121 EndingVa
= StartingVa
+ NumberOfPages
* PAGE_SIZE
- 1;
123 /* Make sure it's valid */
124 if (BYTE_OFFSET(StartingVa
) != 0 ||
125 EndingVa
<= StartingVa
||
126 EndingVa
> (ULONG_PTR
)MM_HIGHEST_VAD_ADDRESS
)
128 Status
= STATUS_INVALID_ADDRESS
;
132 MmLockAddressSpace(&Process
->Vm
);
133 AddressSpaceLocked
= TRUE
;
134 if (Process
->VmDeleted
)
136 Status
= STATUS_PROCESS_IS_TERMINATING
;
140 /* Check if it's already in use */
141 Result
= MiCheckForConflictingNode(StartingVa
>> PAGE_SHIFT
,
142 EndingVa
>> PAGE_SHIFT
,
145 if (Result
== TableFoundNode
)
147 Status
= STATUS_CONFLICTING_ADDRESSES
;
152 Vad
->StartingVpn
= StartingVa
>> PAGE_SHIFT
;
153 Vad
->EndingVpn
= EndingVa
>> PAGE_SHIFT
;
155 MiLockProcessWorkingSetUnsafe(Process
, Thread
);
157 ASSERT(Vad
->EndingVpn
>= Vad
->StartingVpn
);
159 MiInsertVad((PMMVAD
)Vad
, &Process
->VadRoot
);
161 /* Check if this is uncached */
162 if (CacheAttribute
!= MiCached
)
164 /* Flush all caches */
165 KeFlushEntireTb(TRUE
, TRUE
);
166 KeInvalidateAllCaches();
169 PointerPte
= MiAddressToPte(BaseAddress
);
170 while (NumberOfPages
!= 0 &&
171 *MdlPages
!= LIST_HEAD
)
173 PointerPde
= MiPteToPde(PointerPte
);
174 MiMakePdeExistAndMakeValid(PointerPde
, Process
, MM_NOIRQL
);
175 ASSERT(PointerPte
->u
.Hard
.Valid
== 0);
177 /* Add a PDE reference for each page */
178 MiIncrementPageTableReferences(BaseAddress
);
180 /* Set up our basic user PTE */
181 MI_MAKE_HARDWARE_PTE_USER(&TempPte
,
186 EffectiveCacheAttribute
= CacheAttribute
;
188 /* We need to respect the PFN's caching information in some cases */
189 Pfn2
= MiGetPfnEntry(*MdlPages
);
192 ASSERT(Pfn2
->u3
.e2
.ReferenceCount
!= 0);
194 switch (Pfn2
->u3
.e1
.CacheAttribute
)
197 if (CacheAttribute
!= MiNonCached
)
199 MiCacheOverride
[1]++;
200 EffectiveCacheAttribute
= MiNonCached
;
205 if (CacheAttribute
!= MiCached
)
207 MiCacheOverride
[0]++;
208 EffectiveCacheAttribute
= MiCached
;
212 case MiWriteCombined
:
213 if (CacheAttribute
!= MiWriteCombined
)
215 MiCacheOverride
[2]++;
216 EffectiveCacheAttribute
= MiWriteCombined
;
221 /* We don't support AWE magic (MiNotMapped) */
222 DPRINT1("FIXME: MiNotMapped is not supported\n");
228 /* Configure caching */
229 switch (EffectiveCacheAttribute
)
232 MI_PAGE_DISABLE_CACHE(&TempPte
);
233 MI_PAGE_WRITE_THROUGH(&TempPte
);
237 case MiWriteCombined
:
238 MI_PAGE_DISABLE_CACHE(&TempPte
);
239 MI_PAGE_WRITE_COMBINED(&TempPte
);
246 /* Make the page valid */
247 MI_WRITE_VALID_PTE(PointerPte
, TempPte
);
249 /* Acquire a share count */
250 Pfn1
= MI_PFN_ELEMENT(PointerPde
->u
.Hard
.PageFrameNumber
);
251 OldIrql
= MiAcquirePfnLock();
252 Pfn1
->u2
.ShareCount
++;
253 MiReleasePfnLock(OldIrql
);
259 BaseAddress
= (PVOID
)((ULONG_PTR
)BaseAddress
+ PAGE_SIZE
);
262 MiUnlockProcessWorkingSetUnsafe(Process
, Thread
);
263 ASSERT(AddressSpaceLocked
);
264 MmUnlockAddressSpace(&Process
->Vm
);
266 ASSERT(StartingVa
!= 0);
267 return (PVOID
)((ULONG_PTR
)StartingVa
+ MmGetMdlByteOffset(Mdl
));
270 if (AddressSpaceLocked
)
272 MmUnlockAddressSpace(&Process
->Vm
);
276 ExFreePoolWithTag(Vad
, 'ldaV');
278 ExRaiseStatus(Status
);
284 MiUnmapLockedPagesInUserSpace(
285 _In_ PVOID BaseAddress
,
288 PEPROCESS Process
= PsGetCurrentProcess();
289 PETHREAD Thread
= PsGetCurrentThread();
295 PPFN_NUMBER MdlPages
;
296 PFN_NUMBER PageTablePage
;
298 DPRINT("MiUnmapLockedPagesInUserSpace(%p, %p)\n", BaseAddress
, Mdl
);
300 NumberOfPages
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(MmGetMdlVirtualAddress(Mdl
),
301 MmGetMdlByteCount(Mdl
));
302 ASSERT(NumberOfPages
!= 0);
303 MdlPages
= MmGetMdlPfnArray(Mdl
);
306 MmLockAddressSpace(&Process
->Vm
);
307 Vad
= MiLocateAddress(BaseAddress
);
309 Vad
->u
.VadFlags
.VadType
!= VadDevicePhysicalMemory
)
311 DPRINT1("MiUnmapLockedPagesInUserSpace invalid for %p\n", BaseAddress
);
312 MmUnlockAddressSpace(&Process
->Vm
);
316 MiLockProcessWorkingSetUnsafe(Process
, Thread
);
318 /* Remove it from the process VAD tree */
319 ASSERT(Process
->VadRoot
.NumberGenericTableElements
>= 1);
320 MiRemoveNode((PMMADDRESS_NODE
)Vad
, &Process
->VadRoot
);
322 /* MiRemoveNode should have removed us if we were the hint */
323 ASSERT(Process
->VadRoot
.NodeHint
!= Vad
);
325 PointerPte
= MiAddressToPte(BaseAddress
);
326 OldIrql
= MiAcquirePfnLock();
327 while (NumberOfPages
!= 0 &&
328 *MdlPages
!= LIST_HEAD
)
330 ASSERT(MiAddressToPte(PointerPte
)->u
.Hard
.Valid
== 1);
331 ASSERT(PointerPte
->u
.Hard
.Valid
== 1);
333 /* Dereference the page */
334 MiDecrementPageTableReferences(BaseAddress
);
337 MI_ERASE_PTE(PointerPte
);
339 /* We invalidated this PTE, so dereference the PDE */
340 PointerPde
= MiAddressToPde(BaseAddress
);
341 PageTablePage
= PointerPde
->u
.Hard
.PageFrameNumber
;
342 MiDecrementShareCount(MiGetPfnEntry(PageTablePage
), PageTablePage
);
347 BaseAddress
= (PVOID
)((ULONG_PTR
)BaseAddress
+ PAGE_SIZE
);
350 /* Moving to a new PDE? */
351 if (PointerPde
!= MiAddressToPde(BaseAddress
))
353 /* See if we should delete it */
355 PointerPde
= MiPteToPde(PointerPte
- 1);
356 ASSERT(PointerPde
->u
.Hard
.Valid
== 1);
357 if (MiQueryPageTableReferences(BaseAddress
) == 0)
359 ASSERT(PointerPde
->u
.Long
!= 0);
360 MiDeletePte(PointerPde
,
361 MiPteToAddress(PointerPde
),
369 MiReleasePfnLock(OldIrql
);
370 MiUnlockProcessWorkingSetUnsafe(Process
, Thread
);
371 MmUnlockAddressSpace(&Process
->Vm
);
372 ExFreePoolWithTag(Vad
, 'ldaV');
375 /* PUBLIC FUNCTIONS ***********************************************************/
382 MmCreateMdl(IN PMDL Mdl
,
389 // Check if we don't have an MDL built
394 // Calculate the size we'll need and allocate the MDL
396 Size
= MmSizeOfMdl(Base
, Length
);
397 Mdl
= ExAllocatePoolWithTag(NonPagedPool
, Size
, TAG_MDL
);
398 if (!Mdl
) return NULL
;
404 MmInitializeMdl(Mdl
, Base
, Length
);
413 MmSizeOfMdl(IN PVOID Base
,
417 // Return the MDL size
420 (ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base
, Length
) * sizeof(PFN_NUMBER
));
428 MmBuildMdlForNonPagedPool(IN PMDL Mdl
)
430 PPFN_NUMBER MdlPages
, EndPage
;
431 PFN_NUMBER Pfn
, PageCount
;
438 ASSERT(Mdl
->ByteCount
!= 0);
439 ASSERT((Mdl
->MdlFlags
& (MDL_PAGES_LOCKED
|
440 MDL_MAPPED_TO_SYSTEM_VA
|
441 MDL_SOURCE_IS_NONPAGED_POOL
|
445 // We know the MDL isn't associated to a process now
450 // Get page and VA information
452 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
456 // Set the system address and now get the page count
458 Mdl
->MappedSystemVa
= (PVOID
)((ULONG_PTR
)Base
+ Mdl
->ByteOffset
);
459 PageCount
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Mdl
->MappedSystemVa
,
461 ASSERT(PageCount
!= 0);
462 EndPage
= MdlPages
+ PageCount
;
467 PointerPte
= MiAddressToPte(Base
);
473 Pfn
= PFN_FROM_PTE(PointerPte
++);
475 } while (MdlPages
< EndPage
);
478 // Set the nonpaged pool flag
480 Mdl
->MdlFlags
|= MDL_SOURCE_IS_NONPAGED_POOL
;
483 // Check if this is an I/O mapping
485 if (!MiGetPfnEntry(Pfn
)) Mdl
->MdlFlags
|= MDL_IO_SPACE
;
493 MmAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress
,
494 IN PHYSICAL_ADDRESS HighAddress
,
495 IN PHYSICAL_ADDRESS SkipBytes
,
496 IN SIZE_T TotalBytes
)
499 // Call the internal routine
501 return MiAllocatePagesForMdl(LowAddress
,
514 MmAllocatePagesForMdlEx(IN PHYSICAL_ADDRESS LowAddress
,
515 IN PHYSICAL_ADDRESS HighAddress
,
516 IN PHYSICAL_ADDRESS SkipBytes
,
517 IN SIZE_T TotalBytes
,
518 IN MEMORY_CACHING_TYPE CacheType
,
521 MI_PFN_CACHE_ATTRIBUTE CacheAttribute
;
524 // Check for invalid cache type
526 if (CacheType
> MmWriteCombined
)
529 // Normalize to default
531 CacheAttribute
= MiNotMapped
;
536 // Conver to internal caching attribute
538 CacheAttribute
= MiPlatformCacheAttributes
[FALSE
][CacheType
];
542 // Only these flags are allowed
544 if (Flags
& ~(MM_DONT_ZERO_ALLOCATION
| MM_ALLOCATE_FROM_LOCAL_NODE_ONLY
))
553 // Call the internal routine
555 return MiAllocatePagesForMdl(LowAddress
,
568 MmFreePagesFromMdl(IN PMDL Mdl
)
575 DPRINT("Freeing MDL: %p\n", Mdl
);
580 ASSERT(KeGetCurrentIrql() <= APC_LEVEL
);
581 ASSERT((Mdl
->MdlFlags
& MDL_IO_SPACE
) == 0);
582 ASSERT(((ULONG_PTR
)Mdl
->StartVa
& (PAGE_SIZE
- 1)) == 0);
585 // Get address and page information
587 Base
= (PVOID
)((ULONG_PTR
)Mdl
->StartVa
+ Mdl
->ByteOffset
);
588 NumberOfPages
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base
, Mdl
->ByteCount
);
593 OldIrql
= MiAcquirePfnLock();
596 // Loop all the MDL pages
598 Pages
= (PPFN_NUMBER
)(Mdl
+ 1);
602 // Reached the last page
604 if (*Pages
== LIST_HEAD
) break;
607 // Get the page entry
609 Pfn1
= MiGetPfnEntry(*Pages
);
611 ASSERT(Pfn1
->u2
.ShareCount
== 1);
612 ASSERT(MI_IS_PFN_DELETED(Pfn1
) == TRUE
);
613 if (Pfn1
->u4
.PteFrame
!= 0x1FFEDCB)
615 /* Corrupted PFN entry or invalid free */
616 KeBugCheckEx(MEMORY_MANAGEMENT
, 0x1236, (ULONG_PTR
)Mdl
, (ULONG_PTR
)Pages
, *Pages
);
622 Pfn1
->u3
.e1
.StartOfAllocation
= 0;
623 Pfn1
->u3
.e1
.EndOfAllocation
= 0;
624 Pfn1
->u3
.e1
.PageLocation
= StandbyPageList
;
625 Pfn1
->u2
.ShareCount
= 0;
630 ASSERT(Pfn1
->u3
.e2
.ReferenceCount
!= 0);
631 if (Pfn1
->u3
.e2
.ReferenceCount
!= 1)
633 /* Just take off one reference */
634 InterlockedDecrement16((PSHORT
)&Pfn1
->u3
.e2
.ReferenceCount
);
638 /* We'll be nuking the whole page */
639 MiDecrementReferenceCount(Pfn1
, *Pages
);
643 // Clear this page and move on
645 *Pages
++ = LIST_HEAD
;
646 } while (--NumberOfPages
!= 0);
651 MiReleasePfnLock(OldIrql
);
654 // Remove the pages locked flag
656 Mdl
->MdlFlags
&= ~MDL_PAGES_LOCKED
;
664 MmMapLockedPagesSpecifyCache(IN PMDL Mdl
,
665 IN KPROCESSOR_MODE AccessMode
,
666 IN MEMORY_CACHING_TYPE CacheType
,
667 IN PVOID BaseAddress
,
668 IN ULONG BugCheckOnFailure
,
669 IN MM_PAGE_PRIORITY Priority
)
672 PPFN_NUMBER MdlPages
, LastPage
;
675 MI_PFN_CACHE_ATTRIBUTE CacheAttribute
;
682 ASSERT(Mdl
->ByteCount
!= 0);
687 Base
= (PVOID
)((ULONG_PTR
)Mdl
->StartVa
+ Mdl
->ByteOffset
);
690 // Handle kernel case first
692 if (AccessMode
== KernelMode
)
695 // Get the list of pages and count
697 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
698 PageCount
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base
, Mdl
->ByteCount
);
699 LastPage
= MdlPages
+ PageCount
;
704 ASSERT((Mdl
->MdlFlags
& (MDL_MAPPED_TO_SYSTEM_VA
|
705 MDL_SOURCE_IS_NONPAGED_POOL
|
706 MDL_PARTIAL_HAS_BEEN_MAPPED
)) == 0);
707 ASSERT((Mdl
->MdlFlags
& (MDL_PAGES_LOCKED
| MDL_PARTIAL
)) != 0);
710 // Get the correct cache type
712 IsIoMapping
= (Mdl
->MdlFlags
& MDL_IO_SPACE
) != 0;
713 CacheAttribute
= MiPlatformCacheAttributes
[IsIoMapping
][CacheType
];
718 PointerPte
= MiReserveSystemPtes(PageCount
, SystemPteSpace
);
722 // If it can fail, return NULL
724 if (Mdl
->MdlFlags
& MDL_MAPPING_CAN_FAIL
) return NULL
;
727 // Should we bugcheck?
729 if (!BugCheckOnFailure
) return NULL
;
732 // Yes, crash the system
734 KeBugCheckEx(NO_MORE_SYSTEM_PTES
, 0, PageCount
, 0, 0);
738 // Get the mapped address
740 Base
= (PVOID
)((ULONG_PTR
)MiPteToAddress(PointerPte
) + Mdl
->ByteOffset
);
745 TempPte
= ValidKernelPte
;
746 switch (CacheAttribute
)
753 MI_PAGE_DISABLE_CACHE(&TempPte
);
754 MI_PAGE_WRITE_THROUGH(&TempPte
);
757 case MiWriteCombined
:
760 // Enable write combining
762 MI_PAGE_DISABLE_CACHE(&TempPte
);
763 MI_PAGE_WRITE_COMBINED(&TempPte
);
781 if (*MdlPages
== LIST_HEAD
) break;
786 TempPte
.u
.Hard
.PageFrameNumber
= *MdlPages
;
787 MI_WRITE_VALID_PTE(PointerPte
++, TempPte
);
788 } while (++MdlPages
< LastPage
);
793 ASSERT((Mdl
->MdlFlags
& MDL_MAPPED_TO_SYSTEM_VA
) == 0);
794 Mdl
->MappedSystemVa
= Base
;
795 Mdl
->MdlFlags
|= MDL_MAPPED_TO_SYSTEM_VA
;
798 // Check if it was partial
800 if (Mdl
->MdlFlags
& MDL_PARTIAL
)
803 // Write the appropriate flag here too
805 Mdl
->MdlFlags
|= MDL_PARTIAL_HAS_BEEN_MAPPED
;
809 // Return the mapped address
814 return MiMapLockedPagesInUserSpace(Mdl
, Base
, CacheType
, BaseAddress
);
822 MmMapLockedPages(IN PMDL Mdl
,
823 IN KPROCESSOR_MODE AccessMode
)
826 // Call the extended version
828 return MmMapLockedPagesSpecifyCache(Mdl
,
841 MmUnmapLockedPages(IN PVOID BaseAddress
,
845 PFN_COUNT PageCount
, ExtraPageCount
;
846 PPFN_NUMBER MdlPages
;
852 ASSERT(Mdl
->ByteCount
!= 0);
855 // Check if this is a kernel request
857 if (BaseAddress
> MM_HIGHEST_USER_ADDRESS
)
860 // Get base and count information
862 Base
= (PVOID
)((ULONG_PTR
)Mdl
->StartVa
+ Mdl
->ByteOffset
);
863 PageCount
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base
, Mdl
->ByteCount
);
868 ASSERT((Mdl
->MdlFlags
& MDL_PARENT_MAPPED_SYSTEM_VA
) == 0);
869 ASSERT(PageCount
!= 0);
870 ASSERT(Mdl
->MdlFlags
& MDL_MAPPED_TO_SYSTEM_VA
);
875 PointerPte
= MiAddressToPte(BaseAddress
);
878 // This should be a resident system PTE
880 ASSERT(PointerPte
>= MmSystemPtesStart
[SystemPteSpace
]);
881 ASSERT(PointerPte
<= MmSystemPtesEnd
[SystemPteSpace
]);
882 ASSERT(PointerPte
->u
.Hard
.Valid
== 1);
885 // Check if the caller wants us to free advanced pages
887 if (Mdl
->MdlFlags
& MDL_FREE_EXTRA_PTES
)
890 // Get the MDL page array
892 MdlPages
= MmGetMdlPfnArray(Mdl
);
894 /* Number of extra pages stored after the PFN array */
895 ExtraPageCount
= (PFN_COUNT
)*(MdlPages
+ PageCount
);
900 PageCount
+= ExtraPageCount
;
901 PointerPte
-= ExtraPageCount
;
902 ASSERT(PointerPte
>= MmSystemPtesStart
[SystemPteSpace
]);
903 ASSERT(PointerPte
<= MmSystemPtesEnd
[SystemPteSpace
]);
906 // Get the new base address
908 BaseAddress
= (PVOID
)((ULONG_PTR
)BaseAddress
-
909 (ExtraPageCount
<< PAGE_SHIFT
));
915 Mdl
->MdlFlags
&= ~(MDL_MAPPED_TO_SYSTEM_VA
|
916 MDL_PARTIAL_HAS_BEEN_MAPPED
|
917 MDL_FREE_EXTRA_PTES
);
920 // Release the system PTEs
922 MiReleaseSystemPtes(PointerPte
, PageCount
, SystemPteSpace
);
926 MiUnmapLockedPagesInUserSpace(BaseAddress
, Mdl
);
935 MmProbeAndLockPages(IN PMDL Mdl
,
936 IN KPROCESSOR_MODE AccessMode
,
937 IN LOCK_OPERATION Operation
)
939 PPFN_NUMBER MdlPages
;
940 PVOID Base
, Address
, LastAddress
, StartAddress
;
941 ULONG LockPages
, TotalPages
;
942 NTSTATUS Status
= STATUS_SUCCESS
;
943 PEPROCESS CurrentProcess
;
944 NTSTATUS ProbeStatus
;
945 PMMPTE PointerPte
, LastPte
;
947 #if (_MI_PAGING_LEVELS >= 3)
950 #if (_MI_PAGING_LEVELS == 4)
953 PFN_NUMBER PageFrameIndex
;
957 DPRINT("Probing MDL: %p\n", Mdl
);
962 ASSERT(Mdl
->ByteCount
!= 0);
963 ASSERT(((ULONG
)Mdl
->ByteOffset
& ~(PAGE_SIZE
- 1)) == 0);
964 ASSERT(((ULONG_PTR
)Mdl
->StartVa
& (PAGE_SIZE
- 1)) == 0);
965 ASSERT((Mdl
->MdlFlags
& (MDL_PAGES_LOCKED
|
966 MDL_MAPPED_TO_SYSTEM_VA
|
967 MDL_SOURCE_IS_NONPAGED_POOL
|
969 MDL_IO_SPACE
)) == 0);
972 // Get page and base information
974 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
978 // Get the addresses and how many pages we span (and need to lock)
980 Address
= (PVOID
)((ULONG_PTR
)Base
+ Mdl
->ByteOffset
);
981 LastAddress
= (PVOID
)((ULONG_PTR
)Address
+ Mdl
->ByteCount
);
982 LockPages
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Address
, Mdl
->ByteCount
);
983 ASSERT(LockPages
!= 0);
985 /* Block invalid access */
986 if ((AccessMode
!= KernelMode
) &&
987 ((LastAddress
> (PVOID
)MM_USER_PROBE_ADDRESS
) || (Address
>= LastAddress
)))
989 /* Caller should be in SEH, raise the error */
990 *MdlPages
= LIST_HEAD
;
991 ExRaiseStatus(STATUS_ACCESS_VIOLATION
);
997 if (Address
<= MM_HIGHEST_USER_ADDRESS
)
1002 CurrentProcess
= PsGetCurrentProcess();
1009 CurrentProcess
= NULL
;
1013 // Save the number of pages we'll have to lock, and the start address
1015 TotalPages
= LockPages
;
1016 StartAddress
= Address
;
1018 /* Large pages not supported */
1019 ASSERT(!MI_IS_PHYSICAL_ADDRESS(Address
));
1024 ProbeStatus
= STATUS_SUCCESS
;
1035 *MdlPages
= LIST_HEAD
;
1040 *(volatile CHAR
*)Address
;
1043 // Check if this is write access (only probe for user-mode)
1045 if ((Operation
!= IoReadAccess
) &&
1046 (Address
<= MM_HIGHEST_USER_ADDRESS
))
1049 // Probe for write too
1051 ProbeForWriteChar(Address
);
1057 Address
= PAGE_ALIGN((ULONG_PTR
)Address
+ PAGE_SIZE
);
1064 } while (Address
< LastAddress
);
1067 // Reset back to the original page
1069 ASSERT(LockPages
== 0);
1070 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
1072 _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER
)
1077 ProbeStatus
= _SEH2_GetExceptionCode();
1082 // So how did that go?
1084 if (ProbeStatus
!= STATUS_SUCCESS
)
1089 DPRINT1("MDL PROBE FAILED!\n");
1090 Mdl
->Process
= NULL
;
1091 ExRaiseStatus(ProbeStatus
);
1095 // Get the PTE and PDE
1097 PointerPte
= MiAddressToPte(StartAddress
);
1098 PointerPde
= MiAddressToPde(StartAddress
);
1099 #if (_MI_PAGING_LEVELS >= 3)
1100 PointerPpe
= MiAddressToPpe(StartAddress
);
1102 #if (_MI_PAGING_LEVELS == 4)
1103 PointerPxe
= MiAddressToPxe(StartAddress
);
1109 ASSERT(MdlPages
== (PPFN_NUMBER
)(Mdl
+ 1));
1112 // Check what kind of operation this is
1114 if (Operation
!= IoReadAccess
)
1117 // Set the write flag
1119 Mdl
->MdlFlags
|= MDL_WRITE_OPERATION
;
1124 // Remove the write flag
1126 Mdl
->MdlFlags
&= ~(MDL_WRITE_OPERATION
);
1130 // Mark the MDL as locked *now*
1132 Mdl
->MdlFlags
|= MDL_PAGES_LOCKED
;
1135 // Check if this came from kernel mode
1137 if (Base
> MM_HIGHEST_USER_ADDRESS
)
1140 // We should not have a process
1142 ASSERT(CurrentProcess
== NULL
);
1143 Mdl
->Process
= NULL
;
1146 // In kernel mode, we don't need to check for write access
1148 Operation
= IoReadAccess
;
1154 OldIrql
= MiAcquirePfnLock();
1161 ASSERT(TotalPages
!= 0);
1162 ASSERT(CurrentProcess
== PsGetCurrentProcess());
1165 // Track locked pages
1167 InterlockedExchangeAddSizeT(&CurrentProcess
->NumberOfLockedPages
,
1173 Mdl
->Process
= CurrentProcess
;
1175 /* Lock the process working set */
1176 MiLockProcessWorkingSet(CurrentProcess
, PsGetCurrentThread());
1178 OldIrql
= MM_NOIRQL
;
1184 LastPte
= MiAddressToPte((PVOID
)((ULONG_PTR
)LastAddress
- 1));
1192 // Assume failure and check for non-mapped pages
1194 *MdlPages
= LIST_HEAD
;
1196 #if (_MI_PAGING_LEVELS == 4)
1197 (PointerPxe
->u
.Hard
.Valid
== 0) ||
1199 #if (_MI_PAGING_LEVELS >= 3)
1200 (PointerPpe
->u
.Hard
.Valid
== 0) ||
1202 (PointerPde
->u
.Hard
.Valid
== 0) ||
1203 (PointerPte
->u
.Hard
.Valid
== 0))
1206 // What kind of lock were we using?
1213 MiReleasePfnLock(OldIrql
);
1217 /* Release process working set */
1218 MiUnlockProcessWorkingSet(CurrentProcess
, PsGetCurrentThread());
1224 Address
= MiPteToAddress(PointerPte
);
1226 //HACK: Pass a placeholder TrapInformation so the fault handler knows we're unlocked
1227 Status
= MmAccessFault(FALSE
, Address
, KernelMode
, (PVOID
)(ULONG_PTR
)0xBADBADA3BADBADA3ULL
);
1228 if (!NT_SUCCESS(Status
))
1233 DPRINT1("Access fault failed\n");
1238 // What lock should we use?
1243 // Grab the PFN lock
1245 OldIrql
= MiAcquirePfnLock();
1249 /* Lock the process working set */
1250 MiLockProcessWorkingSet(CurrentProcess
, PsGetCurrentThread());
1255 // Check if this was a write or modify
1257 if (Operation
!= IoReadAccess
)
1260 // Check if the PTE is not writable
1262 if (MI_IS_PAGE_WRITEABLE(PointerPte
) == FALSE
)
1265 // Check if it's copy on write
1267 if (MI_IS_PAGE_COPY_ON_WRITE(PointerPte
))
1270 // Get the base address and allow a change for user-mode
1272 Address
= MiPteToAddress(PointerPte
);
1273 if (Address
<= MM_HIGHEST_USER_ADDRESS
)
1276 // What kind of lock were we using?
1283 MiReleasePfnLock(OldIrql
);
1287 /* Release process working set */
1288 MiUnlockProcessWorkingSet(CurrentProcess
, PsGetCurrentThread());
1295 //HACK: Pass a placeholder TrapInformation so the fault handler knows we're unlocked
1296 Status
= MmAccessFault(TRUE
, Address
, KernelMode
, (PVOID
)(ULONG_PTR
)0xBADBADA3BADBADA3ULL
);
1297 if (!NT_SUCCESS(Status
))
1302 DPRINT1("Access fault failed\n");
1307 // Re-acquire the lock
1312 // Grab the PFN lock
1314 OldIrql
= MiAcquirePfnLock();
1318 /* Lock the process working set */
1319 MiLockProcessWorkingSet(CurrentProcess
, PsGetCurrentThread());
1330 // Fail, since we won't allow this
1332 Status
= STATUS_ACCESS_VIOLATION
;
1333 goto CleanupWithLock
;
1340 PageFrameIndex
= PFN_FROM_PTE(PointerPte
);
1341 Pfn1
= MiGetPfnEntry(PageFrameIndex
);
1344 /* Either this is for kernel-mode, or the working set is held */
1345 ASSERT((CurrentProcess
== NULL
) || (UsePfnLock
== FALSE
));
1347 /* No Physical VADs supported yet */
1348 if (CurrentProcess
) ASSERT(CurrentProcess
->PhysicalVadRoot
== NULL
);
1350 /* This address should already exist and be fully valid */
1351 MiReferenceProbedPageAndBumpLockCount(Pfn1
);
1356 // For I/O addresses, just remember this
1358 Mdl
->MdlFlags
|= MDL_IO_SPACE
;
1362 // Write the page and move on
1364 *MdlPages
++ = PageFrameIndex
;
1367 /* Check if we're on a PDE boundary */
1368 if (MiIsPteOnPdeBoundary(PointerPte
)) PointerPde
++;
1369 #if (_MI_PAGING_LEVELS >= 3)
1370 if (MiIsPteOnPpeBoundary(PointerPte
)) PointerPpe
++;
1372 #if (_MI_PAGING_LEVELS == 4)
1373 if (MiIsPteOnPxeBoundary(PointerPte
)) PointerPxe
++;
1376 } while (PointerPte
<= LastPte
);
1379 // What kind of lock were we using?
1386 MiReleasePfnLock(OldIrql
);
1390 /* Release process working set */
1391 MiUnlockProcessWorkingSet(CurrentProcess
, PsGetCurrentThread());
1397 ASSERT((Mdl
->MdlFlags
& MDL_DESCRIBES_AWE
) == 0);
1402 // This is the failure path
1404 ASSERT(!NT_SUCCESS(Status
));
1407 // What kind of lock were we using?
1414 MiReleasePfnLock(OldIrql
);
1418 /* Release process working set */
1419 MiUnlockProcessWorkingSet(CurrentProcess
, PsGetCurrentThread());
1423 // Pages must be locked so MmUnlock can work
1425 ASSERT(Mdl
->MdlFlags
& MDL_PAGES_LOCKED
);
1431 ExRaiseStatus(Status
);
1439 MmUnlockPages(IN PMDL Mdl
)
1441 PPFN_NUMBER MdlPages
, LastPage
;
1444 ULONG Flags
, PageCount
;
1447 DPRINT("Unlocking MDL: %p\n", Mdl
);
1452 ASSERT((Mdl
->MdlFlags
& MDL_PAGES_LOCKED
) != 0);
1453 ASSERT((Mdl
->MdlFlags
& MDL_SOURCE_IS_NONPAGED_POOL
) == 0);
1454 ASSERT((Mdl
->MdlFlags
& MDL_PARTIAL
) == 0);
1455 ASSERT(Mdl
->ByteCount
!= 0);
1458 // Get the process associated and capture the flags which are volatile
1460 Process
= Mdl
->Process
;
1461 Flags
= Mdl
->MdlFlags
;
1464 // Automagically undo any calls to MmGetSystemAddressForMdl's for this MDL
1466 if (Mdl
->MdlFlags
& MDL_MAPPED_TO_SYSTEM_VA
)
1469 // Unmap the pages from system space
1471 MmUnmapLockedPages(Mdl
->MappedSystemVa
, Mdl
);
1475 // Get the page count
1477 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
1478 Base
= (PVOID
)((ULONG_PTR
)Mdl
->StartVa
+ Mdl
->ByteOffset
);
1479 PageCount
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base
, Mdl
->ByteCount
);
1480 ASSERT(PageCount
!= 0);
1483 // We don't support AWE
1485 if (Flags
& MDL_DESCRIBES_AWE
) ASSERT(FALSE
);
1488 // Check if the buffer is mapped I/O space
1490 if (Flags
& MDL_IO_SPACE
)
1495 OldIrql
= MiAcquirePfnLock();
1500 LastPage
= MdlPages
+ PageCount
;
1504 // Last page, break out
1506 if (*MdlPages
== LIST_HEAD
) break;
1509 // Check if this page is in the PFN database
1511 Pfn1
= MiGetPfnEntry(*MdlPages
);
1512 if (Pfn1
) MiDereferencePfnAndDropLockCount(Pfn1
);
1513 } while (++MdlPages
< LastPage
);
1518 MiReleasePfnLock(OldIrql
);
1521 // Check if we have a process
1526 // Handle the accounting of locked pages
1528 ASSERT(Process
->NumberOfLockedPages
> 0);
1529 InterlockedExchangeAddSizeT(&Process
->NumberOfLockedPages
,
1530 -(LONG_PTR
)PageCount
);
1536 Mdl
->MdlFlags
&= ~MDL_IO_SPACE
;
1537 Mdl
->MdlFlags
&= ~MDL_PAGES_LOCKED
;
1542 // Check if we have a process
1547 // Handle the accounting of locked pages
1549 ASSERT(Process
->NumberOfLockedPages
> 0);
1550 InterlockedExchangeAddSizeT(&Process
->NumberOfLockedPages
,
1551 -(LONG_PTR
)PageCount
);
1557 LastPage
= MdlPages
+ PageCount
;
1561 // Last page reached
1563 if (*MdlPages
== LIST_HEAD
)
1566 // Were there no pages at all?
1568 if (MdlPages
== (PPFN_NUMBER
)(Mdl
+ 1))
1571 // We're already done
1573 Mdl
->MdlFlags
&= ~MDL_PAGES_LOCKED
;
1578 // Otherwise, stop here
1580 LastPage
= MdlPages
;
1584 /* Save the PFN entry instead for the secondary loop */
1585 *MdlPages
= (PFN_NUMBER
)MiGetPfnEntry(*MdlPages
);
1586 ASSERT(*MdlPages
!= 0);
1587 } while (++MdlPages
< LastPage
);
1592 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
1595 // Now grab the PFN lock for the actual unlock and dereference
1597 OldIrql
= MiAcquirePfnLock();
1600 /* Get the current entry and reference count */
1601 Pfn1
= (PMMPFN
)*MdlPages
;
1602 MiDereferencePfnAndDropLockCount(Pfn1
);
1603 } while (++MdlPages
< LastPage
);
1608 MiReleasePfnLock(OldIrql
);
1613 Mdl
->MdlFlags
&= ~MDL_PAGES_LOCKED
;
1621 MmAdvanceMdl(IN PMDL Mdl
,
1622 IN ULONG NumberOfBytes
)
1625 return STATUS_NOT_IMPLEMENTED
;
1633 MmMapLockedPagesWithReservedMapping(IN PVOID MappingAddress
,
1635 IN PMDL MemoryDescriptorList
,
1636 IN MEMORY_CACHING_TYPE CacheType
)
1647 MmUnmapReservedMapping(IN PVOID BaseAddress
,
1649 IN PMDL MemoryDescriptorList
)
1659 MmPrefetchPages(IN ULONG NumberOfLists
,
1660 IN PREAD_LIST
*ReadLists
)
1663 return STATUS_NOT_IMPLEMENTED
;
1671 MmProtectMdlSystemAddress(IN PMDL MemoryDescriptorList
,
1672 IN ULONG NewProtect
)
1675 return STATUS_NOT_IMPLEMENTED
;
1683 MmProbeAndLockProcessPages(IN OUT PMDL MemoryDescriptorList
,
1684 IN PEPROCESS Process
,
1685 IN KPROCESSOR_MODE AccessMode
,
1686 IN LOCK_OPERATION Operation
)
1697 MmProbeAndLockSelectedPages(IN OUT PMDL MemoryDescriptorList
,
1698 IN LARGE_INTEGER PageList
[],
1699 IN KPROCESSOR_MODE AccessMode
,
1700 IN LOCK_OPERATION Operation
)
1710 MmMapMemoryDumpMdl(IN PMDL Mdl
)