/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/mdlsup.c
 * PURPOSE:         ARM Memory Manager Memory Descriptor List (MDL) Management
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */
/* INCLUDES *******************************************************************/
15 #define MODULE_INVOLVED_IN_ARM3
16 #include <mm/ARM3/miarm.h>
/* GLOBALS ********************************************************************/
21 BOOLEAN MmTrackLockedPages
;
22 SIZE_T MmSystemLockPagesCount
;
24 ULONG MiCacheOverride
[MiNotMapped
+ 1];
/* INTERNAL FUNCTIONS *********************************************************/
30 MiMapLockedPagesInUserSpace(
33 _In_ MEMORY_CACHING_TYPE CacheType
,
34 _In_opt_ PVOID BaseAddress
)
37 PEPROCESS Process
= PsGetCurrentProcess();
38 PETHREAD Thread
= PsGetCurrentThread();
39 TABLE_SEARCH_RESULT Result
;
40 MI_PFN_CACHE_ATTRIBUTE CacheAttribute
;
41 MI_PFN_CACHE_ATTRIBUTE EffectiveCacheAttribute
;
46 PMMADDRESS_NODE Parent
;
55 BOOLEAN AddressSpaceLocked
= FALSE
;
59 DPRINT("MiMapLockedPagesInUserSpace(%p, %p, 0x%x, %p)\n",
60 Mdl
, StartVa
, CacheType
, BaseAddress
);
62 NumberOfPages
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(StartVa
,
63 MmGetMdlByteCount(Mdl
));
64 MdlPages
= MmGetMdlPfnArray(Mdl
);
66 ASSERT(CacheType
<= MmWriteCombined
);
68 IsIoMapping
= (Mdl
->MdlFlags
& MDL_IO_SPACE
) != 0;
69 CacheAttribute
= MiPlatformCacheAttributes
[IsIoMapping
][CacheType
];
71 /* Large pages are always cached, make sure we're not asking for those */
72 if (CacheAttribute
!= MiCached
)
74 DPRINT1("FIXME: Need to check for large pages\n");
77 Status
= PsChargeProcessNonPagedPoolQuota(Process
, sizeof(MMVAD_LONG
));
78 if (!NT_SUCCESS(Status
))
84 /* Allocate a VAD for our mapped region */
85 Vad
= ExAllocatePoolWithTag(NonPagedPool
, sizeof(MMVAD_LONG
), 'ldaV');
88 PsReturnProcessNonPagedPoolQuota(Process
, sizeof(MMVAD_LONG
));
89 Status
= STATUS_INSUFFICIENT_RESOURCES
;
93 /* Initialize PhysicalMemory VAD */
94 RtlZeroMemory(Vad
, sizeof(*Vad
));
95 Vad
->u2
.VadFlags2
.LongVad
= 1;
96 Vad
->u
.VadFlags
.VadType
= VadDevicePhysicalMemory
;
97 Vad
->u
.VadFlags
.Protection
= MM_READWRITE
;
98 Vad
->u
.VadFlags
.PrivateMemory
= 1;
100 /* Did the caller specify an address? */
101 if (BaseAddress
== NULL
)
103 /* We get to pick the address */
104 MmLockAddressSpace(&Process
->Vm
);
105 AddressSpaceLocked
= TRUE
;
106 if (Process
->VmDeleted
)
108 Status
= STATUS_PROCESS_IS_TERMINATING
;
112 Result
= MiFindEmptyAddressRangeInTree(NumberOfPages
<< PAGE_SHIFT
,
113 MM_VIRTMEM_GRANULARITY
,
117 if (Result
== TableFoundNode
)
119 Status
= STATUS_NO_MEMORY
;
122 EndingVa
= StartingVa
+ NumberOfPages
* PAGE_SIZE
- 1;
123 BaseAddress
= (PVOID
)StartingVa
;
127 /* Caller specified a base address */
128 StartingVa
= (ULONG_PTR
)BaseAddress
;
129 EndingVa
= StartingVa
+ NumberOfPages
* PAGE_SIZE
- 1;
131 /* Make sure it's valid */
132 if (BYTE_OFFSET(StartingVa
) != 0 ||
133 EndingVa
<= StartingVa
||
134 EndingVa
> (ULONG_PTR
)MM_HIGHEST_VAD_ADDRESS
)
136 Status
= STATUS_INVALID_ADDRESS
;
140 MmLockAddressSpace(&Process
->Vm
);
141 AddressSpaceLocked
= TRUE
;
142 if (Process
->VmDeleted
)
144 Status
= STATUS_PROCESS_IS_TERMINATING
;
148 /* Check if it's already in use */
149 Result
= MiCheckForConflictingNode(StartingVa
>> PAGE_SHIFT
,
150 EndingVa
>> PAGE_SHIFT
,
153 if (Result
== TableFoundNode
)
155 Status
= STATUS_CONFLICTING_ADDRESSES
;
160 Vad
->StartingVpn
= StartingVa
>> PAGE_SHIFT
;
161 Vad
->EndingVpn
= EndingVa
>> PAGE_SHIFT
;
163 MiLockProcessWorkingSetUnsafe(Process
, Thread
);
165 ASSERT(Vad
->EndingVpn
>= Vad
->StartingVpn
);
166 MiInsertVad((PMMVAD
)Vad
, &Process
->VadRoot
);
168 /* Check if this is uncached */
169 if (CacheAttribute
!= MiCached
)
171 /* Flush all caches */
172 KeFlushEntireTb(TRUE
, TRUE
);
173 KeInvalidateAllCaches();
176 PointerPte
= MiAddressToPte(BaseAddress
);
177 while (NumberOfPages
!= 0 &&
178 *MdlPages
!= LIST_HEAD
)
180 PointerPde
= MiPteToPde(PointerPte
);
181 MiMakePdeExistAndMakeValid(PointerPde
, Process
, MM_NOIRQL
);
182 ASSERT(PointerPte
->u
.Hard
.Valid
== 0);
184 /* Add a PDE reference for each page */
185 MiIncrementPageTableReferences(BaseAddress
);
187 /* Set up our basic user PTE */
188 MI_MAKE_HARDWARE_PTE_USER(&TempPte
,
193 EffectiveCacheAttribute
= CacheAttribute
;
195 /* We need to respect the PFN's caching information in some cases */
196 Pfn2
= MiGetPfnEntry(*MdlPages
);
199 ASSERT(Pfn2
->u3
.e2
.ReferenceCount
!= 0);
201 switch (Pfn2
->u3
.e1
.CacheAttribute
)
204 if (CacheAttribute
!= MiNonCached
)
206 MiCacheOverride
[1]++;
207 EffectiveCacheAttribute
= MiNonCached
;
212 if (CacheAttribute
!= MiCached
)
214 MiCacheOverride
[0]++;
215 EffectiveCacheAttribute
= MiCached
;
219 case MiWriteCombined
:
220 if (CacheAttribute
!= MiWriteCombined
)
222 MiCacheOverride
[2]++;
223 EffectiveCacheAttribute
= MiWriteCombined
;
228 /* We don't support AWE magic (MiNotMapped) */
229 DPRINT1("FIXME: MiNotMapped is not supported\n");
235 /* Configure caching */
236 switch (EffectiveCacheAttribute
)
239 MI_PAGE_DISABLE_CACHE(&TempPte
);
240 MI_PAGE_WRITE_THROUGH(&TempPte
);
244 case MiWriteCombined
:
245 MI_PAGE_DISABLE_CACHE(&TempPte
);
246 MI_PAGE_WRITE_COMBINED(&TempPte
);
253 /* Make the page valid */
254 MI_WRITE_VALID_PTE(PointerPte
, TempPte
);
256 /* Acquire a share count */
257 Pfn1
= MI_PFN_ELEMENT(PointerPde
->u
.Hard
.PageFrameNumber
);
258 DPRINT("Incrementing %p from %p\n", Pfn1
, _ReturnAddress());
259 OldIrql
= MiAcquirePfnLock();
260 Pfn1
->u2
.ShareCount
++;
261 MiReleasePfnLock(OldIrql
);
267 BaseAddress
= (PVOID
)((ULONG_PTR
)BaseAddress
+ PAGE_SIZE
);
270 MiUnlockProcessWorkingSetUnsafe(Process
, Thread
);
271 ASSERT(AddressSpaceLocked
);
272 MmUnlockAddressSpace(&Process
->Vm
);
274 ASSERT(StartingVa
!= 0);
275 return (PVOID
)((ULONG_PTR
)StartingVa
+ MmGetMdlByteOffset(Mdl
));
278 if (AddressSpaceLocked
)
280 MmUnlockAddressSpace(&Process
->Vm
);
284 ExFreePoolWithTag(Vad
, 'ldaV');
285 PsReturnProcessNonPagedPoolQuota(Process
, sizeof(MMVAD_LONG
));
287 ExRaiseStatus(Status
);
293 MiUnmapLockedPagesInUserSpace(
294 _In_ PVOID BaseAddress
,
297 PEPROCESS Process
= PsGetCurrentProcess();
298 PETHREAD Thread
= PsGetCurrentThread();
304 PPFN_NUMBER MdlPages
;
305 PFN_NUMBER PageTablePage
;
307 DPRINT("MiUnmapLockedPagesInUserSpace(%p, %p)\n", BaseAddress
, Mdl
);
309 NumberOfPages
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(MmGetMdlVirtualAddress(Mdl
),
310 MmGetMdlByteCount(Mdl
));
311 ASSERT(NumberOfPages
!= 0);
312 MdlPages
= MmGetMdlPfnArray(Mdl
);
315 MmLockAddressSpace(&Process
->Vm
);
316 Vad
= MiLocateAddress(BaseAddress
);
318 Vad
->u
.VadFlags
.VadType
!= VadDevicePhysicalMemory
)
320 DPRINT1("MiUnmapLockedPagesInUserSpace invalid for %p\n", BaseAddress
);
321 MmUnlockAddressSpace(&Process
->Vm
);
325 MiLockProcessWorkingSetUnsafe(Process
, Thread
);
327 /* Remove it from the process VAD tree */
328 ASSERT(Process
->VadRoot
.NumberGenericTableElements
>= 1);
329 MiRemoveNode((PMMADDRESS_NODE
)Vad
, &Process
->VadRoot
);
330 PsReturnProcessNonPagedPoolQuota(Process
, sizeof(MMVAD_LONG
));
332 /* MiRemoveNode should have removed us if we were the hint */
333 ASSERT(Process
->VadRoot
.NodeHint
!= Vad
);
335 PointerPte
= MiAddressToPte(BaseAddress
);
336 OldIrql
= MiAcquirePfnLock();
337 while (NumberOfPages
!= 0 &&
338 *MdlPages
!= LIST_HEAD
)
340 ASSERT(MiAddressToPte(PointerPte
)->u
.Hard
.Valid
== 1);
341 ASSERT(PointerPte
->u
.Hard
.Valid
== 1);
344 MI_ERASE_PTE(PointerPte
);
346 /* We invalidated this PTE, so dereference the PDE */
347 PointerPde
= MiAddressToPde(BaseAddress
);
348 PageTablePage
= PointerPde
->u
.Hard
.PageFrameNumber
;
349 MiDecrementShareCount(MiGetPfnEntry(PageTablePage
), PageTablePage
);
351 if (MiDecrementPageTableReferences(BaseAddress
) == 0)
353 ASSERT(MiIsPteOnPdeBoundary(PointerPte
+ 1) || (NumberOfPages
== 1));
354 MiDeletePde(PointerPde
, Process
);
360 BaseAddress
= (PVOID
)((ULONG_PTR
)BaseAddress
+ PAGE_SIZE
);
365 MiReleasePfnLock(OldIrql
);
366 MiUnlockProcessWorkingSetUnsafe(Process
, Thread
);
367 MmUnlockAddressSpace(&Process
->Vm
);
368 ExFreePoolWithTag(Vad
, 'ldaV');
/* PUBLIC FUNCTIONS ***********************************************************/
378 MmCreateMdl(IN PMDL Mdl
,
385 // Check if we don't have an MDL built
390 // Calculate the size we'll need and allocate the MDL
392 Size
= MmSizeOfMdl(Base
, Length
);
393 Mdl
= ExAllocatePoolWithTag(NonPagedPool
, Size
, TAG_MDL
);
394 if (!Mdl
) return NULL
;
400 MmInitializeMdl(Mdl
, Base
, Length
);
409 MmSizeOfMdl(IN PVOID Base
,
413 // Return the MDL size
416 (ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base
, Length
) * sizeof(PFN_NUMBER
));
424 MmBuildMdlForNonPagedPool(IN PMDL Mdl
)
426 PPFN_NUMBER MdlPages
, EndPage
;
427 PFN_NUMBER Pfn
, PageCount
;
434 ASSERT(Mdl
->ByteCount
!= 0);
435 ASSERT((Mdl
->MdlFlags
& (MDL_PAGES_LOCKED
|
436 MDL_MAPPED_TO_SYSTEM_VA
|
437 MDL_SOURCE_IS_NONPAGED_POOL
|
441 // We know the MDL isn't associated to a process now
446 // Get page and VA information
448 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
452 // Set the system address and now get the page count
454 Mdl
->MappedSystemVa
= (PVOID
)((ULONG_PTR
)Base
+ Mdl
->ByteOffset
);
455 PageCount
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Mdl
->MappedSystemVa
,
457 ASSERT(PageCount
!= 0);
458 EndPage
= MdlPages
+ PageCount
;
463 PointerPte
= MiAddressToPte(Base
);
469 Pfn
= PFN_FROM_PTE(PointerPte
++);
471 } while (MdlPages
< EndPage
);
474 // Set the nonpaged pool flag
476 Mdl
->MdlFlags
|= MDL_SOURCE_IS_NONPAGED_POOL
;
479 // Check if this is an I/O mapping
481 if (!MiGetPfnEntry(Pfn
)) Mdl
->MdlFlags
|= MDL_IO_SPACE
;
489 MmAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress
,
490 IN PHYSICAL_ADDRESS HighAddress
,
491 IN PHYSICAL_ADDRESS SkipBytes
,
492 IN SIZE_T TotalBytes
)
495 // Call the internal routine
497 return MiAllocatePagesForMdl(LowAddress
,
510 MmAllocatePagesForMdlEx(IN PHYSICAL_ADDRESS LowAddress
,
511 IN PHYSICAL_ADDRESS HighAddress
,
512 IN PHYSICAL_ADDRESS SkipBytes
,
513 IN SIZE_T TotalBytes
,
514 IN MEMORY_CACHING_TYPE CacheType
,
517 MI_PFN_CACHE_ATTRIBUTE CacheAttribute
;
520 // Check for invalid cache type
522 if (CacheType
> MmWriteCombined
)
525 // Normalize to default
527 CacheAttribute
= MiNotMapped
;
532 // Conver to internal caching attribute
534 CacheAttribute
= MiPlatformCacheAttributes
[FALSE
][CacheType
];
538 // Only these flags are allowed
540 if (Flags
& ~(MM_DONT_ZERO_ALLOCATION
| MM_ALLOCATE_FROM_LOCAL_NODE_ONLY
))
549 // Call the internal routine
551 return MiAllocatePagesForMdl(LowAddress
,
564 MmFreePagesFromMdl(IN PMDL Mdl
)
571 DPRINT("Freeing MDL: %p\n", Mdl
);
576 ASSERT(KeGetCurrentIrql() <= APC_LEVEL
);
577 ASSERT((Mdl
->MdlFlags
& MDL_IO_SPACE
) == 0);
578 ASSERT(((ULONG_PTR
)Mdl
->StartVa
& (PAGE_SIZE
- 1)) == 0);
581 // Get address and page information
583 Base
= (PVOID
)((ULONG_PTR
)Mdl
->StartVa
+ Mdl
->ByteOffset
);
584 NumberOfPages
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base
, Mdl
->ByteCount
);
589 OldIrql
= MiAcquirePfnLock();
592 // Loop all the MDL pages
594 Pages
= (PPFN_NUMBER
)(Mdl
+ 1);
598 // Reached the last page
600 if (*Pages
== LIST_HEAD
) break;
603 // Get the page entry
605 Pfn1
= MiGetPfnEntry(*Pages
);
607 ASSERT(Pfn1
->u2
.ShareCount
== 1);
608 ASSERT(MI_IS_PFN_DELETED(Pfn1
) == TRUE
);
609 if (Pfn1
->u4
.PteFrame
!= 0x1FFEDCB)
611 /* Corrupted PFN entry or invalid free */
612 KeBugCheckEx(MEMORY_MANAGEMENT
, 0x1236, (ULONG_PTR
)Mdl
, (ULONG_PTR
)Pages
, *Pages
);
618 Pfn1
->u3
.e1
.StartOfAllocation
= 0;
619 Pfn1
->u3
.e1
.EndOfAllocation
= 0;
620 Pfn1
->u3
.e1
.PageLocation
= StandbyPageList
;
621 Pfn1
->u2
.ShareCount
= 0;
626 ASSERT(Pfn1
->u3
.e2
.ReferenceCount
!= 0);
627 if (Pfn1
->u3
.e2
.ReferenceCount
!= 1)
629 /* Just take off one reference */
630 InterlockedDecrement16((PSHORT
)&Pfn1
->u3
.e2
.ReferenceCount
);
634 /* We'll be nuking the whole page */
635 MiDecrementReferenceCount(Pfn1
, *Pages
);
639 // Clear this page and move on
641 *Pages
++ = LIST_HEAD
;
642 } while (--NumberOfPages
!= 0);
647 MiReleasePfnLock(OldIrql
);
650 // Remove the pages locked flag
652 Mdl
->MdlFlags
&= ~MDL_PAGES_LOCKED
;
660 MmMapLockedPagesSpecifyCache(IN PMDL Mdl
,
661 IN KPROCESSOR_MODE AccessMode
,
662 IN MEMORY_CACHING_TYPE CacheType
,
663 IN PVOID BaseAddress
,
664 IN ULONG BugCheckOnFailure
,
665 IN ULONG Priority
) // MM_PAGE_PRIORITY
668 PPFN_NUMBER MdlPages
, LastPage
;
671 MI_PFN_CACHE_ATTRIBUTE CacheAttribute
;
678 ASSERT(Mdl
->ByteCount
!= 0);
683 Base
= (PVOID
)((ULONG_PTR
)Mdl
->StartVa
+ Mdl
->ByteOffset
);
686 // Handle kernel case first
688 if (AccessMode
== KernelMode
)
691 // Get the list of pages and count
693 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
694 PageCount
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base
, Mdl
->ByteCount
);
695 LastPage
= MdlPages
+ PageCount
;
700 ASSERT((Mdl
->MdlFlags
& (MDL_MAPPED_TO_SYSTEM_VA
|
701 MDL_SOURCE_IS_NONPAGED_POOL
|
702 MDL_PARTIAL_HAS_BEEN_MAPPED
)) == 0);
703 ASSERT((Mdl
->MdlFlags
& (MDL_PAGES_LOCKED
| MDL_PARTIAL
)) != 0);
706 // Get the correct cache type
708 IsIoMapping
= (Mdl
->MdlFlags
& MDL_IO_SPACE
) != 0;
709 CacheAttribute
= MiPlatformCacheAttributes
[IsIoMapping
][CacheType
];
714 PointerPte
= MiReserveSystemPtes(PageCount
, SystemPteSpace
);
718 // If it can fail, return NULL
720 if (Mdl
->MdlFlags
& MDL_MAPPING_CAN_FAIL
) return NULL
;
723 // Should we bugcheck?
725 if (!BugCheckOnFailure
) return NULL
;
728 // Yes, crash the system
730 KeBugCheckEx(NO_MORE_SYSTEM_PTES
, 0, PageCount
, 0, 0);
734 // Get the mapped address
736 Base
= (PVOID
)((ULONG_PTR
)MiPteToAddress(PointerPte
) + Mdl
->ByteOffset
);
741 TempPte
= ValidKernelPte
;
742 switch (CacheAttribute
)
749 MI_PAGE_DISABLE_CACHE(&TempPte
);
750 MI_PAGE_WRITE_THROUGH(&TempPte
);
753 case MiWriteCombined
:
756 // Enable write combining
758 MI_PAGE_DISABLE_CACHE(&TempPte
);
759 MI_PAGE_WRITE_COMBINED(&TempPte
);
777 if (*MdlPages
== LIST_HEAD
) break;
782 TempPte
.u
.Hard
.PageFrameNumber
= *MdlPages
;
783 MI_WRITE_VALID_PTE(PointerPte
++, TempPte
);
784 } while (++MdlPages
< LastPage
);
789 ASSERT((Mdl
->MdlFlags
& MDL_MAPPED_TO_SYSTEM_VA
) == 0);
790 Mdl
->MappedSystemVa
= Base
;
791 Mdl
->MdlFlags
|= MDL_MAPPED_TO_SYSTEM_VA
;
794 // Check if it was partial
796 if (Mdl
->MdlFlags
& MDL_PARTIAL
)
799 // Write the appropriate flag here too
801 Mdl
->MdlFlags
|= MDL_PARTIAL_HAS_BEEN_MAPPED
;
805 // Return the mapped address
810 return MiMapLockedPagesInUserSpace(Mdl
, Base
, CacheType
, BaseAddress
);
818 MmMapLockedPages(IN PMDL Mdl
,
819 IN KPROCESSOR_MODE AccessMode
)
822 // Call the extended version
824 return MmMapLockedPagesSpecifyCache(Mdl
,
837 MmUnmapLockedPages(IN PVOID BaseAddress
,
841 PFN_COUNT PageCount
, ExtraPageCount
;
842 PPFN_NUMBER MdlPages
;
848 ASSERT(Mdl
->ByteCount
!= 0);
851 // Check if this is a kernel request
853 if (BaseAddress
> MM_HIGHEST_USER_ADDRESS
)
856 // Get base and count information
858 Base
= (PVOID
)((ULONG_PTR
)Mdl
->StartVa
+ Mdl
->ByteOffset
);
859 PageCount
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base
, Mdl
->ByteCount
);
864 ASSERT((Mdl
->MdlFlags
& MDL_PARENT_MAPPED_SYSTEM_VA
) == 0);
865 ASSERT(PageCount
!= 0);
866 ASSERT(Mdl
->MdlFlags
& MDL_MAPPED_TO_SYSTEM_VA
);
871 PointerPte
= MiAddressToPte(BaseAddress
);
874 // This should be a resident system PTE
876 ASSERT(PointerPte
>= MmSystemPtesStart
[SystemPteSpace
]);
877 ASSERT(PointerPte
<= MmSystemPtesEnd
[SystemPteSpace
]);
878 ASSERT(PointerPte
->u
.Hard
.Valid
== 1);
881 // Check if the caller wants us to free advanced pages
883 if (Mdl
->MdlFlags
& MDL_FREE_EXTRA_PTES
)
886 // Get the MDL page array
888 MdlPages
= MmGetMdlPfnArray(Mdl
);
890 /* Number of extra pages stored after the PFN array */
891 ExtraPageCount
= (PFN_COUNT
)*(MdlPages
+ PageCount
);
896 PageCount
+= ExtraPageCount
;
897 PointerPte
-= ExtraPageCount
;
898 ASSERT(PointerPte
>= MmSystemPtesStart
[SystemPteSpace
]);
899 ASSERT(PointerPte
<= MmSystemPtesEnd
[SystemPteSpace
]);
902 // Get the new base address
904 BaseAddress
= (PVOID
)((ULONG_PTR
)BaseAddress
-
905 (ExtraPageCount
<< PAGE_SHIFT
));
911 Mdl
->MdlFlags
&= ~(MDL_MAPPED_TO_SYSTEM_VA
|
912 MDL_PARTIAL_HAS_BEEN_MAPPED
|
913 MDL_FREE_EXTRA_PTES
);
916 // Release the system PTEs
918 MiReleaseSystemPtes(PointerPte
, PageCount
, SystemPteSpace
);
922 MiUnmapLockedPagesInUserSpace(BaseAddress
, Mdl
);
931 MmProbeAndLockPages(IN PMDL Mdl
,
932 IN KPROCESSOR_MODE AccessMode
,
933 IN LOCK_OPERATION Operation
)
935 PPFN_NUMBER MdlPages
;
936 PVOID Base
, Address
, LastAddress
, StartAddress
;
937 ULONG LockPages
, TotalPages
;
938 NTSTATUS Status
= STATUS_SUCCESS
;
939 PEPROCESS CurrentProcess
;
940 NTSTATUS ProbeStatus
;
941 PMMPTE PointerPte
, LastPte
;
943 #if (_MI_PAGING_LEVELS >= 3)
946 #if (_MI_PAGING_LEVELS == 4)
949 PFN_NUMBER PageFrameIndex
;
953 DPRINT("Probing MDL: %p\n", Mdl
);
958 ASSERT(Mdl
->ByteCount
!= 0);
959 ASSERT(((ULONG
)Mdl
->ByteOffset
& ~(PAGE_SIZE
- 1)) == 0);
960 ASSERT(((ULONG_PTR
)Mdl
->StartVa
& (PAGE_SIZE
- 1)) == 0);
961 ASSERT((Mdl
->MdlFlags
& (MDL_PAGES_LOCKED
|
962 MDL_MAPPED_TO_SYSTEM_VA
|
963 MDL_SOURCE_IS_NONPAGED_POOL
|
965 MDL_IO_SPACE
)) == 0);
968 // Get page and base information
970 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
974 // Get the addresses and how many pages we span (and need to lock)
976 Address
= (PVOID
)((ULONG_PTR
)Base
+ Mdl
->ByteOffset
);
977 LastAddress
= (PVOID
)((ULONG_PTR
)Address
+ Mdl
->ByteCount
);
978 LockPages
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Address
, Mdl
->ByteCount
);
979 ASSERT(LockPages
!= 0);
981 /* Block invalid access */
982 if ((AccessMode
!= KernelMode
) &&
983 ((LastAddress
> (PVOID
)MM_USER_PROBE_ADDRESS
) || (Address
>= LastAddress
)))
985 /* Caller should be in SEH, raise the error */
986 *MdlPages
= LIST_HEAD
;
987 ExRaiseStatus(STATUS_ACCESS_VIOLATION
);
993 if (Address
<= MM_HIGHEST_USER_ADDRESS
)
998 CurrentProcess
= PsGetCurrentProcess();
1005 CurrentProcess
= NULL
;
1009 // Save the number of pages we'll have to lock, and the start address
1011 TotalPages
= LockPages
;
1012 StartAddress
= Address
;
1014 /* Large pages not supported */
1015 ASSERT(!MI_IS_PHYSICAL_ADDRESS(Address
));
1020 ProbeStatus
= STATUS_SUCCESS
;
1031 *MdlPages
= LIST_HEAD
;
1036 *(volatile CHAR
*)Address
;
1039 // Check if this is write access (only probe for user-mode)
1041 if ((Operation
!= IoReadAccess
) &&
1042 (Address
<= MM_HIGHEST_USER_ADDRESS
))
1045 // Probe for write too
1047 ProbeForWriteChar(Address
);
1053 Address
= PAGE_ALIGN((ULONG_PTR
)Address
+ PAGE_SIZE
);
1060 } while (Address
< LastAddress
);
1063 // Reset back to the original page
1065 ASSERT(LockPages
== 0);
1066 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
1068 _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER
)
1073 ProbeStatus
= _SEH2_GetExceptionCode();
1078 // So how did that go?
1080 if (ProbeStatus
!= STATUS_SUCCESS
)
1085 DPRINT1("MDL PROBE FAILED!\n");
1086 Mdl
->Process
= NULL
;
1087 ExRaiseStatus(ProbeStatus
);
1091 // Get the PTE and PDE
1093 PointerPte
= MiAddressToPte(StartAddress
);
1094 PointerPde
= MiAddressToPde(StartAddress
);
1095 #if (_MI_PAGING_LEVELS >= 3)
1096 PointerPpe
= MiAddressToPpe(StartAddress
);
1098 #if (_MI_PAGING_LEVELS == 4)
1099 PointerPxe
= MiAddressToPxe(StartAddress
);
1105 ASSERT(MdlPages
== (PPFN_NUMBER
)(Mdl
+ 1));
1108 // Check what kind of operation this is
1110 if (Operation
!= IoReadAccess
)
1113 // Set the write flag
1115 Mdl
->MdlFlags
|= MDL_WRITE_OPERATION
;
1120 // Remove the write flag
1122 Mdl
->MdlFlags
&= ~(MDL_WRITE_OPERATION
);
1126 // Mark the MDL as locked *now*
1128 Mdl
->MdlFlags
|= MDL_PAGES_LOCKED
;
1131 // Check if this came from kernel mode
1133 if (Base
> MM_HIGHEST_USER_ADDRESS
)
1136 // We should not have a process
1138 ASSERT(CurrentProcess
== NULL
);
1139 Mdl
->Process
= NULL
;
1142 // In kernel mode, we don't need to check for write access
1144 Operation
= IoReadAccess
;
1150 OldIrql
= MiAcquirePfnLock();
1157 ASSERT(TotalPages
!= 0);
1158 ASSERT(CurrentProcess
== PsGetCurrentProcess());
1161 // Track locked pages
1163 InterlockedExchangeAddSizeT(&CurrentProcess
->NumberOfLockedPages
,
1169 Mdl
->Process
= CurrentProcess
;
1171 /* Lock the process working set */
1172 MiLockProcessWorkingSet(CurrentProcess
, PsGetCurrentThread());
1174 OldIrql
= MM_NOIRQL
;
1180 LastPte
= MiAddressToPte((PVOID
)((ULONG_PTR
)LastAddress
- 1));
1188 // Assume failure and check for non-mapped pages
1190 *MdlPages
= LIST_HEAD
;
1192 #if (_MI_PAGING_LEVELS == 4)
1193 (PointerPxe
->u
.Hard
.Valid
== 0) ||
1195 #if (_MI_PAGING_LEVELS >= 3)
1196 (PointerPpe
->u
.Hard
.Valid
== 0) ||
1198 (PointerPde
->u
.Hard
.Valid
== 0) ||
1199 (PointerPte
->u
.Hard
.Valid
== 0))
1202 // What kind of lock were we using?
1209 MiReleasePfnLock(OldIrql
);
1213 /* Release process working set */
1214 MiUnlockProcessWorkingSet(CurrentProcess
, PsGetCurrentThread());
1220 Address
= MiPteToAddress(PointerPte
);
1222 //HACK: Pass a placeholder TrapInformation so the fault handler knows we're unlocked
1223 Status
= MmAccessFault(FALSE
, Address
, KernelMode
, (PVOID
)(ULONG_PTR
)0xBADBADA3BADBADA3ULL
);
1224 if (!NT_SUCCESS(Status
))
1229 DPRINT1("Access fault failed\n");
1234 // What lock should we use?
1239 // Grab the PFN lock
1241 OldIrql
= MiAcquirePfnLock();
1245 /* Lock the process working set */
1246 MiLockProcessWorkingSet(CurrentProcess
, PsGetCurrentThread());
1251 // Check if this was a write or modify
1253 if (Operation
!= IoReadAccess
)
1256 // Check if the PTE is not writable
1258 if (MI_IS_PAGE_WRITEABLE(PointerPte
) == FALSE
)
1261 // Check if it's copy on write
1263 if (MI_IS_PAGE_COPY_ON_WRITE(PointerPte
))
1266 // Get the base address and allow a change for user-mode
1268 Address
= MiPteToAddress(PointerPte
);
1269 if (Address
<= MM_HIGHEST_USER_ADDRESS
)
1272 // What kind of lock were we using?
1279 MiReleasePfnLock(OldIrql
);
1283 /* Release process working set */
1284 MiUnlockProcessWorkingSet(CurrentProcess
, PsGetCurrentThread());
1291 //HACK: Pass a placeholder TrapInformation so the fault handler knows we're unlocked
1292 Status
= MmAccessFault(TRUE
, Address
, KernelMode
, (PVOID
)(ULONG_PTR
)0xBADBADA3BADBADA3ULL
);
1293 if (!NT_SUCCESS(Status
))
1298 DPRINT1("Access fault failed\n");
1303 // Re-acquire the lock
1308 // Grab the PFN lock
1310 OldIrql
= MiAcquirePfnLock();
1314 /* Lock the process working set */
1315 MiLockProcessWorkingSet(CurrentProcess
, PsGetCurrentThread());
1326 // Fail, since we won't allow this
1328 Status
= STATUS_ACCESS_VIOLATION
;
1329 goto CleanupWithLock
;
1336 PageFrameIndex
= PFN_FROM_PTE(PointerPte
);
1337 Pfn1
= MiGetPfnEntry(PageFrameIndex
);
1340 /* Either this is for kernel-mode, or the working set is held */
1341 ASSERT((CurrentProcess
== NULL
) || (UsePfnLock
== FALSE
));
1343 /* No Physical VADs supported yet */
1344 if (CurrentProcess
) ASSERT(CurrentProcess
->PhysicalVadRoot
== NULL
);
1346 /* This address should already exist and be fully valid */
1347 MiReferenceProbedPageAndBumpLockCount(Pfn1
);
1352 // For I/O addresses, just remember this
1354 Mdl
->MdlFlags
|= MDL_IO_SPACE
;
1358 // Write the page and move on
1360 *MdlPages
++ = PageFrameIndex
;
1363 /* Check if we're on a PDE boundary */
1364 if (MiIsPteOnPdeBoundary(PointerPte
)) PointerPde
++;
1365 #if (_MI_PAGING_LEVELS >= 3)
1366 if (MiIsPteOnPpeBoundary(PointerPte
)) PointerPpe
++;
1368 #if (_MI_PAGING_LEVELS == 4)
1369 if (MiIsPteOnPxeBoundary(PointerPte
)) PointerPxe
++;
1372 } while (PointerPte
<= LastPte
);
1375 // What kind of lock were we using?
1382 MiReleasePfnLock(OldIrql
);
1386 /* Release process working set */
1387 MiUnlockProcessWorkingSet(CurrentProcess
, PsGetCurrentThread());
1393 ASSERT((Mdl
->MdlFlags
& MDL_DESCRIBES_AWE
) == 0);
1398 // This is the failure path
1400 ASSERT(!NT_SUCCESS(Status
));
1403 // What kind of lock were we using?
1410 MiReleasePfnLock(OldIrql
);
1414 /* Release process working set */
1415 MiUnlockProcessWorkingSet(CurrentProcess
, PsGetCurrentThread());
1419 // Pages must be locked so MmUnlock can work
1421 ASSERT(Mdl
->MdlFlags
& MDL_PAGES_LOCKED
);
1427 ExRaiseStatus(Status
);
1435 MmUnlockPages(IN PMDL Mdl
)
1437 PPFN_NUMBER MdlPages
, LastPage
;
1440 ULONG Flags
, PageCount
;
1443 DPRINT("Unlocking MDL: %p\n", Mdl
);
1448 ASSERT((Mdl
->MdlFlags
& MDL_PAGES_LOCKED
) != 0);
1449 ASSERT((Mdl
->MdlFlags
& MDL_SOURCE_IS_NONPAGED_POOL
) == 0);
1450 ASSERT((Mdl
->MdlFlags
& MDL_PARTIAL
) == 0);
1451 ASSERT(Mdl
->ByteCount
!= 0);
1454 // Get the process associated and capture the flags which are volatile
1456 Process
= Mdl
->Process
;
1457 Flags
= Mdl
->MdlFlags
;
1460 // Automagically undo any calls to MmGetSystemAddressForMdl's for this MDL
1462 if (Mdl
->MdlFlags
& MDL_MAPPED_TO_SYSTEM_VA
)
1465 // Unmap the pages from system space
1467 MmUnmapLockedPages(Mdl
->MappedSystemVa
, Mdl
);
1471 // Get the page count
1473 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
1474 Base
= (PVOID
)((ULONG_PTR
)Mdl
->StartVa
+ Mdl
->ByteOffset
);
1475 PageCount
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base
, Mdl
->ByteCount
);
1476 ASSERT(PageCount
!= 0);
1479 // We don't support AWE
1481 if (Flags
& MDL_DESCRIBES_AWE
) ASSERT(FALSE
);
1484 // Check if the buffer is mapped I/O space
1486 if (Flags
& MDL_IO_SPACE
)
1491 OldIrql
= MiAcquirePfnLock();
1496 LastPage
= MdlPages
+ PageCount
;
1500 // Last page, break out
1502 if (*MdlPages
== LIST_HEAD
) break;
1505 // Check if this page is in the PFN database
1507 Pfn1
= MiGetPfnEntry(*MdlPages
);
1508 if (Pfn1
) MiDereferencePfnAndDropLockCount(Pfn1
);
1509 } while (++MdlPages
< LastPage
);
1514 MiReleasePfnLock(OldIrql
);
1517 // Check if we have a process
1522 // Handle the accounting of locked pages
1524 ASSERT(Process
->NumberOfLockedPages
> 0);
1525 InterlockedExchangeAddSizeT(&Process
->NumberOfLockedPages
,
1526 -(LONG_PTR
)PageCount
);
1532 Mdl
->MdlFlags
&= ~MDL_IO_SPACE
;
1533 Mdl
->MdlFlags
&= ~MDL_PAGES_LOCKED
;
1538 // Check if we have a process
1543 // Handle the accounting of locked pages
1545 ASSERT(Process
->NumberOfLockedPages
> 0);
1546 InterlockedExchangeAddSizeT(&Process
->NumberOfLockedPages
,
1547 -(LONG_PTR
)PageCount
);
1553 LastPage
= MdlPages
+ PageCount
;
1557 // Last page reached
1559 if (*MdlPages
== LIST_HEAD
)
1562 // Were there no pages at all?
1564 if (MdlPages
== (PPFN_NUMBER
)(Mdl
+ 1))
1567 // We're already done
1569 Mdl
->MdlFlags
&= ~MDL_PAGES_LOCKED
;
1574 // Otherwise, stop here
1576 LastPage
= MdlPages
;
1580 /* Save the PFN entry instead for the secondary loop */
1581 *MdlPages
= (PFN_NUMBER
)MiGetPfnEntry(*MdlPages
);
1582 ASSERT(*MdlPages
!= 0);
1583 } while (++MdlPages
< LastPage
);
1588 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
1591 // Now grab the PFN lock for the actual unlock and dereference
1593 OldIrql
= MiAcquirePfnLock();
1596 /* Get the current entry and reference count */
1597 Pfn1
= (PMMPFN
)*MdlPages
;
1598 MiDereferencePfnAndDropLockCount(Pfn1
);
1599 } while (++MdlPages
< LastPage
);
1604 MiReleasePfnLock(OldIrql
);
1609 Mdl
->MdlFlags
&= ~MDL_PAGES_LOCKED
;
1617 MmAdvanceMdl(IN PMDL Mdl
,
1618 IN ULONG NumberOfBytes
)
1621 return STATUS_NOT_IMPLEMENTED
;
1629 MmMapLockedPagesWithReservedMapping(IN PVOID MappingAddress
,
1631 IN PMDL MemoryDescriptorList
,
1632 IN MEMORY_CACHING_TYPE CacheType
)
1643 MmUnmapReservedMapping(IN PVOID BaseAddress
,
1645 IN PMDL MemoryDescriptorList
)
1655 MmPrefetchPages(IN ULONG NumberOfLists
,
1656 IN PREAD_LIST
*ReadLists
)
1659 return STATUS_NOT_IMPLEMENTED
;
1667 MmProtectMdlSystemAddress(IN PMDL MemoryDescriptorList
,
1668 IN ULONG NewProtect
)
1671 return STATUS_NOT_IMPLEMENTED
;
1679 MmProbeAndLockProcessPages(IN OUT PMDL MemoryDescriptorList
,
1680 IN PEPROCESS Process
,
1681 IN KPROCESSOR_MODE AccessMode
,
1682 IN LOCK_OPERATION Operation
)
1693 MmProbeAndLockSelectedPages(IN OUT PMDL MemoryDescriptorList
,
1694 IN LARGE_INTEGER PageList
[],
1695 IN KPROCESSOR_MODE AccessMode
,
1696 IN LOCK_OPERATION Operation
)
1706 MmMapMemoryDumpMdl(IN PMDL Mdl
)