2 * PROJECT: ReactOS Kernel
3 * LICENSE: BSD - See COPYING.ARM in the top level directory
4 * FILE: ntoskrnl/mm/ARM3/mdlsup.c
5 * PURPOSE: ARM Memory Manager Memory Descriptor List (MDL) Management
6 * PROGRAMMERS: ReactOS Portable Systems Group
9 /* INCLUDES *******************************************************************/
15 #define MODULE_INVOLVED_IN_ARM3
16 #include <mm/ARM3/miarm.h>
18 /* GLOBALS ********************************************************************/
/* Debug aid: when set, locked-page ownership is tracked (set elsewhere; only declared here). */
21 BOOLEAN MmTrackLockedPages
;
/* Global count of pages currently locked for system use (declaration only here). */
22 SIZE_T MmSystemLockPagesCount
;
24 /* INTERNAL FUNCTIONS *********************************************************/
/*
 * MiMapLockedPagesInUserSpace
 *
 * Maps the physical pages described by an MDL into the current process's
 * user address space. Visible steps: allocate a long VAD marked
 * VadDevicePhysicalMemory / MM_READWRITE / private, pick a free range (or
 * validate the caller-supplied BaseAddress), insert the VAD, then build a
 * valid user PTE per MDL page, taking a share-count reference on each
 * backing page-table page. Returns StartingVa + the MDL's byte offset;
 * failures raise Status via ExRaiseStatus after freeing the VAD.
 *
 * NOTE(review): this listing is a lossy extraction -- declarations, braces
 * and some call arguments are missing between the visible lines, so the
 * text below is not compilable as-is. Comments describe only what the
 * visible statements show.
 */
28 MiMapLockedPagesInUserSpace(
31 _In_ MEMORY_CACHING_TYPE CacheType
,
32 _In_opt_ PVOID BaseAddress
)
35 PEPROCESS Process
= PsGetCurrentProcess();
36 PETHREAD Thread
= PsGetCurrentThread();
37 TABLE_SEARCH_RESULT Result
;
38 MI_PFN_CACHE_ATTRIBUTE CacheAttribute
;
43 PMMADDRESS_NODE Parent
;
52 BOOLEAN AddressSpaceLocked
= FALSE
;
56 DPRINT("MiMapLockedPagesInUserSpace(%p, %p, 0x%x, %p)\n",
57 Mdl
, StartVa
, CacheType
, BaseAddress
);
/* Span count comes from the caller's VA + the MDL byte count */
59 NumberOfPages
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(StartVa
,
60 MmGetMdlByteCount(Mdl
));
61 MdlPages
= MmGetMdlPfnArray(Mdl
);
63 ASSERT(CacheType
<= MmWriteCombined
);
/* I/O-space MDLs select a different row of the platform cache table */
65 IsIoMapping
= (Mdl
->MdlFlags
& MDL_IO_SPACE
) != 0;
66 CacheAttribute
= MiPlatformCacheAttributes
[IsIoMapping
][CacheType
];
68 /* Large pages are always cached, make sure we're not asking for those */
69 if (CacheAttribute
!= MiCached
)
71 DPRINT1("FIXME: Need to check for large pages\n");
74 /* Allocate a VAD for our mapped region */
75 Vad
= ExAllocatePoolWithTag(NonPagedPool
, sizeof(MMVAD_LONG
), 'ldaV');
78 Status
= STATUS_INSUFFICIENT_RESOURCES
;
82 /* Initialize PhysicalMemory VAD */
83 RtlZeroMemory(Vad
, sizeof(*Vad
));
84 Vad
->u2
.VadFlags2
.LongVad
= 1;
85 Vad
->u
.VadFlags
.VadType
= VadDevicePhysicalMemory
;
86 Vad
->u
.VadFlags
.Protection
= MM_READWRITE
;
87 Vad
->u
.VadFlags
.PrivateMemory
= 1;
89 /* Did the caller specify an address? */
90 if (BaseAddress
== NULL
)
92 /* We get to pick the address */
93 MmLockAddressSpace(&Process
->Vm
);
94 AddressSpaceLocked
= TRUE
;
95 if (Process
->VmDeleted
)
97 Status
= STATUS_PROCESS_IS_TERMINATING
;
/* Find a free range in the VAD tree (remaining arguments elided in this extraction) */
101 Result
= MiFindEmptyAddressRangeInTree(NumberOfPages
<< PAGE_SHIFT
,
102 MM_VIRTMEM_GRANULARITY
,
106 if (Result
== TableFoundNode
)
108 Status
= STATUS_NO_MEMORY
;
111 EndingVa
= StartingVa
+ NumberOfPages
* PAGE_SIZE
- 1;
112 BaseAddress
= (PVOID
)StartingVa
;
116 /* Caller specified a base address */
117 StartingVa
= (ULONG_PTR
)BaseAddress
;
118 EndingVa
= StartingVa
+ NumberOfPages
* PAGE_SIZE
- 1;
120 /* Make sure it's valid: page-aligned, non-wrapping, below the VAD limit */
121 if (BYTE_OFFSET(StartingVa
) != 0 ||
122 EndingVa
<= StartingVa
||
123 EndingVa
> (ULONG_PTR
)MM_HIGHEST_VAD_ADDRESS
)
125 Status
= STATUS_INVALID_ADDRESS
;
129 MmLockAddressSpace(&Process
->Vm
);
130 AddressSpaceLocked
= TRUE
;
131 if (Process
->VmDeleted
)
133 Status
= STATUS_PROCESS_IS_TERMINATING
;
137 /* Check if it's already in use */
138 Result
= MiCheckForConflictingNode(StartingVa
>> PAGE_SHIFT
,
139 EndingVa
>> PAGE_SHIFT
,
142 if (Result
== TableFoundNode
)
144 Status
= STATUS_CONFLICTING_ADDRESSES
;
/* Record the chosen VPN range in the VAD before inserting it */
149 Vad
->StartingVpn
= StartingVa
>> PAGE_SHIFT
;
150 Vad
->EndingVpn
= EndingVa
>> PAGE_SHIFT
;
152 MiLockProcessWorkingSetUnsafe(Process
, Thread
);
154 ASSERT(Vad
->EndingVpn
>= Vad
->StartingVpn
);
156 MiInsertVad((PMMVAD
)Vad
, &Process
->VadRoot
);
158 /* Check if this is uncached */
159 if (CacheAttribute
!= MiCached
)
161 /* Flush all caches */
162 KeFlushEntireTb(TRUE
, TRUE
);
163 KeInvalidateAllCaches();
/* Walk the PFN array, mapping one user PTE per page; LIST_HEAD terminates the array */
166 PointerPte
= MiAddressToPte(BaseAddress
);
167 while (NumberOfPages
!= 0 &&
168 *MdlPages
!= LIST_HEAD
)
170 PointerPde
= MiPteToPde(PointerPte
);
171 MiMakePdeExistAndMakeValid(PointerPde
, Process
, MM_NOIRQL
);
172 ASSERT(PointerPte
->u
.Hard
.Valid
== 0);
174 /* Add a PDE reference for each page */
175 MiIncrementPageTableReferences(BaseAddress
);
177 /* Set up our basic user PTE */
178 MI_MAKE_HARDWARE_PTE_USER(&TempPte
,
183 /* FIXME: We need to respect the PFN's caching information in some cases */
184 Pfn2
= MiGetPfnEntry(*MdlPages
);
187 ASSERT(Pfn2
->u3
.e2
.ReferenceCount
!= 0);
189 if (Pfn2
->u3
.e1
.CacheAttribute
!= CacheAttribute
)
191 DPRINT1("FIXME: Using caller's cache attribute instead of PFN override\n");
194 /* We don't support AWE magic */
195 ASSERT(Pfn2
->u3
.e1
.CacheAttribute
!= MiNotMapped
);
198 /* Configure caching */
199 switch (CacheAttribute
)
202 MI_PAGE_DISABLE_CACHE(&TempPte
);
203 MI_PAGE_WRITE_THROUGH(&TempPte
);
207 case MiWriteCombined
:
208 MI_PAGE_DISABLE_CACHE(&TempPte
);
209 MI_PAGE_WRITE_COMBINED(&TempPte
);
216 /* Make the page valid */
217 MI_WRITE_VALID_PTE(PointerPte
, TempPte
);
219 /* Acquire a share count on the page-table page, under the PFN lock */
220 Pfn1
= MI_PFN_ELEMENT(PointerPde
->u
.Hard
.PageFrameNumber
);
221 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
222 Pfn1
->u2
.ShareCount
++;
223 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
229 BaseAddress
= (PVOID
)((ULONG_PTR
)BaseAddress
+ PAGE_SIZE
);
232 MiUnlockProcessWorkingSetUnsafe(Process
, Thread
);
233 ASSERT(AddressSpaceLocked
);
234 MmUnlockAddressSpace(&Process
->Vm
);
/* Success: hand back the user VA adjusted by the MDL's byte offset */
236 ASSERT(StartingVa
!= 0);
237 return (PVOID
)((ULONG_PTR
)StartingVa
+ MmGetMdlByteOffset(Mdl
));
/* Failure path: undo the lock, free the VAD, raise the saved status */
240 if (AddressSpaceLocked
)
242 MmUnlockAddressSpace(&Process
->Vm
);
246 ExFreePoolWithTag(Vad
, 'ldaV');
248 ExRaiseStatus(Status
);
/*
 * MiUnmapLockedPagesInUserSpace
 *
 * Tears down a user-space MDL mapping created by
 * MiMapLockedPagesInUserSpace: locates the VAD for BaseAddress (must be
 * VadDevicePhysicalMemory), removes it from the process VAD tree, erases
 * each mapped PTE (dereferencing the owning page-table page as it goes,
 * deleting the PDE when its reference count drops to zero), and finally
 * frees the VAD ('ldaV' tag).
 *
 * NOTE(review): lossy extraction -- braces, declarations and some call
 * arguments are missing between the visible lines.
 */
254 MiUnmapLockedPagesInUserSpace(
255 _In_ PVOID BaseAddress
,
258 PEPROCESS Process
= PsGetCurrentProcess();
259 PETHREAD Thread
= PsGetCurrentThread();
265 PPFN_NUMBER MdlPages
;
266 PFN_NUMBER PageTablePage
;
268 DPRINT("MiUnmapLockedPagesInUserSpace(%p, %p)\n", BaseAddress
, Mdl
);
270 NumberOfPages
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(MmGetMdlVirtualAddress(Mdl
),
271 MmGetMdlByteCount(Mdl
));
272 ASSERT(NumberOfPages
!= 0);
273 MdlPages
= MmGetMdlPfnArray(Mdl
);
276 MmLockAddressSpace(&Process
->Vm
);
277 Vad
= MiLocateAddress(BaseAddress
);
/* Reject if the VAD is not a device-physical-memory mapping (condition start elided) */
279 Vad
->u
.VadFlags
.VadType
!= VadDevicePhysicalMemory
)
281 DPRINT1("MiUnmapLockedPagesInUserSpace invalid for %p\n", BaseAddress
);
282 MmUnlockAddressSpace(&Process
->Vm
);
286 MiLockProcessWorkingSetUnsafe(Process
, Thread
);
288 /* Remove it from the process VAD tree */
289 ASSERT(Process
->VadRoot
.NumberGenericTableElements
>= 1);
290 MiRemoveNode((PMMADDRESS_NODE
)Vad
, &Process
->VadRoot
);
292 /* MiRemoveNode should have removed us if we were the hint */
293 ASSERT(Process
->VadRoot
.NodeHint
!= Vad
);
/* Erase each mapped PTE under the PFN lock; LIST_HEAD terminates the PFN array */
295 PointerPte
= MiAddressToPte(BaseAddress
);
296 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
297 while (NumberOfPages
!= 0 &&
298 *MdlPages
!= LIST_HEAD
)
300 ASSERT(MiAddressToPte(PointerPte
)->u
.Hard
.Valid
== 1);
301 ASSERT(PointerPte
->u
.Hard
.Valid
== 1);
303 /* Dereference the page */
304 MiDecrementPageTableReferences(BaseAddress
);
307 MI_ERASE_PTE(PointerPte
);
309 /* We invalidated this PTE, so dereference the PDE */
310 PointerPde
= MiAddressToPde(BaseAddress
);
311 PageTablePage
= PointerPde
->u
.Hard
.PageFrameNumber
;
312 MiDecrementShareCount(MiGetPfnEntry(PageTablePage
), PageTablePage
);
317 BaseAddress
= (PVOID
)((ULONG_PTR
)BaseAddress
+ PAGE_SIZE
);
320 /* Moving to a new PDE? */
321 if (PointerPde
!= MiAddressToPde(BaseAddress
))
323 /* See if we should delete it */
325 PointerPde
= MiPteToPde(PointerPte
- 1);
326 ASSERT(PointerPde
->u
.Hard
.Valid
== 1);
327 if (MiQueryPageTableReferences(BaseAddress
) == 0)
329 ASSERT(PointerPde
->u
.Long
!= 0);
/* Page table no longer referenced: delete the PDE itself (trailing args elided) */
330 MiDeletePte(PointerPde
,
331 MiPteToAddress(PointerPde
),
339 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
340 MiUnlockProcessWorkingSetUnsafe(Process
, Thread
);
341 MmUnlockAddressSpace(&Process
->Vm
);
342 ExFreePoolWithTag(Vad
, 'ldaV');
345 /* PUBLIC FUNCTIONS ***********************************************************/
/*
 * MmCreateMdl
 *
 * If the caller did not supply an MDL, allocates one from nonpaged pool
 * (sized via MmSizeOfMdl, tag TAG_MDL) and initializes it for Base/Length.
 * Returns NULL on allocation failure.
 * NOTE(review): return type, Base/Length parameters and braces are elided
 * in this extraction.
 */
352 MmCreateMdl(IN PMDL Mdl
,
359 // Check if we don't have an MDL built
364 // Calculate the size we'll need and allocate the MDL
366 Size
= MmSizeOfMdl(Base
, Length
);
367 Mdl
= ExAllocatePoolWithTag(NonPagedPool
, Size
, TAG_MDL
);
368 if (!Mdl
) return NULL
;
374 MmInitializeMdl(Mdl
, Base
, Length
);
/*
 * MmSizeOfMdl
 *
 * Computes the allocation size for an MDL describing Base/Length:
 * the fixed header (elided here) plus one PFN_NUMBER per spanned page.
 */
383 MmSizeOfMdl(IN PVOID Base
,
387 // Return the MDL size
390 (ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base
, Length
) * sizeof(PFN_NUMBER
));
/*
 * MmBuildMdlForNonPagedPool
 *
 * Fills in the PFN array of an MDL whose buffer is already resident
 * (nonpaged pool): walks the PTEs of the described range, records each
 * PFN, sets MappedSystemVa and MDL_SOURCE_IS_NONPAGED_POOL, and flags
 * MDL_IO_SPACE when the last PFN has no PFN-database entry.
 * NOTE(review): lossy extraction -- loop body and some statements elided.
 */
398 MmBuildMdlForNonPagedPool(IN PMDL Mdl
)
400 PPFN_NUMBER MdlPages
, EndPage
;
401 PFN_NUMBER Pfn
, PageCount
;
408 ASSERT(Mdl
->ByteCount
!= 0);
/* MDL must not already be locked, mapped, or pool-sourced */
409 ASSERT((Mdl
->MdlFlags
& (MDL_PAGES_LOCKED
|
410 MDL_MAPPED_TO_SYSTEM_VA
|
411 MDL_SOURCE_IS_NONPAGED_POOL
|
415 // We know the MDL isn't associated to a process now
420 // Get page and VA information
422 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
426 // Set the system address and now get the page count
428 Mdl
->MappedSystemVa
= (PVOID
)((ULONG_PTR
)Base
+ Mdl
->ByteOffset
);
429 PageCount
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Mdl
->MappedSystemVa
,
431 ASSERT(PageCount
!= 0);
432 EndPage
= MdlPages
+ PageCount
;
/* Walk the PTEs, recording one PFN per page (loop interior elided) */
437 PointerPte
= MiAddressToPte(Base
);
443 Pfn
= PFN_FROM_PTE(PointerPte
++);
445 } while (MdlPages
< EndPage
);
448 // Set the nonpaged pool flag
450 Mdl
->MdlFlags
|= MDL_SOURCE_IS_NONPAGED_POOL
;
453 // Check if this is an I/O mapping (last PFN has no PFN database entry)
455 if (!MiGetPfnEntry(Pfn
)) Mdl
->MdlFlags
|= MDL_IO_SPACE
;
/*
 * MmAllocatePagesForMdl
 *
 * Thin wrapper: forwards to the internal MiAllocatePagesForMdl
 * (remaining arguments elided in this extraction).
 */
463 MmAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress
,
464 IN PHYSICAL_ADDRESS HighAddress
,
465 IN PHYSICAL_ADDRESS SkipBytes
,
466 IN SIZE_T TotalBytes
)
469 // Call the internal routine
471 return MiAllocatePagesForMdl(LowAddress
,
/*
 * MmAllocatePagesForMdlEx
 *
 * Extended page allocation for an MDL: validates CacheType (out-of-range
 * values fall back to MiNotMapped), converts it to the internal cache
 * attribute via the non-I/O row of MiPlatformCacheAttributes, validates
 * Flags against the two permitted bits, then forwards to
 * MiAllocatePagesForMdl (trailing arguments elided in this extraction).
 */
484 MmAllocatePagesForMdlEx(IN PHYSICAL_ADDRESS LowAddress
,
485 IN PHYSICAL_ADDRESS HighAddress
,
486 IN PHYSICAL_ADDRESS SkipBytes
,
487 IN SIZE_T TotalBytes
,
488 IN MEMORY_CACHING_TYPE CacheType
,
491 MI_PFN_CACHE_ATTRIBUTE CacheAttribute
;
494 // Check for invalid cache type
496 if (CacheType
> MmWriteCombined
)
499 // Normalize to default
501 CacheAttribute
= MiNotMapped
;
506 // Convert to internal caching attribute
508 CacheAttribute
= MiPlatformCacheAttributes
[FALSE
][CacheType
];
512 // Only these flags are allowed
514 if (Flags
& ~(MM_DONT_ZERO_ALLOCATION
| MM_ALLOCATE_FROM_LOCAL_NODE_ONLY
))
523 // Call the internal routine
525 return MiAllocatePagesForMdl(LowAddress
,
/*
 * MmFreePagesFromMdl
 *
 * Returns to the system the physical pages previously allocated into this
 * MDL: under the PFN lock, walks the PFN array until LIST_HEAD, validates
 * each PFN entry (bugchecking on a corrupted entry), clears its
 * allocation markers, and drops its reference (direct decrement when
 * shared, MiDecrementReferenceCount for the last reference). Each array
 * slot is overwritten with LIST_HEAD so the MDL cannot be double-freed.
 * Finally clears MDL_PAGES_LOCKED.
 * NOTE(review): lossy extraction -- braces and some statements elided.
 */
538 MmFreePagesFromMdl(IN PMDL Mdl
)
545 DPRINT("Freeing MDL: %p\n", Mdl
);
550 ASSERT(KeGetCurrentIrql() <= APC_LEVEL
);
551 ASSERT((Mdl
->MdlFlags
& MDL_IO_SPACE
) == 0);
552 ASSERT(((ULONG_PTR
)Mdl
->StartVa
& (PAGE_SIZE
- 1)) == 0);
555 // Get address and page information
557 Base
= (PVOID
)((ULONG_PTR
)Mdl
->StartVa
+ Mdl
->ByteOffset
);
558 NumberOfPages
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base
, Mdl
->ByteCount
);
563 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
566 // Loop all the MDL pages
568 Pages
= (PPFN_NUMBER
)(Mdl
+ 1);
572 // Reached the last page
574 if (*Pages
== LIST_HEAD
) break;
577 // Get the page entry
579 Pfn1
= MiGetPfnEntry(*Pages
);
581 ASSERT(Pfn1
->u2
.ShareCount
== 1);
582 ASSERT(MI_IS_PFN_DELETED(Pfn1
) == TRUE
);
/* 0x1FFEDCB: marker value expected in PteFrame for MDL-allocated pages --
   presumably stamped at allocation time; TODO(review) confirm against MiAllocatePagesForMdl */
583 if (Pfn1
->u4
.PteFrame
!= 0x1FFEDCB)
585 /* Corrupted PFN entry or invalid free */
586 KeBugCheckEx(MEMORY_MANAGEMENT
, 0x1236, (ULONG_PTR
)Mdl
, (ULONG_PTR
)Pages
, *Pages
);
/* Clear allocation markers before releasing the page */
592 Pfn1
->u3
.e1
.StartOfAllocation
= 0;
593 Pfn1
->u3
.e1
.EndOfAllocation
= 0;
594 Pfn1
->u3
.e1
.PageLocation
= StandbyPageList
;
595 Pfn1
->u2
.ShareCount
= 0;
600 ASSERT(Pfn1
->u3
.e2
.ReferenceCount
!= 0);
601 if (Pfn1
->u3
.e2
.ReferenceCount
!= 1)
603 /* Just take off one reference */
604 InterlockedDecrement16((PSHORT
)&Pfn1
->u3
.e2
.ReferenceCount
);
608 /* We'll be nuking the whole page */
609 MiDecrementReferenceCount(Pfn1
, *Pages
);
613 // Clear this page and move on
615 *Pages
++ = LIST_HEAD
;
616 } while (--NumberOfPages
!= 0);
621 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
624 // Remove the pages locked flag
626 Mdl
->MdlFlags
&= ~MDL_PAGES_LOCKED
;
/*
 * MmMapLockedPagesSpecifyCache
 *
 * Maps the pages of a locked MDL. Kernel-mode path (visible here):
 * reserves system PTEs, builds a ValidKernelPte template with the
 * requested cache attribute, writes one valid PTE per MDL page, records
 * MappedSystemVa and MDL_MAPPED_TO_SYSTEM_VA (plus
 * MDL_PARTIAL_HAS_BEEN_MAPPED for partial MDLs), and returns the mapped
 * address. On PTE-reservation failure it returns NULL if the MDL allows
 * failure or BugCheckOnFailure is clear; otherwise bugchecks with
 * NO_MORE_SYSTEM_PTES. User-mode requests are forwarded to
 * MiMapLockedPagesInUserSpace.
 * NOTE(review): lossy extraction -- braces, declarations and some
 * statements elided; Priority is visibly unused in the remaining lines.
 */
634 MmMapLockedPagesSpecifyCache(IN PMDL Mdl
,
635 IN KPROCESSOR_MODE AccessMode
,
636 IN MEMORY_CACHING_TYPE CacheType
,
637 IN PVOID BaseAddress
,
638 IN ULONG BugCheckOnFailure
,
639 IN MM_PAGE_PRIORITY Priority
)
642 PPFN_NUMBER MdlPages
, LastPage
;
645 MI_PFN_CACHE_ATTRIBUTE CacheAttribute
;
652 ASSERT(Mdl
->ByteCount
!= 0);
657 Base
= (PVOID
)((ULONG_PTR
)Mdl
->StartVa
+ Mdl
->ByteOffset
);
660 // Handle kernel case first
662 if (AccessMode
== KernelMode
)
665 // Get the list of pages and count
667 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
668 PageCount
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base
, Mdl
->ByteCount
);
669 LastPage
= MdlPages
+ PageCount
;
/* Sanity: not already mapped, and either locked or partial */
674 ASSERT((Mdl
->MdlFlags
& (MDL_MAPPED_TO_SYSTEM_VA
|
675 MDL_SOURCE_IS_NONPAGED_POOL
|
676 MDL_PARTIAL_HAS_BEEN_MAPPED
)) == 0);
677 ASSERT((Mdl
->MdlFlags
& (MDL_PAGES_LOCKED
| MDL_PARTIAL
)) != 0);
680 // Get the correct cache type
682 IsIoMapping
= (Mdl
->MdlFlags
& MDL_IO_SPACE
) != 0;
683 CacheAttribute
= MiPlatformCacheAttributes
[IsIoMapping
][CacheType
];
688 PointerPte
= MiReserveSystemPtes(PageCount
, SystemPteSpace
);
692 // If it can fail, return NULL
694 if (Mdl
->MdlFlags
& MDL_MAPPING_CAN_FAIL
) return NULL
;
697 // Should we bugcheck?
699 if (!BugCheckOnFailure
) return NULL
;
702 // Yes, crash the system
704 KeBugCheckEx(NO_MORE_SYSTEM_PTES
, 0, PageCount
, 0, 0);
708 // Get the mapped address
710 Base
= (PVOID
)((ULONG_PTR
)MiPteToAddress(PointerPte
) + Mdl
->ByteOffset
);
/* Build the PTE template with the requested caching */
715 TempPte
= ValidKernelPte
;
716 switch (CacheAttribute
)
723 MI_PAGE_DISABLE_CACHE(&TempPte
);
724 MI_PAGE_WRITE_THROUGH(&TempPte
);
727 case MiWriteCombined
:
730 // Enable write combining
732 MI_PAGE_DISABLE_CACHE(&TempPte
);
733 MI_PAGE_WRITE_COMBINED(&TempPte
);
/* Map each PFN; LIST_HEAD terminates the array early */
751 if (*MdlPages
== LIST_HEAD
) break;
756 TempPte
.u
.Hard
.PageFrameNumber
= *MdlPages
;
757 MI_WRITE_VALID_PTE(PointerPte
++, TempPte
);
758 } while (++MdlPages
< LastPage
);
763 ASSERT((Mdl
->MdlFlags
& MDL_MAPPED_TO_SYSTEM_VA
) == 0);
764 Mdl
->MappedSystemVa
= Base
;
765 Mdl
->MdlFlags
|= MDL_MAPPED_TO_SYSTEM_VA
;
768 // Check if it was partial
770 if (Mdl
->MdlFlags
& MDL_PARTIAL
)
773 // Write the appropriate flag here too
775 Mdl
->MdlFlags
|= MDL_PARTIAL_HAS_BEEN_MAPPED
;
779 // Return the mapped address
/* User-mode request: delegate to the user-space mapping routine */
784 return MiMapLockedPagesInUserSpace(Mdl
, Base
, CacheType
, BaseAddress
);
/*
 * MmMapLockedPages
 *
 * Legacy entry point: forwards to MmMapLockedPagesSpecifyCache
 * (remaining default arguments elided in this extraction).
 */
792 MmMapLockedPages(IN PMDL Mdl
,
793 IN KPROCESSOR_MODE AccessMode
)
796 // Call the extended version
798 return MmMapLockedPagesSpecifyCache(Mdl
,
/*
 * MmUnmapLockedPages
 *
 * Undoes a MmMapLockedPages* mapping. Kernel addresses: validates the
 * MDL/PTE state, optionally widens the range by the extra-PTE count
 * stored after the PFN array (MDL_FREE_EXTRA_PTES), clears the mapping
 * flags, and releases the system PTEs. User addresses are forwarded to
 * MiUnmapLockedPagesInUserSpace.
 * NOTE(review): lossy extraction -- braces and some statements elided.
 */
811 MmUnmapLockedPages(IN PVOID BaseAddress
,
815 PFN_COUNT PageCount
, ExtraPageCount
;
816 PPFN_NUMBER MdlPages
;
822 ASSERT(Mdl
->ByteCount
!= 0);
825 // Check if this is a kernel request
827 if (BaseAddress
> MM_HIGHEST_USER_ADDRESS
)
830 // Get base and count information
832 Base
= (PVOID
)((ULONG_PTR
)Mdl
->StartVa
+ Mdl
->ByteOffset
);
833 PageCount
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base
, Mdl
->ByteCount
);
838 ASSERT((Mdl
->MdlFlags
& MDL_PARENT_MAPPED_SYSTEM_VA
) == 0);
839 ASSERT(PageCount
!= 0);
840 ASSERT(Mdl
->MdlFlags
& MDL_MAPPED_TO_SYSTEM_VA
);
845 PointerPte
= MiAddressToPte(BaseAddress
);
848 // This should be a resident system PTE
850 ASSERT(PointerPte
>= MmSystemPtesStart
[SystemPteSpace
]);
851 ASSERT(PointerPte
<= MmSystemPtesEnd
[SystemPteSpace
]);
852 ASSERT(PointerPte
->u
.Hard
.Valid
== 1);
855 // Check if the caller wants us to free advanced pages
857 if (Mdl
->MdlFlags
& MDL_FREE_EXTRA_PTES
)
860 // Get the MDL page array
862 MdlPages
= MmGetMdlPfnArray(Mdl
);
864 /* Number of extra pages stored after the PFN array */
865 ExtraPageCount
= (PFN_COUNT
)*(MdlPages
+ PageCount
);
/* Widen the release range to cover the extra PTEs that precede the mapping */
870 PageCount
+= ExtraPageCount
;
871 PointerPte
-= ExtraPageCount
;
872 ASSERT(PointerPte
>= MmSystemPtesStart
[SystemPteSpace
]);
873 ASSERT(PointerPte
<= MmSystemPtesEnd
[SystemPteSpace
]);
876 // Get the new base address
878 BaseAddress
= (PVOID
)((ULONG_PTR
)BaseAddress
-
879 (ExtraPageCount
<< PAGE_SHIFT
));
/* Clear all mapping-related flags before releasing the PTEs */
885 Mdl
->MdlFlags
&= ~(MDL_MAPPED_TO_SYSTEM_VA
|
886 MDL_PARTIAL_HAS_BEEN_MAPPED
|
887 MDL_FREE_EXTRA_PTES
);
890 // Release the system PTEs
892 MiReleaseSystemPtes(PointerPte
, PageCount
, SystemPteSpace
);
/* User-space mapping: delegate to the user-space teardown routine */
896 MiUnmapLockedPagesInUserSpace(BaseAddress
, Mdl
);
/*
 * MmProbeAndLockPages
 *
 * Probes the buffer described by the MDL for the requested access and
 * locks its physical pages. Visible flow: validate the MDL state, probe
 * every page inside SEH (reading each page, and ProbeForWriteChar for
 * user-mode write operations), raise the probe status on failure, then
 * walk the PTE hierarchy making each page resident (via MmAccessFault
 * with a 0xBADBADA3 placeholder trap-info cookie), handling
 * copy-on-write / non-writable PTEs for write operations, referencing
 * each PFN (MiReferenceProbedPageAndBumpLockCount) and storing it in the
 * MDL's PFN array. Locking is done under either the PFN lock or the
 * process working-set lock depending on the (elided) UsePfnLock choice.
 * Failure path releases locks and raises Status via ExRaiseStatus.
 * NOTE(review): lossy extraction -- braces, #endif lines, declarations
 * and some statements are elided; comments describe only visible lines.
 */
905 MmProbeAndLockPages(IN PMDL Mdl
,
906 IN KPROCESSOR_MODE AccessMode
,
907 IN LOCK_OPERATION Operation
)
909 PPFN_NUMBER MdlPages
;
910 PVOID Base
, Address
, LastAddress
, StartAddress
;
911 ULONG LockPages
, TotalPages
;
912 NTSTATUS Status
= STATUS_SUCCESS
;
913 PEPROCESS CurrentProcess
;
914 NTSTATUS ProbeStatus
;
915 PMMPTE PointerPte
, LastPte
;
917 #if (_MI_PAGING_LEVELS >= 3)
920 #if (_MI_PAGING_LEVELS == 4)
923 PFN_NUMBER PageFrameIndex
;
927 DPRINT("Probing MDL: %p\n", Mdl
);
/* MDL must be fresh: non-empty, aligned, and not locked/mapped already */
932 ASSERT(Mdl
->ByteCount
!= 0);
933 ASSERT(((ULONG
)Mdl
->ByteOffset
& ~(PAGE_SIZE
- 1)) == 0);
934 ASSERT(((ULONG_PTR
)Mdl
->StartVa
& (PAGE_SIZE
- 1)) == 0);
935 ASSERT((Mdl
->MdlFlags
& (MDL_PAGES_LOCKED
|
936 MDL_MAPPED_TO_SYSTEM_VA
|
937 MDL_SOURCE_IS_NONPAGED_POOL
|
939 MDL_IO_SPACE
)) == 0);
942 // Get page and base information
944 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
948 // Get the addresses and how many pages we span (and need to lock)
950 Address
= (PVOID
)((ULONG_PTR
)Base
+ Mdl
->ByteOffset
);
951 LastAddress
= (PVOID
)((ULONG_PTR
)Address
+ Mdl
->ByteCount
);
952 LockPages
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Address
, Mdl
->ByteCount
);
953 ASSERT(LockPages
!= 0);
955 /* Block invalid access */
956 if ((AccessMode
!= KernelMode
) &&
957 ((LastAddress
> (PVOID
)MM_USER_PROBE_ADDRESS
) || (Address
>= LastAddress
)))
959 /* Caller should be in SEH, raise the error */
960 *MdlPages
= LIST_HEAD
;
961 ExRaiseStatus(STATUS_ACCESS_VIOLATION
);
/* User buffers get the current process recorded; kernel buffers get NULL */
967 if (Address
<= MM_HIGHEST_USER_ADDRESS
)
972 CurrentProcess
= PsGetCurrentProcess();
979 CurrentProcess
= NULL
;
983 // Save the number of pages we'll have to lock, and the start address
985 TotalPages
= LockPages
;
986 StartAddress
= Address
;
988 /* Large pages not supported */
989 ASSERT(!MI_IS_PHYSICAL_ADDRESS(Address
));
/* Probe pass (inside SEH): touch each page, probing for write if needed */
994 ProbeStatus
= STATUS_SUCCESS
;
1005 *MdlPages
= LIST_HEAD
;
1010 *(volatile CHAR
*)Address
;
1013 // Check if this is write access (only probe for user-mode)
1015 if ((Operation
!= IoReadAccess
) &&
1016 (Address
<= MM_HIGHEST_USER_ADDRESS
))
1019 // Probe for write too
1021 ProbeForWriteChar(Address
);
1027 Address
= PAGE_ALIGN((ULONG_PTR
)Address
+ PAGE_SIZE
);
1034 } while (Address
< LastAddress
);
1037 // Reset back to the original page
1039 ASSERT(LockPages
== 0);
1040 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
1042 _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER
)
1047 ProbeStatus
= _SEH2_GetExceptionCode();
1052 // So how did that go?
1054 if (ProbeStatus
!= STATUS_SUCCESS
)
1059 DPRINT1("MDL PROBE FAILED!\n");
1060 Mdl
->Process
= NULL
;
1061 ExRaiseStatus(ProbeStatus
);
1065 // Get the PTE and PDE
1067 PointerPte
= MiAddressToPte(StartAddress
);
1068 PointerPde
= MiAddressToPde(StartAddress
);
1069 #if (_MI_PAGING_LEVELS >= 3)
1070 PointerPpe
= MiAddressToPpe(StartAddress
);
1072 #if (_MI_PAGING_LEVELS == 4)
1073 PointerPxe
= MiAddressToPxe(StartAddress
);
1079 ASSERT(MdlPages
== (PPFN_NUMBER
)(Mdl
+ 1));
1082 // Check what kind of operation this is
1084 if (Operation
!= IoReadAccess
)
1087 // Set the write flag
1089 Mdl
->MdlFlags
|= MDL_WRITE_OPERATION
;
1094 // Remove the write flag
1096 Mdl
->MdlFlags
&= ~(MDL_WRITE_OPERATION
);
1100 // Mark the MDL as locked *now*
1102 Mdl
->MdlFlags
|= MDL_PAGES_LOCKED
;
1105 // Check if this came from kernel mode
1107 if (Base
> MM_HIGHEST_USER_ADDRESS
)
1110 // We should not have a process
1112 ASSERT(CurrentProcess
== NULL
)
;
1113 Mdl
->Process
= NULL
;
1116 // In kernel mode, we don't need to check for write access
1118 Operation
= IoReadAccess
;
/* Kernel path takes the PFN lock for the locking loop */
1124 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
1131 ASSERT(TotalPages
!= 0);
1132 ASSERT(CurrentProcess
== PsGetCurrentProcess());
1135 // Track locked pages
1137 InterlockedExchangeAddSizeT(&CurrentProcess
->NumberOfLockedPages
,
1143 Mdl
->Process
= CurrentProcess
;
1145 /* Lock the process working set */
1146 MiLockProcessWorkingSet(CurrentProcess
, PsGetCurrentThread());
1148 OldIrql
= MM_NOIRQL
;
1154 LastPte
= MiAddressToPte((PVOID
)((ULONG_PTR
)LastAddress
- 1));
/* Main locking loop: make each page resident, then reference and record its PFN */
1162 // Assume failure and check for non-mapped pages
1164 *MdlPages
= LIST_HEAD
;
1166 #if (_MI_PAGING_LEVELS == 4)
1167 (PointerPxe
->u
.Hard
.Valid
== 0) ||
1169 #if (_MI_PAGING_LEVELS >= 3)
1170 (PointerPpe
->u
.Hard
.Valid
== 0) ||
1172 (PointerPde
->u
.Hard
.Valid
== 0) ||
1173 (PointerPte
->u
.Hard
.Valid
== 0))
1176 // What kind of lock were we using?
1183 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
1187 /* Release process working set */
1188 MiUnlockProcessWorkingSet(CurrentProcess
, PsGetCurrentThread());
/* Fault the page in while unlocked */
1194 Address
= MiPteToAddress(PointerPte
);
1196 //HACK: Pass a placeholder TrapInformation so the fault handler knows we're unlocked
1197 Status
= MmAccessFault(FALSE
, Address
, KernelMode
, (PVOID
)0xBADBADA3);
1198 if (!NT_SUCCESS(Status
))
1203 DPRINT1("Access fault failed\n");
1208 // What lock should we use?
1213 // Grab the PFN lock
1215 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
1219 /* Lock the process working set */
1220 MiLockProcessWorkingSet(CurrentProcess
, PsGetCurrentThread());
1225 // Check if this was a write or modify
1227 if (Operation
!= IoReadAccess
)
1230 // Check if the PTE is not writable
1232 if (MI_IS_PAGE_WRITEABLE(PointerPte
) == FALSE
)
1235 // Check if it's copy on write
1237 if (MI_IS_PAGE_COPY_ON_WRITE(PointerPte
))
1240 // Get the base address and allow a change for user-mode
1242 Address
= MiPteToAddress(PointerPte
);
1243 if (Address
<= MM_HIGHEST_USER_ADDRESS
)
1246 // What kind of lock were we using?
1253 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
1257 /* Release process working set */
1258 MiUnlockProcessWorkingSet(CurrentProcess
, PsGetCurrentThread());
/* Force a write fault to break copy-on-write while unlocked */
1265 //HACK: Pass a placeholder TrapInformation so the fault handler knows we're unlocked
1266 Status
= MmAccessFault(TRUE
, Address
, KernelMode
, (PVOID
)0xBADBADA3);
1267 if (!NT_SUCCESS(Status
))
1272 DPRINT1("Access fault failed\n");
1277 // Re-acquire the lock
1282 // Grab the PFN lock
1284 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
1288 /* Lock the process working set */
1289 MiLockProcessWorkingSet(CurrentProcess
, PsGetCurrentThread());
1300 // Fail, since we won't allow this
1302 Status
= STATUS_ACCESS_VIOLATION
;
1303 goto CleanupWithLock
;
/* Page is now resident: reference it and record the PFN in the MDL */
1310 PageFrameIndex
= PFN_FROM_PTE(PointerPte
);
1311 Pfn1
= MiGetPfnEntry(PageFrameIndex
);
1314 /* Either this is for kernel-mode, or the working set is held */
1315 ASSERT((CurrentProcess
== NULL
) || (UsePfnLock
== FALSE
));
1317 /* No Physical VADs supported yet */
1318 if (CurrentProcess
) ASSERT(CurrentProcess
->PhysicalVadRoot
== NULL
);
1320 /* This address should already exist and be fully valid */
1321 MiReferenceProbedPageAndBumpLockCount(Pfn1
);
1326 // For I/O addresses, just remember this
1328 Mdl
->MdlFlags
|= MDL_IO_SPACE
;
1332 // Write the page and move on
1334 *MdlPages
++ = PageFrameIndex
;
1337 /* Check if we're on a PDE boundary */
1338 if (MiIsPteOnPdeBoundary(PointerPte
)) PointerPde
++;
1339 #if (_MI_PAGING_LEVELS >= 3)
1340 if (MiIsPteOnPpeBoundary(PointerPte
)) PointerPpe
++;
1342 #if (_MI_PAGING_LEVELS == 4)
1343 if (MiIsPteOnPxeBoundary(PointerPte
)) PointerPxe
++;
1346 } while (PointerPte
<= LastPte
);
1349 // What kind of lock were we using?
1356 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
1360 /* Release process working set */
1361 MiUnlockProcessWorkingSet(CurrentProcess
, PsGetCurrentThread());
1367 ASSERT((Mdl
->MdlFlags
& MDL_DESCRIBES_AWE
) == 0);
1372 // This is the failure path
1374 ASSERT(!NT_SUCCESS(Status
));
1377 // What kind of lock were we using?
1384 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
1388 /* Release process working set */
1389 MiUnlockProcessWorkingSet(CurrentProcess
, PsGetCurrentThread());
1393 // Pages must be locked so MmUnlock can work
1395 ASSERT(Mdl
->MdlFlags
& MDL_PAGES_LOCKED
);
1401 ExRaiseStatus(Status
);
/*
 * MmUnlockPages
 *
 * Releases pages locked by MmProbeAndLockPages. Visible flow: validate
 * MDL state, unmap any system-VA mapping first, then dereference each
 * PFN. The I/O-space path dereferences directly under the PFN lock; the
 * normal path first converts the PFN array entries into PMMPFN pointers
 * (first loop, outside the lock), then dereferences them in a second loop
 * under the PFN lock. Locked-page accounting on the owning process is
 * adjusted, and MDL_PAGES_LOCKED (and MDL_IO_SPACE where set) cleared.
 * NOTE(review): lossy extraction -- braces and some statements elided.
 */
1409 MmUnlockPages(IN PMDL Mdl
)
1411 PPFN_NUMBER MdlPages
, LastPage
;
1414 ULONG Flags
, PageCount
;
1417 DPRINT("Unlocking MDL: %p\n", Mdl
);
1422 ASSERT((Mdl
->MdlFlags
& MDL_PAGES_LOCKED
) != 0);
1423 ASSERT((Mdl
->MdlFlags
& MDL_SOURCE_IS_NONPAGED_POOL
) == 0);
1424 ASSERT((Mdl
->MdlFlags
& MDL_PARTIAL
) == 0);
1425 ASSERT(Mdl
->ByteCount
!= 0);
1428 // Get the process associated and capture the flags which are volatile
1430 Process
= Mdl
->Process
;
1431 Flags
= Mdl
->MdlFlags
;
1434 // Automagically undo any calls to MmGetSystemAddressForMdl's for this MDL
1436 if (Mdl
->MdlFlags
& MDL_MAPPED_TO_SYSTEM_VA
)
1439 // Unmap the pages from system space
1441 MmUnmapLockedPages(Mdl
->MappedSystemVa
, Mdl
);
1445 // Get the page count
1447 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
1448 Base
= (PVOID
)((ULONG_PTR
)Mdl
->StartVa
+ Mdl
->ByteOffset
);
1449 PageCount
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base
, Mdl
->ByteCount
);
1450 ASSERT(PageCount
!= 0);
1453 // We don't support AWE
1455 if (Flags
& MDL_DESCRIBES_AWE
) ASSERT(FALSE
);
1458 // Check if the buffer is mapped I/O space
1460 if (Flags
& MDL_IO_SPACE
)
/* I/O-space path: dereference each PFN directly under the PFN lock */
1465 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
1470 LastPage
= MdlPages
+ PageCount
;
1474 // Last page, break out
1476 if (*MdlPages
== LIST_HEAD
) break;
1479 // Check if this page is in the PFN database
1481 Pfn1
= MiGetPfnEntry(*MdlPages
);
1482 if (Pfn1
) MiDereferencePfnAndDropLockCount(Pfn1
);
1483 } while (++MdlPages
< LastPage
);
1488 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
1491 // Check if we have a process
1496 // Handle the accounting of locked pages
1498 ASSERT(Process
->NumberOfLockedPages
> 0);
1499 InterlockedExchangeAddSizeT(&Process
->NumberOfLockedPages
,
1500 -(LONG_PTR
)PageCount
);
1506 Mdl
->MdlFlags
&= ~MDL_IO_SPACE
;
1507 Mdl
->MdlFlags
&= ~MDL_PAGES_LOCKED
;
1512 // Check if we have a process
1517 // Handle the accounting of locked pages
1519 ASSERT(Process
->NumberOfLockedPages
> 0);
1520 InterlockedExchangeAddSizeT(&Process
->NumberOfLockedPages
,
1521 -(LONG_PTR
)PageCount
);
/* First pass (no lock): replace each PFN with its PMMPFN entry pointer */
1527 LastPage
= MdlPages
+ PageCount
;
1531 // Last page reached
1533 if (*MdlPages
== LIST_HEAD
)
1536 // Were there no pages at all?
1538 if (MdlPages
== (PPFN_NUMBER
)(Mdl
+ 1))
1541 // We're already done
1543 Mdl
->MdlFlags
&= ~MDL_PAGES_LOCKED
;
1548 // Otherwise, stop here
1550 LastPage
= MdlPages
;
1554 /* Save the PFN entry instead for the secondary loop */
1555 *MdlPages
= (PFN_NUMBER
)MiGetPfnEntry(*MdlPages
);
1556 ASSERT(*MdlPages
!= 0);
1557 } while (++MdlPages
< LastPage
);
/* Second pass (PFN lock held): dereference every saved PFN entry */
1562 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
1565 // Now grab the PFN lock for the actual unlock and dereference
1567 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
1570 /* Get the current entry and reference count */
1571 Pfn1
= (PMMPFN
)*MdlPages
;
1572 MiDereferencePfnAndDropLockCount(Pfn1
);
1573 } while (++MdlPages
< LastPage
);
1578 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
1583 Mdl
->MdlFlags
&= ~MDL_PAGES_LOCKED
;
/*
 * MmAdvanceMdl - not yet implemented; always returns STATUS_NOT_IMPLEMENTED.
 */
1591 MmAdvanceMdl(IN PMDL Mdl
,
1592 IN ULONG NumberOfBytes
)
1595 return STATUS_NOT_IMPLEMENTED
;
/*
 * MmMapLockedPagesWithReservedMapping - body not visible in this
 * extraction; only the signature remains. Presumably unimplemented like
 * the neighboring stubs -- TODO(review) confirm against the full source.
 */
1603 MmMapLockedPagesWithReservedMapping(IN PVOID MappingAddress
,
1605 IN PMDL MemoryDescriptorList
,
1606 IN MEMORY_CACHING_TYPE CacheType
)
/*
 * MmUnmapReservedMapping - body not visible in this extraction; only the
 * signature remains. Presumably unimplemented like the neighboring stubs
 * -- TODO(review) confirm against the full source.
 */
1617 MmUnmapReservedMapping(IN PVOID BaseAddress
,
1619 IN PMDL MemoryDescriptorList
)
/*
 * MmPrefetchPages - not yet implemented; always returns STATUS_NOT_IMPLEMENTED.
 */
1629 MmPrefetchPages(IN ULONG NumberOfLists
,
1630 IN PREAD_LIST
*ReadLists
)
1633 return STATUS_NOT_IMPLEMENTED
;
/*
 * MmProtectMdlSystemAddress - not yet implemented; always returns
 * STATUS_NOT_IMPLEMENTED.
 */
1641 MmProtectMdlSystemAddress(IN PMDL MemoryDescriptorList
,
1642 IN ULONG NewProtect
)
1645 return STATUS_NOT_IMPLEMENTED
;
/*
 * MmProbeAndLockProcessPages - body not visible in this extraction; only
 * the signature remains. Presumably unimplemented like the neighboring
 * stubs -- TODO(review) confirm against the full source.
 */
1653 MmProbeAndLockProcessPages(IN OUT PMDL MemoryDescriptorList
,
1654 IN PEPROCESS Process
,
1655 IN KPROCESSOR_MODE AccessMode
,
1656 IN LOCK_OPERATION Operation
)
/*
 * MmProbeAndLockSelectedPages - body not visible in this extraction; only
 * the signature remains. Presumably unimplemented like the neighboring
 * stubs -- TODO(review) confirm against the full source.
 */
1667 MmProbeAndLockSelectedPages(IN OUT PMDL MemoryDescriptorList
,
1668 IN LARGE_INTEGER PageList
[],
1669 IN KPROCESSOR_MODE AccessMode
,
1670 IN LOCK_OPERATION Operation
)
1680 MmMapMemoryDumpMdl(IN PMDL Mdl
)