2 * PROJECT: ReactOS Kernel
3 * LICENSE: BSD - See COPYING.ARM in the top level directory
4 * FILE: ntoskrnl/mm/ARM3/mdlsup.c
5 * PURPOSE: ARM Memory Manager Memory Descriptor List (MDL) Management
6 * PROGRAMMERS: ReactOS Portable Systems Group
9 /* INCLUDES *******************************************************************/
15 #line 15 "ARM³::MDLSUP"
16 #define MODULE_INVOLVED_IN_ARM3
17 #include "../ARM3/miarm.h"
19 /* GLOBALS ********************************************************************/
//
// NOTE(review): debug switch for tracking MDL page locking; not referenced in
// this chunk of the file -- confirm its consumers before changing.
//
22 BOOLEAN MmTrackLockedPages
;
//
// System-wide count of locked pages; adjusted with
// InterlockedExchangeAddSizeT by the probe/unlock paths below.
//
23 SIZE_T MmSystemLockPagesCount
;
25 /* PUBLIC FUNCTIONS ***********************************************************/
32 MmCreateMdl(IN PMDL Mdl
,
39 // Check if we don't have an MDL built
44 // Calculate the size we'll need and allocate the MDL
46 Size
= MmSizeOfMdl(Base
, Length
);
47 Mdl
= ExAllocatePoolWithTag(NonPagedPool
, Size
, TAG_MDL
);
48 if (!Mdl
) return NULL
;
54 MmInitializeMdl(Mdl
, Base
, Length
);
63 MmSizeOfMdl(IN PVOID Base
,
67 // Return the MDL size
70 (ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base
, Length
) * sizeof(PFN_NUMBER
));
78 MmBuildMdlForNonPagedPool(IN PMDL Mdl
)
80 PPFN_NUMBER MdlPages
, EndPage
;
81 PFN_NUMBER Pfn
, PageCount
;
88 ASSERT(Mdl
->ByteCount
!= 0);
89 ASSERT((Mdl
->MdlFlags
& (MDL_PAGES_LOCKED
|
90 MDL_MAPPED_TO_SYSTEM_VA
|
91 MDL_SOURCE_IS_NONPAGED_POOL
|
95 // We know the MDL isn't associated to a process now
100 // Get page and VA information
102 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
106 // Set the system address and now get the page count
108 Mdl
->MappedSystemVa
= (PVOID
)((ULONG_PTR
)Base
+ Mdl
->ByteOffset
);
109 PageCount
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Mdl
->MappedSystemVa
,
111 ASSERT(PageCount
!= 0);
112 EndPage
= MdlPages
+ PageCount
;
117 PointerPte
= MiAddressToPte(Base
);
123 Pfn
= PFN_FROM_PTE(PointerPte
++);
125 } while (MdlPages
< EndPage
);
128 // Set the nonpaged pool flag
130 Mdl
->MdlFlags
|= MDL_SOURCE_IS_NONPAGED_POOL
;
133 // Check if this is an I/O mapping
135 if (!MiGetPfnEntry(Pfn
)) Mdl
->MdlFlags
|= MDL_IO_SPACE
;
143 MmAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress
,
144 IN PHYSICAL_ADDRESS HighAddress
,
145 IN PHYSICAL_ADDRESS SkipBytes
,
146 IN SIZE_T TotalBytes
)
149 // Call the internal routine
151 return MiAllocatePagesForMdl(LowAddress
,
164 MmAllocatePagesForMdlEx(IN PHYSICAL_ADDRESS LowAddress
,
165 IN PHYSICAL_ADDRESS HighAddress
,
166 IN PHYSICAL_ADDRESS SkipBytes
,
167 IN SIZE_T TotalBytes
,
168 IN MEMORY_CACHING_TYPE CacheType
,
171 MI_PFN_CACHE_ATTRIBUTE CacheAttribute
;
174 // Check for invalid cache type
176 if (CacheType
> MmWriteCombined
)
179 // Normalize to default
181 CacheAttribute
= MiNotMapped
;
186 // Conver to internal caching attribute
188 CacheAttribute
= MiPlatformCacheAttributes
[FALSE
][CacheType
];
192 // Only these flags are allowed
194 if (Flags
& ~(MM_DONT_ZERO_ALLOCATION
| MM_ALLOCATE_FROM_LOCAL_NODE_ONLY
))
203 // Call the internal routine
205 return MiAllocatePagesForMdl(LowAddress
,
218 MmFreePagesFromMdl(IN PMDL Mdl
)
225 DPRINT("Freeing MDL: %p\n", Mdl
);
230 ASSERT(KeGetCurrentIrql() <= APC_LEVEL
);
231 ASSERT((Mdl
->MdlFlags
& MDL_IO_SPACE
) == 0);
232 ASSERT(((ULONG_PTR
)Mdl
->StartVa
& (PAGE_SIZE
- 1)) == 0);
235 // Get address and page information
237 Base
= (PVOID
)((ULONG_PTR
)Mdl
->StartVa
+ Mdl
->ByteOffset
);
238 NumberOfPages
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base
, Mdl
->ByteCount
);
243 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
246 // Loop all the MDL pages
248 Pages
= (PPFN_NUMBER
)(Mdl
+ 1);
252 // Reached the last page
254 if (*Pages
== LIST_HEAD
) break;
257 // Get the page entry
259 Pfn1
= MiGetPfnEntry(*Pages
);
261 ASSERT(Pfn1
->u2
.ShareCount
== 1);
262 ASSERT(MI_IS_PFN_DELETED(Pfn1
) == TRUE
);
263 if (Pfn1
->u4
.PteFrame
!= 0x1FFEDCB)
265 /* Corrupted PFN entry or invalid free */
266 KeBugCheckEx(MEMORY_MANAGEMENT
, 0x1236, (ULONG_PTR
)Mdl
, (ULONG_PTR
)Pages
, *Pages
);
272 Pfn1
->u3
.e1
.StartOfAllocation
= 0;
273 Pfn1
->u3
.e1
.EndOfAllocation
= 0;
274 Pfn1
->u2
.ShareCount
== 0;
279 ASSERT(Pfn1
->u3
.e2
.ReferenceCount
!= 0);
280 if (Pfn1
->u3
.e2
.ReferenceCount
!= 1)
282 /* Just take off one reference */
283 InterlockedDecrement16((PSHORT
)&Pfn1
->u3
.e2
.ReferenceCount
);
287 /* We'll be nuking the whole page */
288 MiDecrementReferenceCount(Pfn1
, *Pages
);
292 // Clear this page and move on
294 *Pages
++ = LIST_HEAD
;
295 } while (--NumberOfPages
!= 0);
300 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
303 // Remove the pages locked flag
305 Mdl
->MdlFlags
&= ~MDL_PAGES_LOCKED
;
313 MmMapLockedPagesSpecifyCache(IN PMDL Mdl
,
314 IN KPROCESSOR_MODE AccessMode
,
315 IN MEMORY_CACHING_TYPE CacheType
,
316 IN PVOID BaseAddress
,
317 IN ULONG BugCheckOnFailure
,
318 IN MM_PAGE_PRIORITY Priority
)
321 PPFN_NUMBER MdlPages
, LastPage
;
322 PFN_NUMBER PageCount
;
324 MI_PFN_CACHE_ATTRIBUTE CacheAttribute
;
331 ASSERT(Mdl
->ByteCount
!= 0);
336 Base
= (PVOID
)((ULONG_PTR
)Mdl
->StartVa
+ Mdl
->ByteOffset
);
339 // Handle kernel case first
341 if (AccessMode
== KernelMode
)
344 // Get the list of pages and count
346 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
347 PageCount
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base
, Mdl
->ByteCount
);
348 LastPage
= MdlPages
+ PageCount
;
353 ASSERT((Mdl
->MdlFlags
& (MDL_MAPPED_TO_SYSTEM_VA
|
354 MDL_SOURCE_IS_NONPAGED_POOL
|
355 MDL_PARTIAL_HAS_BEEN_MAPPED
)) == 0);
356 ASSERT((Mdl
->MdlFlags
& (MDL_PAGES_LOCKED
| MDL_PARTIAL
)) != 0);
359 // Get the correct cache type
361 IsIoMapping
= (Mdl
->MdlFlags
& MDL_IO_SPACE
) != 0;
362 CacheAttribute
= MiPlatformCacheAttributes
[IsIoMapping
][CacheType
];
367 PointerPte
= MiReserveSystemPtes(PageCount
, SystemPteSpace
);
371 // If it can fail, return NULL
373 if (Mdl
->MdlFlags
& MDL_MAPPING_CAN_FAIL
) return NULL
;
376 // Should we bugcheck?
378 if (!BugCheckOnFailure
) return NULL
;
381 // Yes, crash the system
383 KeBugCheckEx(NO_MORE_SYSTEM_PTES
, 0, PageCount
, 0, 0);
387 // Get the mapped address
389 Base
= (PVOID
)((ULONG_PTR
)MiPteToAddress(PointerPte
) + Mdl
->ByteOffset
);
394 TempPte
= ValidKernelPte
;
395 switch (CacheAttribute
)
402 MI_PAGE_DISABLE_CACHE(&TempPte
);
403 MI_PAGE_WRITE_THROUGH(&TempPte
);
406 case MiWriteCombined
:
409 // Enable write combining
411 MI_PAGE_DISABLE_CACHE(&TempPte
);
412 MI_PAGE_WRITE_COMBINED(&TempPte
);
430 if (*MdlPages
== LIST_HEAD
) break;
435 TempPte
.u
.Hard
.PageFrameNumber
= *MdlPages
;
436 MI_WRITE_VALID_PTE(PointerPte
++, TempPte
);
437 } while (++MdlPages
< LastPage
);
442 ASSERT((Mdl
->MdlFlags
& MDL_MAPPED_TO_SYSTEM_VA
) == 0);
443 Mdl
->MappedSystemVa
= Base
;
444 Mdl
->MdlFlags
|= MDL_MAPPED_TO_SYSTEM_VA
;
447 // Check if it was partial
449 if (Mdl
->MdlFlags
& MDL_PARTIAL
)
452 // Write the appropriate flag here too
454 Mdl
->MdlFlags
|= MDL_PARTIAL_HAS_BEEN_MAPPED
;
458 // Return the mapped address
472 MmMapLockedPages(IN PMDL Mdl
,
473 IN KPROCESSOR_MODE AccessMode
)
476 // Call the extended version
478 return MmMapLockedPagesSpecifyCache(Mdl
,
491 MmUnmapLockedPages(IN PVOID BaseAddress
,
495 PFN_NUMBER PageCount
;
496 PPFN_NUMBER MdlPages
;
502 ASSERT(Mdl
->ByteCount
!= 0);
505 // Check if this is a kernel request
507 if (BaseAddress
> MM_HIGHEST_USER_ADDRESS
)
510 // Get base and count information
512 Base
= (PVOID
)((ULONG_PTR
)Mdl
->StartVa
+ Mdl
->ByteOffset
);
513 PageCount
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base
, Mdl
->ByteCount
);
518 ASSERT((Mdl
->MdlFlags
& MDL_PARENT_MAPPED_SYSTEM_VA
) == 0);
519 ASSERT(PageCount
!= 0);
520 ASSERT(Mdl
->MdlFlags
& MDL_MAPPED_TO_SYSTEM_VA
);
525 PointerPte
= MiAddressToPte(BaseAddress
);
528 // This should be a resident system PTE
530 ASSERT(PointerPte
>= MmSystemPtesStart
[SystemPteSpace
]);
531 ASSERT(PointerPte
<= MmSystemPtesEnd
[SystemPteSpace
]);
532 ASSERT(PointerPte
->u
.Hard
.Valid
== 1);
535 // Check if the caller wants us to free advanced pages
537 if (Mdl
->MdlFlags
& MDL_FREE_EXTRA_PTES
)
540 // Get the MDL page array
542 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
543 MdlPages
+= PageCount
;
548 PageCount
+= *MdlPages
;
549 PointerPte
-= *MdlPages
;
550 ASSERT(PointerPte
>= MmSystemPtesStart
[SystemPteSpace
]);
551 ASSERT(PointerPte
<= MmSystemPtesEnd
[SystemPteSpace
]);
554 // Get the new base address
556 BaseAddress
= (PVOID
)((ULONG_PTR
)BaseAddress
-
557 ((*MdlPages
) << PAGE_SHIFT
));
563 Mdl
->MdlFlags
&= ~(MDL_MAPPED_TO_SYSTEM_VA
|
564 MDL_PARTIAL_HAS_BEEN_MAPPED
|
565 MDL_FREE_EXTRA_PTES
);
568 // Release the system PTEs
570 MiReleaseSystemPtes(PointerPte
, PageCount
, SystemPteSpace
);
583 MmProbeAndLockPages(IN PMDL Mdl
,
584 IN KPROCESSOR_MODE AccessMode
,
585 IN LOCK_OPERATION Operation
)
587 PPFN_NUMBER MdlPages
;
588 PVOID Base
, Address
, LastAddress
, StartAddress
;
589 ULONG LockPages
, TotalPages
;
590 NTSTATUS Status
= STATUS_SUCCESS
;
591 PEPROCESS CurrentProcess
;
592 NTSTATUS ProbeStatus
;
593 PMMPTE PointerPte
, LastPte
;
595 PFN_NUMBER PageFrameIndex
;
598 USHORT OldRefCount
, RefCount
;
600 DPRINT("Probing MDL: %p\n", Mdl
);
605 ASSERT(Mdl
->ByteCount
!= 0);
606 ASSERT(((ULONG
)Mdl
->ByteOffset
& ~(PAGE_SIZE
- 1)) == 0);
607 ASSERT(((ULONG_PTR
)Mdl
->StartVa
& (PAGE_SIZE
- 1)) == 0);
608 ASSERT((Mdl
->MdlFlags
& (MDL_PAGES_LOCKED
|
609 MDL_MAPPED_TO_SYSTEM_VA
|
610 MDL_SOURCE_IS_NONPAGED_POOL
|
612 MDL_IO_SPACE
)) == 0);
615 // Get page and base information
617 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
618 Base
= (PVOID
)Mdl
->StartVa
;
621 // Get the addresses and how many pages we span (and need to lock)
623 Address
= (PVOID
)((ULONG_PTR
)Base
+ Mdl
->ByteOffset
);
624 LastAddress
= (PVOID
)((ULONG_PTR
)Address
+ Mdl
->ByteCount
);
625 LockPages
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Address
, Mdl
->ByteCount
);
626 ASSERT(LockPages
!= 0);
628 /* Block invalid access */
629 if ((AccessMode
!= KernelMode
) &&
630 ((LastAddress
> (PVOID
)MM_USER_PROBE_ADDRESS
) || (Address
>= LastAddress
)))
632 /* Caller should be in SEH, raise the error */
633 *MdlPages
= LIST_HEAD
;
634 ExRaiseStatus(STATUS_ACCESS_VIOLATION
);
640 if (Address
<= MM_HIGHEST_USER_ADDRESS
)
645 CurrentProcess
= PsGetCurrentProcess();
652 CurrentProcess
= NULL
;
656 // Save the number of pages we'll have to lock, and the start address
658 TotalPages
= LockPages
;
659 StartAddress
= Address
;
661 /* Large pages not supported */
662 ASSERT(!MI_IS_PHYSICAL_ADDRESS(Address
));
667 ProbeStatus
= STATUS_SUCCESS
;
678 *MdlPages
= LIST_HEAD
;
683 *(volatile CHAR
*)Address
;
686 // Check if this is write access (only probe for user-mode)
688 if ((Operation
!= IoReadAccess
) &&
689 (Address
<= MM_HIGHEST_USER_ADDRESS
))
692 // Probe for write too
694 ProbeForWriteChar(Address
);
700 Address
= PAGE_ALIGN((ULONG_PTR
)Address
+ PAGE_SIZE
);
707 } while (Address
< LastAddress
);
710 // Reset back to the original page
712 ASSERT(LockPages
== 0);
713 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
715 _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER
)
720 ProbeStatus
= _SEH2_GetExceptionCode();
725 // So how did that go?
727 if (ProbeStatus
!= STATUS_SUCCESS
)
732 DPRINT1("MDL PROBE FAILED!\n");
734 ExRaiseStatus(ProbeStatus
);
738 // Get the PTE and PDE
740 PointerPte
= MiAddressToPte(StartAddress
);
741 PointerPde
= MiAddressToPde(StartAddress
);
742 #if (_MI_PAGING_LEVELS >= 3)
743 DPRINT1("PAE/x64 Not Implemented\n");
750 ASSERT(MdlPages
== (PPFN_NUMBER
)(Mdl
+ 1));
753 // Check what kind of operation this is
755 if (Operation
!= IoReadAccess
)
758 // Set the write flag
760 Mdl
->MdlFlags
|= MDL_WRITE_OPERATION
;
765 // Remove the write flag
767 Mdl
->MdlFlags
&= ~(MDL_WRITE_OPERATION
);
771 // Mark the MDL as locked *now*
773 Mdl
->MdlFlags
|= MDL_PAGES_LOCKED
;
776 // Check if this came from kernel mode
778 if (Base
>= MM_HIGHEST_USER_ADDRESS
)
781 // We should not have a process
783 ASSERT(CurrentProcess
== NULL
);
787 // In kernel mode, we don't need to check for write access
789 Operation
= IoReadAccess
;
795 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
802 ASSERT(TotalPages
!= 0);
803 ASSERT(CurrentProcess
== PsGetCurrentProcess());
806 // Track locked pages
808 InterlockedExchangeAddSizeT(&CurrentProcess
->NumberOfLockedPages
,
814 Mdl
->Process
= CurrentProcess
;
816 /* Lock the process working set */
817 MiLockProcessWorkingSet(CurrentProcess
, PsGetCurrentThread());
825 LastPte
= MiAddressToPte((PVOID
)((ULONG_PTR
)LastAddress
- 1));
833 // Assume failure and check for non-mapped pages
835 *MdlPages
= LIST_HEAD
;
836 #if (_MI_PAGING_LEVELS >= 3)
837 /* Should be checking the PPE and PXE */
840 while ((PointerPde
->u
.Hard
.Valid
== 0) ||
841 (PointerPte
->u
.Hard
.Valid
== 0))
844 // What kind of lock where we using?
851 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
855 /* Release process working set */
856 MiUnlockProcessWorkingSet(CurrentProcess
, PsGetCurrentThread());
862 Address
= MiPteToAddress(PointerPte
);
863 Status
= MmAccessFault(FALSE
, Address
, KernelMode
, NULL
);
864 if (!NT_SUCCESS(Status
))
869 DPRINT1("Access fault failed\n");
874 // Waht lock should we use?
881 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
885 /* Lock the process working set */
886 MiLockProcessWorkingSet(CurrentProcess
, PsGetCurrentThread());
891 // Check if this was a write or modify
893 if (Operation
!= IoReadAccess
)
896 // Check if the PTE is not writable
898 if (MI_IS_PAGE_WRITEABLE(PointerPte
) == FALSE
)
901 // Check if it's copy on write
903 if (MI_IS_PAGE_COPY_ON_WRITE(PointerPte
))
906 // Get the base address and allow a change for user-mode
908 Address
= MiPteToAddress(PointerPte
);
909 if (Address
<= MM_HIGHEST_USER_ADDRESS
)
912 // What kind of lock where we using?
919 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
923 /* Release process working set */
924 MiUnlockProcessWorkingSet(CurrentProcess
, PsGetCurrentThread());
930 Status
= MmAccessFault(TRUE
, Address
, KernelMode
, NULL
);
931 if (!NT_SUCCESS(Status
))
936 DPRINT1("Access fault failed\n");
941 // Re-acquire the lock
948 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
952 /* Lock the process working set */
953 MiLockProcessWorkingSet(CurrentProcess
, PsGetCurrentThread());
964 // Fail, since we won't allow this
966 Status
= STATUS_ACCESS_VIOLATION
;
967 goto CleanupWithLock
;
974 PageFrameIndex
= PFN_FROM_PTE(PointerPte
);
975 Pfn1
= MiGetPfnEntry(PageFrameIndex
);
978 /* Either this is for kernel-mode, or the working set is held */
979 ASSERT((CurrentProcess
== NULL
) || (UsePfnLock
== FALSE
));
981 /* No Physical VADs supported yet */
982 if (CurrentProcess
) ASSERT(CurrentProcess
->PhysicalVadRoot
== NULL
);
984 /* This address should already exist and be fully valid */
985 ASSERT(Pfn1
->u3
.e2
.ReferenceCount
!= 0);
986 if (MI_IS_ROS_PFN(Pfn1
))
988 /* ReactOS Mm doesn't track share count */
989 ASSERT(Pfn1
->u3
.e1
.PageLocation
== ActiveAndValid
);
993 /* On ARM3 pages, we should see a valid share count */
994 ASSERT((Pfn1
->u2
.ShareCount
!= 0) && (Pfn1
->u3
.e1
.PageLocation
== ActiveAndValid
));
996 /* We don't support mapping a prototype page yet */
997 ASSERT((Pfn1
->u3
.e1
.PrototypePte
== 0) && (Pfn1
->OriginalPte
.u
.Soft
.Prototype
== 0));
1000 /* More locked pages! */
1001 InterlockedExchangeAddSizeT(&MmSystemLockPagesCount
, 1);
1003 /* Loop trying to update the reference count */
1006 /* Get the current reference count, make sure it's valid */
1007 OldRefCount
= Pfn1
->u3
.e2
.ReferenceCount
;
1008 ASSERT(OldRefCount
!= 0);
1009 ASSERT(OldRefCount
< 2500);
1011 /* Bump it up by one */
1012 RefCount
= InterlockedCompareExchange16((PSHORT
)&Pfn1
->u3
.e2
.ReferenceCount
,
1015 ASSERT(RefCount
!= 0);
1016 } while (OldRefCount
!= RefCount
);
1018 /* Was this the first lock attempt? */
1019 if (OldRefCount
!= 1)
1021 /* Someone else came through */
1022 InterlockedExchangeAddSizeT(&MmSystemLockPagesCount
, -1);
1028 // For I/O addresses, just remember this
1030 Mdl
->MdlFlags
|= MDL_IO_SPACE
;
1034 // Write the page and move on
1036 *MdlPages
++ = PageFrameIndex
;
1039 /* Check if we're on a PDE boundary */
1040 if (!((ULONG_PTR
)PointerPte
& (PD_SIZE
- 1))) PointerPde
++;
1041 } while (PointerPte
<= LastPte
);
1044 // What kind of lock where we using?
1051 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
1055 /* Release process working set */
1056 MiUnlockProcessWorkingSet(CurrentProcess
, PsGetCurrentThread());
1062 ASSERT((Mdl
->MdlFlags
& MDL_DESCRIBES_AWE
) == 0);
1067 // This is the failure path
1069 ASSERT(!NT_SUCCESS(Status
));
1072 // What kind of lock where we using?
1079 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
1083 /* Release process working set */
1084 MiUnlockProcessWorkingSet(CurrentProcess
, PsGetCurrentThread());
1088 // Pages must be locked so MmUnlock can work
1090 ASSERT(Mdl
->MdlFlags
& MDL_PAGES_LOCKED
);
1096 ExRaiseStatus(Status
);
1104 MmUnlockPages(IN PMDL Mdl
)
1106 PPFN_NUMBER MdlPages
, LastPage
;
1109 ULONG Flags
, PageCount
;
1111 USHORT RefCount
, OldRefCount
;
1113 DPRINT("Unlocking MDL: %p\n", Mdl
);
1118 ASSERT((Mdl
->MdlFlags
& MDL_PAGES_LOCKED
) != 0);
1119 ASSERT((Mdl
->MdlFlags
& MDL_SOURCE_IS_NONPAGED_POOL
) == 0);
1120 ASSERT((Mdl
->MdlFlags
& MDL_PARTIAL
) == 0);
1121 ASSERT(Mdl
->ByteCount
!= 0);
1124 // Get the process associated and capture the flags which are volatile
1126 Process
= Mdl
->Process
;
1127 Flags
= Mdl
->MdlFlags
;
1130 // Automagically undo any calls to MmGetSystemAddressForMdl's for this MDL
1132 if (Mdl
->MdlFlags
& MDL_MAPPED_TO_SYSTEM_VA
)
1135 // Unmap the pages from system space
1137 MmUnmapLockedPages(Mdl
->MappedSystemVa
, Mdl
);
1141 // Get the page count
1143 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
1144 Base
= (PVOID
)((ULONG_PTR
)Mdl
->StartVa
+ Mdl
->ByteOffset
);
1145 PageCount
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base
, Mdl
->ByteCount
);
1146 ASSERT(PageCount
!= 0);
1149 // We don't support AWE
1151 if (Flags
& MDL_DESCRIBES_AWE
) ASSERT(FALSE
);
1154 // Check if the buffer is mapped I/O space
1156 if (Flags
& MDL_IO_SPACE
)
1161 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
1166 LastPage
= MdlPages
+ PageCount
;
1170 // Last page, break out
1172 if (*MdlPages
== LIST_HEAD
) break;
1175 // Check if this page is in the PFN database
1177 Pfn1
= MiGetPfnEntry(*MdlPages
);
1180 /* Get the current entry and reference count */
1181 OldRefCount
= Pfn1
->u3
.e2
.ReferenceCount
;
1182 ASSERT(OldRefCount
!= 0);
1184 /* Is this already the last dereference */
1185 if (OldRefCount
== 1)
1187 /* It should be on a free list waiting for us */
1188 ASSERT(Pfn1
->u3
.e2
.ReferenceCount
== 1);
1189 ASSERT(Pfn1
->u3
.e1
.PageLocation
!= ActiveAndValid
);
1190 ASSERT(Pfn1
->u2
.ShareCount
== 0);
1192 /* Not supported yet */
1193 ASSERT(((Pfn1
->u3
.e1
.PrototypePte
== 0) &&
1194 (Pfn1
->OriginalPte
.u
.Soft
.Prototype
== 0)));
1197 InterlockedExchangeAddSizeT(&MmSystemLockPagesCount
, -1);
1199 /* Do the last dereference, we're done here */
1200 MiDecrementReferenceCount(Pfn1
, *MdlPages
);
1204 /* Loop decrementing one reference */
1207 /* Make sure it's still valid */
1208 OldRefCount
= Pfn1
->u3
.e2
.ReferenceCount
;
1209 ASSERT(OldRefCount
!= 0);
1211 /* Take off one reference */
1212 RefCount
= InterlockedCompareExchange16((PSHORT
)&Pfn1
->u3
.e2
.ReferenceCount
,
1215 ASSERT(RefCount
!= 0);
1216 } while (OldRefCount
!= RefCount
);
1217 ASSERT(RefCount
> 1);
1219 /* Are there only lock references left? */
1222 /* And does the page still have users? */
1223 if (Pfn1
->u2
.ShareCount
>= 1)
1225 /* Then it should still be valid */
1226 ASSERT(Pfn1
->u3
.e1
.PageLocation
== ActiveAndValid
);
1228 /* Not supported yet */
1229 ASSERT(((Pfn1
->u3
.e1
.PrototypePte
== 0) &&
1230 (Pfn1
->OriginalPte
.u
.Soft
.Prototype
== 0)));
1232 /* But there is one less "locked" page though */
1233 InterlockedExchangeAddSizeT(&MmSystemLockPagesCount
, -1);
1238 } while (++MdlPages
< LastPage
);
1243 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
1246 // Check if we have a process
1251 // Handle the accounting of locked pages
1253 ASSERT(Process
->NumberOfLockedPages
> 0);
1254 InterlockedExchangeAddSizeT(&Process
->NumberOfLockedPages
,
1261 Mdl
->MdlFlags
&= ~MDL_IO_SPACE
;
1262 Mdl
->MdlFlags
&= ~MDL_PAGES_LOCKED
;
1267 // Check if we have a process
1272 // Handle the accounting of locked pages
1274 ASSERT(Process
->NumberOfLockedPages
> 0);
1275 InterlockedExchangeAddSizeT(&Process
->NumberOfLockedPages
,
1282 LastPage
= MdlPages
+ PageCount
;
1286 // Last page reached
1288 if (*MdlPages
== LIST_HEAD
)
1291 // Were there no pages at all?
1293 if (MdlPages
== (PPFN_NUMBER
)(Mdl
+ 1))
1296 // We're already done
1298 Mdl
->MdlFlags
&= ~MDL_PAGES_LOCKED
;
1303 // Otherwise, stop here
1305 LastPage
= MdlPages
;
1309 /* Save the PFN entry instead for the secondary loop */
1310 *MdlPages
= (PFN_NUMBER
)MiGetPfnEntry(*MdlPages
);
1311 ASSERT((*MdlPages
) != 0);
1312 } while (++MdlPages
< LastPage
);
1317 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
1320 // Now grab the PFN lock for the actual unlock and dereference
1322 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
1325 /* Get the current entry and reference count */
1326 Pfn1
= (PMMPFN
)(*MdlPages
);
1327 OldRefCount
= Pfn1
->u3
.e2
.ReferenceCount
;
1328 ASSERT(OldRefCount
!= 0);
1330 /* Is this already the last dereference */
1331 if (OldRefCount
== 1)
1333 /* It should be on a free list waiting for us */
1334 ASSERT(Pfn1
->u3
.e2
.ReferenceCount
== 1);
1335 ASSERT(Pfn1
->u3
.e1
.PageLocation
!= ActiveAndValid
);
1336 ASSERT(Pfn1
->u2
.ShareCount
== 0);
1338 /* Not supported yet */
1339 ASSERT(((Pfn1
->u3
.e1
.PrototypePte
== 0) &&
1340 (Pfn1
->OriginalPte
.u
.Soft
.Prototype
== 0)));
1343 InterlockedExchangeAddSizeT(&MmSystemLockPagesCount
, -1);
1345 /* Do the last dereference, we're done here */
1346 MiDecrementReferenceCount(Pfn1
, MiGetPfnEntryIndex(Pfn1
));
1350 /* Loop decrementing one reference */
1353 /* Make sure it's still valid */
1354 OldRefCount
= Pfn1
->u3
.e2
.ReferenceCount
;
1355 ASSERT(OldRefCount
!= 0);
1357 /* Take off one reference */
1358 RefCount
= InterlockedCompareExchange16((PSHORT
)&Pfn1
->u3
.e2
.ReferenceCount
,
1361 ASSERT(RefCount
!= 0);
1362 } while (OldRefCount
!= RefCount
);
1363 ASSERT(RefCount
> 1);
1365 /* Are there only lock references left? */
1368 /* And does the page still have users? */
1369 if (Pfn1
->u2
.ShareCount
>= 1)
1371 /* Then it should still be valid */
1372 ASSERT(Pfn1
->u3
.e1
.PageLocation
== ActiveAndValid
);
1374 /* Not supported yet */
1375 ASSERT(((Pfn1
->u3
.e1
.PrototypePte
== 0) &&
1376 (Pfn1
->OriginalPte
.u
.Soft
.Prototype
== 0)));
1378 /* But there is one less "locked" page though */
1379 InterlockedExchangeAddSizeT(&MmSystemLockPagesCount
, -1);
1383 } while (++MdlPages
< LastPage
);
1388 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
1393 Mdl
->MdlFlags
&= ~MDL_PAGES_LOCKED
;
1401 MmAdvanceMdl(IN PMDL Mdl
,
1402 IN ULONG NumberOfBytes
)
1405 return STATUS_NOT_IMPLEMENTED
;
1413 MmMapLockedPagesWithReservedMapping(IN PVOID MappingAddress
,
1415 IN PMDL MemoryDescriptorList
,
1416 IN MEMORY_CACHING_TYPE CacheType
)
1427 MmUnmapReservedMapping(IN PVOID BaseAddress
,
1429 IN PMDL MemoryDescriptorList
)
1439 MmPrefetchPages(IN ULONG NumberOfLists
,
1440 IN PREAD_LIST
*ReadLists
)
1443 return STATUS_NOT_IMPLEMENTED
;
1451 MmProtectMdlSystemAddress(IN PMDL MemoryDescriptorList
,
1452 IN ULONG NewProtect
)
1455 return STATUS_NOT_IMPLEMENTED
;
1463 MmProbeAndLockProcessPages(IN OUT PMDL MemoryDescriptorList
,
1464 IN PEPROCESS Process
,
1465 IN KPROCESSOR_MODE AccessMode
,
1466 IN LOCK_OPERATION Operation
)
1477 MmProbeAndLockSelectedPages(IN OUT PMDL MemoryDescriptorList
,
1478 IN LARGE_INTEGER PageList
[],
1479 IN KPROCESSOR_MODE AccessMode
,
1480 IN LOCK_OPERATION Operation
)
1490 MmMapMemoryDumpMdl(IN PMDL Mdl
)