/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/mdlsup.c
 * PURPOSE:         ARM Memory Manager Memory Descriptor List (MDL) Management
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */
/* INCLUDES *******************************************************************/

/* NOTE(review): the primary kernel includes (upstream has <ntoskrnl.h> and
   <debug.h> here) were lost in extraction — restore them before building. */
#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"

/* GLOBALS ********************************************************************/
21 BOOLEAN MmTrackLockedPages
;
22 SIZE_T MmSystemLockPagesCount
;
/* PUBLIC FUNCTIONS ***********************************************************/
31 MmCreateMdl(IN PMDL Mdl
,
38 // Check if we don't have an MDL built
43 // Calculate the size we'll need and allocate the MDL
45 Size
= MmSizeOfMdl(Base
, Length
);
46 Mdl
= ExAllocatePoolWithTag(NonPagedPool
, Size
, TAG_MDL
);
47 if (!Mdl
) return NULL
;
53 MmInitializeMdl(Mdl
, Base
, Length
);
62 MmSizeOfMdl(IN PVOID Base
,
66 // Return the MDL size
69 (ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base
, Length
) * sizeof(PFN_NUMBER
));
77 MmBuildMdlForNonPagedPool(IN PMDL Mdl
)
79 PPFN_NUMBER MdlPages
, EndPage
;
80 PFN_NUMBER Pfn
, PageCount
;
87 ASSERT(Mdl
->ByteCount
!= 0);
88 ASSERT((Mdl
->MdlFlags
& (MDL_PAGES_LOCKED
|
89 MDL_MAPPED_TO_SYSTEM_VA
|
90 MDL_SOURCE_IS_NONPAGED_POOL
|
94 // We know the MDL isn't associated to a process now
99 // Get page and VA information
101 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
105 // Set the system address and now get the page count
107 Mdl
->MappedSystemVa
= (PVOID
)((ULONG_PTR
)Base
+ Mdl
->ByteOffset
);
108 PageCount
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Mdl
->MappedSystemVa
,
110 ASSERT(PageCount
!= 0);
111 EndPage
= MdlPages
+ PageCount
;
116 PointerPte
= MiAddressToPte(Base
);
122 Pfn
= PFN_FROM_PTE(PointerPte
++);
124 } while (MdlPages
< EndPage
);
127 // Set the nonpaged pool flag
129 Mdl
->MdlFlags
|= MDL_SOURCE_IS_NONPAGED_POOL
;
132 // Check if this is an I/O mapping
134 if (!MiGetPfnEntry(Pfn
)) Mdl
->MdlFlags
|= MDL_IO_SPACE
;
142 MmAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress
,
143 IN PHYSICAL_ADDRESS HighAddress
,
144 IN PHYSICAL_ADDRESS SkipBytes
,
145 IN SIZE_T TotalBytes
)
148 // Call the internal routine
150 return MiAllocatePagesForMdl(LowAddress
,
163 MmAllocatePagesForMdlEx(IN PHYSICAL_ADDRESS LowAddress
,
164 IN PHYSICAL_ADDRESS HighAddress
,
165 IN PHYSICAL_ADDRESS SkipBytes
,
166 IN SIZE_T TotalBytes
,
167 IN MEMORY_CACHING_TYPE CacheType
,
170 MI_PFN_CACHE_ATTRIBUTE CacheAttribute
;
173 // Check for invalid cache type
175 if (CacheType
> MmWriteCombined
)
178 // Normalize to default
180 CacheAttribute
= MiNotMapped
;
185 // Conver to internal caching attribute
187 CacheAttribute
= MiPlatformCacheAttributes
[FALSE
][CacheType
];
191 // Only these flags are allowed
193 if (Flags
& ~(MM_DONT_ZERO_ALLOCATION
| MM_ALLOCATE_FROM_LOCAL_NODE_ONLY
))
202 // Call the internal routine
204 return MiAllocatePagesForMdl(LowAddress
,
217 MmFreePagesFromMdl(IN PMDL Mdl
)
224 DPRINT("Freeing MDL: %p\n", Mdl
);
229 ASSERT(KeGetCurrentIrql() <= APC_LEVEL
);
230 ASSERT((Mdl
->MdlFlags
& MDL_IO_SPACE
) == 0);
231 ASSERT(((ULONG_PTR
)Mdl
->StartVa
& (PAGE_SIZE
- 1)) == 0);
234 // Get address and page information
236 Base
= (PVOID
)((ULONG_PTR
)Mdl
->StartVa
+ Mdl
->ByteOffset
);
237 NumberOfPages
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base
, Mdl
->ByteCount
);
242 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
245 // Loop all the MDL pages
247 Pages
= (PPFN_NUMBER
)(Mdl
+ 1);
251 // Reached the last page
253 if (*Pages
== LIST_HEAD
) break;
256 // Get the page entry
258 Pfn1
= MiGetPfnEntry(*Pages
);
260 ASSERT(Pfn1
->u2
.ShareCount
== 1);
261 ASSERT(MI_IS_PFN_DELETED(Pfn1
) == TRUE
);
262 if (Pfn1
->u4
.PteFrame
!= 0x1FFEDCB)
264 /* Corrupted PFN entry or invalid free */
265 KeBugCheckEx(MEMORY_MANAGEMENT
, 0x1236, (ULONG_PTR
)Mdl
, (ULONG_PTR
)Pages
, *Pages
);
271 Pfn1
->u3
.e1
.StartOfAllocation
= 0;
272 Pfn1
->u3
.e1
.EndOfAllocation
= 0;
273 Pfn1
->u2
.ShareCount
= 0;
278 ASSERT(Pfn1
->u3
.e2
.ReferenceCount
!= 0);
279 if (Pfn1
->u3
.e2
.ReferenceCount
!= 1)
281 /* Just take off one reference */
282 InterlockedDecrement16((PSHORT
)&Pfn1
->u3
.e2
.ReferenceCount
);
286 /* We'll be nuking the whole page */
287 MiDecrementReferenceCount(Pfn1
, *Pages
);
291 // Clear this page and move on
293 *Pages
++ = LIST_HEAD
;
294 } while (--NumberOfPages
!= 0);
299 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
302 // Remove the pages locked flag
304 Mdl
->MdlFlags
&= ~MDL_PAGES_LOCKED
;
312 MmMapLockedPagesSpecifyCache(IN PMDL Mdl
,
313 IN KPROCESSOR_MODE AccessMode
,
314 IN MEMORY_CACHING_TYPE CacheType
,
315 IN PVOID BaseAddress
,
316 IN ULONG BugCheckOnFailure
,
317 IN MM_PAGE_PRIORITY Priority
)
320 PPFN_NUMBER MdlPages
, LastPage
;
323 MI_PFN_CACHE_ATTRIBUTE CacheAttribute
;
330 ASSERT(Mdl
->ByteCount
!= 0);
335 Base
= (PVOID
)((ULONG_PTR
)Mdl
->StartVa
+ Mdl
->ByteOffset
);
338 // Handle kernel case first
340 if (AccessMode
== KernelMode
)
343 // Get the list of pages and count
345 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
346 PageCount
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base
, Mdl
->ByteCount
);
347 LastPage
= MdlPages
+ PageCount
;
352 ASSERT((Mdl
->MdlFlags
& (MDL_MAPPED_TO_SYSTEM_VA
|
353 MDL_SOURCE_IS_NONPAGED_POOL
|
354 MDL_PARTIAL_HAS_BEEN_MAPPED
)) == 0);
355 ASSERT((Mdl
->MdlFlags
& (MDL_PAGES_LOCKED
| MDL_PARTIAL
)) != 0);
358 // Get the correct cache type
360 IsIoMapping
= (Mdl
->MdlFlags
& MDL_IO_SPACE
) != 0;
361 CacheAttribute
= MiPlatformCacheAttributes
[IsIoMapping
][CacheType
];
366 PointerPte
= MiReserveSystemPtes(PageCount
, SystemPteSpace
);
370 // If it can fail, return NULL
372 if (Mdl
->MdlFlags
& MDL_MAPPING_CAN_FAIL
) return NULL
;
375 // Should we bugcheck?
377 if (!BugCheckOnFailure
) return NULL
;
380 // Yes, crash the system
382 KeBugCheckEx(NO_MORE_SYSTEM_PTES
, 0, PageCount
, 0, 0);
386 // Get the mapped address
388 Base
= (PVOID
)((ULONG_PTR
)MiPteToAddress(PointerPte
) + Mdl
->ByteOffset
);
393 TempPte
= ValidKernelPte
;
394 switch (CacheAttribute
)
401 MI_PAGE_DISABLE_CACHE(&TempPte
);
402 MI_PAGE_WRITE_THROUGH(&TempPte
);
405 case MiWriteCombined
:
408 // Enable write combining
410 MI_PAGE_DISABLE_CACHE(&TempPte
);
411 MI_PAGE_WRITE_COMBINED(&TempPte
);
429 if (*MdlPages
== LIST_HEAD
) break;
434 TempPte
.u
.Hard
.PageFrameNumber
= *MdlPages
;
435 MI_WRITE_VALID_PTE(PointerPte
++, TempPte
);
436 } while (++MdlPages
< LastPage
);
441 ASSERT((Mdl
->MdlFlags
& MDL_MAPPED_TO_SYSTEM_VA
) == 0);
442 Mdl
->MappedSystemVa
= Base
;
443 Mdl
->MdlFlags
|= MDL_MAPPED_TO_SYSTEM_VA
;
446 // Check if it was partial
448 if (Mdl
->MdlFlags
& MDL_PARTIAL
)
451 // Write the appropriate flag here too
453 Mdl
->MdlFlags
|= MDL_PARTIAL_HAS_BEEN_MAPPED
;
457 // Return the mapped address
471 MmMapLockedPages(IN PMDL Mdl
,
472 IN KPROCESSOR_MODE AccessMode
)
475 // Call the extended version
477 return MmMapLockedPagesSpecifyCache(Mdl
,
490 MmUnmapLockedPages(IN PVOID BaseAddress
,
494 PFN_COUNT PageCount
, ExtraPageCount
;
495 PPFN_NUMBER MdlPages
;
501 ASSERT(Mdl
->ByteCount
!= 0);
504 // Check if this is a kernel request
506 if (BaseAddress
> MM_HIGHEST_USER_ADDRESS
)
509 // Get base and count information
511 Base
= (PVOID
)((ULONG_PTR
)Mdl
->StartVa
+ Mdl
->ByteOffset
);
512 PageCount
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base
, Mdl
->ByteCount
);
517 ASSERT((Mdl
->MdlFlags
& MDL_PARENT_MAPPED_SYSTEM_VA
) == 0);
518 ASSERT(PageCount
!= 0);
519 ASSERT(Mdl
->MdlFlags
& MDL_MAPPED_TO_SYSTEM_VA
);
524 PointerPte
= MiAddressToPte(BaseAddress
);
527 // This should be a resident system PTE
529 ASSERT(PointerPte
>= MmSystemPtesStart
[SystemPteSpace
]);
530 ASSERT(PointerPte
<= MmSystemPtesEnd
[SystemPteSpace
]);
531 ASSERT(PointerPte
->u
.Hard
.Valid
== 1);
534 // Check if the caller wants us to free advanced pages
536 if (Mdl
->MdlFlags
& MDL_FREE_EXTRA_PTES
)
539 // Get the MDL page array
541 MdlPages
= MmGetMdlPfnArray(Mdl
);
543 /* Number of extra pages stored after the PFN array */
544 ExtraPageCount
= (PFN_COUNT
)*(MdlPages
+ PageCount
);
549 PageCount
+= ExtraPageCount
;
550 PointerPte
-= ExtraPageCount
;
551 ASSERT(PointerPte
>= MmSystemPtesStart
[SystemPteSpace
]);
552 ASSERT(PointerPte
<= MmSystemPtesEnd
[SystemPteSpace
]);
555 // Get the new base address
557 BaseAddress
= (PVOID
)((ULONG_PTR
)BaseAddress
-
558 (ExtraPageCount
<< PAGE_SHIFT
));
564 Mdl
->MdlFlags
&= ~(MDL_MAPPED_TO_SYSTEM_VA
|
565 MDL_PARTIAL_HAS_BEEN_MAPPED
|
566 MDL_FREE_EXTRA_PTES
);
569 // Release the system PTEs
571 MiReleaseSystemPtes(PointerPte
, PageCount
, SystemPteSpace
);
584 MmProbeAndLockPages(IN PMDL Mdl
,
585 IN KPROCESSOR_MODE AccessMode
,
586 IN LOCK_OPERATION Operation
)
588 PPFN_NUMBER MdlPages
;
589 PVOID Base
, Address
, LastAddress
, StartAddress
;
590 ULONG LockPages
, TotalPages
;
591 NTSTATUS Status
= STATUS_SUCCESS
;
592 PEPROCESS CurrentProcess
;
593 NTSTATUS ProbeStatus
;
594 PMMPTE PointerPte
, LastPte
;
596 PFN_NUMBER PageFrameIndex
;
599 USHORT OldRefCount
, RefCount
;
601 DPRINT("Probing MDL: %p\n", Mdl
);
606 ASSERT(Mdl
->ByteCount
!= 0);
607 ASSERT(((ULONG
)Mdl
->ByteOffset
& ~(PAGE_SIZE
- 1)) == 0);
608 ASSERT(((ULONG_PTR
)Mdl
->StartVa
& (PAGE_SIZE
- 1)) == 0);
609 ASSERT((Mdl
->MdlFlags
& (MDL_PAGES_LOCKED
|
610 MDL_MAPPED_TO_SYSTEM_VA
|
611 MDL_SOURCE_IS_NONPAGED_POOL
|
613 MDL_IO_SPACE
)) == 0);
616 // Get page and base information
618 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
622 // Get the addresses and how many pages we span (and need to lock)
624 Address
= (PVOID
)((ULONG_PTR
)Base
+ Mdl
->ByteOffset
);
625 LastAddress
= (PVOID
)((ULONG_PTR
)Address
+ Mdl
->ByteCount
);
626 LockPages
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Address
, Mdl
->ByteCount
);
627 ASSERT(LockPages
!= 0);
629 /* Block invalid access */
630 if ((AccessMode
!= KernelMode
) &&
631 ((LastAddress
> (PVOID
)MM_USER_PROBE_ADDRESS
) || (Address
>= LastAddress
)))
633 /* Caller should be in SEH, raise the error */
634 *MdlPages
= LIST_HEAD
;
635 ExRaiseStatus(STATUS_ACCESS_VIOLATION
);
641 if (Address
<= MM_HIGHEST_USER_ADDRESS
)
646 CurrentProcess
= PsGetCurrentProcess();
653 CurrentProcess
= NULL
;
657 // Save the number of pages we'll have to lock, and the start address
659 TotalPages
= LockPages
;
660 StartAddress
= Address
;
662 /* Large pages not supported */
663 ASSERT(!MI_IS_PHYSICAL_ADDRESS(Address
));
668 ProbeStatus
= STATUS_SUCCESS
;
679 *MdlPages
= LIST_HEAD
;
684 *(volatile CHAR
*)Address
;
687 // Check if this is write access (only probe for user-mode)
689 if ((Operation
!= IoReadAccess
) &&
690 (Address
<= MM_HIGHEST_USER_ADDRESS
))
693 // Probe for write too
695 ProbeForWriteChar(Address
);
701 Address
= PAGE_ALIGN((ULONG_PTR
)Address
+ PAGE_SIZE
);
708 } while (Address
< LastAddress
);
711 // Reset back to the original page
713 ASSERT(LockPages
== 0);
714 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
716 _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER
)
721 ProbeStatus
= _SEH2_GetExceptionCode();
726 // So how did that go?
728 if (ProbeStatus
!= STATUS_SUCCESS
)
733 DPRINT1("MDL PROBE FAILED!\n");
735 ExRaiseStatus(ProbeStatus
);
739 // Get the PTE and PDE
741 PointerPte
= MiAddressToPte(StartAddress
);
742 PointerPde
= MiAddressToPde(StartAddress
);
743 #if (_MI_PAGING_LEVELS >= 3)
744 DPRINT1("PAE/x64 Not Implemented\n");
751 ASSERT(MdlPages
== (PPFN_NUMBER
)(Mdl
+ 1));
754 // Check what kind of operation this is
756 if (Operation
!= IoReadAccess
)
759 // Set the write flag
761 Mdl
->MdlFlags
|= MDL_WRITE_OPERATION
;
766 // Remove the write flag
768 Mdl
->MdlFlags
&= ~(MDL_WRITE_OPERATION
);
772 // Mark the MDL as locked *now*
774 Mdl
->MdlFlags
|= MDL_PAGES_LOCKED
;
777 // Check if this came from kernel mode
779 if (Base
>= MM_HIGHEST_USER_ADDRESS
)
782 // We should not have a process
784 ASSERT(CurrentProcess
== NULL
);
788 // In kernel mode, we don't need to check for write access
790 Operation
= IoReadAccess
;
796 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
803 ASSERT(TotalPages
!= 0);
804 ASSERT(CurrentProcess
== PsGetCurrentProcess());
807 // Track locked pages
809 InterlockedExchangeAddSizeT(&CurrentProcess
->NumberOfLockedPages
,
815 Mdl
->Process
= CurrentProcess
;
817 /* Lock the process working set */
818 MiLockProcessWorkingSet(CurrentProcess
, PsGetCurrentThread());
826 LastPte
= MiAddressToPte((PVOID
)((ULONG_PTR
)LastAddress
- 1));
834 // Assume failure and check for non-mapped pages
836 *MdlPages
= LIST_HEAD
;
837 #if (_MI_PAGING_LEVELS >= 3)
838 /* Should be checking the PPE and PXE */
841 while ((PointerPde
->u
.Hard
.Valid
== 0) ||
842 (PointerPte
->u
.Hard
.Valid
== 0))
845 // What kind of lock were we using?
852 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
856 /* Release process working set */
857 MiUnlockProcessWorkingSet(CurrentProcess
, PsGetCurrentThread());
863 Address
= MiPteToAddress(PointerPte
);
865 //HACK: Pass a placeholder TrapInformation so the fault handler knows we're unlocked
866 Status
= MmAccessFault(FALSE
, Address
, KernelMode
, (PVOID
)0xBADBADA3);
867 if (!NT_SUCCESS(Status
))
872 DPRINT1("Access fault failed\n");
877 // What lock should we use?
884 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
888 /* Lock the process working set */
889 MiLockProcessWorkingSet(CurrentProcess
, PsGetCurrentThread());
894 // Check if this was a write or modify
896 if (Operation
!= IoReadAccess
)
899 // Check if the PTE is not writable
901 if (MI_IS_PAGE_WRITEABLE(PointerPte
) == FALSE
)
904 // Check if it's copy on write
906 if (MI_IS_PAGE_COPY_ON_WRITE(PointerPte
))
909 // Get the base address and allow a change for user-mode
911 Address
= MiPteToAddress(PointerPte
);
912 if (Address
<= MM_HIGHEST_USER_ADDRESS
)
915 // What kind of lock were we using?
922 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
926 /* Release process working set */
927 MiUnlockProcessWorkingSet(CurrentProcess
, PsGetCurrentThread());
934 //HACK: Pass a placeholder TrapInformation so the fault handler knows we're unlocked
935 Status
= MmAccessFault(TRUE
, Address
, KernelMode
, (PVOID
)0xBADBADA3);
936 if (!NT_SUCCESS(Status
))
941 DPRINT1("Access fault failed\n");
946 // Re-acquire the lock
953 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
957 /* Lock the process working set */
958 MiLockProcessWorkingSet(CurrentProcess
, PsGetCurrentThread());
969 // Fail, since we won't allow this
971 Status
= STATUS_ACCESS_VIOLATION
;
972 goto CleanupWithLock
;
979 PageFrameIndex
= PFN_FROM_PTE(PointerPte
);
980 Pfn1
= MiGetPfnEntry(PageFrameIndex
);
983 /* Either this is for kernel-mode, or the working set is held */
984 ASSERT((CurrentProcess
== NULL
) || (UsePfnLock
== FALSE
));
986 /* No Physical VADs supported yet */
987 if (CurrentProcess
) ASSERT(CurrentProcess
->PhysicalVadRoot
== NULL
);
989 /* This address should already exist and be fully valid */
990 ASSERT(Pfn1
->u3
.e2
.ReferenceCount
!= 0);
991 if (MI_IS_ROS_PFN(Pfn1
))
993 /* ReactOS Mm doesn't track share count */
994 ASSERT(Pfn1
->u3
.e1
.PageLocation
== ActiveAndValid
);
998 /* On ARM3 pages, we should see a valid share count */
999 ASSERT((Pfn1
->u2
.ShareCount
!= 0) && (Pfn1
->u3
.e1
.PageLocation
== ActiveAndValid
));
1001 /* We don't support mapping a prototype page yet */
1002 ASSERT((Pfn1
->u3
.e1
.PrototypePte
== 0) && (Pfn1
->OriginalPte
.u
.Soft
.Prototype
== 0));
1005 /* More locked pages! */
1006 InterlockedExchangeAddSizeT(&MmSystemLockPagesCount
, 1);
1008 /* Loop trying to update the reference count */
1011 /* Get the current reference count, make sure it's valid */
1012 OldRefCount
= Pfn1
->u3
.e2
.ReferenceCount
;
1013 ASSERT(OldRefCount
!= 0);
1014 ASSERT(OldRefCount
< 2500);
1016 /* Bump it up by one */
1017 RefCount
= InterlockedCompareExchange16((PSHORT
)&Pfn1
->u3
.e2
.ReferenceCount
,
1020 ASSERT(RefCount
!= 0);
1021 } while (OldRefCount
!= RefCount
);
1023 /* Was this the first lock attempt? */
1024 if (OldRefCount
!= 1)
1026 /* Someone else came through */
1027 InterlockedExchangeAddSizeT(&MmSystemLockPagesCount
, -1);
1033 // For I/O addresses, just remember this
1035 Mdl
->MdlFlags
|= MDL_IO_SPACE
;
1039 // Write the page and move on
1041 *MdlPages
++ = PageFrameIndex
;
1044 /* Check if we're on a PDE boundary */
1045 if (!((ULONG_PTR
)PointerPte
& (PD_SIZE
- 1))) PointerPde
++;
1046 } while (PointerPte
<= LastPte
);
1049 // What kind of lock were we using?
1056 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
1060 /* Release process working set */
1061 MiUnlockProcessWorkingSet(CurrentProcess
, PsGetCurrentThread());
1067 ASSERT((Mdl
->MdlFlags
& MDL_DESCRIBES_AWE
) == 0);
1072 // This is the failure path
1074 ASSERT(!NT_SUCCESS(Status
));
1077 // What kind of lock were we using?
1084 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
1088 /* Release process working set */
1089 MiUnlockProcessWorkingSet(CurrentProcess
, PsGetCurrentThread());
1093 // Pages must be locked so MmUnlock can work
1095 ASSERT(Mdl
->MdlFlags
& MDL_PAGES_LOCKED
);
1101 ExRaiseStatus(Status
);
1109 MmUnlockPages(IN PMDL Mdl
)
1111 PPFN_NUMBER MdlPages
, LastPage
;
1114 ULONG Flags
, PageCount
;
1116 USHORT RefCount
, OldRefCount
;
1118 DPRINT("Unlocking MDL: %p\n", Mdl
);
1123 ASSERT((Mdl
->MdlFlags
& MDL_PAGES_LOCKED
) != 0);
1124 ASSERT((Mdl
->MdlFlags
& MDL_SOURCE_IS_NONPAGED_POOL
) == 0);
1125 ASSERT((Mdl
->MdlFlags
& MDL_PARTIAL
) == 0);
1126 ASSERT(Mdl
->ByteCount
!= 0);
1129 // Get the process associated and capture the flags which are volatile
1131 Process
= Mdl
->Process
;
1132 Flags
= Mdl
->MdlFlags
;
1135 // Automagically undo any calls to MmGetSystemAddressForMdl's for this MDL
1137 if (Mdl
->MdlFlags
& MDL_MAPPED_TO_SYSTEM_VA
)
1140 // Unmap the pages from system space
1142 MmUnmapLockedPages(Mdl
->MappedSystemVa
, Mdl
);
1146 // Get the page count
1148 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
1149 Base
= (PVOID
)((ULONG_PTR
)Mdl
->StartVa
+ Mdl
->ByteOffset
);
1150 PageCount
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base
, Mdl
->ByteCount
);
1151 ASSERT(PageCount
!= 0);
1154 // We don't support AWE
1156 if (Flags
& MDL_DESCRIBES_AWE
) ASSERT(FALSE
);
1159 // Check if the buffer is mapped I/O space
1161 if (Flags
& MDL_IO_SPACE
)
1166 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
1171 LastPage
= MdlPages
+ PageCount
;
1175 // Last page, break out
1177 if (*MdlPages
== LIST_HEAD
) break;
1180 // Check if this page is in the PFN database
1182 Pfn1
= MiGetPfnEntry(*MdlPages
);
1185 /* Get the current entry and reference count */
1186 OldRefCount
= Pfn1
->u3
.e2
.ReferenceCount
;
1187 ASSERT(OldRefCount
!= 0);
1189 /* Is this already the last dereference */
1190 if (OldRefCount
== 1)
1192 /* It should be on a free list waiting for us */
1193 ASSERT(Pfn1
->u3
.e2
.ReferenceCount
== 1);
1194 ASSERT(Pfn1
->u3
.e1
.PageLocation
!= ActiveAndValid
);
1195 ASSERT(Pfn1
->u2
.ShareCount
== 0);
1197 /* Not supported yet */
1198 ASSERT((Pfn1
->u3
.e1
.PrototypePte
== 0) &&
1199 (Pfn1
->OriginalPte
.u
.Soft
.Prototype
== 0));
1202 InterlockedExchangeAddSizeT(&MmSystemLockPagesCount
, -1);
1204 /* Do the last dereference, we're done here */
1205 MiDecrementReferenceCount(Pfn1
, *MdlPages
);
1209 /* Loop decrementing one reference */
1212 /* Make sure it's still valid */
1213 OldRefCount
= Pfn1
->u3
.e2
.ReferenceCount
;
1214 ASSERT(OldRefCount
!= 0);
1216 /* Take off one reference */
1217 RefCount
= InterlockedCompareExchange16((PSHORT
)&Pfn1
->u3
.e2
.ReferenceCount
,
1220 ASSERT(RefCount
!= 0);
1221 } while (OldRefCount
!= RefCount
);
1222 ASSERT(RefCount
> 1);
1224 /* Are there only lock references left? */
1227 /* And does the page still have users? */
1228 if (Pfn1
->u2
.ShareCount
>= 1)
1230 /* Then it should still be valid */
1231 ASSERT(Pfn1
->u3
.e1
.PageLocation
== ActiveAndValid
);
1233 /* Not supported yet */
1234 ASSERT((Pfn1
->u3
.e1
.PrototypePte
== 0) &&
1235 (Pfn1
->OriginalPte
.u
.Soft
.Prototype
== 0));
1237 /* But there is one less "locked" page though */
1238 InterlockedExchangeAddSizeT(&MmSystemLockPagesCount
, -1);
1243 } while (++MdlPages
< LastPage
);
1248 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
1251 // Check if we have a process
1256 // Handle the accounting of locked pages
1258 ASSERT(Process
->NumberOfLockedPages
> 0);
1259 InterlockedExchangeAddSizeT(&Process
->NumberOfLockedPages
,
1260 -(LONG_PTR
)PageCount
);
1266 Mdl
->MdlFlags
&= ~MDL_IO_SPACE
;
1267 Mdl
->MdlFlags
&= ~MDL_PAGES_LOCKED
;
1272 // Check if we have a process
1277 // Handle the accounting of locked pages
1279 ASSERT(Process
->NumberOfLockedPages
> 0);
1280 InterlockedExchangeAddSizeT(&Process
->NumberOfLockedPages
,
1281 -(LONG_PTR
)PageCount
);
1287 LastPage
= MdlPages
+ PageCount
;
1291 // Last page reached
1293 if (*MdlPages
== LIST_HEAD
)
1296 // Were there no pages at all?
1298 if (MdlPages
== (PPFN_NUMBER
)(Mdl
+ 1))
1301 // We're already done
1303 Mdl
->MdlFlags
&= ~MDL_PAGES_LOCKED
;
1308 // Otherwise, stop here
1310 LastPage
= MdlPages
;
1314 /* Save the PFN entry instead for the secondary loop */
1315 *MdlPages
= (PFN_NUMBER
)MiGetPfnEntry(*MdlPages
);
1316 ASSERT(*MdlPages
!= 0);
1317 } while (++MdlPages
< LastPage
);
1322 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
1325 // Now grab the PFN lock for the actual unlock and dereference
1327 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
1330 /* Get the current entry and reference count */
1331 Pfn1
= (PMMPFN
)*MdlPages
;
1332 OldRefCount
= Pfn1
->u3
.e2
.ReferenceCount
;
1333 ASSERT(OldRefCount
!= 0);
1335 /* Is this already the last dereference */
1336 if (OldRefCount
== 1)
1338 /* It should be on a free list waiting for us */
1339 ASSERT(Pfn1
->u3
.e2
.ReferenceCount
== 1);
1340 ASSERT(Pfn1
->u3
.e1
.PageLocation
!= ActiveAndValid
);
1341 ASSERT(Pfn1
->u2
.ShareCount
== 0);
1343 /* Not supported yet */
1344 ASSERT(((Pfn1
->u3
.e1
.PrototypePte
== 0) &&
1345 (Pfn1
->OriginalPte
.u
.Soft
.Prototype
== 0)));
1348 InterlockedExchangeAddSizeT(&MmSystemLockPagesCount
, -1);
1350 /* Do the last dereference, we're done here */
1351 MiDecrementReferenceCount(Pfn1
, MiGetPfnEntryIndex(Pfn1
));
1355 /* Loop decrementing one reference */
1358 /* Make sure it's still valid */
1359 OldRefCount
= Pfn1
->u3
.e2
.ReferenceCount
;
1360 ASSERT(OldRefCount
!= 0);
1362 /* Take off one reference */
1363 RefCount
= InterlockedCompareExchange16((PSHORT
)&Pfn1
->u3
.e2
.ReferenceCount
,
1366 ASSERT(RefCount
!= 0);
1367 } while (OldRefCount
!= RefCount
);
1368 ASSERT(RefCount
> 1);
1370 /* Are there only lock references left? */
1373 /* And does the page still have users? */
1374 if (Pfn1
->u2
.ShareCount
>= 1)
1376 /* Then it should still be valid */
1377 ASSERT(Pfn1
->u3
.e1
.PageLocation
== ActiveAndValid
);
1379 /* Not supported yet */
1380 ASSERT(((Pfn1
->u3
.e1
.PrototypePte
== 0) &&
1381 (Pfn1
->OriginalPte
.u
.Soft
.Prototype
== 0)));
1383 /* But there is one less "locked" page though */
1384 InterlockedExchangeAddSizeT(&MmSystemLockPagesCount
, -1);
1388 } while (++MdlPages
< LastPage
);
1393 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
1398 Mdl
->MdlFlags
&= ~MDL_PAGES_LOCKED
;
1406 MmAdvanceMdl(IN PMDL Mdl
,
1407 IN ULONG NumberOfBytes
)
1410 return STATUS_NOT_IMPLEMENTED
;
1418 MmMapLockedPagesWithReservedMapping(IN PVOID MappingAddress
,
1420 IN PMDL MemoryDescriptorList
,
1421 IN MEMORY_CACHING_TYPE CacheType
)
1432 MmUnmapReservedMapping(IN PVOID BaseAddress
,
1434 IN PMDL MemoryDescriptorList
)
1444 MmPrefetchPages(IN ULONG NumberOfLists
,
1445 IN PREAD_LIST
*ReadLists
)
1448 return STATUS_NOT_IMPLEMENTED
;
1456 MmProtectMdlSystemAddress(IN PMDL MemoryDescriptorList
,
1457 IN ULONG NewProtect
)
1460 return STATUS_NOT_IMPLEMENTED
;
1468 MmProbeAndLockProcessPages(IN OUT PMDL MemoryDescriptorList
,
1469 IN PEPROCESS Process
,
1470 IN KPROCESSOR_MODE AccessMode
,
1471 IN LOCK_OPERATION Operation
)
1482 MmProbeAndLockSelectedPages(IN OUT PMDL MemoryDescriptorList
,
1483 IN LARGE_INTEGER PageList
[],
1484 IN KPROCESSOR_MODE AccessMode
,
1485 IN LOCK_OPERATION Operation
)
1495 MmMapMemoryDumpMdl(IN PMDL Mdl
)