#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"
+/* GLOBALS ********************************************************************/
+
BOOLEAN MmTrackPtes;
BOOLEAN MmTrackLockedPages;
+SIZE_T MmSystemLockPagesCount;
/* PUBLIC FUNCTIONS ***********************************************************/
//
// Reached the last page
//
- if (*Pages == -1) break;
-
- //
- // Sanity check
- //
- ASSERT(*Pages <= MmHighestPhysicalPage);
-
+ if (*Pages == LIST_HEAD) break;
+
//
// Get the page entry
//
Pfn1 = MiGetPfnEntry(*Pages);
- ASSERT(Pfn1->u3.ReferenceCount == 1);
+ ASSERT(Pfn1);
+ ASSERT(Pfn1->u2.ShareCount == 1);
+ ASSERT(MI_IS_PFN_DELETED(Pfn1) == TRUE);
+ if (Pfn1->u4.PteFrame != 0x1FFEDCB)
+ {
+ /* Corrupted PFN entry or invalid free */
+ KeBugCheckEx(MEMORY_MANAGEMENT, 0x1236, (ULONG_PTR)Mdl, (ULONG_PTR)Pages, *Pages);
+ }
//
// Clear it
//
Pfn1->u3.e1.StartOfAllocation = 0;
Pfn1->u3.e1.EndOfAllocation = 0;
+ Pfn1->u2.ShareCount = 0;
//
// Dereference it
//
- MmDereferencePage(*Pages);
+ ASSERT(Pfn1->u3.e2.ReferenceCount != 0);
+ if (Pfn1->u3.e2.ReferenceCount != 1)
+ {
+ /* Just take off one reference */
+ InterlockedDecrement16((PSHORT)&Pfn1->u3.e2.ReferenceCount);
+ }
+ else
+ {
+ /* We'll be nuking the whole page */
+ MiDecrementReferenceCount(Pfn1, *Pages);
+ }
//
// Clear this page and move on
//
- *Pages++ = -1;
+ *Pages++ = LIST_HEAD;
} while (--NumberOfPages != 0);
//
//
// We're done here
//
- if (*MdlPages == -1) break;
+ if (*MdlPages == LIST_HEAD) break;
//
// Write the PTE
return Base;
}
- //
- // In user-mode, let ReactOS do it
- //
- return MiMapLockedPagesInUserSpace(Mdl, Base, CacheType, BaseAddress);
+ UNIMPLEMENTED;
+ return NULL;
}
/*
}
else
{
- //
- // Let ReactOS handle it
- //
- MiUnmapLockedPagesInUserSpace(BaseAddress, Mdl);
+ UNIMPLEMENTED;
}
}
ULONG LockPages, TotalPages;
NTSTATUS Status = STATUS_SUCCESS;
PEPROCESS CurrentProcess;
- PETHREAD Thread;
- PMMSUPPORT AddressSpace;
NTSTATUS ProbeStatus;
PMMPTE PointerPte, LastPte;
PMMPDE PointerPde;
PFN_NUMBER PageFrameIndex;
- PMMPFN Pfn1;
BOOLEAN UsePfnLock;
KIRQL OldIrql;
+ USHORT OldRefCount, RefCount;
+ PMMPFN Pfn1;
DPRINT("Probing MDL: %p\n", Mdl);
//
LockPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Address, Mdl->ByteCount);
ASSERT(LockPages != 0);
+ /* Block invalid access */
+ if ((AccessMode != KernelMode) &&
+ ((LastAddress > (PVOID)MM_USER_PROBE_ADDRESS) || (Address >= LastAddress)))
+ {
+ /* Caller should be in SEH, raise the error */
+ *MdlPages = LIST_HEAD;
+ ExRaiseStatus(STATUS_ACCESS_VIOLATION);
+ }
+
//
- // Get the thread and process
+ // Get the process
//
- Thread = PsGetCurrentThread();
if (Address <= MM_HIGHEST_USER_ADDRESS)
{
//
TotalPages = LockPages;
StartAddress = Address;
+ /* Large pages not supported */
+ ASSERT(!MI_IS_PHYSICAL_ADDRESS(Address));
+
//
// Now probe them
//
//
// Assume failure
//
- *MdlPages = -1;
+ *MdlPages = LIST_HEAD;
//
// Read
//
// Next address...
//
- Address = (PVOID)((ULONG_PTR)Address + PAGE_SIZE);
- Address = PAGE_ALIGN(Address);
+ Address = PAGE_ALIGN((ULONG_PTR)Address + PAGE_SIZE);
//
// Next page...
//
PointerPte = MiAddressToPte(StartAddress);
PointerPde = MiAddressToPde(StartAddress);
+#if (_MI_PAGING_LEVELS >= 3)
+ DPRINT1("PAE/x64 Not Implemented\n");
+ ASSERT(FALSE);
+#endif
//
// Sanity check
//
UsePfnLock = TRUE;
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
- AddressSpace = NULL; // Keep compiler happy
}
else
{
//
Mdl->Process = CurrentProcess;
- //
- // Use the process lock
- //
+ /* Lock the process working set */
+ MiLockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
UsePfnLock = FALSE;
- AddressSpace = &CurrentProcess->Vm;
- MmLockAddressSpace(AddressSpace);
- OldIrql = DISPATCH_LEVEL; // Keep compiler happy
+ OldIrql = MM_NOIRQL;
}
//
//
// Assume failure and check for non-mapped pages
//
- *MdlPages = -1;
+ *MdlPages = LIST_HEAD;
#if (_MI_PAGING_LEVELS >= 3)
/* Should be checking the PPE and PXE */
ASSERT(FALSE);
}
else
{
- //
- // Release process address space lock
- //
- MmUnlockAddressSpace(AddressSpace);
+ /* Release process working set */
+ MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
}
//
}
else
{
- //
- // Use the address space lock
- //
- MmLockAddressSpace(AddressSpace);
+ /* Lock the process working set */
+ MiLockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
}
}
}
else
{
- //
- // Release process address space lock
- //
- MmUnlockAddressSpace(AddressSpace);
+ /* Release process working set */
+ MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
}
//
}
else
{
- //
- // Use the address space lock
- //
- MmLockAddressSpace(AddressSpace);
+ /* Lock the process working set */
+ MiLockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
}
//
// Grab the PFN
//
PageFrameIndex = PFN_FROM_PTE(PointerPte);
- if (PageFrameIndex <= MmHighestPhysicalPage)
+ Pfn1 = MiGetPfnEntry(PageFrameIndex);
+ if (Pfn1)
{
- //
- // Get the PFN entry
- //
- Pfn1 = MiGetPfnEntry(PageFrameIndex);
+ /* Either this is for kernel-mode, or the working set is held */
ASSERT((CurrentProcess == NULL) || (UsePfnLock == FALSE));
- //
- // Now lock the page
- //
- MmReferencePage(PageFrameIndex);
+ /* No Physical VADs supported yet */
+ if (CurrentProcess) ASSERT(CurrentProcess->PhysicalVadRoot == NULL);
+
+ /* This address should already exist and be fully valid */
+ ASSERT(Pfn1->u3.e2.ReferenceCount != 0);
+ if (MI_IS_ROS_PFN(Pfn1))
+ {
+ /* ReactOS Mm doesn't track share count */
+ ASSERT(Pfn1->u3.e1.PageLocation == ActiveAndValid);
+ }
+ else
+ {
+ /* On ARM3 pages, we should see a valid share count */
+ ASSERT((Pfn1->u2.ShareCount != 0) && (Pfn1->u3.e1.PageLocation == ActiveAndValid));
+
+ /* We don't support mapping a prototype page yet */
+ ASSERT((Pfn1->u3.e1.PrototypePte == 0) && (Pfn1->OriginalPte.u.Soft.Prototype == 0));
+ }
+
+ /* More locked pages! */
+ InterlockedExchangeAddSizeT(&MmSystemLockPagesCount, 1);
+
+ /* Loop trying to update the reference count */
+ do
+ {
+ /* Get the current reference count, make sure it's valid */
+ OldRefCount = Pfn1->u3.e2.ReferenceCount;
+ ASSERT(OldRefCount != 0);
+ ASSERT(OldRefCount < 2500);
+
+ /* Bump it up by one */
+ RefCount = InterlockedCompareExchange16((PSHORT)&Pfn1->u3.e2.ReferenceCount,
+ OldRefCount + 1,
+ OldRefCount);
+ ASSERT(RefCount != 0);
+ } while (OldRefCount != RefCount);
+
+ /* Was this the first lock attempt? */
+ if (OldRefCount != 1)
+ {
+ /* Someone else came through */
+ InterlockedExchangeAddSizeT(&MmSystemLockPagesCount, -1);
+ }
}
else
{
// Write the page and move on
//
*MdlPages++ = PageFrameIndex;
- if (!((ULONG_PTR)(++PointerPte) & (PAGE_SIZE - 1))) PointerPde++;
+ PointerPte++;
+
+ /* Check if we're on a PDE boundary */
+ if (!((ULONG_PTR)PointerPte & (PD_SIZE - 1))) PointerPde++;
} while (PointerPte <= LastPte);
//
}
else
{
- //
- // Release process address space lock
- //
- MmUnlockAddressSpace(AddressSpace);
+ /* Release process working set */
+ MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
}
//
}
else
{
- //
- // Release process address space lock
- //
- MmUnlockAddressSpace(AddressSpace);
+ /* Release process working set */
+ MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
}
Cleanup:
//
PVOID Base;
ULONG Flags, PageCount;
KIRQL OldIrql;
+ USHORT RefCount, OldRefCount;
+ PMMPFN Pfn1;
DPRINT("Unlocking MDL: %p\n", Mdl);
//
//
// Last page, break out
//
- if (*MdlPages == -1) break;
+ if (*MdlPages == LIST_HEAD) break;
//
// Check if this page is in the PFN database
//
- if (*MdlPages <= MmHighestPhysicalPage)
+ Pfn1 = MiGetPfnEntry(*MdlPages);
+ if (Pfn1)
{
- //
- // Unlock and dereference
- //
- MmDereferencePage(*MdlPages);
+ /* Get the current entry and reference count */
+ OldRefCount = Pfn1->u3.e2.ReferenceCount;
+ ASSERT(OldRefCount != 0);
+
+ /* Is this already the last dereference */
+ if (OldRefCount == 1)
+ {
+ /* It should be on a free list waiting for us */
+ ASSERT(Pfn1->u3.e2.ReferenceCount == 1);
+ ASSERT(Pfn1->u3.e1.PageLocation != ActiveAndValid);
+ ASSERT(Pfn1->u2.ShareCount == 0);
+
+ /* Not supported yet */
+ ASSERT(((Pfn1->u3.e1.PrototypePte == 0) &&
+ (Pfn1->OriginalPte.u.Soft.Prototype == 0)));
+
+ /* One less page */
+ InterlockedExchangeAddSizeT(&MmSystemLockPagesCount, -1);
+
+ /* Do the last dereference, we're done here */
+ MiDecrementReferenceCount(Pfn1, *MdlPages);
+ }
+ else
+ {
+ /* Loop decrementing one reference */
+ do
+ {
+ /* Make sure it's still valid */
+ OldRefCount = Pfn1->u3.e2.ReferenceCount;
+ ASSERT(OldRefCount != 0);
+
+ /* Take off one reference */
+ RefCount = InterlockedCompareExchange16((PSHORT)&Pfn1->u3.e2.ReferenceCount,
+ OldRefCount - 1,
+ OldRefCount);
+ ASSERT(RefCount != 0);
+ } while (OldRefCount != RefCount);
+ ASSERT(RefCount > 1);
+
+ /* Are there only lock references left? */
+ if (RefCount == 2)
+ {
+ /* And does the page still have users? */
+ if (Pfn1->u2.ShareCount >= 1)
+ {
+ /* Then it should still be valid */
+ ASSERT(Pfn1->u3.e1.PageLocation == ActiveAndValid);
+
+ /* Not supported yet */
+ ASSERT(((Pfn1->u3.e1.PrototypePte == 0) &&
+ (Pfn1->OriginalPte.u.Soft.Prototype == 0)));
+
+ /* But there is one less "locked" page though */
+ InterlockedExchangeAddSizeT(&MmSystemLockPagesCount, -1);
+ }
+ }
+ }
}
} while (++MdlPages < LastPage);
//
// Last page reached
//
- if (*MdlPages == -1)
+ if (*MdlPages == LIST_HEAD)
{
//
// Were there no pages at all?
break;
}
- //
- // Sanity check
- //
- ASSERT(*MdlPages <= MmHighestPhysicalPage);
+ /* Save the PFN entry instead for the secondary loop */
+ *MdlPages = (PFN_NUMBER)MiGetPfnEntry(*MdlPages);
+ ASSERT((*MdlPages) != 0);
} while (++MdlPages < LastPage);
//
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
do
{
- //
- // Unlock and dereference
- //
- MmDereferencePage(*MdlPages);
+ /* Get the current entry and reference count */
+ Pfn1 = (PMMPFN)(*MdlPages);
+ OldRefCount = Pfn1->u3.e2.ReferenceCount;
+ ASSERT(OldRefCount != 0);
+
+ /* Is this already the last dereference */
+ if (OldRefCount == 1)
+ {
+ /* It should be on a free list waiting for us */
+ ASSERT(Pfn1->u3.e2.ReferenceCount == 1);
+ ASSERT(Pfn1->u3.e1.PageLocation != ActiveAndValid);
+ ASSERT(Pfn1->u2.ShareCount == 0);
+
+ /* Not supported yet */
+ ASSERT(((Pfn1->u3.e1.PrototypePte == 0) &&
+ (Pfn1->OriginalPte.u.Soft.Prototype == 0)));
+
+ /* One less page */
+ InterlockedExchangeAddSizeT(&MmSystemLockPagesCount, -1);
+
+ /* Do the last dereference, we're done here */
+ MiDecrementReferenceCount(Pfn1, MiGetPfnEntryIndex(Pfn1));
+ }
+ else
+ {
+ /* Loop decrementing one reference */
+ do
+ {
+ /* Make sure it's still valid */
+ OldRefCount = Pfn1->u3.e2.ReferenceCount;
+ ASSERT(OldRefCount != 0);
+
+ /* Take off one reference */
+ RefCount = InterlockedCompareExchange16((PSHORT)&Pfn1->u3.e2.ReferenceCount,
+ OldRefCount - 1,
+ OldRefCount);
+ ASSERT(RefCount != 0);
+ } while (OldRefCount != RefCount);
+ ASSERT(RefCount > 1);
+
+ /* Are there only lock references left? */
+ if (RefCount == 2)
+ {
+ /* And does the page still have users? */
+ if (Pfn1->u2.ShareCount >= 1)
+ {
+ /* Then it should still be valid */
+ ASSERT(Pfn1->u3.e1.PageLocation == ActiveAndValid);
+
+ /* Not supported yet */
+ ASSERT(((Pfn1->u3.e1.PrototypePte == 0) &&
+ (Pfn1->OriginalPte.u.Soft.Prototype == 0)));
+
+ /* But there is one less "locked" page though */
+ InterlockedExchangeAddSizeT(&MmSystemLockPagesCount, -1);
+ }
+ }
+ }
} while (++MdlPages < LastPage);
//