*/
/* INCLUDES *******************************************************************/
+/* So long, and Thanks for All the Fish */
#include <ntoskrnl.h>
#define NDEBUG
IN ULONG NewAccessProtection,
OUT PULONG OldAccessProtection OPTIONAL);
+VOID
+NTAPI
+MiFlushTbAndCapture(IN PMMVAD FoundVad,
+ IN PMMPTE PointerPte,
+ IN ULONG ProtectionMask,
+ IN PMMPFN Pfn1,
+ IN BOOLEAN CaptureDirtyBit);
+
+
/* PRIVATE FUNCTIONS **********************************************************/
+/* Walks the page tables covering [StartingAddress, EndingAddress] in the
+   context of Process and returns the number of committed pages in that
+   range of the given VAD. For a MemCommit VAD the whole range is assumed
+   committed and explicitly decommitted PTEs are subtracted; for any other
+   VAD, committed PTEs are counted up instead.
+   NOTE(review): assumes the caller holds whatever locks are needed to
+   walk this process' page tables (MiMakeSystemAddressValid is used to
+   fault page tables in) -- confirm against callers. */
+ULONG
+NTAPI
+MiCalculatePageCommitment(IN ULONG_PTR StartingAddress,
+ IN ULONG_PTR EndingAddress,
+ IN PMMVAD Vad,
+ IN PEPROCESS Process)
+{
+ PMMPTE PointerPte, LastPte, PointerPde;
+ ULONG CommittedPages;
+
+ /* Compute starting and ending PTE and PDE addresses */
+ PointerPde = MiAddressToPde(StartingAddress);
+ PointerPte = MiAddressToPte(StartingAddress);
+ LastPte = MiAddressToPte(EndingAddress);
+
+ /* Handle committed pages first */
+ if (Vad->u.VadFlags.MemCommit == 1)
+ {
+ /* This is a committed VAD, so assume the whole range is committed */
+ CommittedPages = (ULONG)BYTES_TO_PAGES(EndingAddress - StartingAddress);
+
+ /* Is the PDE demand-zero? (in the PTE self-map, the PDE that maps a
+ PTE address is itself obtained with MiAddressToPte of that PTE) */
+ PointerPde = MiAddressToPte(PointerPte);
+ if (PointerPde->u.Long != 0)
+ {
+ /* It is not. Is it valid? */
+ if (PointerPde->u.Hard.Valid == 0)
+ {
+ /* Fault it in */
+ PointerPte = MiPteToAddress(PointerPde);
+ MiMakeSystemAddressValid(PointerPte, Process);
+ }
+ }
+ else
+ {
+ /* It is, skip it and move to the next PDE, unless we're done */
+ PointerPde++;
+ PointerPte = MiPteToAddress(PointerPde);
+ if (PointerPte > LastPte) return CommittedPages;
+ }
+
+ /* Now loop all the PTEs in the range */
+ while (PointerPte <= LastPte)
+ {
+ /* Have we crossed a PDE boundary? */
+ if (MiIsPteOnPdeBoundary(PointerPte))
+ {
+ /* Is this PDE demand zero? */
+ PointerPde = MiAddressToPte(PointerPte);
+ if (PointerPde->u.Long != 0)
+ {
+ /* It isn't -- is it valid? */
+ if (PointerPde->u.Hard.Valid == 0)
+ {
+ /* Nope, fault it in */
+ PointerPte = MiPteToAddress(PointerPde);
+ MiMakeSystemAddressValid(PointerPte, Process);
+ }
+ }
+ else
+ {
+ /* It is, skip it and move to the next PDE */
+ PointerPde++;
+ PointerPte = MiPteToAddress(PointerPde);
+ continue;
+ }
+ }
+
+ /* Is this PTE demand zero? */
+ if (PointerPte->u.Long != 0)
+ {
+ /* It isn't -- is it a decommitted, invalid, or faulted PTE? */
+ if ((PointerPte->u.Soft.Protection == MM_DECOMMIT) &&
+ (PointerPte->u.Hard.Valid == 0) &&
+ ((PointerPte->u.Soft.Prototype == 0) ||
+ (PointerPte->u.Soft.PageFileHigh == MI_PTE_LOOKUP_NEEDED)))
+ {
+ /* It is, so remove it from the count of committed pages */
+ CommittedPages--;
+ }
+ }
+
+ /* Move to the next PTE */
+ PointerPte++;
+ }
+
+ /* Return how many committed pages there still are */
+ return CommittedPages;
+ }
+
+ /* This is a non-committed VAD, so assume none of it is committed */
+ CommittedPages = 0;
+
+ /* Is the PDE demand-zero? */
+ PointerPde = MiAddressToPte(PointerPte);
+ if (PointerPde->u.Long != 0)
+ {
+ /* It isn't -- is it invalid? */
+ if (PointerPde->u.Hard.Valid == 0)
+ {
+ /* It is, so page it in */
+ PointerPte = MiPteToAddress(PointerPde);
+ MiMakeSystemAddressValid(PointerPte, Process);
+ }
+ }
+ else
+ {
+ /* It is, so skip it and move to the next PDE */
+ PointerPde++;
+ PointerPte = MiPteToAddress(PointerPde);
+ if (PointerPte > LastPte) return CommittedPages;
+ }
+
+ /* Loop all the PTEs in this PDE */
+ while (PointerPte <= LastPte)
+ {
+ /* Have we crossed a PDE boundary? */
+ if (MiIsPteOnPdeBoundary(PointerPte))
+ {
+ /* Is this new PDE demand-zero? */
+ PointerPde = MiAddressToPte(PointerPte);
+ if (PointerPde->u.Long != 0)
+ {
+ /* It isn't. Is it valid? */
+ if (PointerPde->u.Hard.Valid == 0)
+ {
+ /* It isn't, so make it valid */
+ PointerPte = MiPteToAddress(PointerPde);
+ MiMakeSystemAddressValid(PointerPte, Process);
+ }
+ }
+ else
+ {
+ /* It is, so skip it and move to the next one */
+ PointerPde++;
+ PointerPte = MiPteToAddress(PointerPde);
+ continue;
+ }
+ }
+
+ /* Is this PTE demand-zero? */
+ if (PointerPte->u.Long != 0)
+ {
+ /* Nope. Is it a valid, non-decommitted, non-paged out PTE? */
+ if ((PointerPte->u.Soft.Protection != MM_DECOMMIT) ||
+ (PointerPte->u.Hard.Valid == 1) ||
+ ((PointerPte->u.Soft.Prototype == 1) &&
+ (PointerPte->u.Soft.PageFileHigh != MI_PTE_LOOKUP_NEEDED)))
+ {
+ /* It is! So we'll treat this as a committed page */
+ CommittedPages++;
+ }
+ }
+
+ /* Move to the next PTE */
+ PointerPte++;
+ }
+
+ /* Return how many committed pages we found in this VAD */
+ return CommittedPages;
+}
+
ULONG
NTAPI
MiMakeSystemAddressValid(IN PVOID PageTableVirtualAddress,
IN PEPROCESS CurrentProcess)
{
NTSTATUS Status;
- BOOLEAN LockChange = FALSE;
+ BOOLEAN WsShared = FALSE, WsSafe = FALSE, LockChange = FALSE;
+ PETHREAD CurrentThread = PsGetCurrentThread();
/* Must be a non-pool page table, since those are double-mapped already */
ASSERT(PageTableVirtualAddress > MM_HIGHEST_USER_ADDRESS);
/* Check if the page table is valid */
while (!MmIsAddressValid(PageTableVirtualAddress))
{
+ /* Release the working set lock */
+ MiUnlockProcessWorkingSetForFault(CurrentProcess,
+ CurrentThread,
+ &WsSafe,
+ &WsShared);
+
/* Fault it in */
Status = MmAccessFault(FALSE, PageTableVirtualAddress, KernelMode, NULL);
if (!NT_SUCCESS(Status))
(ULONG_PTR)PageTableVirtualAddress);
}
+ /* Lock the working set again */
+ MiLockProcessWorkingSetForFault(CurrentProcess,
+ CurrentThread,
+ WsSafe,
+ WsShared);
+
/* This flag will be useful later when we do better locking */
LockChange = TRUE;
}
{
PFN_COUNT ActualPages = 0;
PETHREAD CurrentThread = PsGetCurrentThread();
- PMMPFN Pfn1;
- //PMMPFN Pfn2;
+ PMMPFN Pfn1, Pfn2;
PFN_NUMBER PageFrameIndex, PageTableIndex;
KIRQL OldIrql;
ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
/* Get the page table entry */
PageTableIndex = Pfn1->u4.PteFrame;
- //Pfn2 = MiGetPfnEntry(PageTableIndex);
+ Pfn2 = MiGetPfnEntry(PageTableIndex);
/* Lock the PFN database */
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
MiDecrementShareCount(Pfn1, PageFrameIndex);
/* Decrement the page table too */
- DPRINT("FIXME: ARM3 should decrement the pool PDE refcount for: %p\n", PageTableIndex);
- #if 0 // ARM3: Dont't trust this yet
MiDecrementShareCount(Pfn2, PageTableIndex);
- #endif
/* Release the PFN database */
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
/* Destroy the PTE */
- PointerPte->u.Long = 0;
+ MI_ERASE_PTE(PointerPte);
+ }
+ else
+ {
+ /*
+ * The only other ARM3 possibility is a demand zero page, which would
+ * mean freeing some of the paged pool pages that haven't even been
+ * touched yet, as part of a larger allocation.
+ *
+ * Right now, we shouldn't expect any page file information in the PTE
+ */
+ ASSERT(PointerPte->u.Soft.PageFileHigh == 0);
+
+ /* Destroy the PTE */
+ MI_ERASE_PTE(PointerPte);
}
/* Actual legitimate pages */
ActualPages++;
}
- else
- {
- /*
- * The only other ARM3 possibility is a demand zero page, which would
- * mean freeing some of the paged pool pages that haven't even been
- * touched yet, as part of a larger allocation.
- *
- * Right now, we shouldn't expect any page file information in the PTE
- */
- ASSERT(PointerPte->u.Soft.PageFileHigh == 0);
-
- /* Destroy the PTE */
- PointerPte->u.Long = 0;
- }
/* Keep going */
PointerPte++;
MiDecrementShareCount(Pfn1, PageFrameIndex);
/* Either a fork, or this is the shared user data page */
- if (PointerPte <= MiHighestUserPte)
+ if ((PointerPte <= MiHighestUserPte) && (PrototypePte != Pfn1->PteAddress))
{
/* If it's not the shared user page, then crash, since there's no fork() yet */
if ((PAGE_ALIGN(VirtualAddress) != (PVOID)USER_SHARED_DATA) ||
{
/* Must be some sort of memory corruption */
KeBugCheckEx(MEMORY_MANAGEMENT,
- 0x400,
+ 0x400,
(ULONG_PTR)PointerPte,
(ULONG_PTR)PrototypePte,
(ULONG_PTR)Pfn1->PteAddress);
/* There should only be 1 shared reference count */
ASSERT(Pfn1->u2.ShareCount == 1);
- /* FIXME: Drop the reference on the page table. For now, leak it until RosMM is gone */
- //DPRINT1("Dropping a ref...\n");
+ /* Drop the reference on the page table. */
MiDecrementShareCount(MiGetPfnEntry(Pfn1->u4.PteFrame), Pfn1->u4.PteFrame);
/* Mark the PFN for deletion and dereference what should be the last ref */
}
/* Destroy the PTE and flush the TLB */
- PointerPte->u.Long = 0;
+ MI_ERASE_PTE(PointerPte);
KeFlushCurrentTb();
}
KIRQL OldIrql;
BOOLEAN AddressGap = FALSE;
PSUBSECTION Subsection;
- PUSHORT UsedPageTableEntries;
/* Get out if this is a fake VAD, RosMm will free the marea pages */
if ((Vad) && (Vad->u.VadFlags.Spare == 1)) return;
/* Now we should have a valid PDE, mapped in, and still have some VA */
ASSERT(PointerPde->u.Hard.Valid == 1);
ASSERT(Va <= EndingAddress);
- UsedPageTableEntries = &MmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Va)];
/* Check if this is a section VAD with gaps in it */
if ((AddressGap) && (LastPrototypePte))
TempPte = *PointerPte;
if (TempPte.u.Long)
{
- DPRINT("Decrement used PTEs by address: %lx\n", Va);
- (*UsedPageTableEntries)--;
- ASSERT((*UsedPageTableEntries) < PTE_COUNT);
- DPRINT("Refs: %lx\n", (*UsedPageTableEntries));
-
+ MiDecrementPageTableReferences((PVOID)Va);
+
/* Check if the PTE is actually mapped in */
- if (TempPte.u.Long & 0xFFFFFC01)
+ if (MI_IS_MAPPED_PTE(&TempPte))
{
/* Are we dealing with section VAD? */
if ((LastPrototypePte) && (PrototypePte > LastPrototypePte))
(TempPte.u.Soft.Prototype == 1))
{
/* Just nuke it */
- PointerPte->u.Long = 0;
+ MI_ERASE_PTE(PointerPte);
}
else
{
else
{
/* The PTE was never mapped, just nuke it here */
- PointerPte->u.Long = 0;
+ MI_ERASE_PTE(PointerPte);
}
}
/* The PDE should still be valid at this point */
ASSERT(PointerPde->u.Hard.Valid == 1);
- DPRINT("Should check if handles for: %p are zero (PDE: %lx)\n", Va, PointerPde->u.Hard.PageFrameNumber);
- if (!(*UsedPageTableEntries))
+ /* Check remaining PTE count (go back 1 page due to above loop) */
+ if (MiQueryPageTableReferences((PVOID)(Va - PAGE_SIZE)) == 0)
{
- DPRINT("They are!\n");
if (PointerPde->u.Long != 0)
{
- DPRINT("PDE active: %lx in %16s\n", PointerPde->u.Hard.PageFrameNumber, CurrentProcess->ImageFileName);
-
/* Delete the PTE proper */
MiDeletePte(PointerPde,
MiPteToAddress(PointerPde),
NULL);
}
}
-
+
/* Release the lock and get out if we're done */
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
if (Va > EndingAddress) return;
//
// Return the error
//
- return STATUS_WORKING_SET_QUOTA;
+ _SEH2_YIELD(return STATUS_WORKING_SET_QUOTA);
}
//
//
// Check if we had allocated pool
//
- if (HavePoolAddress) ExFreePool(PoolAddress);
+ if (HavePoolAddress) ExFreePoolWithTag(PoolAddress, 'VmRw');
//
// Check if we failed during the probe
//
// Check if we had allocated pool
//
- if (HavePoolAddress) ExFreePool(PoolAddress);
+ if (HavePoolAddress) ExFreePoolWithTag(PoolAddress, 'VmRw');
//
// All bytes read
{
MMPTE TempPte;
PMMPFN Pfn;
+ PEPROCESS CurrentProcess;
+ PETHREAD CurrentThread;
+ BOOLEAN WsSafe, WsShared;
+ ULONG Protect;
+ KIRQL OldIrql;
PAGED_CODE();
/* Copy this PTE's contents */
ASSERT(TempPte.u.Long);
/* Check for a special prototype format */
- if (TempPte.u.Soft.Valid == 0 &&
- TempPte.u.Soft.Prototype == 1)
+ if ((TempPte.u.Soft.Valid == 0) &&
+ (TempPte.u.Soft.Prototype == 1))
{
- /* Unsupported now */
- UNIMPLEMENTED;
- ASSERT(FALSE);
+ /* Check if the prototype PTE is not yet pointing to a PTE */
+ if (TempPte.u.Soft.PageFileHigh == MI_PTE_LOOKUP_NEEDED)
+ {
+ /* The prototype PTE contains the protection */
+ return MmProtectToValue[TempPte.u.Soft.Protection];
+ }
+
+ /* Get a pointer to the underlying shared PTE */
+ PointerPte = MiProtoPteToPte(&TempPte);
+
+ /* Since the PTE we want to read can be paged out at any time, we need
+ to release the working set lock first, so that it can be paged in */
+ CurrentThread = PsGetCurrentThread();
+ CurrentProcess = PsGetCurrentProcess();
+ MiUnlockProcessWorkingSetForFault(CurrentProcess,
+ CurrentThread,
+ &WsSafe,
+ &WsShared);
+
+ /* Now read the PTE value */
+ TempPte = *PointerPte;
+
+ /* Check if that one is invalid */
+ if (!TempPte.u.Hard.Valid)
+ {
+ /* We get the protection directly from this PTE */
+ Protect = MmProtectToValue[TempPte.u.Soft.Protection];
+ }
+ else
+ {
+ /* The PTE is valid, so we might need to get the protection from
+ the PFN. Lock the PFN database */
+ OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
+
+ /* Check if the PDE is still valid */
+ if (MiAddressToPte(PointerPte)->u.Hard.Valid == 0)
+ {
+ /* It's not, make it valid */
+ MiMakeSystemAddressValidPfn(PointerPte, OldIrql);
+ }
+
+ /* Now it's safe to read the PTE value again */
+ TempPte = *PointerPte;
+ ASSERT(TempPte.u.Long != 0);
+
+ /* Check again if the PTE is invalid */
+ if (!TempPte.u.Hard.Valid)
+ {
+ /* The PTE is not valid, so we can use it's protection field */
+ Protect = MmProtectToValue[TempPte.u.Soft.Protection];
+ }
+ else
+ {
+ /* The PTE is valid, so we can find the protection in the
+ OriginalPte field of the PFN */
+ Pfn = MI_PFN_ELEMENT(TempPte.u.Hard.PageFrameNumber);
+ Protect = MmProtectToValue[Pfn->OriginalPte.u.Soft.Protection];
+ }
+
+ /* Release the PFN database */
+ KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
+ }
+
+ /* Lock the working set again */
+ MiLockProcessWorkingSetForFault(CurrentProcess,
+ CurrentThread,
+ WsSafe,
+ WsShared);
+
+ return Protect;
}
/* In the easy case of transition or demand zero PTE just return its protection */
/* If we get here, the PTE is valid, so look up the page in PFN database */
Pfn = MiGetPfnEntry(TempPte.u.Hard.PageFrameNumber);
-
if (!Pfn->u3.e1.PrototypePte)
{
/* Return protection of the original pte */
+ ASSERT(Pfn->u4.AweAllocation == 0);
return MmProtectToValue[Pfn->OriginalPte.u.Soft.Protection];
}
- /* This is hardware PTE */
- UNIMPLEMENTED;
- ASSERT(FALSE);
-
- return PAGE_NOACCESS;
+ /* This is software PTE */
+ DPRINT("Prototype PTE: %lx %p\n", TempPte.u.Hard.PageFrameNumber, Pfn);
+ DPRINT("VA: %p\n", MiPteToAddress(&TempPte));
+ DPRINT("Mask: %lx\n", TempPte.u.Soft.Protection);
+ DPRINT("Mask2: %lx\n", Pfn->OriginalPte.u.Soft.Protection);
+ return MmProtectToValue[TempPte.u.Soft.Protection];
}
ULONG
OUT PVOID *NextVa)
{
- PMMPTE PointerPte;
+ PMMPTE PointerPte, ProtoPte;
PMMPDE PointerPde;
- MMPTE TempPte;
+#if (_MI_PAGING_LEVELS >= 3)
+ PMMPPE PointerPpe;
+#endif
+#if (_MI_PAGING_LEVELS >= 4)
+ PMMPXE PointerPxe;
+#endif
+ MMPTE TempPte, TempProtoPte;
BOOLEAN DemandZeroPte = TRUE, ValidPte = FALSE;
- ULONG State = MEM_RESERVE, Protect = 0, LockChange;
+ ULONG State = MEM_RESERVE, Protect = 0;
ASSERT((Vad->StartingVpn <= ((ULONG_PTR)Va >> PAGE_SHIFT)) &&
(Vad->EndingVpn >= ((ULONG_PTR)Va >> PAGE_SHIFT)));
/* Get the PDE and PTE for the address */
PointerPde = MiAddressToPde(Va);
PointerPte = MiAddressToPte(Va);
+#if (_MI_PAGING_LEVELS >= 3)
+ PointerPpe = MiAddressToPpe(Va);
+#endif
+#if (_MI_PAGING_LEVELS >= 4)
+ PointerPxe = MiAddressToPxe(Va);
+#endif
/* Return the next range */
*NextVa = (PVOID)((ULONG_PTR)Va + PAGE_SIZE);
- /* Loop to make sure the PDE is valid */
do
{
- /* Try again */
- LockChange = 0;
+#if (_MI_PAGING_LEVELS >= 4)
+ /* Does the PXE exist? */
+ if (PointerPxe->u.Long == 0)
+ {
+ /* It does not, next range starts at the next PXE */
+ *NextVa = MiPxeToAddress(PointerPxe + 1);
+ break;
+ }
+
+ /* Is the PXE valid? */
+ if (PointerPxe->u.Hard.Valid == 0)
+ {
+ /* Is isn't, fault it in (make the PPE accessible) */
+ MiMakeSystemAddressValid(PointerPpe, TargetProcess);
+ }
+#endif
+#if (_MI_PAGING_LEVELS >= 3)
+ /* Does the PPE exist? */
+ if (PointerPpe->u.Long == 0)
+ {
+ /* It does not, next range starts at the next PPE */
+ *NextVa = MiPpeToAddress(PointerPpe + 1);
+ break;
+ }
+
+ /* Is the PPE valid? */
+ if (PointerPpe->u.Hard.Valid == 0)
+ {
+ /* Is isn't, fault it in (make the PDE accessible) */
+ MiMakeSystemAddressValid(PointerPde, TargetProcess);
+ }
+#endif
- /* Is the PDE empty? */
- if (!PointerPde->u.Long)
+ /* Does the PDE exist? */
+ if (PointerPde->u.Long == 0)
{
- /* No address in this range used yet, move to the next PDE range */
+ /* It does not, next range starts at the next PDE */
*NextVa = MiPdeToAddress(PointerPde + 1);
break;
}
- /* The PDE is not empty, but is it faulted in? */
- if (!PointerPde->u.Hard.Valid)
+ /* Is the PDE valid? */
+ if (PointerPde->u.Hard.Valid == 0)
{
- /* It isn't, go ahead and do the fault */
- LockChange = MiMakeSystemAddressValid(MiPdeToPte(PointerPde),
- TargetProcess);
+ /* Is isn't, fault it in (make the PTE accessible) */
+ MiMakeSystemAddressValid(PointerPte, TargetProcess);
}
- /* Check if the PDE was faulted in, making the PTE readable */
- if (!LockChange) ValidPte = TRUE;
- } while (LockChange);
+ /* We have a PTE that we can access now! */
+ ValidPte = TRUE;
+
+ } while (FALSE);
/* Is it safe to try reading the PTE? */
if (ValidPte)
{
/* FIXME: watch out for large pages */
+ ASSERT(PointerPde->u.Hard.LargePage == FALSE);
/* Capture the PTE */
TempPte = *PointerPte;
- if (TempPte.u.Long)
+ if (TempPte.u.Long != 0)
{
/* The PTE is valid, so it's not zeroed out */
DemandZeroPte = FALSE;
- /* Check if it's valid or has a valid protection mask */
- ASSERT(TempPte.u.Soft.Prototype == 0);
- if ((TempPte.u.Soft.Protection != MM_DECOMMIT) ||
- (TempPte.u.Hard.Valid == 1))
+ /* Is it a decommited, invalid, or faulted PTE? */
+ if ((TempPte.u.Soft.Protection == MM_DECOMMIT) &&
+ (TempPte.u.Hard.Valid == 0) &&
+ ((TempPte.u.Soft.Prototype == 0) ||
+ (TempPte.u.Soft.PageFileHigh == MI_PTE_LOOKUP_NEEDED)))
+ {
+ /* Otherwise our defaults should hold */
+ ASSERT(Protect == 0);
+ ASSERT(State == MEM_RESERVE);
+ }
+ else
{
/* This means it's committed */
State = MEM_COMMIT;
+ /* We don't support these */
+ ASSERT(Vad->u.VadFlags.VadType != VadDevicePhysicalMemory);
+ ASSERT(Vad->u.VadFlags.VadType != VadRotatePhysical);
+ ASSERT(Vad->u.VadFlags.VadType != VadAwe);
+
/* Get protection state of this page */
Protect = MiGetPageProtection(PointerPte);
- }
- else
- {
- /* Otherwise our defaults should hold */
- ASSERT(Protect == 0);
- ASSERT(State == MEM_RESERVE);
+
+ /* Check if this is an image-backed VAD */
+ if ((TempPte.u.Soft.Valid == 0) &&
+ (TempPte.u.Soft.Prototype == 1) &&
+ (Vad->u.VadFlags.PrivateMemory == 0) &&
+ (Vad->ControlArea))
+ {
+ DPRINT1("Not supported\n");
+ ASSERT(FALSE);
+ }
}
}
}
/* Check if this was a demand-zero PTE, since we need to find the state */
if (DemandZeroPte)
{
- /* Check if the VAD is for committed memory */
- if (Vad->u.VadFlags.MemCommit)
+ /* Not yet handled */
+ ASSERT(Vad->u.VadFlags.VadType != VadDevicePhysicalMemory);
+ ASSERT(Vad->u.VadFlags.VadType != VadAwe);
+
+ /* Check if this is private commited memory, or an section-backed VAD */
+ if ((Vad->u.VadFlags.PrivateMemory == 0) && (Vad->ControlArea))
+ {
+ /* Tell caller about the next range */
+ *NextVa = (PVOID)((ULONG_PTR)Va + PAGE_SIZE);
+
+ /* Get the prototype PTE for this VAD */
+ ProtoPte = MI_GET_PROTOTYPE_PTE_FOR_VPN(Vad,
+ (ULONG_PTR)Va >> PAGE_SHIFT);
+ if (ProtoPte)
+ {
+ /* We should unlock the working set, but it's not being held! */
+
+ /* Is the prototype PTE actually valid (committed)? */
+ TempProtoPte = *ProtoPte;
+ if (TempProtoPte.u.Long)
+ {
+ /* Unless this is a memory-mapped file, handle it like private VAD */
+ State = MEM_COMMIT;
+ ASSERT(Vad->u.VadFlags.VadType != VadImageMap);
+ Protect = MmProtectToValue[Vad->u.VadFlags.Protection];
+ }
+
+ /* We should re-lock the working set */
+ }
+ }
+ else if (Vad->u.VadFlags.MemCommit)
{
/* This is committed memory */
State = MEM_COMMIT;
return State;
}
-/* PUBLIC FUNCTIONS ***********************************************************/
-
-/*
- * @unimplemented
- */
-PVOID
-NTAPI
-MmGetVirtualForPhysical(IN PHYSICAL_ADDRESS PhysicalAddress)
-{
- UNIMPLEMENTED;
- return 0;
-}
-
-/*
- * @unimplemented
- */
-PVOID
-NTAPI
-MmSecureVirtualMemory(IN PVOID Address,
- IN SIZE_T Length,
- IN ULONG Mode)
-{
- static BOOLEAN Warn; if (!Warn++) UNIMPLEMENTED;
- return Address;
-}
-
-/*
- * @unimplemented
- */
-VOID
-NTAPI
-MmUnsecureVirtualMemory(IN PVOID SecureMem)
-{
- static BOOLEAN Warn; if (!Warn++) UNIMPLEMENTED;
-}
-
-/* SYSTEM CALLS ***************************************************************/
-
+/* Worker for NtQueryVirtualMemory(MemoryBasicInformation): fills in a
+   MEMORY_BASIC_INFORMATION block for BaseAddress in the process named by
+   ProcessHandle. Handles the above-VAD/shared-user-data special ranges,
+   free (no covering VAD) ranges, and VAD-backed ranges -- section views
+   go through the old-Mm MmQuerySectionView path, everything else is
+   coalesced with MiQueryAddressState under the shared working-set lock.
+   NOTE(review): the output buffer is assumed to have been probed already
+   by the NtQueryVirtualMemory caller, as the comments below state. */
NTSTATUS
NTAPI
-NtReadVirtualMemory(IN HANDLE ProcessHandle,
- IN PVOID BaseAddress,
- OUT PVOID Buffer,
- IN SIZE_T NumberOfBytesToRead,
- OUT PSIZE_T NumberOfBytesRead OPTIONAL)
+MiQueryMemoryBasicInformation(IN HANDLE ProcessHandle,
+ IN PVOID BaseAddress,
+ OUT PVOID MemoryInformation,
+ IN SIZE_T MemoryInformationLength,
+ OUT PSIZE_T ReturnLength)
{
- KPROCESSOR_MODE PreviousMode = ExGetPreviousMode();
- PEPROCESS Process;
+ PEPROCESS TargetProcess;
NTSTATUS Status = STATUS_SUCCESS;
- SIZE_T BytesRead = 0;
- PAGED_CODE();
+ PMMVAD Vad = NULL;
+ PVOID Address, NextAddress;
+ BOOLEAN Found = FALSE;
+ ULONG NewProtect, NewState;
+ ULONG_PTR BaseVpn;
+ MEMORY_BASIC_INFORMATION MemoryInfo;
+ KAPC_STATE ApcState;
+ KPROCESSOR_MODE PreviousMode = ExGetPreviousMode();
+ PMEMORY_AREA MemoryArea;
+ SIZE_T ResultLength;
- //
- // Check if we came from user mode
- //
- if (PreviousMode != KernelMode)
+ /* Check for illegal addresses in user-space, or the shared memory area */
+ if ((BaseAddress > MM_HIGHEST_VAD_ADDRESS) ||
+ (PAGE_ALIGN(BaseAddress) == (PVOID)MM_SHARED_USER_DATA_VA))
{
- //
- // Validate the read addresses
- //
- if ((((ULONG_PTR)BaseAddress + NumberOfBytesToRead) < (ULONG_PTR)BaseAddress) ||
- (((ULONG_PTR)Buffer + NumberOfBytesToRead) < (ULONG_PTR)Buffer) ||
- (((ULONG_PTR)BaseAddress + NumberOfBytesToRead) > MmUserProbeAddress) ||
- (((ULONG_PTR)Buffer + NumberOfBytesToRead) > MmUserProbeAddress))
- {
- //
- // Don't allow to write into kernel space
- //
- return STATUS_ACCESS_VIOLATION;
- }
+ Address = PAGE_ALIGN(BaseAddress);
- //
+ /* Make up an info structure describing this range */
+ MemoryInfo.BaseAddress = Address;
+ MemoryInfo.AllocationProtect = PAGE_READONLY;
+ MemoryInfo.Type = MEM_PRIVATE;
+
+ /* Special case for shared data */
+ if (Address == (PVOID)MM_SHARED_USER_DATA_VA)
+ {
+ MemoryInfo.AllocationBase = (PVOID)MM_SHARED_USER_DATA_VA;
+ MemoryInfo.State = MEM_COMMIT;
+ MemoryInfo.Protect = PAGE_READONLY;
+ MemoryInfo.RegionSize = PAGE_SIZE;
+ }
+ else
+ {
+ MemoryInfo.AllocationBase = (PCHAR)MM_HIGHEST_VAD_ADDRESS + 1;
+ MemoryInfo.State = MEM_RESERVE;
+ MemoryInfo.Protect = PAGE_NOACCESS;
+ MemoryInfo.RegionSize = (ULONG_PTR)MM_HIGHEST_USER_ADDRESS + 1 - (ULONG_PTR)Address;
+ }
+
+ /* Return the data, NtQueryInformation already probed it */
+ if (PreviousMode != KernelMode)
+ {
+ _SEH2_TRY
+ {
+ *(PMEMORY_BASIC_INFORMATION)MemoryInformation = MemoryInfo;
+ if (ReturnLength) *ReturnLength = sizeof(MEMORY_BASIC_INFORMATION);
+ }
+ _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
+ {
+ Status = _SEH2_GetExceptionCode();
+ }
+ _SEH2_END;
+ }
+ else
+ {
+ *(PMEMORY_BASIC_INFORMATION)MemoryInformation = MemoryInfo;
+ if (ReturnLength) *ReturnLength = sizeof(MEMORY_BASIC_INFORMATION);
+ }
+
+ return Status;
+ }
+
+ /* Check if this is for a local or remote process */
+ if (ProcessHandle == NtCurrentProcess())
+ {
+ TargetProcess = PsGetCurrentProcess();
+ }
+ else
+ {
+ /* Reference the target process */
+ Status = ObReferenceObjectByHandle(ProcessHandle,
+ PROCESS_QUERY_INFORMATION,
+ PsProcessType,
+ ExGetPreviousMode(),
+ (PVOID*)&TargetProcess,
+ NULL);
+ if (!NT_SUCCESS(Status)) return Status;
+
+ /* Attach to it now */
+ KeStackAttachProcess(&TargetProcess->Pcb, &ApcState);
+ }
+
+ /* Lock the address space and make sure the process isn't already dead */
+ MmLockAddressSpace(&TargetProcess->Vm);
+ if (TargetProcess->VmDeleted)
+ {
+ /* Unlock the address space of the process */
+ MmUnlockAddressSpace(&TargetProcess->Vm);
+
+ /* Check if we were attached */
+ if (ProcessHandle != NtCurrentProcess())
+ {
+ /* Detach and dereference the process */
+ KeUnstackDetachProcess(&ApcState);
+ ObDereferenceObject(TargetProcess);
+ }
+
+ /* Bail out */
+ DPRINT1("Process is dying\n");
+ return STATUS_PROCESS_IS_TERMINATING;
+ }
+
+ /* Loop the VADs */
+ ASSERT(TargetProcess->VadRoot.NumberGenericTableElements);
+ if (TargetProcess->VadRoot.NumberGenericTableElements)
+ {
+ /* Scan on the right */
+ Vad = (PMMVAD)TargetProcess->VadRoot.BalancedRoot.RightChild;
+ BaseVpn = (ULONG_PTR)BaseAddress >> PAGE_SHIFT;
+ while (Vad)
+ {
+ /* Check if this VAD covers the allocation range */
+ if ((BaseVpn >= Vad->StartingVpn) &&
+ (BaseVpn <= Vad->EndingVpn))
+ {
+ /* We're done */
+ Found = TRUE;
+ break;
+ }
+
+ /* Check if this VAD is too high */
+ if (BaseVpn < Vad->StartingVpn)
+ {
+ /* Stop if there is no left child */
+ if (!Vad->LeftChild) break;
+
+ /* Search on the left next */
+ Vad = Vad->LeftChild;
+ }
+ else
+ {
+ /* Then this VAD is too low, keep searching on the right */
+ ASSERT(BaseVpn > Vad->EndingVpn);
+
+ /* Stop if there is no right child */
+ if (!Vad->RightChild) break;
+
+ /* Search on the right next */
+ Vad = Vad->RightChild;
+ }
+ }
+ }
+
+ /* Was a VAD found? */
+ if (!Found)
+ {
+ Address = PAGE_ALIGN(BaseAddress);
+
+ /* Calculate region size */
+ if (Vad)
+ {
+ if (Vad->StartingVpn >= BaseVpn)
+ {
+ /* Region size is the free space till the start of that VAD */
+ MemoryInfo.RegionSize = (ULONG_PTR)(Vad->StartingVpn << PAGE_SHIFT) - (ULONG_PTR)Address;
+ }
+ else
+ {
+ /* Get the next VAD */
+ Vad = (PMMVAD)MiGetNextNode((PMMADDRESS_NODE)Vad);
+ if (Vad)
+ {
+ /* Region size is the free space till the start of that VAD */
+ MemoryInfo.RegionSize = (ULONG_PTR)(Vad->StartingVpn << PAGE_SHIFT) - (ULONG_PTR)Address;
+ }
+ else
+ {
+ /* Maximum possible region size with that base address */
+ MemoryInfo.RegionSize = (PCHAR)MM_HIGHEST_VAD_ADDRESS + 1 - (PCHAR)Address;
+ }
+ }
+ }
+ else
+ {
+ /* Maximum possible region size with that base address */
+ MemoryInfo.RegionSize = (PCHAR)MM_HIGHEST_VAD_ADDRESS + 1 - (PCHAR)Address;
+ }
+
+ /* Unlock the address space of the process */
+ MmUnlockAddressSpace(&TargetProcess->Vm);
+
+ /* Check if we were attached */
+ if (ProcessHandle != NtCurrentProcess())
+ {
+ /* Detach and dereference the process */
+ KeUnstackDetachProcess(&ApcState);
+ ObDereferenceObject(TargetProcess);
+ }
+
+ /* Build the rest of the initial information block */
+ MemoryInfo.BaseAddress = Address;
+ MemoryInfo.AllocationBase = NULL;
+ MemoryInfo.AllocationProtect = 0;
+ MemoryInfo.State = MEM_FREE;
+ MemoryInfo.Protect = PAGE_NOACCESS;
+ MemoryInfo.Type = 0;
+
+ /* Return the data, NtQueryInformation already probed it */
+ if (PreviousMode != KernelMode)
+ {
+ _SEH2_TRY
+ {
+ *(PMEMORY_BASIC_INFORMATION)MemoryInformation = MemoryInfo;
+ if (ReturnLength) *ReturnLength = sizeof(MEMORY_BASIC_INFORMATION);
+ }
+ _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
+ {
+ Status = _SEH2_GetExceptionCode();
+ }
+ _SEH2_END;
+ }
+ else
+ {
+ *(PMEMORY_BASIC_INFORMATION)MemoryInformation = MemoryInfo;
+ if (ReturnLength) *ReturnLength = sizeof(MEMORY_BASIC_INFORMATION);
+ }
+
+ return Status;
+ }
+
+ /* Set the correct memory type based on what kind of VAD this is */
+ if ((Vad->u.VadFlags.PrivateMemory) ||
+ (Vad->u.VadFlags.VadType == VadRotatePhysical))
+ {
+ MemoryInfo.Type = MEM_PRIVATE;
+ }
+ else if (Vad->u.VadFlags.VadType == VadImageMap)
+ {
+ MemoryInfo.Type = MEM_IMAGE;
+ }
+ else
+ {
+ MemoryInfo.Type = MEM_MAPPED;
+ }
+
+ /* Find the memory area the specified address belongs to */
+ MemoryArea = MmLocateMemoryAreaByAddress(&TargetProcess->Vm, BaseAddress);
+ ASSERT(MemoryArea != NULL);
+
+ /* Determine information dependent on the memory area type */
+ if (MemoryArea->Type == MEMORY_AREA_SECTION_VIEW)
+ {
+ Status = MmQuerySectionView(MemoryArea, BaseAddress, &MemoryInfo, &ResultLength);
+ if (!NT_SUCCESS(Status))
+ {
+ DPRINT1("MmQuerySectionView failed. MemoryArea=%p (%p-%p), BaseAddress=%p\n",
+ MemoryArea, MemoryArea->StartingAddress, MemoryArea->EndingAddress, BaseAddress);
+ NT_ASSERT(NT_SUCCESS(Status));
+ }
+ }
+ else
+ {
+ /* Build the initial information block */
+ Address = PAGE_ALIGN(BaseAddress);
+ MemoryInfo.BaseAddress = Address;
+ MemoryInfo.AllocationBase = (PVOID)(Vad->StartingVpn << PAGE_SHIFT);
+ MemoryInfo.AllocationProtect = MmProtectToValue[Vad->u.VadFlags.Protection];
+ MemoryInfo.Type = MEM_PRIVATE;
+
+ /* Acquire the working set lock (shared is enough) */
+ MiLockProcessWorkingSetShared(TargetProcess, PsGetCurrentThread());
+
+ /* Find the largest chunk of memory which has the same state and protection mask */
+ MemoryInfo.State = MiQueryAddressState(Address,
+ Vad,
+ TargetProcess,
+ &MemoryInfo.Protect,
+ &NextAddress);
+ Address = NextAddress;
+ while (((ULONG_PTR)Address >> PAGE_SHIFT) <= Vad->EndingVpn)
+ {
+ /* Keep going unless the state or protection mask changed */
+ NewState = MiQueryAddressState(Address, Vad, TargetProcess, &NewProtect, &NextAddress);
+ if ((NewState != MemoryInfo.State) || (NewProtect != MemoryInfo.Protect)) break;
+ Address = NextAddress;
+ }
+
+ /* Release the working set lock */
+ MiUnlockProcessWorkingSetShared(TargetProcess, PsGetCurrentThread());
+
+ /* Check if we went outside of the VAD */
+ if (((ULONG_PTR)Address >> PAGE_SHIFT) > Vad->EndingVpn)
+ {
+ /* Set the end of the VAD as the end address */
+ Address = (PVOID)((Vad->EndingVpn + 1) << PAGE_SHIFT);
+ }
+
+ /* Now that we know the last VA address, calculate the region size */
+ MemoryInfo.RegionSize = ((ULONG_PTR)Address - (ULONG_PTR)MemoryInfo.BaseAddress);
+ }
+
+ /* Unlock the address space of the process */
+ MmUnlockAddressSpace(&TargetProcess->Vm);
+
+ /* Check if we were attached */
+ if (ProcessHandle != NtCurrentProcess())
+ {
+ /* Detach and dereference the process */
+ KeUnstackDetachProcess(&ApcState);
+ ObDereferenceObject(TargetProcess);
+ }
+
+ /* Return the data, NtQueryInformation already probed it */
+ if (PreviousMode != KernelMode)
+ {
+ _SEH2_TRY
+ {
+ *(PMEMORY_BASIC_INFORMATION)MemoryInformation = MemoryInfo;
+ if (ReturnLength) *ReturnLength = sizeof(MEMORY_BASIC_INFORMATION);
+ }
+ _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
+ {
+ Status = _SEH2_GetExceptionCode();
+ }
+ _SEH2_END;
+ }
+ else
+ {
+ *(PMEMORY_BASIC_INFORMATION)MemoryInformation = MemoryInfo;
+ if (ReturnLength) *ReturnLength = sizeof(MEMORY_BASIC_INFORMATION);
+ }
+
+ /* All went well */
+ DPRINT("Base: %p AllocBase: %p AllocProtect: %lx Protect: %lx "
+ "State: %lx Type: %lx Size: %lx\n",
+ MemoryInfo.BaseAddress, MemoryInfo.AllocationBase,
+ MemoryInfo.AllocationProtect, MemoryInfo.Protect,
+ MemoryInfo.State, MemoryInfo.Type, MemoryInfo.RegionSize);
+
+ return Status;
+}
+
+/* Returns TRUE when every page in [StartingAddress, EndingAddress] of the
+   given VAD is committed -- either through a non-decommitted PTE, or, for
+   demand-zero PTEs/page tables, through the VAD's MemCommit flag -- and
+   FALSE as soon as a decommitted page is found. */
+BOOLEAN
+NTAPI
+MiIsEntireRangeCommitted(IN ULONG_PTR StartingAddress,
+ IN ULONG_PTR EndingAddress,
+ IN PMMVAD Vad,
+ IN PEPROCESS Process)
+{
+ PMMPTE PointerPte, LastPte, PointerPde;
+ BOOLEAN OnBoundary = TRUE;
+ PAGED_CODE();
+
+ /* Get the PDE and PTE addresses */
+ PointerPde = MiAddressToPde(StartingAddress);
+ PointerPte = MiAddressToPte(StartingAddress);
+ LastPte = MiAddressToPte(EndingAddress);
+
+ /* Loop all the PTEs */
+ while (PointerPte <= LastPte)
+ {
+ /* Check if we've hit a new PDE boundary */
+ if (OnBoundary)
+ {
+ /* Is this PDE demand zero? */
+ PointerPde = MiAddressToPte(PointerPte);
+ if (PointerPde->u.Long != 0)
+ {
+ /* It isn't -- is it valid? */
+ if (PointerPde->u.Hard.Valid == 0)
+ {
+ /* Nope, fault it in */
+ PointerPte = MiPteToAddress(PointerPde);
+ MiMakeSystemAddressValid(PointerPte, Process);
+ }
+ }
+ else
+ {
+ /* The PDE is demand-zero, so skip this whole page table */
+ PointerPde++;
+ PointerPte = MiPteToAddress(PointerPde);
+
+ /* Is the entire VAD committed? If not, fail */
+ if (!Vad->u.VadFlags.MemCommit) return FALSE;
+
+ /* Everything is committed so far past the range, return true */
+ if (PointerPte > LastPte) return TRUE;
+ }
+ }
+
+ /* Is the PTE demand zero? */
+ if (PointerPte->u.Long == 0)
+ {
+ /* Is the entire VAD committed? If not, fail */
+ if (!Vad->u.VadFlags.MemCommit) return FALSE;
+ }
+ else
+ {
+ /* It isn't -- is it a decommitted, invalid, or faulted PTE? */
+ if ((PointerPte->u.Soft.Protection == MM_DECOMMIT) &&
+ (PointerPte->u.Hard.Valid == 0) &&
+ ((PointerPte->u.Soft.Prototype == 0) ||
+ (PointerPte->u.Soft.PageFileHigh == MI_PTE_LOOKUP_NEEDED)))
+ {
+ /* Then part of the range is decommitted, so fail */
+ return FALSE;
+ }
+ }
+
+ /* Move to the next PTE */
+ PointerPte++;
+ OnBoundary = MiIsPteOnPdeBoundary(PointerPte);
+ }
+
+ /* All PTEs seem valid, and no VAD checks failed, the range is okay */
+ return TRUE;
+}
+
+/* ReactOS-specific path: changes the protection of an old-style memory-area
+   section view. Called from MiProtectVirtualMemory (the "evil hack") when the
+   target address belongs to a MEMORY_AREA_SECTION_VIEW. */
+NTSTATUS
+NTAPI
+MiRosProtectVirtualMemory(IN PEPROCESS Process,
+                          IN OUT PVOID *BaseAddress,
+                          IN OUT PSIZE_T NumberOfBytesToProtect,
+                          IN ULONG NewAccessProtection,
+                          OUT PULONG OldAccessProtection OPTIONAL)
+{
+    PMEMORY_AREA MemoryArea;
+    PMMSUPPORT AddressSpace;
+    ULONG OldAccessProtection_;
+    NTSTATUS Status;
+
+    /* Page-align the range: round the end up and the base down */
+    *NumberOfBytesToProtect = PAGE_ROUND_UP((ULONG_PTR)(*BaseAddress) + (*NumberOfBytesToProtect)) - PAGE_ROUND_DOWN(*BaseAddress);
+    *BaseAddress = (PVOID)PAGE_ROUND_DOWN(*BaseAddress);
+
+    /* Lock the address space and look up the memory area; bail out if it is
+       gone or being torn down */
+    AddressSpace = &Process->Vm;
+    MmLockAddressSpace(AddressSpace);
+    MemoryArea = MmLocateMemoryAreaByAddress(AddressSpace, *BaseAddress);
+    if (MemoryArea == NULL || MemoryArea->DeleteInProgress)
+    {
+        MmUnlockAddressSpace(AddressSpace);
+        return STATUS_UNSUCCESSFUL;
+    }
+
+    /* The out parameter is optional -- give the callee a local to write to */
+    if (OldAccessProtection == NULL) OldAccessProtection = &OldAccessProtection_;
+
+    ASSERT(MemoryArea->Type == MEMORY_AREA_SECTION_VIEW);
+    Status = MmProtectSectionView(AddressSpace,
+                                  MemoryArea,
+                                  *BaseAddress,
+                                  *NumberOfBytesToProtect,
+                                  NewAccessProtection,
+                                  OldAccessProtection);
+
+    MmUnlockAddressSpace(AddressSpace);
+
+    return Status;
+}
+
+/* Changes the protection on a range of pages in the target process. Handles
+   private memory fully; section protection is only stubbed. On success the
+   rounded range is returned through BaseAddress/NumberOfBytesToProtect and
+   the previous protection through OldAccessProtection (if provided).
+   FIX(review): OldAccessProtection is declared OPTIONAL but was dereferenced
+   unconditionally on the success path -- it is now guarded, matching how
+   MiRosProtectVirtualMemory treats the same parameter. */
+NTSTATUS
+NTAPI
+MiProtectVirtualMemory(IN PEPROCESS Process,
+                       IN OUT PVOID *BaseAddress,
+                       IN OUT PSIZE_T NumberOfBytesToProtect,
+                       IN ULONG NewAccessProtection,
+                       OUT PULONG OldAccessProtection OPTIONAL)
+{
+    PMEMORY_AREA MemoryArea;
+    PMMVAD Vad;
+    PMMSUPPORT AddressSpace;
+    ULONG_PTR StartingAddress, EndingAddress;
+    PMMPTE PointerPde, PointerPte, LastPte;
+    MMPTE PteContents;
+    PMMPFN Pfn1;
+    ULONG ProtectionMask, OldProtect;
+    BOOLEAN Committed;
+    NTSTATUS Status = STATUS_SUCCESS;
+    PETHREAD Thread = PsGetCurrentThread();
+    TABLE_SEARCH_RESULT Result;
+
+    /* Calculate the page-aligned base and the last byte of the range */
+    StartingAddress = (ULONG_PTR)PAGE_ALIGN((*BaseAddress));
+    EndingAddress = (((ULONG_PTR)*BaseAddress + *NumberOfBytesToProtect - 1) | (PAGE_SIZE - 1));
+
+    /* Calculate the protection mask and make sure it's valid */
+    ProtectionMask = MiMakeProtectionMask(NewAccessProtection);
+    if (ProtectionMask == MM_INVALID_PROTECTION)
+    {
+        DPRINT1("Invalid protection mask\n");
+        return STATUS_INVALID_PAGE_PROTECTION;
+    }
+
+    /* Check for ROS specific memory area */
+    MemoryArea = MmLocateMemoryAreaByAddress(&Process->Vm, *BaseAddress);
+    if ((MemoryArea) && (MemoryArea->Type == MEMORY_AREA_SECTION_VIEW))
+    {
+        /* Evil hack */
+        return MiRosProtectVirtualMemory(Process,
+                                         BaseAddress,
+                                         NumberOfBytesToProtect,
+                                         NewAccessProtection,
+                                         OldAccessProtection);
+    }
+
+    /* Lock the address space and make sure the process isn't already dead */
+    AddressSpace = MmGetCurrentAddressSpace();
+    MmLockAddressSpace(AddressSpace);
+    if (Process->VmDeleted)
+    {
+        DPRINT1("Process is dying\n");
+        Status = STATUS_PROCESS_IS_TERMINATING;
+        goto FailPath;
+    }
+
+    /* Get the VAD for this address range, and make sure it exists */
+    Result = MiCheckForConflictingNode(StartingAddress >> PAGE_SHIFT,
+                                       EndingAddress >> PAGE_SHIFT,
+                                       &Process->VadRoot,
+                                       (PMMADDRESS_NODE*)&Vad);
+    if (Result != TableFoundNode)
+    {
+        DPRINT("Could not find a VAD for this allocation\n");
+        Status = STATUS_CONFLICTING_ADDRESSES;
+        goto FailPath;
+    }
+
+    /* Make sure the address is within this VAD's boundaries */
+    if ((((ULONG_PTR)StartingAddress >> PAGE_SHIFT) < Vad->StartingVpn) ||
+        (((ULONG_PTR)EndingAddress >> PAGE_SHIFT) > Vad->EndingVpn))
+    {
+        Status = STATUS_CONFLICTING_ADDRESSES;
+        goto FailPath;
+    }
+
+    /* These kinds of VADs are not supported atm */
+    if ((Vad->u.VadFlags.VadType == VadAwe) ||
+        (Vad->u.VadFlags.VadType == VadDevicePhysicalMemory) ||
+        (Vad->u.VadFlags.VadType == VadLargePages))
+    {
+        DPRINT1("Illegal VAD for attempting to set protection\n");
+        Status = STATUS_CONFLICTING_ADDRESSES;
+        goto FailPath;
+    }
+
+    /* Check for a VAD whose protection can't be changed */
+    if (Vad->u.VadFlags.NoChange == 1)
+    {
+        DPRINT1("Trying to change protection of a NoChange VAD\n");
+        Status = STATUS_INVALID_PAGE_PROTECTION;
+        goto FailPath;
+    }
+
+    /* Is this section, or private memory? */
+    if (Vad->u.VadFlags.PrivateMemory == 0)
+    {
+        /* Not yet supported */
+        if (Vad->u.VadFlags.VadType == VadLargePageSection)
+        {
+            DPRINT1("Illegal VAD for attempting to set protection\n");
+            Status = STATUS_CONFLICTING_ADDRESSES;
+            goto FailPath;
+        }
+
+        /* Rotate VADs are not yet supported */
+        if (Vad->u.VadFlags.VadType == VadRotatePhysical)
+        {
+            DPRINT1("Illegal VAD for attempting to set protection\n");
+            Status = STATUS_CONFLICTING_ADDRESSES;
+            goto FailPath;
+        }
+
+        /* Not valid on section files */
+        if (NewAccessProtection & (PAGE_NOCACHE | PAGE_WRITECOMBINE))
+        {
+            /* Fail */
+            DPRINT1("Invalid protection flags for section\n");
+            Status = STATUS_INVALID_PARAMETER_4;
+            goto FailPath;
+        }
+
+        /* Check if data or page file mapping protection PTE is compatible */
+        if (!Vad->ControlArea->u.Flags.Image)
+        {
+            /* Not yet */
+            DPRINT1("Fixme: Not checking for valid protection\n");
+        }
+
+        /* This is a section, and this is not yet supported */
+        DPRINT1("Section protection not yet supported\n");
+        OldProtect = 0;
+    }
+    else
+    {
+        /* Private memory, check protection flags */
+        if ((NewAccessProtection & PAGE_WRITECOPY) ||
+            (NewAccessProtection & PAGE_EXECUTE_WRITECOPY))
+        {
+            DPRINT1("Invalid protection flags for private memory\n");
+            Status = STATUS_INVALID_PARAMETER_4;
+            goto FailPath;
+        }
+
+        /* Lock the working set */
+        MiLockProcessWorkingSetUnsafe(Process, Thread);
+
+        /* Check if all pages in this range are committed */
+        Committed = MiIsEntireRangeCommitted(StartingAddress,
+                                             EndingAddress,
+                                             Vad,
+                                             Process);
+        if (!Committed)
+        {
+            /* Fail */
+            DPRINT1("The entire range is not committed\n");
+            Status = STATUS_NOT_COMMITTED;
+            MiUnlockProcessWorkingSetUnsafe(Process, Thread);
+            goto FailPath;
+        }
+
+        /* Compute starting and ending PTE and PDE addresses */
+        PointerPde = MiAddressToPde(StartingAddress);
+        PointerPte = MiAddressToPte(StartingAddress);
+        LastPte = MiAddressToPte(EndingAddress);
+
+        /* Make this PDE valid */
+        MiMakePdeExistAndMakeValid(PointerPde, Process, MM_NOIRQL);
+
+        /* Save protection of the first page */
+        if (PointerPte->u.Long != 0)
+        {
+            /* Capture the page protection, then make the PDE valid again.
+               NOTE(review): the revalidation guards against the PDE going
+               away during MiGetPageProtection -- confirm this is required */
+            OldProtect = MiGetPageProtection(PointerPte);
+            MiMakePdeExistAndMakeValid(PointerPde, Process, MM_NOIRQL);
+        }
+        else
+        {
+            /* Grab the old protection from the VAD itself */
+            OldProtect = MmProtectToValue[Vad->u.VadFlags.Protection];
+        }
+
+        /* Loop all the PTEs now */
+        while (PointerPte <= LastPte)
+        {
+            /* Check if we've crossed a PDE boundary and make the new PDE valid too */
+            if (MiIsPteOnPdeBoundary(PointerPte))
+            {
+                PointerPde = MiAddressToPte(PointerPte);
+                MiMakePdeExistAndMakeValid(PointerPde, Process, MM_NOIRQL);
+            }
+
+            /* Capture the PTE and check if it was empty */
+            PteContents = *PointerPte;
+            if (PteContents.u.Long == 0)
+            {
+                /* This used to be a zero PTE and it no longer is, so we must add a
+                   reference to the pagetable. */
+                MiIncrementPageTableReferences(MiPteToAddress(PointerPte));
+            }
+
+            /* Check what kind of PTE we are dealing with */
+            if (PteContents.u.Hard.Valid == 1)
+            {
+                /* Get the PFN entry */
+                Pfn1 = MiGetPfnEntry(PFN_FROM_PTE(&PteContents));
+
+                /* We don't support this yet */
+                ASSERT(Pfn1->u3.e1.PrototypePte == 0);
+
+                /* Check if the page should not be accessible at all */
+                if ((NewAccessProtection & PAGE_NOACCESS) ||
+                    (NewAccessProtection & PAGE_GUARD))
+                {
+                    /* The page should be in the WS and we should make it transition now */
+                    DPRINT1("Making valid page invalid is not yet supported!\n");
+                    Status = STATUS_NOT_IMPLEMENTED;
+                    /* Unlock the working set */
+                    MiUnlockProcessWorkingSetUnsafe(Process, Thread);
+                    goto FailPath;
+                }
+
+                /* Write the protection mask and write it with a TLB flush */
+                Pfn1->OriginalPte.u.Soft.Protection = ProtectionMask;
+                MiFlushTbAndCapture(Vad,
+                                    PointerPte,
+                                    ProtectionMask,
+                                    Pfn1,
+                                    TRUE);
+            }
+            else
+            {
+                /* We don't support these cases yet */
+                ASSERT(PteContents.u.Soft.Prototype == 0);
+                ASSERT(PteContents.u.Soft.Transition == 0);
+
+                /* The PTE is already demand-zero, just update the protection mask */
+                PteContents.u.Soft.Protection = ProtectionMask;
+                MI_WRITE_INVALID_PTE(PointerPte, PteContents);
+                ASSERT(PointerPte->u.Long != 0);
+            }
+
+            /* Move to the next PTE */
+            PointerPte++;
+        }
+
+        /* Unlock the working set */
+        MiUnlockProcessWorkingSetUnsafe(Process, Thread);
+    }
+
+    /* Unlock the address space */
+    MmUnlockAddressSpace(AddressSpace);
+
+    /* Return parameters and success */
+    *NumberOfBytesToProtect = EndingAddress - StartingAddress + 1;
+    *BaseAddress = (PVOID)StartingAddress;
+
+    /* The old protection is only returned if the caller asked for it */
+    if (OldAccessProtection != NULL) *OldAccessProtection = OldProtect;
+    return STATUS_SUCCESS;
+
+FailPath:
+    /* Unlock the address space and return the failure code */
+    MmUnlockAddressSpace(AddressSpace);
+    return Status;
+}
+
+/* Makes sure the entire paging-structure chain (PXE -> PPE -> PDE) for the
+   given PDE is present and valid, faulting each level in top-down. Must be
+   called with all APCs disabled and without the PFN lock (MM_NOIRQL). */
+VOID
+NTAPI
+MiMakePdeExistAndMakeValid(IN PMMPTE PointerPde,
+                           IN PEPROCESS TargetProcess,
+                           IN KIRQL OldIrql)
+{
+    PMMPTE PointerPte, PointerPpe, PointerPxe;
+
+    //
+    // Sanity checks. The latter is because we only use this function with the
+    // PFN lock not held, so it may go away in the future.
+    //
+    ASSERT(KeAreAllApcsDisabled() == TRUE);
+    ASSERT(OldIrql == MM_NOIRQL);
+
+    //
+    // Also get the PPE and PXE. This is okay not to #ifdef because they will
+    // return the same address as the PDE on 2-level page table systems.
+    //
+    // If everything is already valid, there is nothing to do.
+    //
+    PointerPpe = MiAddressToPte(PointerPde);
+    PointerPxe = MiAddressToPde(PointerPde);
+    if ((PointerPxe->u.Hard.Valid) &&
+        (PointerPpe->u.Hard.Valid) &&
+        (PointerPde->u.Hard.Valid))
+    {
+        return;
+    }
+
+    //
+    // At least something is invalid, so begin by getting the PTE for the PDE itself
+    // and then lookup each additional level. We must do it in this precise order
+    // because the pagfault.c code (as well as in Windows) depends that the next
+    // level up (higher) must be valid when faulting a lower level
+    //
+    PointerPte = MiPteToAddress(PointerPde);
+    do
+    {
+        //
+        // Make sure APCs continued to be disabled
+        //
+        ASSERT(KeAreAllApcsDisabled() == TRUE);
+
+        //
+        // First, make the PXE valid if needed
+        //
+        if (!PointerPxe->u.Hard.Valid)
+        {
+            MiMakeSystemAddressValid(PointerPpe, TargetProcess);
+            ASSERT(PointerPxe->u.Hard.Valid == 1);
+        }
+
+        //
+        // Next, the PPE
+        //
+        if (!PointerPpe->u.Hard.Valid)
+        {
+            MiMakeSystemAddressValid(PointerPde, TargetProcess);
+            ASSERT(PointerPpe->u.Hard.Valid == 1);
+        }
+
+        //
+        // And finally, make the PDE itself valid.
+        //
+        MiMakeSystemAddressValid(PointerPte, TargetProcess);
+
+        //
+        // This should've worked the first time so the loop is really just for
+        // show -- ASSERT that we're actually NOT going to be looping.
+        //
+        ASSERT(PointerPxe->u.Hard.Valid == 1);
+        ASSERT(PointerPpe->u.Hard.Valid == 1);
+        ASSERT(PointerPde->u.Hard.Valid == 1);
+    } while (!(PointerPxe->u.Hard.Valid) ||
+             !(PointerPpe->u.Hard.Valid) ||
+             !(PointerPde->u.Hard.Valid));
+}
+
+/* Takes a batch of currently-valid PTEs, drops their page and page-table
+   references, writes the decommitted PTE pattern into each, and performs a
+   single TLB flush for the whole batch under the PFN lock. */
+VOID
+NTAPI
+MiProcessValidPteList(IN PMMPTE *ValidPteList,
+                      IN ULONG Count)
+{
+    KIRQL OldIrql;
+    ULONG i;
+    MMPTE TempPte;
+    PFN_NUMBER PageFrameIndex;
+    PMMPFN Pfn1, Pfn2;
+
+    //
+    // Acquire the PFN lock and loop all the PTEs in the list
+    //
+    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
+    for (i = 0; i != Count; i++)
+    {
+        //
+        // The PTE must currently be valid
+        //
+        TempPte = *ValidPteList[i];
+        ASSERT(TempPte.u.Hard.Valid == 1);
+
+        //
+        // Get the PFN entry for the page itself, and then for its page table
+        //
+        PageFrameIndex = PFN_FROM_PTE(&TempPte);
+        Pfn1 = MiGetPfnEntry(PageFrameIndex);
+        Pfn2 = MiGetPfnEntry(Pfn1->u4.PteFrame);
+
+        //
+        // Decrement the share count on the page table, and then on the page
+        // itself
+        //
+        MiDecrementShareCount(Pfn2, Pfn1->u4.PteFrame);
+        MI_SET_PFN_DELETED(Pfn1);
+        MiDecrementShareCount(Pfn1, PageFrameIndex);
+
+        //
+        // Make the page decommitted
+        //
+        MI_WRITE_INVALID_PTE(ValidPteList[i], MmDecommittedPte);
+    }
+
+    //
+    // All the PTEs have been dereferenced and made invalid, flush the TLB now
+    // and then release the PFN lock
+    //
+    KeFlushCurrentTb();
+    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
+}
+
+/* Decommits all pages from StartingAddress up to and including EndingPte,
+   batching currently-valid PTEs through MiProcessValidPteList so the TLB is
+   flushed once per batch. Returns the commit-reduction count used by the
+   caller for charge accounting. */
+ULONG
+NTAPI
+MiDecommitPages(IN PVOID StartingAddress,
+                IN PMMPTE EndingPte,
+                IN PEPROCESS Process,
+                IN PMMVAD Vad)
+{
+    PMMPTE PointerPde, PointerPte, CommitPte = NULL;
+    ULONG CommitReduction = 0;
+    PMMPTE ValidPteList[256];
+    ULONG PteCount = 0;
+    PMMPFN Pfn1;
+    MMPTE PteContents;
+    PETHREAD CurrentThread = PsGetCurrentThread();
+
+    //
+    // Get the PDE and PTE for the address, and lock the working set
+    // If this was a VAD for a MEM_COMMIT allocation, also figure out where the
+    // committed range ends so that we can do the right accounting.
+    //
+    PointerPde = MiAddressToPde(StartingAddress);
+    PointerPte = MiAddressToPte(StartingAddress);
+    if (Vad->u.VadFlags.MemCommit) CommitPte = MiAddressToPte(Vad->EndingVpn << PAGE_SHIFT);
+    MiLockProcessWorkingSetUnsafe(Process, CurrentThread);
+
+    //
+    // Make the PDE valid, and now loop through each page's worth of data
+    //
+    MiMakePdeExistAndMakeValid(PointerPde, Process, MM_NOIRQL);
+    while (PointerPte <= EndingPte)
+    {
+        //
+        // Check if we've crossed a PDE boundary
+        //
+        if (MiIsPteOnPdeBoundary(PointerPte))
+        {
+            //
+            // Get the new PDE and flush the valid PTEs we had built up until
+            // now. This helps reduce the amount of TLB flushing we have to do.
+            // Note that Windows does a much better job using timestamps and
+            // such, and does not flush the entire TLB all the time, but right
+            // now we have bigger problems to worry about than TLB flushing.
+            //
+            PointerPde = MiAddressToPde(StartingAddress);
+            if (PteCount)
+            {
+                MiProcessValidPteList(ValidPteList, PteCount);
+                PteCount = 0;
+            }
+
+            //
+            // Make this PDE valid
+            //
+            MiMakePdeExistAndMakeValid(PointerPde, Process, MM_NOIRQL);
+        }
+
+        //
+        // Read this PTE. It might be active or still demand-zero.
+        //
+        PteContents = *PointerPte;
+        if (PteContents.u.Long)
+        {
+            //
+            // The PTE is active. It might be valid and in a working set, or
+            // it might be a prototype PTE or paged out or even in transition.
+            //
+            if (PointerPte->u.Long == MmDecommittedPte.u.Long)
+            {
+                //
+                // It's already decommitted, so there's nothing for us to do here
+                //
+                CommitReduction++;
+            }
+            else
+            {
+                //
+                // Remove it from the counters, and check if it was valid or not
+                //
+                //Process->NumberOfPrivatePages--;
+                if (PteContents.u.Hard.Valid)
+                {
+                    //
+                    // It's valid. At this point make sure that it is not a ROS
+                    // PFN. Also, we don't support ProtoPTEs in this code path.
+                    //
+                    Pfn1 = MiGetPfnEntry(PteContents.u.Hard.PageFrameNumber);
+                    ASSERT(MI_IS_ROS_PFN(Pfn1) == FALSE);
+                    ASSERT(Pfn1->u3.e1.PrototypePte == FALSE);
+
+                    //
+                    // Flush any pending PTEs that we had not yet flushed, if our
+                    // list has gotten too big, then add this PTE to the flush list.
+                    //
+                    if (PteCount == 256)
+                    {
+                        MiProcessValidPteList(ValidPteList, PteCount);
+                        PteCount = 0;
+                    }
+                    ValidPteList[PteCount++] = PointerPte;
+                }
+                else
+                {
+                    //
+                    // We do not support any of these other scenarios at the moment
+                    //
+                    ASSERT(PteContents.u.Soft.Prototype == 0);
+                    ASSERT(PteContents.u.Soft.Transition == 0);
+                    ASSERT(PteContents.u.Soft.PageFileHigh == 0);
+
+                    //
+                    // So the only other possibility is that it is still a demand
+                    // zero PTE, in which case we undo the accounting we did
+                    // earlier and simply make the page decommitted.
+                    //
+                    //Process->NumberOfPrivatePages++;
+                    MI_WRITE_INVALID_PTE(PointerPte, MmDecommittedPte);
+                }
+            }
+        }
+        else
+        {
+            //
+            // This used to be a zero PTE and it no longer is, so we must add a
+            // reference to the pagetable.
+            //
+            MiIncrementPageTableReferences(StartingAddress);
+
+            //
+            // Next, we account for decommitted PTEs and make the PTE as such
+            //
+            if (PointerPte > CommitPte) CommitReduction++;
+            MI_WRITE_INVALID_PTE(PointerPte, MmDecommittedPte);
+        }
+
+        //
+        // Move to the next PTE and the next address
+        //
+        PointerPte++;
+        StartingAddress = (PVOID)((ULONG_PTR)StartingAddress + PAGE_SIZE);
+    }
+
+    //
+    // Flush any dangling PTEs from the loop in the last page table, and then
+    // release the working set and return the commit reduction accounting.
+    //
+    if (PteCount) MiProcessValidPteList(ValidPteList, PteCount);
+    MiUnlockProcessWorkingSetUnsafe(Process, CurrentThread);
+    return CommitReduction;
+}
+
+/* PUBLIC FUNCTIONS ***********************************************************/
+
+/*
+ * @unimplemented
+ */
+PVOID
+NTAPI
+MmGetVirtualForPhysical(IN PHYSICAL_ADDRESS PhysicalAddress)
+{
+    /* Stub: no physical-to-virtual translation yet, always returns NULL */
+    UNIMPLEMENTED;
+    return 0;
+}
+
+/*
+ * @unimplemented
+ */
+PVOID
+NTAPI
+MmSecureVirtualMemory(IN PVOID Address,
+                      IN SIZE_T Length,
+                      IN ULONG Mode)
+{
+    /* Stub: warn only once, then hand back the address itself as the
+       "secure handle" without actually securing anything */
+    static BOOLEAN Warn; if (!Warn++) UNIMPLEMENTED;
+    return Address;
+}
+
+/*
+ * @unimplemented
+ */
+VOID
+NTAPI
+MmUnsecureVirtualMemory(IN PVOID SecureMem)
+{
+    /* Stub: nothing to undo since MmSecureVirtualMemory secures nothing;
+       warn only once */
+    static BOOLEAN Warn; if (!Warn++) UNIMPLEMENTED;
+}
+
+/* SYSTEM CALLS ***************************************************************/
+
+NTSTATUS
+NTAPI
+NtReadVirtualMemory(IN HANDLE ProcessHandle,
+ IN PVOID BaseAddress,
+ OUT PVOID Buffer,
+ IN SIZE_T NumberOfBytesToRead,
+ OUT PSIZE_T NumberOfBytesRead OPTIONAL)
+{
+ KPROCESSOR_MODE PreviousMode = ExGetPreviousMode();
+ PEPROCESS Process;
+ NTSTATUS Status = STATUS_SUCCESS;
+ SIZE_T BytesRead = 0;
+ PAGED_CODE();
+
+ //
+ // Check if we came from user mode
+ //
+ if (PreviousMode != KernelMode)
+ {
+ //
+ // Validate the read addresses
+ //
+ if ((((ULONG_PTR)BaseAddress + NumberOfBytesToRead) < (ULONG_PTR)BaseAddress) ||
+ (((ULONG_PTR)Buffer + NumberOfBytesToRead) < (ULONG_PTR)Buffer) ||
+ (((ULONG_PTR)BaseAddress + NumberOfBytesToRead) > MmUserProbeAddress) ||
+ (((ULONG_PTR)Buffer + NumberOfBytesToRead) > MmUserProbeAddress))
+ {
+ //
+ // Don't allow to write into kernel space
+ //
+ return STATUS_ACCESS_VIOLATION;
+ }
+
+ //
// Enter SEH for probe
//
_SEH2_TRY
return Status;
}
+/* Returns TRUE if the page is already locked per the requested lock type(s):
+   MAP_PROCESS checks the working-set lock bit, MAP_SYSTEM the in-memory lock
+   bit. Lock state lives in the PFN's embedded WSLE (temporary scheme). */
+FORCEINLINE
+BOOLEAN
+MI_IS_LOCKED_VA(
+    PMMPFN Pfn1,
+    ULONG LockType)
+{
+    // HACK until we have proper WSLIST support
+    PMMWSLE Wsle = &Pfn1->Wsle;
+
+    if ((LockType & MAP_PROCESS) && (Wsle->u1.e1.LockedInWs))
+        return TRUE;
+    if ((LockType & MAP_SYSTEM) && (Wsle->u1.e1.LockedInMemory))
+        return TRUE;
+
+    return FALSE;
+}
+
+/* Sets the requested lock bit(s) on the page. Takes a PFN reference only on
+   the first transition from fully unlocked to locked, so stacking both lock
+   types holds a single reference. */
+FORCEINLINE
+VOID
+MI_LOCK_VA(
+    PMMPFN Pfn1,
+    ULONG LockType)
+{
+    // HACK until we have proper WSLIST support
+    PMMWSLE Wsle = &Pfn1->Wsle;
+
+    /* Reference the page only when it was not locked in any way before */
+    if (!Wsle->u1.e1.LockedInWs &&
+        !Wsle->u1.e1.LockedInMemory)
+    {
+        MiReferenceProbedPageAndBumpLockCount(Pfn1);
+    }
+
+    if (LockType & MAP_PROCESS)
+        Wsle->u1.e1.LockedInWs = 1;
+    if (LockType & MAP_SYSTEM)
+        Wsle->u1.e1.LockedInMemory = 1;
+}
+
+/* Clears the requested lock bit(s) on the page and drops the PFN reference
+   taken by MI_LOCK_VA once no lock bits remain set. */
+FORCEINLINE
+VOID
+MI_UNLOCK_VA(
+    PMMPFN Pfn1,
+    ULONG LockType)
+{
+    // HACK until we have proper WSLIST support
+    PMMWSLE Wsle = &Pfn1->Wsle;
+
+    if (LockType & MAP_PROCESS)
+        Wsle->u1.e1.LockedInWs = 0;
+    if (LockType & MAP_SYSTEM)
+        Wsle->u1.e1.LockedInMemory = 0;
+
+    /* Dereference only when the last lock type was removed */
+    if (!Wsle->u1.e1.LockedInWs &&
+        !Wsle->u1.e1.LockedInMemory)
+    {
+        MiDereferencePfnAndDropLockCount(Pfn1);
+    }
+}
+
+/* Page-aligns *BaseAddress/*RegionSize (computing *EndAddress) and verifies
+   that every VAD covering the range exists and has a type compatible with a
+   lock/unlock operation. On an incompatible VAD the range is truncated to the
+   compatible prefix and STATUS_INCOMPATIBLE_FILE_MAP is returned; a hole with
+   no VAD at all yields STATUS_ACCESS_VIOLATION. */
+static
+NTSTATUS
+MiCheckVadsForLockOperation(
+    _Inout_ PVOID *BaseAddress,
+    _Inout_ PSIZE_T RegionSize,
+    _Inout_ PVOID *EndAddress)
+
+{
+    PMMVAD Vad;
+    PVOID CurrentVa;
+
+    /* Compute the end address, round it up, and round the base down */
+    *EndAddress = (PUCHAR)*BaseAddress + *RegionSize;
+    *EndAddress = ALIGN_UP_POINTER_BY(*EndAddress, PAGE_SIZE);
+    *BaseAddress = ALIGN_DOWN_POINTER_BY(*BaseAddress, PAGE_SIZE);
+
+    /* First loop and check all VADs */
+    CurrentVa = *BaseAddress;
+    while (CurrentVa < *EndAddress)
+    {
+        /* Get VAD */
+        Vad = MiLocateAddress(CurrentVa);
+        if (Vad == NULL)
+        {
+            /// FIXME: this might be a memory area for a section view...
+            return STATUS_ACCESS_VIOLATION;
+        }
+
+        /* Check VAD type */
+        if ((Vad->u.VadFlags.VadType != VadNone) &&
+            (Vad->u.VadFlags.VadType != VadImageMap) &&
+            (Vad->u.VadFlags.VadType != VadWriteWatch))
+        {
+            /* Truncate the range to the part covered by compatible VADs */
+            *EndAddress = CurrentVa;
+            *RegionSize = (PUCHAR)*EndAddress - (PUCHAR)*BaseAddress;
+            return STATUS_INCOMPATIBLE_FILE_MAP;
+        }
+
+        /* Jump past this VAD to the next address to check */
+        CurrentVa = (PVOID)((Vad->EndingVpn + 1) << PAGE_SHIFT);
+    }
+
+    *RegionSize = (PUCHAR)*EndAddress - (PUCHAR)*BaseAddress;
+    return STATUS_SUCCESS;
+}
+
+/* Worker for NtLockVirtualMemory: page-aligns the range, probes and faults in
+   every page, then marks each PFN locked according to MapType (MAP_PROCESS
+   and/or MAP_SYSTEM). Returns STATUS_WAS_LOCKED (a warning) if any page in
+   the range was already locked. Runs in the context of the target process. */
+static
+NTSTATUS
+MiLockVirtualMemory(
+    IN OUT PVOID *BaseAddress,
+    IN OUT PSIZE_T RegionSize,
+    IN ULONG MapType)
+{
+    PEPROCESS CurrentProcess;
+    PMMSUPPORT AddressSpace;
+    PVOID CurrentVa, EndAddress;
+    PMMPTE PointerPte, LastPte;
+    PMMPDE PointerPde;
+#if (_MI_PAGING_LEVELS >= 3)
+    PMMPDE PointerPpe;
+#endif
+#if (_MI_PAGING_LEVELS == 4)
+    PMMPDE PointerPxe;
+#endif
+    PMMPFN Pfn1;
+    NTSTATUS Status, TempStatus;
+
+    /* Lock the address space */
+    AddressSpace = MmGetCurrentAddressSpace();
+    MmLockAddressSpace(AddressSpace);
+
+    /* Make sure we still have an address space */
+    CurrentProcess = PsGetCurrentProcess();
+    if (CurrentProcess->VmDeleted)
+    {
+        Status = STATUS_PROCESS_IS_TERMINATING;
+        goto Cleanup;
+    }
+
+    /* Check the VADs in the requested range */
+    Status = MiCheckVadsForLockOperation(BaseAddress, RegionSize, &EndAddress);
+    if (!NT_SUCCESS(Status))
+    {
+        goto Cleanup;
+    }
+
+    /* Enter SEH for probing */
+    _SEH2_TRY
+    {
+        /* Loop all pages and touch each one so it is faulted in */
+        CurrentVa = *BaseAddress;
+        while (CurrentVa < EndAddress)
+        {
+            (void)(*(volatile CHAR*)CurrentVa);
+            CurrentVa = (PUCHAR)CurrentVa + PAGE_SIZE;
+        }
+    }
+    _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
+    {
+        Status = _SEH2_GetExceptionCode();
+        goto Cleanup;
+    }
+    _SEH2_END;
+
+    /* All pages were accessible, since we hold the address space lock, nothing
+       can be de-committed. Assume success for now. */
+    Status = STATUS_SUCCESS;
+
+    /* Get the PTE and PDE */
+    PointerPte = MiAddressToPte(*BaseAddress);
+    PointerPde = MiAddressToPde(*BaseAddress);
+#if (_MI_PAGING_LEVELS >= 3)
+    PointerPpe = MiAddressToPpe(*BaseAddress);
+#endif
+#if (_MI_PAGING_LEVELS == 4)
+    PointerPxe = MiAddressToPxe(*BaseAddress);
+#endif
+
+    /* Get the last PTE */
+    LastPte = MiAddressToPte((PVOID)((ULONG_PTR)EndAddress - 1));
+
+    /* Lock the process working set */
+    MiLockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
+
+    /* Loop the pages */
+    do
+    {
+        /* Check for a page that is not accessible at any paging level; pages
+           probed above may have been trimmed again in the meantime */
+        while (
+#if (_MI_PAGING_LEVELS == 4)
+               (PointerPxe->u.Hard.Valid == 0) ||
+#endif
+#if (_MI_PAGING_LEVELS >= 3)
+               (PointerPpe->u.Hard.Valid == 0) ||
+#endif
+               (PointerPde->u.Hard.Valid == 0) ||
+               (PointerPte->u.Hard.Valid == 0))
+        {
+            /* Release process working set (MmAccessFault takes it itself) */
+            MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
+
+            /* Access the page */
+            CurrentVa = MiPteToAddress(PointerPte);
+
+            //HACK: Pass a placeholder TrapInformation so the fault handler knows we're unlocked
+            TempStatus = MmAccessFault(TRUE, CurrentVa, KernelMode, (PVOID)0xBADBADA3);
+            if (!NT_SUCCESS(TempStatus))
+            {
+                // This should only happen, when remote backing storage is not accessible
+                ASSERT(FALSE);
+                Status = TempStatus;
+                goto Cleanup;
+            }
+
+            /* Lock the process working set */
+            MiLockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
+        }
+
+        /* Get the PFN */
+        Pfn1 = MiGetPfnEntry(PFN_FROM_PTE(PointerPte));
+        ASSERT(Pfn1 != NULL);
+
+        /* Check the previous lock status; remember the warning but continue */
+        if (MI_IS_LOCKED_VA(Pfn1, MapType))
+        {
+            Status = STATUS_WAS_LOCKED;
+        }
+
+        /* Lock it */
+        MI_LOCK_VA(Pfn1, MapType);
+
+        /* Go to the next PTE */
+        PointerPte++;
+
+        /* Check if we're on a PDE boundary */
+        if (MiIsPteOnPdeBoundary(PointerPte)) PointerPde++;
+#if (_MI_PAGING_LEVELS >= 3)
+        if (MiIsPteOnPpeBoundary(PointerPte)) PointerPpe++;
+#endif
+#if (_MI_PAGING_LEVELS == 4)
+        if (MiIsPteOnPxeBoundary(PointerPte)) PointerPxe++;
+#endif
+    } while (PointerPte <= LastPte);
+
+    /* Release process working set */
+    MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
+
+Cleanup:
+    /* Unlock address space */
+    MmUnlockAddressSpace(AddressSpace);
+
+    return Status;
+}
+
NTSTATUS
NTAPI
NtLockVirtualMemory(IN HANDLE ProcessHandle,
}
//
- // Oops :(
+ // Call the internal function
//
- UNIMPLEMENTED;
+ Status = MiLockVirtualMemory(&CapturedBaseAddress,
+ &CapturedBytesToLock,
+ MapType);
+
+ //
+ // Detach if needed
+ //
+ if (Attached) KeUnstackDetachProcess(&ApcState);
+
+ //
+ // Release reference
+ //
+ ObDereferenceObject(Process);
+
+ //
+ // Enter SEH to return data
+ //
+ _SEH2_TRY
+ {
+ //
+ // Return data to user
+ //
+ *BaseAddress = CapturedBaseAddress;
+ *NumberOfBytesToLock = CapturedBytesToLock;
+ }
+ _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
+ {
+ //
+ // Get exception code
+ //
+ _SEH2_YIELD(return _SEH2_GetExceptionCode());
+ }
+ _SEH2_END;
+
+ //
+ // Return status
+ //
+ return Status;
+}
+
+
+/* Worker for NtUnlockVirtualMemory: verifies that every page in the
+   (page-aligned) range is present and carries all requested lock types; if
+   so, removes the locks from every page in a second pass. Returns
+   STATUS_NOT_LOCKED if any page was not locked as requested.
+   NOTE(review): a STATUS_INCOMPATIBLE_FILE_MAP result from the VAD check is
+   overwritten with STATUS_SUCCESS on the unlock path -- confirm intended. */
+static
+NTSTATUS
+MiUnlockVirtualMemory(
+    IN OUT PVOID *BaseAddress,
+    IN OUT PSIZE_T RegionSize,
+    IN ULONG MapType)
+{
+    PEPROCESS CurrentProcess;
+    PMMSUPPORT AddressSpace;
+    PVOID EndAddress;
+    PMMPTE PointerPte, LastPte;
+    PMMPDE PointerPde;
+#if (_MI_PAGING_LEVELS >= 3)
+    PMMPDE PointerPpe;
+#endif
+#if (_MI_PAGING_LEVELS == 4)
+    PMMPDE PointerPxe;
+#endif
+    PMMPFN Pfn1;
+    NTSTATUS Status;
+
+    /* Lock the address space */
+    AddressSpace = MmGetCurrentAddressSpace();
+    MmLockAddressSpace(AddressSpace);
+
+    /* Make sure we still have an address space */
+    CurrentProcess = PsGetCurrentProcess();
+    if (CurrentProcess->VmDeleted)
+    {
+        Status = STATUS_PROCESS_IS_TERMINATING;
+        goto Cleanup;
+    }
+
+    /* Check the VADs in the requested range */
+    Status = MiCheckVadsForLockOperation(BaseAddress, RegionSize, &EndAddress);
+
+    /* Note: only bail out, if we hit an area without a VAD. If we hit an
+       incompatible VAD we continue, like Windows does */
+    if (Status == STATUS_ACCESS_VIOLATION)
+    {
+        Status = STATUS_NOT_LOCKED;
+        goto Cleanup;
+    }
+
+    /* Get the PTE and PDE */
+    PointerPte = MiAddressToPte(*BaseAddress);
+    PointerPde = MiAddressToPde(*BaseAddress);
+#if (_MI_PAGING_LEVELS >= 3)
+    PointerPpe = MiAddressToPpe(*BaseAddress);
+#endif
+#if (_MI_PAGING_LEVELS == 4)
+    PointerPxe = MiAddressToPxe(*BaseAddress);
+#endif
+
+    /* Get the last PTE */
+    LastPte = MiAddressToPte((PVOID)((ULONG_PTR)EndAddress - 1));
+
+    /* Lock the process working set */
+    MiLockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
+
+    /* First pass: verify every page is present and fully locked */
+    do
+    {
+        /* Check for a page that is not present */
+        if (
+#if (_MI_PAGING_LEVELS == 4)
+            (PointerPxe->u.Hard.Valid == 0) ||
+#endif
+#if (_MI_PAGING_LEVELS >= 3)
+            (PointerPpe->u.Hard.Valid == 0) ||
+#endif
+            (PointerPde->u.Hard.Valid == 0) ||
+            (PointerPte->u.Hard.Valid == 0))
+        {
+            /* Remember it, but keep going */
+            Status = STATUS_NOT_LOCKED;
+        }
+        else
+        {
+            /* Get the PFN */
+            Pfn1 = MiGetPfnEntry(PFN_FROM_PTE(PointerPte));
+            ASSERT(Pfn1 != NULL);
+
+            /* Check if all of the requested locks are present */
+            if (((MapType & MAP_SYSTEM) && !MI_IS_LOCKED_VA(Pfn1, MAP_SYSTEM)) ||
+                ((MapType & MAP_PROCESS) && !MI_IS_LOCKED_VA(Pfn1, MAP_PROCESS)))
+            {
+                /* Remember it, but keep going */
+                Status = STATUS_NOT_LOCKED;
+
+                /* Check if no lock is present */
+                if (!MI_IS_LOCKED_VA(Pfn1, MAP_PROCESS | MAP_SYSTEM))
+                {
+                    DPRINT1("FIXME: Should remove the page from WS\n");
+                }
+            }
+        }
-    //
-    // Detach if needed
-    //
-    if (Attached) KeUnstackDetachProcess(&ApcState);
+        /* Go to the next PTE */
+        PointerPte++;
-    //
-    // Release reference
-    //
-    ObDereferenceObject(Process);
+        /* Check if we're on a PDE boundary */
+        if (MiIsPteOnPdeBoundary(PointerPte)) PointerPde++;
+#if (_MI_PAGING_LEVELS >= 3)
+        if (MiIsPteOnPpeBoundary(PointerPte)) PointerPpe++;
+#endif
+#if (_MI_PAGING_LEVELS == 4)
+        if (MiIsPteOnPxeBoundary(PointerPte)) PointerPxe++;
+#endif
+    } while (PointerPte <= LastPte);
-    //
-    // Enter SEH to return data
-    //
-    _SEH2_TRY
+    /* Check if we hit a page that was not locked */
+    if (Status == STATUS_NOT_LOCKED)
     {
-        //
-        // Return data to user
-        //
-        *BaseAddress = CapturedBaseAddress;
-        *NumberOfBytesToLock = 0;
+        goto CleanupWithWsLock;
     }
-    _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
+
+    /* All pages in the region were locked, so unlock them all */
+
+    /* Get the PTE and PDE again for the second pass */
+    PointerPte = MiAddressToPte(*BaseAddress);
+    PointerPde = MiAddressToPde(*BaseAddress);
+#if (_MI_PAGING_LEVELS >= 3)
+    PointerPpe = MiAddressToPpe(*BaseAddress);
+#endif
+#if (_MI_PAGING_LEVELS == 4)
+    PointerPxe = MiAddressToPxe(*BaseAddress);
+#endif
+
+    /* Loop the pages */
+    do
     {
-        //
-        // Get exception code
-        //
-        _SEH2_YIELD(return _SEH2_GetExceptionCode());
-    }
-    _SEH2_END;
+        /* Unlock it */
+        Pfn1 = MiGetPfnEntry(PFN_FROM_PTE(PointerPte));
+        MI_UNLOCK_VA(Pfn1, MapType);
-    //
-    // Return status
-    //
-    return STATUS_SUCCESS;
+        /* Go to the next PTE */
+        PointerPte++;
+
+        /* Check if we're on a PDE boundary */
+        if (MiIsPteOnPdeBoundary(PointerPte)) PointerPde++;
+#if (_MI_PAGING_LEVELS >= 3)
+        if (MiIsPteOnPpeBoundary(PointerPte)) PointerPpe++;
+#endif
+#if (_MI_PAGING_LEVELS == 4)
+        if (MiIsPteOnPxeBoundary(PointerPte)) PointerPxe++;
+#endif
+    } while (PointerPte <= LastPte);
+
+    /* Everything is done */
+    Status = STATUS_SUCCESS;
+
+CleanupWithWsLock:
+
+    /* Release process working set */
+    MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
+
+Cleanup:
+    /* Unlock address space */
+    MmUnlockAddressSpace(AddressSpace);
+
+    return Status;
}
+
NTSTATUS
NTAPI
NtUnlockVirtualMemory(IN HANDLE ProcessHandle,
}
//
- // Oops :(
+ // Call the internal function
//
- UNIMPLEMENTED;
+ Status = MiUnlockVirtualMemory(&CapturedBaseAddress,
+ &CapturedBytesToUnlock,
+ MapType);
//
// Detach if needed
//
// Return data to user
//
- *BaseAddress = PAGE_ALIGN(CapturedBaseAddress);
- *NumberOfBytesToUnlock = 0;
+ *BaseAddress = CapturedBaseAddress;
+ *NumberOfBytesToUnlock = CapturedBytesToUnlock;
}
_SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
{
//
// Catch illegal base address
//
- if (BaseAddress > MM_HIGHEST_USER_ADDRESS) return STATUS_INVALID_PARAMETER_2;
+ if (BaseAddress > MM_HIGHEST_USER_ADDRESS) _SEH2_YIELD(return STATUS_INVALID_PARAMETER_2);
//
// Catch illegal region size
//
// Fail
//
- return STATUS_INVALID_PARAMETER_3;
+ _SEH2_YIELD(return STATUS_INVALID_PARAMETER_3);
}
//
//
// Must have a count
//
- if (CapturedEntryCount == 0) return STATUS_INVALID_PARAMETER_5;
+ if (CapturedEntryCount == 0) _SEH2_YIELD(return STATUS_INVALID_PARAMETER_5);
//
// Can't be larger than the maximum
//
// Fail
//
- return STATUS_INVALID_PARAMETER_5;
+ _SEH2_YIELD(return STATUS_INVALID_PARAMETER_5);
}
//
NTSTATUS
NTAPI
-MiQueryMemoryBasicInformation(IN HANDLE ProcessHandle,
- IN PVOID BaseAddress,
- OUT PVOID MemoryInformation,
- IN SIZE_T MemoryInformationLength,
- OUT PSIZE_T ReturnLength)
+NtQueryVirtualMemory(IN HANDLE ProcessHandle,
+ IN PVOID BaseAddress,
+ IN MEMORY_INFORMATION_CLASS MemoryInformationClass,
+ OUT PVOID MemoryInformation,
+ IN SIZE_T MemoryInformationLength,
+ OUT PSIZE_T ReturnLength)
{
- PEPROCESS TargetProcess;
NTSTATUS Status = STATUS_SUCCESS;
- PMMVAD Vad = NULL;
- PVOID Address, NextAddress;
- BOOLEAN Found = FALSE;
- ULONG NewProtect, NewState;
- ULONG_PTR BaseVpn;
- MEMORY_BASIC_INFORMATION MemoryInfo;
- KAPC_STATE ApcState;
- KPROCESSOR_MODE PreviousMode = ExGetPreviousMode();
- PMEMORY_AREA MemoryArea;
- SIZE_T ResultLength;
+ KPROCESSOR_MODE PreviousMode;
- /* Check for illegal addresses in user-space, or the shared memory area */
- if ((BaseAddress > MM_HIGHEST_VAD_ADDRESS) ||
- (PAGE_ALIGN(BaseAddress) == (PVOID)MM_SHARED_USER_DATA_VA))
- {
- Address = PAGE_ALIGN(BaseAddress);
+ DPRINT("Querying class %d about address: %p\n", MemoryInformationClass, BaseAddress);
- /* Make up an info structure describing this range */
- MemoryInfo.BaseAddress = Address;
- MemoryInfo.AllocationProtect = PAGE_READONLY;
- MemoryInfo.Type = MEM_PRIVATE;
+ /* Bail out if the address is invalid */
+ if (BaseAddress > MM_HIGHEST_USER_ADDRESS) return STATUS_INVALID_PARAMETER;
- /* Special case for shared data */
- if (Address == (PVOID)MM_SHARED_USER_DATA_VA)
+ /* Probe return buffer */
+ PreviousMode = ExGetPreviousMode();
+ if (PreviousMode != KernelMode)
+ {
+ _SEH2_TRY
{
- MemoryInfo.AllocationBase = (PVOID)MM_SHARED_USER_DATA_VA;
- MemoryInfo.State = MEM_COMMIT;
- MemoryInfo.Protect = PAGE_READONLY;
- MemoryInfo.RegionSize = PAGE_SIZE;
+ ProbeForWrite(MemoryInformation,
+ MemoryInformationLength,
+ sizeof(ULONG_PTR));
+
+ if (ReturnLength) ProbeForWriteSize_t(ReturnLength);
}
- else
+ _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
{
- MemoryInfo.AllocationBase = (PCHAR)MM_HIGHEST_VAD_ADDRESS + 1;
- MemoryInfo.State = MEM_RESERVE;
- MemoryInfo.Protect = PAGE_NOACCESS;
- MemoryInfo.RegionSize = (ULONG_PTR)MM_HIGHEST_USER_ADDRESS + 1 - (ULONG_PTR)Address;
+ Status = _SEH2_GetExceptionCode();
}
+ _SEH2_END;
- /* Return the data, NtQueryInformation already probed it*/
- if (PreviousMode != KernelMode)
+ if (!NT_SUCCESS(Status))
{
- _SEH2_TRY
+ return Status;
+ }
+ }
+
+ switch(MemoryInformationClass)
+ {
+ case MemoryBasicInformation:
+ /* Validate the size information of the class */
+ if (MemoryInformationLength < sizeof(MEMORY_BASIC_INFORMATION))
{
- *(PMEMORY_BASIC_INFORMATION)MemoryInformation = MemoryInfo;
- if (ReturnLength) *ReturnLength = sizeof(MEMORY_BASIC_INFORMATION);
+ /* The size is invalid */
+ return STATUS_INFO_LENGTH_MISMATCH;
}
- _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
+ Status = MiQueryMemoryBasicInformation(ProcessHandle,
+ BaseAddress,
+ MemoryInformation,
+ MemoryInformationLength,
+ ReturnLength);
+ break;
+
+ case MemorySectionName:
+ /* Validate the size information of the class */
+ if (MemoryInformationLength < sizeof(MEMORY_SECTION_NAME))
{
- Status = _SEH2_GetExceptionCode();
+ /* The size is invalid */
+ return STATUS_INFO_LENGTH_MISMATCH;
}
- _SEH2_END;
+ Status = MiQueryMemorySectionName(ProcessHandle,
+ BaseAddress,
+ MemoryInformation,
+ MemoryInformationLength,
+ ReturnLength);
+ break;
+ case MemoryWorkingSetList:
+ case MemoryBasicVlmInformation:
+ default:
+ DPRINT1("Unhandled memory information class %d\n", MemoryInformationClass);
+ break;
+ }
+
+ return Status;
+}
+
+/*
+ * @implemented
+ */
+NTSTATUS
+NTAPI
+NtAllocateVirtualMemory(IN HANDLE ProcessHandle,
+ IN OUT PVOID* UBaseAddress,
+ IN ULONG_PTR ZeroBits,
+ IN OUT PSIZE_T URegionSize,
+ IN ULONG AllocationType,
+ IN ULONG Protect)
+{
+ PEPROCESS Process;
+ PMEMORY_AREA MemoryArea;
+ PFN_NUMBER PageCount;
+ PMMVAD Vad = NULL, FoundVad;
+ NTSTATUS Status;
+ PMMSUPPORT AddressSpace;
+ PVOID PBaseAddress;
+ ULONG_PTR PRegionSize, StartingAddress, EndingAddress, HighestAddress;
+ PEPROCESS CurrentProcess = PsGetCurrentProcess();
+ KPROCESSOR_MODE PreviousMode = KeGetPreviousMode();
+ PETHREAD CurrentThread = PsGetCurrentThread();
+ KAPC_STATE ApcState;
+ ULONG ProtectionMask, QuotaCharge = 0, QuotaFree = 0;
+ BOOLEAN Attached = FALSE, ChangeProtection = FALSE;
+ MMPTE TempPte;
+ PMMPTE PointerPte, PointerPde, LastPte;
+ TABLE_SEARCH_RESULT Result;
+ PMMADDRESS_NODE Parent;
+ PAGED_CODE();
+
+ /* Check for valid Zero bits */
+ if (ZeroBits > MI_MAX_ZERO_BITS)
+ {
+ DPRINT1("Too many zero bits\n");
+ return STATUS_INVALID_PARAMETER_3;
+ }
+
+ /* Check for valid Allocation Types */
+ if ((AllocationType & ~(MEM_COMMIT | MEM_RESERVE | MEM_RESET | MEM_PHYSICAL |
+ MEM_TOP_DOWN | MEM_WRITE_WATCH | MEM_LARGE_PAGES)))
+ {
+ DPRINT1("Invalid Allocation Type\n");
+ return STATUS_INVALID_PARAMETER_5;
+ }
+
+ /* Check for at least one of these Allocation Types to be set */
+ if (!(AllocationType & (MEM_COMMIT | MEM_RESERVE | MEM_RESET)))
+ {
+ DPRINT1("No memory allocation base type\n");
+ return STATUS_INVALID_PARAMETER_5;
+ }
+
+ /* MEM_RESET is an exclusive flag, make sure that is valid too */
+ if ((AllocationType & MEM_RESET) && (AllocationType != MEM_RESET))
+ {
+ DPRINT1("Invalid use of MEM_RESET\n");
+ return STATUS_INVALID_PARAMETER_5;
+ }
+
+ /* Check if large pages are being used */
+ if (AllocationType & MEM_LARGE_PAGES)
+ {
+ /* Large page allocations MUST be committed */
+ if (!(AllocationType & MEM_COMMIT))
+ {
+ DPRINT1("Must supply MEM_COMMIT with MEM_LARGE_PAGES\n");
+ return STATUS_INVALID_PARAMETER_5;
}
- else
+
+ /* These flags are not allowed with large page allocations */
+ if (AllocationType & (MEM_PHYSICAL | MEM_RESET | MEM_WRITE_WATCH))
+ {
+ DPRINT1("Using illegal flags with MEM_LARGE_PAGES\n");
+ return STATUS_INVALID_PARAMETER_5;
+ }
+ }
+
+ /* MEM_WRITE_WATCH can only be used if MEM_RESERVE is also used */
+ if ((AllocationType & MEM_WRITE_WATCH) && !(AllocationType & MEM_RESERVE))
+ {
+ DPRINT1("MEM_WRITE_WATCH used without MEM_RESERVE\n");
+ return STATUS_INVALID_PARAMETER_5;
+ }
+
+ /* Check for valid MEM_PHYSICAL usage */
+ if (AllocationType & MEM_PHYSICAL)
+ {
+ /* MEM_PHYSICAL can only be used if MEM_RESERVE is also used */
+ if (!(AllocationType & MEM_RESERVE))
+ {
+ DPRINT1("MEM_PHYSICAL used without MEM_RESERVE\n");
+ return STATUS_INVALID_PARAMETER_5;
+ }
+
+ /* Only these flags are allowed with MEM_PHYSICAL */
+ if (AllocationType & ~(MEM_RESERVE | MEM_TOP_DOWN | MEM_PHYSICAL))
+ {
+ DPRINT1("Using illegal flags with MEM_PHYSICAL\n");
+ return STATUS_INVALID_PARAMETER_5;
+ }
+
+ /* Then make sure PAGE_READWRITE is used */
+ if (Protect != PAGE_READWRITE)
+ {
+ DPRINT1("MEM_PHYSICAL used without PAGE_READWRITE\n");
+ return STATUS_INVALID_PARAMETER_6;
+ }
+ }
+
+ /* Calculate the protection mask and make sure it's valid */
+ ProtectionMask = MiMakeProtectionMask(Protect);
+ if (ProtectionMask == MM_INVALID_PROTECTION)
+ {
+ DPRINT1("Invalid protection mask\n");
+ return STATUS_INVALID_PAGE_PROTECTION;
+ }
+
+ /* Enter SEH */
+ _SEH2_TRY
+ {
+ /* Check for user-mode parameters */
+ if (PreviousMode != KernelMode)
{
- *(PMEMORY_BASIC_INFORMATION)MemoryInformation = MemoryInfo;
- if (ReturnLength) *ReturnLength = sizeof(MEMORY_BASIC_INFORMATION);
+ /* Make sure they are writable */
+ ProbeForWritePointer(UBaseAddress);
+ ProbeForWriteSize_t(URegionSize);
}
- return Status;
+ /* Capture their values */
+ PBaseAddress = *UBaseAddress;
+ PRegionSize = *URegionSize;
}
+ _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
+ {
+ /* Return the exception code */
+ _SEH2_YIELD(return _SEH2_GetExceptionCode());
+ }
+ _SEH2_END;
- /* Check if this is for a local or remote process */
+ /* Make sure the allocation isn't past the VAD area */
+ if (PBaseAddress > MM_HIGHEST_VAD_ADDRESS)
+ {
+ DPRINT1("Virtual allocation base above User Space\n");
+ return STATUS_INVALID_PARAMETER_2;
+ }
+
+ /* Make sure the allocation wouldn't overflow past the VAD area */
+ if ((((ULONG_PTR)MM_HIGHEST_VAD_ADDRESS + 1) - (ULONG_PTR)PBaseAddress) < PRegionSize)
+ {
+ DPRINT1("Region size would overflow into kernel-memory\n");
+ return STATUS_INVALID_PARAMETER_4;
+ }
+
+ /* Make sure there's a size specified */
+ if (!PRegionSize)
+ {
+ DPRINT1("Region size is invalid (zero)\n");
+ return STATUS_INVALID_PARAMETER_4;
+ }
+
+ //
+ // If this is for the current process, just use PsGetCurrentProcess
+ //
if (ProcessHandle == NtCurrentProcess())
{
- TargetProcess = PsGetCurrentProcess();
+ Process = CurrentProcess;
}
else
{
- /* Reference the target process */
+ //
+ // Otherwise, reference the process with VM rights and attach to it if
+ // this isn't the current process. We must attach because we'll be touching
+ // PTEs and PDEs that belong to user-mode memory, and also touching the
+ // Working Set which is stored in Hyperspace.
+ //
Status = ObReferenceObjectByHandle(ProcessHandle,
- PROCESS_QUERY_INFORMATION,
+ PROCESS_VM_OPERATION,
PsProcessType,
- ExGetPreviousMode(),
- (PVOID*)&TargetProcess,
+ PreviousMode,
+ (PVOID*)&Process,
NULL);
if (!NT_SUCCESS(Status)) return Status;
-
- /* Attach to it now */
- KeStackAttachProcess(&TargetProcess->Pcb, &ApcState);
+ if (CurrentProcess != Process)
+ {
+ KeStackAttachProcess(&Process->Pcb, &ApcState);
+ Attached = TRUE;
+ }
}
- /* Loop the VADs */
- ASSERT(TargetProcess->VadRoot.NumberGenericTableElements);
- if (TargetProcess->VadRoot.NumberGenericTableElements)
+ //
+ // Check for large page allocations and make sure that the required privilege
+ // is being held, before attempting to handle them.
+ //
+ if ((AllocationType & MEM_LARGE_PAGES) &&
+ !(SeSinglePrivilegeCheck(SeLockMemoryPrivilege, PreviousMode)))
{
- /* Scan on the right */
- Vad = (PMMVAD)TargetProcess->VadRoot.BalancedRoot.RightChild;
- BaseVpn = (ULONG_PTR)BaseAddress >> PAGE_SHIFT;
- while (Vad)
- {
- /* Check if this VAD covers the allocation range */
- if ((BaseVpn >= Vad->StartingVpn) &&
- (BaseVpn <= Vad->EndingVpn))
- {
- /* We're done */
- Found = TRUE;
- break;
- }
-
- /* Check if this VAD is too high */
- if (BaseVpn < Vad->StartingVpn)
- {
- /* Stop if there is no left child */
- if (!Vad->LeftChild) break;
-
- /* Search on the left next */
- Vad = Vad->LeftChild;
- }
- else
- {
- /* Then this VAD is too low, keep searching on the right */
- ASSERT(BaseVpn > Vad->EndingVpn);
-
- /* Stop if there is no right child */
- if (!Vad->RightChild) break;
+ /* Fail without it */
+ DPRINT1("Privilege not held for MEM_LARGE_PAGES\n");
+ Status = STATUS_PRIVILEGE_NOT_HELD;
+ goto FailPathNoLock;
+ }
- /* Search on the right next */
- Vad = Vad->RightChild;
- }
- }
+ //
+ // Fail on the things we don't yet support
+ //
+ if ((AllocationType & MEM_LARGE_PAGES) == MEM_LARGE_PAGES)
+ {
+ DPRINT1("MEM_LARGE_PAGES not supported\n");
+ Status = STATUS_INVALID_PARAMETER;
+ goto FailPathNoLock;
+ }
+ if ((AllocationType & MEM_PHYSICAL) == MEM_PHYSICAL)
+ {
+ DPRINT1("MEM_PHYSICAL not supported\n");
+ Status = STATUS_INVALID_PARAMETER;
+ goto FailPathNoLock;
+ }
+ if ((AllocationType & MEM_WRITE_WATCH) == MEM_WRITE_WATCH)
+ {
+ DPRINT1("MEM_WRITE_WATCH not supported\n");
+ Status = STATUS_INVALID_PARAMETER;
+ goto FailPathNoLock;
}
- /* Was a VAD found? */
- if (!Found)
+ //
+ // Check if the caller is reserving memory, or committing memory and letting
+ // us pick the base address
+ //
+ if (!(PBaseAddress) || (AllocationType & MEM_RESERVE))
{
- Address = PAGE_ALIGN(BaseAddress);
+ //
+ // Do not allow COPY_ON_WRITE through this API
+ //
+ if (Protect & (PAGE_WRITECOPY | PAGE_EXECUTE_WRITECOPY))
+ {
+ DPRINT1("Copy on write not allowed through this path\n");
+ Status = STATUS_INVALID_PAGE_PROTECTION;
+ goto FailPathNoLock;
+ }
- /* Calculate region size */
- if (Vad)
+ //
+ // Does the caller have an address in mind, or is this a blind commit?
+ //
+ if (!PBaseAddress)
{
- if (Vad->StartingVpn >= BaseVpn)
+ //
+ // This is a blind commit, all we need is the region size
+ //
+ PRegionSize = ROUND_TO_PAGES(PRegionSize);
+ PageCount = BYTES_TO_PAGES(PRegionSize);
+ EndingAddress = 0;
+ StartingAddress = 0;
+
+ //
+ // Check if ZeroBits were specified
+ //
+ if (ZeroBits != 0)
{
- /* Region size is the free space till the start of that VAD */
- MemoryInfo.RegionSize = (ULONG_PTR)(Vad->StartingVpn << PAGE_SHIFT) - (ULONG_PTR)Address;
+ //
+ // Calculate the highest address and check if it's valid
+ //
+ HighestAddress = MAXULONG_PTR >> ZeroBits;
+ if (HighestAddress > (ULONG_PTR)MM_HIGHEST_VAD_ADDRESS)
+ {
+ Status = STATUS_INVALID_PARAMETER_3;
+ goto FailPathNoLock;
+ }
}
else
{
- /* Get the next VAD */
- Vad = (PMMVAD)MiGetNextNode((PMMADDRESS_NODE)Vad);
- if (Vad)
- {
- /* Region size is the free space till the start of that VAD */
- MemoryInfo.RegionSize = (ULONG_PTR)(Vad->StartingVpn << PAGE_SHIFT) - (ULONG_PTR)Address;
- }
- else
- {
- /* Maximum possible region size with that base address */
- MemoryInfo.RegionSize = (PCHAR)MM_HIGHEST_VAD_ADDRESS + 1 - (PCHAR)Address;
- }
+ //
+ // Use the highest VAD address as maximum
+ //
+ HighestAddress = (ULONG_PTR)MM_HIGHEST_VAD_ADDRESS;
}
}
else
{
- /* Maximum possible region size with that base address */
- MemoryInfo.RegionSize = (PCHAR)MM_HIGHEST_VAD_ADDRESS + 1 - (PCHAR)Address;
+ //
+ // This is a reservation, so compute the starting address on the
+ // expected 64KB granularity, and see where the ending address will
+ // fall based on the aligned address and the passed in region size
+ //
+ EndingAddress = ((ULONG_PTR)PBaseAddress + PRegionSize - 1) | (PAGE_SIZE - 1);
+ StartingAddress = ROUND_DOWN((ULONG_PTR)PBaseAddress, _64K);
+ PageCount = BYTES_TO_PAGES(EndingAddress - StartingAddress);
}
- /* Check if we were attached */
- if (ProcessHandle != NtCurrentProcess())
+ //
+ // Allocate and initialize the VAD
+ //
+ Vad = ExAllocatePoolWithTag(NonPagedPool, sizeof(MMVAD_LONG), 'SdaV');
+ if (Vad == NULL)
{
- /* Detach and derefernece the process */
- KeUnstackDetachProcess(&ApcState);
- ObDereferenceObject(TargetProcess);
+ DPRINT1("Failed to allocate a VAD!\n");
+ Status = STATUS_INSUFFICIENT_RESOURCES;
+ goto FailPathNoLock;
}
- /* Build the rest of the initial information block */
- MemoryInfo.BaseAddress = Address;
- MemoryInfo.AllocationBase = NULL;
- MemoryInfo.AllocationProtect = 0;
- MemoryInfo.State = MEM_FREE;
- MemoryInfo.Protect = PAGE_NOACCESS;
- MemoryInfo.Type = 0;
+ Vad->u.LongFlags = 0;
+ if (AllocationType & MEM_COMMIT) Vad->u.VadFlags.MemCommit = 1;
+ Vad->u.VadFlags.Protection = ProtectionMask;
+ Vad->u.VadFlags.PrivateMemory = 1;
+ Vad->u.VadFlags.CommitCharge = AllocationType & MEM_COMMIT ? PageCount : 0;
- /* Return the data, NtQueryInformation already probed it*/
- if (PreviousMode != KernelMode)
+ //
+ // Lock the address space and make sure the process isn't already dead
+ //
+ AddressSpace = MmGetCurrentAddressSpace();
+ MmLockAddressSpace(AddressSpace);
+ if (Process->VmDeleted)
{
- _SEH2_TRY
+ Status = STATUS_PROCESS_IS_TERMINATING;
+ goto FailPath;
+ }
+
+ //
+ // Did we have a base address? If no, find a valid address that is 64KB
+ // aligned in the VAD tree. Otherwise, make sure that the address range
+ // which was passed in isn't already conflicting with an existing address
+ // range.
+ //
+ if (!PBaseAddress)
+ {
+ /* Which way should we search? */
+ if ((AllocationType & MEM_TOP_DOWN) || Process->VmTopDown)
{
- *(PMEMORY_BASIC_INFORMATION)MemoryInformation = MemoryInfo;
- if (ReturnLength) *ReturnLength = sizeof(MEMORY_BASIC_INFORMATION);
+ /* Find an address top-down */
+ Result = MiFindEmptyAddressRangeDownTree(PRegionSize,
+ HighestAddress,
+ _64K,
+ &Process->VadRoot,
+ &StartingAddress,
+ &Parent);
}
- _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
+ else
{
- Status = _SEH2_GetExceptionCode();
+ /* Find an address bottom-up */
+ Result = MiFindEmptyAddressRangeInTree(PRegionSize,
+ _64K,
+ &Process->VadRoot,
+ &Parent,
+ &StartingAddress);
+ }
+
+ if (Result == TableFoundNode)
+ {
+ Status = STATUS_NO_MEMORY;
+ goto FailPath;
+ }
+
+ //
+ // Now we know where the allocation ends. Make sure it doesn't end up
+ // somewhere in kernel mode.
+ //
+ ASSERT(StartingAddress != 0);
+ ASSERT(StartingAddress < (ULONG_PTR)MM_HIGHEST_USER_ADDRESS);
+ EndingAddress = (StartingAddress + PRegionSize - 1) | (PAGE_SIZE - 1);
+ ASSERT(EndingAddress > StartingAddress);
+ if (EndingAddress > HighestAddress)
+ {
+ Status = STATUS_NO_MEMORY;
+ goto FailPath;
}
- _SEH2_END;
}
else
{
- *(PMEMORY_BASIC_INFORMATION)MemoryInformation = MemoryInfo;
- if (ReturnLength) *ReturnLength = sizeof(MEMORY_BASIC_INFORMATION);
+ /* Make sure it doesn't conflict with an existing allocation */
+ Result = MiCheckForConflictingNode(StartingAddress >> PAGE_SHIFT,
+ EndingAddress >> PAGE_SHIFT,
+ &Process->VadRoot,
+ &Parent);
+ if (Result == TableFoundNode)
+ {
+ //
+ // The address specified is in conflict!
+ //
+ Status = STATUS_CONFLICTING_ADDRESSES;
+ goto FailPath;
+ }
}
- return Status;
- }
-
- /* This must be a VM VAD */
- ASSERT(Vad->u.VadFlags.PrivateMemory);
+ //
+ // Write out the VAD fields for this allocation
+ //
+ Vad->StartingVpn = StartingAddress >> PAGE_SHIFT;
+ Vad->EndingVpn = EndingAddress >> PAGE_SHIFT;
- /* Lock the address space of the process */
- MmLockAddressSpace(&TargetProcess->Vm);
+ //
+ // FIXME: Should setup VAD bitmap
+ //
+ Status = STATUS_SUCCESS;
- /* Find the memory area the specified address belongs to */
- MemoryArea = MmLocateMemoryAreaByAddress(&TargetProcess->Vm, BaseAddress);
- ASSERT(MemoryArea != NULL);
+ //
+ // Lock the working set and insert the VAD into the process VAD tree
+ //
+ MiLockProcessWorkingSetUnsafe(Process, CurrentThread);
+ Vad->ControlArea = NULL; // For Memory-Area hack
+ Process->VadRoot.NodeHint = Vad;
+ MiInsertNode(&Process->VadRoot, (PVOID)Vad, Parent, Result);
+ MiUnlockProcessWorkingSetUnsafe(Process, CurrentThread);
- /* Determine information dependent on the memory area type */
- if (MemoryArea->Type == MEMORY_AREA_SECTION_VIEW)
- {
- Status = MmQuerySectionView(MemoryArea, BaseAddress, &MemoryInfo, &ResultLength);
- ASSERT(NT_SUCCESS(Status));
- }
- else
- {
- /* Build the initial information block */
- Address = PAGE_ALIGN(BaseAddress);
- MemoryInfo.BaseAddress = Address;
- MemoryInfo.AllocationBase = (PVOID)(Vad->StartingVpn << PAGE_SHIFT);
- MemoryInfo.AllocationProtect = MmProtectToValue[Vad->u.VadFlags.Protection];
- MemoryInfo.Type = MEM_PRIVATE;
+ //
+ // Make sure the actual region size is at least as big as the
+ // requested region size and update the value
+ //
+ ASSERT(PRegionSize <= (EndingAddress + 1 - StartingAddress));
+ PRegionSize = (EndingAddress + 1 - StartingAddress);
- /* Find the largest chunk of memory which has the same state and protection mask */
- MemoryInfo.State = MiQueryAddressState(Address,
- Vad,
- TargetProcess,
- &MemoryInfo.Protect,
- &NextAddress);
- Address = NextAddress;
- while (((ULONG_PTR)Address >> PAGE_SHIFT) <= Vad->EndingVpn)
+ //
+ // Update the virtual size of the process, and if this is now the highest
+ // virtual size we have ever seen, update the peak virtual size to reflect
+ // this.
+ //
+ Process->VirtualSize += PRegionSize;
+ if (Process->VirtualSize > Process->PeakVirtualSize)
{
- /* Keep going unless the state or protection mask changed */
- NewState = MiQueryAddressState(Address, Vad, TargetProcess, &NewProtect, &NextAddress);
- if ((NewState != MemoryInfo.State) || (NewProtect != MemoryInfo.Protect)) break;
- Address = NextAddress;
+ Process->PeakVirtualSize = Process->VirtualSize;
}
- /* Now that we know the last VA address, calculate the region size */
- MemoryInfo.RegionSize = ((ULONG_PTR)Address - (ULONG_PTR)MemoryInfo.BaseAddress);
- }
-
- /* Unlock the address space of the process */
- MmUnlockAddressSpace(&TargetProcess->Vm);
-
- /* Check if we were attached */
- if (ProcessHandle != NtCurrentProcess())
- {
- /* Detach and derefernece the process */
- KeUnstackDetachProcess(&ApcState);
- ObDereferenceObject(TargetProcess);
- }
+ //
+ // Release address space and detach and dereference the target process if
+ // it was different from the current process
+ //
+ MmUnlockAddressSpace(AddressSpace);
+ if (Attached) KeUnstackDetachProcess(&ApcState);
+ if (ProcessHandle != NtCurrentProcess()) ObDereferenceObject(Process);
- /* Return the data, NtQueryInformation already probed it*/
- if (PreviousMode != KernelMode)
- {
+ //
+ // Use SEH to write back the base address and the region size. In the case
+ // of an exception, we do not return back the exception code, as the memory
+ // *has* been allocated. The caller would now have to call VirtualQuery
+ // or do some other similar trick to actually find out where its memory
+ // allocation ended up
+ //
_SEH2_TRY
{
- *(PMEMORY_BASIC_INFORMATION)MemoryInformation = MemoryInfo;
- if (ReturnLength) *ReturnLength = sizeof(MEMORY_BASIC_INFORMATION);
+ *URegionSize = PRegionSize;
+ *UBaseAddress = (PVOID)StartingAddress;
}
- _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
+ _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
{
- Status = _SEH2_GetExceptionCode();
+ //
+ // Ignore exception!
+ //
}
_SEH2_END;
+ return STATUS_SUCCESS;
}
- else
+
+ //
+ // This is a MEM_COMMIT on top of an existing address which must have been
+ // MEM_RESERVED already. Compute the start and ending base addresses based
+ // on the user input, and then compute the actual region size once all the
+ // alignments have been done.
+ //
+ EndingAddress = (((ULONG_PTR)PBaseAddress + PRegionSize - 1) | (PAGE_SIZE - 1));
+ StartingAddress = (ULONG_PTR)PAGE_ALIGN(PBaseAddress);
+ PRegionSize = EndingAddress - StartingAddress + 1;
+
+ //
+ // Lock the address space and make sure the process isn't already dead
+ //
+ AddressSpace = MmGetCurrentAddressSpace();
+ MmLockAddressSpace(AddressSpace);
+ if (Process->VmDeleted)
{
- *(PMEMORY_BASIC_INFORMATION)MemoryInformation = MemoryInfo;
- if (ReturnLength) *ReturnLength = sizeof(MEMORY_BASIC_INFORMATION);
+ DPRINT1("Process is dying\n");
+ Status = STATUS_PROCESS_IS_TERMINATING;
+ goto FailPath;
}
- /* All went well */
- DPRINT("Base: %p AllocBase: %p AllocProtect: %lx Protect: %lx "
- "State: %lx Type: %lx Size: %lx\n",
- MemoryInfo.BaseAddress, MemoryInfo.AllocationBase,
- MemoryInfo.AllocationProtect, MemoryInfo.Protect,
- MemoryInfo.State, MemoryInfo.Type, MemoryInfo.RegionSize);
-
- return Status;
-}
+ //
+ // Get the VAD for this address range, and make sure it exists
+ //
+ Result = MiCheckForConflictingNode(StartingAddress >> PAGE_SHIFT,
+ EndingAddress >> PAGE_SHIFT,
+ &Process->VadRoot,
+ (PMMADDRESS_NODE*)&FoundVad);
+ if (Result != TableFoundNode)
+ {
+ DPRINT1("Could not find a VAD for this allocation\n");
+ Status = STATUS_CONFLICTING_ADDRESSES;
+ goto FailPath;
+ }
-NTSTATUS
-NTAPI
-MiQueryMemorySectionName(IN HANDLE ProcessHandle,
- IN PVOID BaseAddress,
- OUT PVOID MemoryInformation,
- IN SIZE_T MemoryInformationLength,
- OUT PSIZE_T ReturnLength)
-{
- PEPROCESS Process;
- NTSTATUS Status;
- WCHAR ModuleFileNameBuffer[MAX_PATH] = {0};
- UNICODE_STRING ModuleFileName;
- PMEMORY_SECTION_NAME SectionName = NULL;
- KPROCESSOR_MODE PreviousMode = ExGetPreviousMode();
+ if ((AllocationType & MEM_RESET) == MEM_RESET)
+ {
+ /// @todo HACK: pretend success
+ DPRINT("MEM_RESET not supported\n");
+ Status = STATUS_SUCCESS;
+ goto FailPath;
+ }
- Status = ObReferenceObjectByHandle(ProcessHandle,
- PROCESS_QUERY_INFORMATION,
- NULL,
- PreviousMode,
- (PVOID*)(&Process),
- NULL);
+ //
+ // These kinds of VADs are illegal for this Windows function when trying to
+ // commit an existing range
+ //
+ if ((FoundVad->u.VadFlags.VadType == VadAwe) ||
+ (FoundVad->u.VadFlags.VadType == VadDevicePhysicalMemory) ||
+ (FoundVad->u.VadFlags.VadType == VadLargePages))
+ {
+ DPRINT1("Illegal VAD for attempting a MEM_COMMIT\n");
+ Status = STATUS_CONFLICTING_ADDRESSES;
+ goto FailPath;
+ }
- if (!NT_SUCCESS(Status))
+ //
+ // Make sure that this address range actually fits within the VAD for it
+ //
+ if (((StartingAddress >> PAGE_SHIFT) < FoundVad->StartingVpn) ||
+ ((EndingAddress >> PAGE_SHIFT) > FoundVad->EndingVpn))
{
- DPRINT("MiQueryMemorySectionName: ObReferenceObjectByHandle returned %x\n",Status);
- return Status;
+ DPRINT1("Address range does not fit into the VAD\n");
+ Status = STATUS_CONFLICTING_ADDRESSES;
+ goto FailPath;
}
- RtlInitEmptyUnicodeString(&ModuleFileName, ModuleFileNameBuffer, sizeof(ModuleFileNameBuffer));
- Status = MmGetFileNameForAddress(BaseAddress, &ModuleFileName);
+ //
+ // Make sure this is an ARM3 section
+ //
+ MemoryArea = MmLocateMemoryAreaByAddress(AddressSpace, (PVOID)PAGE_ROUND_DOWN(PBaseAddress));
+ if (MemoryArea->Type != MEMORY_AREA_OWNED_BY_ARM3)
+ {
+ DPRINT1("Illegal commit of non-ARM3 section!\n");
+ Status = STATUS_ALREADY_COMMITTED;
+ goto FailPath;
+ }
- if (NT_SUCCESS(Status))
+ //
+ // Is this a previously reserved section being committed? If so, enter the
+ // special section path
+ //
+ if (FoundVad->u.VadFlags.PrivateMemory == FALSE)
{
- SectionName = MemoryInformation;
- if (PreviousMode != KernelMode)
+ //
+ // You cannot commit large page sections through this API
+ //
+ if (FoundVad->u.VadFlags.VadType == VadLargePageSection)
{
- _SEH2_TRY
- {
- RtlInitUnicodeString(&SectionName->SectionFileName, SectionName->NameBuffer);
- SectionName->SectionFileName.MaximumLength = (USHORT)MemoryInformationLength;
- RtlCopyUnicodeString(&SectionName->SectionFileName, &ModuleFileName);
+ DPRINT1("Large page sections cannot be VirtualAlloc'd\n");
+ Status = STATUS_INVALID_PAGE_PROTECTION;
+ goto FailPath;
+ }
- if (ReturnLength) *ReturnLength = ModuleFileName.Length;
+ //
+ // You can only use caching flags on a rotate VAD
+ //
+ if ((Protect & (PAGE_NOCACHE | PAGE_WRITECOMBINE)) &&
+ (FoundVad->u.VadFlags.VadType != VadRotatePhysical))
+ {
+ DPRINT1("Cannot use caching flags with anything but rotate VADs\n");
+ Status = STATUS_INVALID_PAGE_PROTECTION;
+ goto FailPath;
+ }
- }
- _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
+ //
+ // We should make sure that the section's permissions aren't being
+ // messed with
+ //
+ if (FoundVad->u.VadFlags.NoChange)
+ {
+ //
+ // Make sure it's okay to touch it
+ // Note: The Windows 2003 kernel has a bug here, passing the
+ // unaligned base address together with the aligned size,
+ // potentially covering a region larger than the actual allocation.
+ // Might be exposed through NtGdiCreateDIBSection w/ section handle
+ // For now we keep this behavior.
+ // TODO: analyze possible implications, create test case
+ //
+ Status = MiCheckSecuredVad(FoundVad,
+ PBaseAddress,
+ PRegionSize,
+ ProtectionMask);
+ if (!NT_SUCCESS(Status))
{
- Status = _SEH2_GetExceptionCode();
+ DPRINT1("Secured VAD being messed around with\n");
+ goto FailPath;
}
- _SEH2_END;
- }
- else
- {
- RtlInitUnicodeString(&SectionName->SectionFileName, SectionName->NameBuffer);
- SectionName->SectionFileName.MaximumLength = (USHORT)MemoryInformationLength;
- RtlCopyUnicodeString(&SectionName->SectionFileName, &ModuleFileName);
-
- if (ReturnLength) *ReturnLength = ModuleFileName.Length;
-
}
- }
- ObDereferenceObject(Process);
- return Status;
-}
-
-NTSTATUS
-NTAPI
-NtQueryVirtualMemory(IN HANDLE ProcessHandle,
- IN PVOID BaseAddress,
- IN MEMORY_INFORMATION_CLASS MemoryInformationClass,
- OUT PVOID MemoryInformation,
- IN SIZE_T MemoryInformationLength,
- OUT PSIZE_T ReturnLength)
-{
- NTSTATUS Status = STATUS_SUCCESS;
- KPROCESSOR_MODE PreviousMode;
-
- DPRINT("Querying class %d about address: %p\n", MemoryInformationClass, BaseAddress);
-
- /* Bail out if the address is invalid */
- if (BaseAddress > MM_HIGHEST_USER_ADDRESS) return STATUS_INVALID_PARAMETER;
- /* Probe return buffer */
- PreviousMode = ExGetPreviousMode();
- if (PreviousMode != KernelMode)
- {
- _SEH2_TRY
- {
- ProbeForWrite(MemoryInformation,
- MemoryInformationLength,
- sizeof(ULONG_PTR));
+ //
+ // ARM3 does not support file-backed sections, only shared memory
+ //
+ ASSERT(FoundVad->ControlArea->FilePointer == NULL);
- if (ReturnLength) ProbeForWriteSize_t(ReturnLength);
- }
- _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
+ //
+ // Rotate VADs cannot be guard pages or inaccessible, nor copy on write
+ //
+ if ((FoundVad->u.VadFlags.VadType == VadRotatePhysical) &&
+ (Protect & (PAGE_WRITECOPY | PAGE_EXECUTE_WRITECOPY | PAGE_NOACCESS | PAGE_GUARD)))
{
- Status = _SEH2_GetExceptionCode();
+ DPRINT1("Invalid page protection for rotate VAD\n");
+ Status = STATUS_INVALID_PAGE_PROTECTION;
+ goto FailPath;
}
- _SEH2_END;
- if (!NT_SUCCESS(Status))
- {
- return Status;
- }
- }
+ //
+ // Compute PTE addresses and the quota charge, then grab the commit lock
+ //
+ PointerPte = MI_GET_PROTOTYPE_PTE_FOR_VPN(FoundVad, StartingAddress >> PAGE_SHIFT);
+ LastPte = MI_GET_PROTOTYPE_PTE_FOR_VPN(FoundVad, EndingAddress >> PAGE_SHIFT);
+ QuotaCharge = (ULONG)(LastPte - PointerPte + 1);
+ KeAcquireGuardedMutexUnsafe(&MmSectionCommitMutex);
- switch(MemoryInformationClass)
- {
- case MemoryBasicInformation:
- /* Validate the size information of the class */
- if (MemoryInformationLength < sizeof(MEMORY_BASIC_INFORMATION))
+ //
+ // Get the segment template PTE and start looping each page
+ //
+ TempPte = FoundVad->ControlArea->Segment->SegmentPteTemplate;
+ ASSERT(TempPte.u.Long != 0);
+ while (PointerPte <= LastPte)
+ {
+ //
+ // For each non-already-committed page, write the invalid template PTE
+ //
+ if (PointerPte->u.Long == 0)
{
- /* The size is invalid */
- return STATUS_INFO_LENGTH_MISMATCH;
+ MI_WRITE_INVALID_PTE(PointerPte, TempPte);
}
- Status = MiQueryMemoryBasicInformation(ProcessHandle,
- BaseAddress,
- MemoryInformation,
- MemoryInformationLength,
- ReturnLength);
- break;
-
- case MemorySectionName:
- /* Validate the size information of the class */
- if (MemoryInformationLength < sizeof(MEMORY_SECTION_NAME))
+ else
{
- /* The size is invalid */
- return STATUS_INFO_LENGTH_MISMATCH;
+ QuotaFree++;
}
- Status = MiQueryMemorySectionName(ProcessHandle,
- BaseAddress,
- MemoryInformation,
- MemoryInformationLength,
- ReturnLength);
- break;
- case MemoryWorkingSetList:
- case MemoryBasicVlmInformation:
- default:
- DPRINT1("Unhandled memory information class %d\n", MemoryInformationClass);
- break;
+ PointerPte++;
+ }
+
+ //
+ // Now do the commit accounting and release the lock
+ //
+ ASSERT(QuotaCharge >= QuotaFree);
+ QuotaCharge -= QuotaFree;
+ FoundVad->ControlArea->Segment->NumberOfCommittedPages += QuotaCharge;
+ KeReleaseGuardedMutexUnsafe(&MmSectionCommitMutex);
+
+ //
+ // We are done with committing the section pages
+ //
+ Status = STATUS_SUCCESS;
+ goto FailPath;
}
- return Status;
-}
+ //
+ // This is a specific ReactOS check because we only use normal VADs
+ //
+ ASSERT(FoundVad->u.VadFlags.VadType == VadNone);
-#ifdef __USE_ARM3__
-/*
-* @implemented
-*/
-NTSTATUS
-NTAPI
-NtAllocateVirtualMemory(IN HANDLE ProcessHandle,
- IN OUT PVOID* UBaseAddress,
- IN ULONG_PTR ZeroBits,
- IN OUT PSIZE_T URegionSize,
- IN ULONG AllocationType,
- IN ULONG Protect)
-{
- PEPROCESS Process;
- ULONG Type;
- NTSTATUS Status = STATUS_SUCCESS;
- PVOID BaseAddress;
- ULONG RegionSize;
- PMMVAD Vad;
- PMMADDRESS_NODE ParentNode;
- ULONG_PTR StartVpn, EndVpn;
- PHYSICAL_ADDRESS BoundaryAddressMultiple;
- PEPROCESS CurrentProcess = PsGetCurrentProcess();
- KPROCESSOR_MODE PreviousMode = KeGetPreviousMode();
- KAPC_STATE ApcState;
- ULONG ProtectionMask;
- BOOLEAN Attached = FALSE;
- BoundaryAddressMultiple.QuadPart = 0;
- TABLE_SEARCH_RESULT Result;
-
- PAGED_CODE();
+ //
+ // While this is an actual Windows check
+ //
+ ASSERT(FoundVad->u.VadFlags.VadType != VadRotatePhysical);
- /* Check for valid Zero bits */
- if (ZeroBits > 21)
+ //
+ // Throw out attempts to use copy-on-write through this API path
+ //
+ if ((Protect & PAGE_WRITECOPY) || (Protect & PAGE_EXECUTE_WRITECOPY))
{
- DPRINT1("Too many zero bits\n");
- return STATUS_INVALID_PARAMETER_3;
+ DPRINT1("Write copy attempted when not allowed\n");
+ Status = STATUS_INVALID_PAGE_PROTECTION;
+ goto FailPath;
}
- /* Check for valid Allocation Types */
- if ((AllocationType & ~(MEM_COMMIT | MEM_RESERVE | MEM_RESET | MEM_PHYSICAL |
- MEM_TOP_DOWN | MEM_WRITE_WATCH)))
- {
- DPRINT1("Invalid Allocation Type\n");
- return STATUS_INVALID_PARAMETER_5;
- }
+ //
+ // Initialize a demand-zero PTE
+ //
+ TempPte.u.Long = 0;
+ TempPte.u.Soft.Protection = ProtectionMask;
+ NT_ASSERT(TempPte.u.Long != 0);
- /* Check for at least one of these Allocation Types to be set */
- if (!(AllocationType & (MEM_COMMIT | MEM_RESERVE | MEM_RESET)))
- {
- DPRINT1("No memory allocation base type\n");
- return STATUS_INVALID_PARAMETER_5;
- }
+ //
+ // Get the PTE, PDE and the last PTE for this address range
+ //
+ PointerPde = MiAddressToPde(StartingAddress);
+ PointerPte = MiAddressToPte(StartingAddress);
+ LastPte = MiAddressToPte(EndingAddress);
- /* MEM_RESET is an exclusive flag, make sure that is valid too */
- if ((AllocationType & MEM_RESET) && (AllocationType != MEM_RESET))
+ //
+ // Update the commit charge in the VAD as well as in the process, and check
+ // if this commit charge was now higher than the last recorded peak, in which
+ // case we also update the peak
+ //
+ FoundVad->u.VadFlags.CommitCharge += (1 + LastPte - PointerPte);
+ Process->CommitCharge += (1 + LastPte - PointerPte);
+ if (Process->CommitCharge > Process->CommitChargePeak)
{
- DPRINT1("Invalid use of MEM_RESET\n");
- return STATUS_INVALID_PARAMETER_5;
+ Process->CommitChargePeak = Process->CommitCharge;
}
- /* Check if large pages are being used */
- if (AllocationType & MEM_LARGE_PAGES)
+ //
+ // Lock the working set while we play with user pages and page tables
+ //
+ MiLockProcessWorkingSetUnsafe(Process, CurrentThread);
+
+ //
+ // Make the current page table valid, and then loop each page within it
+ //
+ MiMakePdeExistAndMakeValid(PointerPde, Process, MM_NOIRQL);
+ while (PointerPte <= LastPte)
{
- /* Large page allocations MUST be committed */
- if (!(AllocationType & MEM_COMMIT))
+ //
+ // Have we crossed into a new page table?
+ //
+ if (MiIsPteOnPdeBoundary(PointerPte))
{
- DPRINT1("Must supply MEM_COMMIT with MEM_LARGE_PAGES\n");
- return STATUS_INVALID_PARAMETER_5;
+ //
+ // Get the PDE and now make it valid too
+ //
+ PointerPde = MiAddressToPte(PointerPte);
+ MiMakePdeExistAndMakeValid(PointerPde, Process, MM_NOIRQL);
}
- /* These flags are not allowed with large page allocations */
- if (AllocationType & (MEM_PHYSICAL | MEM_RESET | MEM_WRITE_WATCH))
+ //
+ // Is this a zero PTE as expected?
+ //
+ if (PointerPte->u.Long == 0)
{
- DPRINT1("Using illegal flags with MEM_LARGE_PAGES\n");
- return STATUS_INVALID_PARAMETER_5;
+ //
+ // First increment the count of pages in the page table for this
+ // process
+ //
+ MiIncrementPageTableReferences(MiPteToAddress(PointerPte));
+
+ //
+ // And now write the invalid demand-zero PTE as requested
+ //
+ MI_WRITE_INVALID_PTE(PointerPte, TempPte);
+ }
+ else if (PointerPte->u.Long == MmDecommittedPte.u.Long)
+ {
+ //
+ // If the PTE was already decommitted, there is nothing else to do
+ // but to write the new demand-zero PTE
+ //
+ MI_WRITE_INVALID_PTE(PointerPte, TempPte);
+ }
+ else if (!(ChangeProtection) && (Protect != MiGetPageProtection(PointerPte)))
+ {
+ //
+ // We don't handle these scenarios yet
+ //
+ if (PointerPte->u.Soft.Valid == 0)
+ {
+ ASSERT(PointerPte->u.Soft.Prototype == 0);
+ ASSERT(PointerPte->u.Soft.PageFileHigh == 0);
+ }
+
+ //
+ // There's a change in protection, remember this for later, but do
+ // not yet handle it.
+ //
+ ChangeProtection = TRUE;
}
+
+ //
+ // Move to the next PTE
+ //
+ PointerPte++;
}
- /* MEM_WRITE_WATCH can only be used if MEM_RESERVE is also used */
- if ((AllocationType & MEM_WRITE_WATCH) && !(AllocationType & MEM_RESERVE))
+ //
+ // Release the working set lock, unlock the address space, and detach from
+ // the target process if it was not the current process. Also dereference the
+ // target process if this wasn't the case.
+ //
+ MiUnlockProcessWorkingSetUnsafe(Process, CurrentThread);
+ Status = STATUS_SUCCESS;
+FailPath:
+ MmUnlockAddressSpace(AddressSpace);
+
+ if (!NT_SUCCESS(Status))
{
- DPRINT1("MEM_WRITE_WATCH used without MEM_RESERVE\n");
- return STATUS_INVALID_PARAMETER_5;
+ if (Vad != NULL)
+ {
+ ExFreePoolWithTag(Vad, 'SdaV');
+ }
}
- /* MEM_PHYSICAL can only be used if MEM_RESERVE is also used */
- if ((AllocationType & MEM_PHYSICAL) && !(AllocationType & MEM_RESERVE))
+ //
+ // Check if we need to update the protection
+ //
+ if (ChangeProtection)
{
- DPRINT1("MEM_WRITE_WATCH used without MEM_RESERVE\n");
- return STATUS_INVALID_PARAMETER_5;
+ PVOID ProtectBaseAddress = (PVOID)StartingAddress;
+ SIZE_T ProtectSize = PRegionSize;
+ ULONG OldProtection;
+
+ //
+ // Change the protection of the region
+ //
+ MiProtectVirtualMemory(Process,
+ &ProtectBaseAddress,
+ &ProtectSize,
+ Protect,
+ &OldProtection);
}
- /* Check for valid MEM_PHYSICAL usage */
- if (AllocationType & MEM_PHYSICAL)
+FailPathNoLock:
+ if (Attached) KeUnstackDetachProcess(&ApcState);
+ if (ProcessHandle != NtCurrentProcess()) ObDereferenceObject(Process);
+
+ //
+ // Only write back results on success
+ //
+ if (NT_SUCCESS(Status))
{
- /* Only these flags are allowed with MEM_PHYSIAL */
- if (AllocationType & ~(MEM_RESERVE | MEM_TOP_DOWN | MEM_PHYSICAL))
+ //
+ // Use SEH to write back the base address and the region size. In the case
+ // of an exception, we strangely do return back the exception code, even
+ // though the memory *has* been allocated. This mimics Windows behavior and
+ // there is not much we can do about it.
+ //
+ _SEH2_TRY
{
- DPRINT1("Using illegal flags with MEM_PHYSICAL\n");
- return STATUS_INVALID_PARAMETER_5;
+ *URegionSize = PRegionSize;
+ *UBaseAddress = (PVOID)StartingAddress;
}
-
- /* Then make sure PAGE_READWRITE is used */
- if (Protect != PAGE_READWRITE)
+ _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
{
- DPRINT1("MEM_PHYSICAL used without PAGE_READWRITE\n");
- return STATUS_INVALID_PARAMETER_6;
+ Status = _SEH2_GetExceptionCode();
}
+ _SEH2_END;
}
- /* Calculate the protection mask and make sure it's valid */
- ProtectionMask = MiMakeProtectionMask(Protect);
- if (ProtectionMask == MM_INVALID_PROTECTION)
+ return Status;
+}
+
+/*
+ * @implemented
+ */
+NTSTATUS
+NTAPI
+NtFreeVirtualMemory(IN HANDLE ProcessHandle,
+ IN PVOID* UBaseAddress,
+ IN PSIZE_T URegionSize,
+ IN ULONG FreeType)
+{
+ PMEMORY_AREA MemoryArea;
+ SIZE_T PRegionSize;
+ PVOID PBaseAddress;
+ LONG_PTR CommitReduction = 0;
+ ULONG_PTR StartingAddress, EndingAddress;
+ PMMVAD Vad;
+ NTSTATUS Status;
+ PEPROCESS Process;
+ PMMSUPPORT AddressSpace;
+ PETHREAD CurrentThread = PsGetCurrentThread();
+ PEPROCESS CurrentProcess = PsGetCurrentProcess();
+ KPROCESSOR_MODE PreviousMode = KeGetPreviousMode();
+ KAPC_STATE ApcState;
+ BOOLEAN Attached = FALSE;
+ PAGED_CODE();
+
+ //
+ // Only two flags are supported
+ //
+ if (!(FreeType & (MEM_RELEASE | MEM_DECOMMIT)))
{
- DPRINT1("Invalid protection mask\n");
- return STATUS_INVALID_PAGE_PROTECTION;
+ DPRINT1("Invalid FreeType\n");
+ return STATUS_INVALID_PARAMETER_4;
}
- /* Enter SEH */
+ //
+ // Check if no flag was used, or if both flags were used
+ //
+ if (!((FreeType & (MEM_DECOMMIT | MEM_RELEASE))) ||
+ ((FreeType & (MEM_DECOMMIT | MEM_RELEASE)) == (MEM_DECOMMIT | MEM_RELEASE)))
+ {
+ DPRINT1("Invalid FreeType combination\n");
+ return STATUS_INVALID_PARAMETER_4;
+ }
+
+ //
+ // Enter SEH for probe and capture. On failure, return back to the caller
+ // with an exception violation.
+ //
_SEH2_TRY
{
- /* Check for user-mode parameters */
+ //
+ // Check for user-mode parameters and make sure that they are writeable
+ //
if (PreviousMode != KernelMode)
{
- /* Make sure they are writable */
ProbeForWritePointer(UBaseAddress);
ProbeForWriteUlong(URegionSize);
}
- /* Capture their values */
- BaseAddress = *UBaseAddress;
- RegionSize = *URegionSize;
+ //
+ // Capture the current values
+ //
+ PBaseAddress = *UBaseAddress;
+ PRegionSize = *URegionSize;
}
_SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
{
- /* Return the exception code */
_SEH2_YIELD(return _SEH2_GetExceptionCode());
}
_SEH2_END;
-
- /* Make sure there's a size specified */
- if (!RegionSize)
- {
- DPRINT1("Region size is invalid (zero)\n");
- return STATUS_INVALID_PARAMETER_4;
- }
-
- RegionSize = PAGE_ROUND_UP((ULONG_PTR)BaseAddress + RegionSize) -
- PAGE_ROUND_DOWN(BaseAddress);
- BaseAddress = (PVOID)PAGE_ROUND_DOWN(BaseAddress);
- StartVpn = (ULONG_PTR)BaseAddress >> PAGE_SHIFT;
- EndVpn = ((ULONG_PTR)BaseAddress + RegionSize - 1) >> PAGE_SHIFT;
- /* Make sure the allocation isn't past the VAD area */
- if (BaseAddress >= MM_HIGHEST_VAD_ADDRESS)
+ //
+ // Make sure the allocation isn't past the user area
+ //
+ if (PBaseAddress >= MM_HIGHEST_USER_ADDRESS)
{
- DPRINT1("Virtual allocation base above User Space\n");
+ DPRINT1("Virtual free base above User Space\n");
return STATUS_INVALID_PARAMETER_2;
}
- /* Make sure the allocation wouldn't overflow past the VAD area */
- if ((((ULONG_PTR)MM_HIGHEST_VAD_ADDRESS + 1) - (ULONG_PTR)BaseAddress) < RegionSize)
+ //
+ // Make sure the allocation wouldn't overflow past the user area
+ //
+ if (((ULONG_PTR)MM_HIGHEST_USER_ADDRESS - (ULONG_PTR)PBaseAddress) < PRegionSize)
{
DPRINT1("Region size would overflow into kernel-memory\n");
- return STATUS_INVALID_PARAMETER_4;
+ return STATUS_INVALID_PARAMETER_3;
}
- /* Check if this is for the current process */
+ //
+ // If this is for the current process, just use PsGetCurrentProcess
+ //
if (ProcessHandle == NtCurrentProcess())
{
- /* We already have the current process, no need to go through Ob */
Process = CurrentProcess;
}
else
{
- /* Reference the handle for correct permissions */
+ //
+ // Otherwise, reference the process with VM rights and attach to it if
+ // this isn't the current process. We must attach because we'll be touching
+ // PTEs and PDEs that belong to user-mode memory, and also touching the
+ // Working Set which is stored in Hyperspace.
+ //
Status = ObReferenceObjectByHandle(ProcessHandle,
- PROCESS_VM_OPERATION,
- PsProcessType,
- PreviousMode,
- (PVOID*)&Process,
- NULL);
+ PROCESS_VM_OPERATION,
+ PsProcessType,
+ PreviousMode,
+ (PVOID*)&Process,
+ NULL);
if (!NT_SUCCESS(Status)) return Status;
-
- /* Check if not running in the current process */
if (CurrentProcess != Process)
{
- /* Attach to it */
KeStackAttachProcess(&Process->Pcb, &ApcState);
Attached = TRUE;
}
}
- /* Check for large page allocations */
- if (AllocationType & MEM_LARGE_PAGES)
+ //
+ // Lock the address space
+ //
+ AddressSpace = MmGetCurrentAddressSpace();
+ MmLockAddressSpace(AddressSpace);
+
+ //
+ // If the address space is being deleted, fail the de-allocation since it's
+ // too late to do anything about it
+ //
+ if (Process->VmDeleted)
{
- /* The lock memory privilege is required */
- if (!SeSinglePrivilegeCheck(SeLockMemoryPrivilege, PreviousMode))
- {
- /* Fail without it */
- DPRINT1("Privilege not held for MEM_LARGE_PAGES\n");
- if (Attached) KeUnstackDetachProcess(&ApcState);
- if (ProcessHandle != NtCurrentProcess()) ObDereferenceObject(Process);
- return STATUS_PRIVILEGE_NOT_HELD;
- }
+ DPRINT1("Process is dead\n");
+ Status = STATUS_PROCESS_IS_TERMINATING;
+ goto FailPath;
+ }
+
+ //
+ // Compute start and end addresses, and locate the VAD
+ //
+ StartingAddress = (ULONG_PTR)PAGE_ALIGN(PBaseAddress);
+ EndingAddress = ((ULONG_PTR)PBaseAddress + PRegionSize - 1) | (PAGE_SIZE - 1);
+ Vad = MiLocateAddress((PVOID)StartingAddress);
+ if (!Vad)
+ {
+ DPRINT1("Unable to find VAD for address 0x%p\n", StartingAddress);
+ Status = STATUS_MEMORY_NOT_ALLOCATED;
+ goto FailPath;
}
+ //
+ // If the range exceeds the VAD's ending VPN, fail this request
+ //
+ if (Vad->EndingVpn < (EndingAddress >> PAGE_SHIFT))
+ {
+ DPRINT1("Address 0x%p is beyond the VAD\n", EndingAddress);
+ Status = STATUS_UNABLE_TO_FREE_VM;
+ goto FailPath;
+ }
- /*
- * Copy on Write is reserved for system use. This case is a certain failure
- * but there may be other cases...needs more testing
- */
- if ((!BaseAddress || (AllocationType & MEM_RESERVE)) &&
- (Protect & (PAGE_WRITECOPY | PAGE_EXECUTE_WRITECOPY)))
+ //
+ // Only private memory (except rotate VADs) can be freed through here
+ //
+ if ((!(Vad->u.VadFlags.PrivateMemory) &&
+ (Vad->u.VadFlags.VadType != VadRotatePhysical)) ||
+ (Vad->u.VadFlags.VadType == VadDevicePhysicalMemory))
{
- DPRINT1("Copy on write is not supported by VirtualAlloc\n");
- if (Attached) KeUnstackDetachProcess(&ApcState);
- if (ProcessHandle != NtCurrentProcess()) ObDereferenceObject(Process);
- return STATUS_INVALID_PAGE_PROTECTION;
+ DPRINT1("Attempt to free section memory\n");
+ Status = STATUS_UNABLE_TO_DELETE_SECTION;
+ goto FailPath;
}
- Type = (AllocationType & MEM_COMMIT) ? MEM_COMMIT : MEM_RESERVE;
- DPRINT("Type %x\n", Type);
+ //
+ // ARM3 does not yet handle protected VM
+ //
+ ASSERT(Vad->u.VadFlags.NoChange == 0);
+
+ //
+ // Finally, make sure there is a ReactOS Mm MEMORY_AREA for this allocation
+ // and that it is an ARM3 memory area, and not a section view, as we currently
+ // don't support freeing those through this interface.
+ //
+ MemoryArea = MmLocateMemoryAreaByAddress(AddressSpace, (PVOID)StartingAddress);
+ ASSERT(MemoryArea);
+ ASSERT(MemoryArea->Type == MEMORY_AREA_OWNED_BY_ARM3);
- /* Lock the process address space */
- KeAcquireGuardedMutex(&Process->AddressCreationLock);
-
- if(BaseAddress != 0)
+ //
+ // Now we can try the operation. First check if this is a RELEASE or a DECOMMIT
+ //
+ if (FreeType & MEM_RELEASE)
{
- /*
- * An address was provided. Let's see if we've already
- * something there
- */
- if(MiCheckForConflictingNode(StartVpn, EndVpn, &Process->VadRoot) != NULL)
+ //
+ // ARM3 only supports this VAD in this path
+ //
+ ASSERT(Vad->u.VadFlags.VadType == VadNone);
+
+ //
+ // Is the caller trying to remove the whole VAD, or remove only a portion
+ // of it? If no region size is specified, then the assumption is that the
+ // whole VAD is to be destroyed
+ //
+ if (!PRegionSize)
{
- /* Can't reserve twice the same range */
- if(AllocationType & MEM_RESERVE)
+ //
+ // The caller must specify the base address identically to the range
+ // that is stored in the VAD.
+ //
+ if (((ULONG_PTR)PBaseAddress >> PAGE_SHIFT) != Vad->StartingVpn)
{
- Status = STATUS_CONFLICTING_ADDRESSES;
- DPRINT1("Trying to reserve twice the same range.\n");
- goto cleanup;
+ DPRINT1("Address 0x%p does not match the VAD\n", PBaseAddress);
+ Status = STATUS_FREE_VM_NOT_AT_BASE;
+ goto FailPath;
}
- /* Great there's already something there. What shall we do ? */
- if(AllocationType == MEM_RESET)
+
+ //
+ // Now compute the actual start/end addresses based on the VAD
+ //
+ StartingAddress = Vad->StartingVpn << PAGE_SHIFT;
+ EndingAddress = (Vad->EndingVpn << PAGE_SHIFT) | (PAGE_SIZE - 1);
+
+ //
+ // Finally lock the working set and remove the VAD from the VAD tree
+ //
+ MiLockProcessWorkingSetUnsafe(Process, CurrentThread);
+ ASSERT(Process->VadRoot.NumberGenericTableElements >= 1);
+ MiRemoveNode((PMMADDRESS_NODE)Vad, &Process->VadRoot);
+ }
+ else
+ {
+ //
+ // This means the caller wants to release a specific region within
+ // the range. We have to find out which range this is -- the following
+ // possibilities exist plus their union (CASE D):
+ //
+ // STARTING ADDRESS ENDING ADDRESS
+ // [<========][========================================][=========>]
+ // CASE A CASE B CASE C
+ //
+ //
+ // First, check for case A or D
+ //
+ if ((StartingAddress >> PAGE_SHIFT) == Vad->StartingVpn)
{
- UNIMPLEMENTED;
- /* Reset the dirty bits for each PTEs */
- goto cleanup;
+ //
+ // Check for case D
+ //
+ if ((EndingAddress >> PAGE_SHIFT) == Vad->EndingVpn)
+ {
+ //
+ // This is the easiest one to handle -- it is identical to
+ // the code path above when the caller sets a zero region size
+ // and the whole VAD is destroyed
+ //
+ MiLockProcessWorkingSetUnsafe(Process, CurrentThread);
+ ASSERT(Process->VadRoot.NumberGenericTableElements >= 1);
+ MiRemoveNode((PMMADDRESS_NODE)Vad, &Process->VadRoot);
+ }
+ else
+ {
+ //
+ // This case is pretty easy too -- we compute a bunch of
+ // pages to decommit, and then push the VAD's starting address
+ // a bit further down, then decrement the commit charge
+ //
+ // NOT YET IMPLEMENTED IN ARM3.
+ //
+ DPRINT1("Case A not handled\n");
+ Status = STATUS_FREE_VM_NOT_AT_BASE;
+ goto FailPath;
+
+ //
+ // After analyzing the VAD, set it to NULL so that we don't
+ // free it in the exit path
+ //
+ Vad = NULL;
+ }
}
else
{
- ASSERT(AllocationType & MEM_COMMIT);
- UNIMPLEMENTED;
- /* Mark the VAD as committed */
- goto cleanup;
+ //
+ // This is case B or case C. First check for case C
+ //
+ if ((EndingAddress >> PAGE_SHIFT) == Vad->EndingVpn)
+ {
+ PMEMORY_AREA MemoryArea;
+
+ //
+ // This is pretty easy and similar to case A. We compute the
+ // amount of pages to decommit, update the VAD's commit charge
+ // and then change the ending address of the VAD to be a bit
+ // smaller.
+ //
+ MiLockProcessWorkingSetUnsafe(Process, CurrentThread);
+ CommitReduction = MiCalculatePageCommitment(StartingAddress,
+ EndingAddress,
+ Vad,
+ Process);
+ Vad->u.VadFlags.CommitCharge -= CommitReduction;
+ // For ReactOS: shrink the corresponding memory area
+ MemoryArea = MmLocateMemoryAreaByAddress(AddressSpace, (PVOID)StartingAddress);
+ ASSERT(Vad->StartingVpn << PAGE_SHIFT == (ULONG_PTR)MemoryArea->StartingAddress);
+ ASSERT((Vad->EndingVpn + 1) << PAGE_SHIFT == (ULONG_PTR)MemoryArea->EndingAddress);
+ Vad->EndingVpn = ((ULONG_PTR)StartingAddress - 1) >> PAGE_SHIFT;
+ MemoryArea->EndingAddress = (PVOID)(((Vad->EndingVpn + 1) << PAGE_SHIFT) - 1);
+ }
+ else
+ {
+ //
+ // This is case B and the hardest one. Because we are removing
+ // a chunk of memory from the very middle of the VAD, we must
+ // actually split the VAD into two new VADs and compute the
+ // commit charges for each of them, and reinsert new charges.
+ //
+ // NOT YET IMPLEMENTED IN ARM3.
+ //
+ DPRINT1("Case B not handled\n");
+ Status = STATUS_FREE_VM_NOT_AT_BASE;
+ goto FailPath;
+ }
+
+ //
+ // After analyzing the VAD, set it to NULL so that we don't
+ // free it in the exit path
+ //
+ Vad = NULL;
}
}
-
- /* There's nothing */
- if(!(AllocationType & MEM_RESERVE))
+
+ //
+ // Now we have a range of pages to dereference, so call the right API
+ // to do that and then release the working set, since we're done messing
+ // around with process pages.
+ //
+ MiDeleteVirtualAddresses(StartingAddress, EndingAddress, NULL);
+ MiUnlockProcessWorkingSetUnsafe(Process, CurrentThread);
+ Status = STATUS_SUCCESS;
+
+FinalPath:
+ //
+ // Update the process counters
+ //
+ PRegionSize = EndingAddress - StartingAddress + 1;
+ Process->CommitCharge -= CommitReduction;
+ if (FreeType & MEM_RELEASE) Process->VirtualSize -= PRegionSize;
+
+ //
+ // Unlock the address space and free the VAD in failure cases. Next,
+ // detach from the target process so we can write the region size and the
+ // base address to the correct source process, and dereference the target
+ // process.
+ //
+ MmUnlockAddressSpace(AddressSpace);
+ if (Vad) ExFreePool(Vad);
+ if (Attached) KeUnstackDetachProcess(&ApcState);
+ if (ProcessHandle != NtCurrentProcess()) ObDereferenceObject(Process);
+
+ //
+ // Use SEH to safely return the region size and the base address of the
+ // deallocation. If we get an access violation, don't return a failure code
+ // as the deallocation *has* happened. The caller will just have to figure
+ // out another way to find out where it is (such as VirtualQuery).
+ //
+ _SEH2_TRY
+ {
+ *URegionSize = PRegionSize;
+ *UBaseAddress = (PVOID)StartingAddress;
+ }
+ _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
{
- Status = STATUS_ACCESS_DENIED;
- goto cleanup;
}
-
- /* Now we can reserve our chunk of memory */
- goto buildVad;
+ _SEH2_END;
+ return Status;
}
-
- /* No base address was given. */
- if(!(AllocationType & MEM_RESERVE))
+
+ //
+ // This is the decommit path. You cannot decommit from the following VADs in
+ // Windows, so fail the call
+ //
+ if ((Vad->u.VadFlags.VadType == VadAwe) ||
+ (Vad->u.VadFlags.VadType == VadLargePages) ||
+ (Vad->u.VadFlags.VadType == VadRotatePhysical))
{
- DPRINT1("Providing NULL base address witout MEM_RESERVE.\n");
- ASSERT(FALSE);
- Status = STATUS_INVALID_PARAMETER_5;
- goto cleanup;
+ DPRINT1("Trying to decommit from invalid VAD\n");
+ Status = STATUS_MEMORY_NOT_ALLOCATED;
+ goto FailPath;
}
-
- /* Find an empty range in Address Space */
- if(AllocationType & MEM_TOP_DOWN)
+
+ //
+ // If the caller did not specify a region size, first make sure that this
+ // region is actually committed. If it is, then compute the ending address
+ // based on the VAD.
+ //
+ if (!PRegionSize)
{
- /* Top down allocation */
- Result = MiFindEmptyAddressRangeDownTree(RegionSize,
- (ULONG_PTR)MM_HIGHEST_VAD_ADDRESS,
- (ZeroBits > PAGE_SHIFT) ? 1 << ZeroBits : PAGE_SIZE,
- &Process->VadRoot,
- (PULONG_PTR)&BaseAddress,
- &ParentNode);
-
- if(Result == TableFoundNode)
+ if (((ULONG_PTR)PBaseAddress >> PAGE_SHIFT) != Vad->StartingVpn)
{
- /* This means failure */
- Status = STATUS_NO_MEMORY;
- goto cleanup;
+ DPRINT1("Decomitting non-committed memory\n");
+ Status = STATUS_FREE_VM_NOT_AT_BASE;
+ goto FailPath;
}
+ EndingAddress = (Vad->EndingVpn << PAGE_SHIFT) | (PAGE_SIZE - 1);
}
- else
- {
- /* Good old bottom up allocation */
- Status = MiFindEmptyAddressRangeInTree(RegionSize,
- (ZeroBits > PAGE_SHIFT) ? 1 << ZeroBits : PAGE_SIZE,
- &Process->VadRoot,
- &ParentNode,
- (PULONG_PTR)&BaseAddress);
- if(!NT_SUCCESS(Status))
- {
- /* Failed... */
- goto cleanup;
- }
- }
- StartVpn = (ULONG_PTR)BaseAddress >> PAGE_SHIFT;
- EndVpn = ((ULONG_PTR)BaseAddress + RegionSize - 1) >> PAGE_SHIFT;
-
- /* Build the Vad */
-buildVad:
- Vad = ExAllocatePoolWithTag(NonPagedPool, sizeof(MMVAD), TAG_MVAD);
- if(!Vad)
- {
- Status = STATUS_INSUFFICIENT_RESOURCES;
- goto cleanup;
- }
- RtlZeroMemory(Vad, sizeof(MMVAD));
-
- /* Set min/max */
- Vad->StartingVpn = StartVpn;
- Vad->EndingVpn = EndVpn;
- /* Set protection */
- Vad->u.VadFlags.Protection = ProtectionMask;
- /* Should it be already marked as committed ? */
- if(AllocationType & MEM_COMMIT)
- Vad->u.VadFlags.MemCommit = 1;
- if(AllocationType & MEM_PHYSICAL)
- {
- UNIMPLEMENTED;
- Vad->u.VadFlags.VadType = VadAwe;
- }
- /* Add it */
- MiLockProcessWorkingSet(Process, PsGetCurrentThread());
- MiInsertVad(Vad, Process);
- MiUnlockProcessWorkingSet(Process, PsGetCurrentThread());
-
- /* we're done */
-cleanup:
- KeReleaseGuardedMutex(&Process->AddressCreationLock);
- if (Attached) KeUnstackDetachProcess(&ApcState);
- if (ProcessHandle != NtCurrentProcess()) ObDereferenceObject(Process);
- *UBaseAddress = BaseAddress;
- *URegionSize = RegionSize;
- DPRINT("*UBaseAddress %x *URegionSize %x\n", BaseAddress, RegionSize);
+ //
+ // Decommit the PTEs for the range plus the actual backing pages for the
+ // range, then reduce that amount from the commit charge in the VAD
+ //
+ CommitReduction = MiAddressToPte(EndingAddress) -
+ MiAddressToPte(StartingAddress) +
+ 1 -
+ MiDecommitPages((PVOID)StartingAddress,
+ MiAddressToPte(EndingAddress),
+ Process,
+ Vad);
+ ASSERT(CommitReduction >= 0);
+ Vad->u.VadFlags.CommitCharge -= CommitReduction;
+ ASSERT(Vad->u.VadFlags.CommitCharge >= 0);
+
+ //
+ // We are done, go to the exit path without freeing the VAD as it remains
+ // valid since we have not released the allocation.
+ //
+ Vad = NULL;
+ Status = STATUS_SUCCESS;
+ goto FinalPath;
+ //
+ // In the failure path, we detach and dereference the target process, and
+ // return whatever failure code was sent.
+ //
+FailPath:
+ MmUnlockAddressSpace(AddressSpace);
+ if (Attached) KeUnstackDetachProcess(&ApcState);
+ if (ProcessHandle != NtCurrentProcess()) ObDereferenceObject(Process);
return Status;
}
+
+
+PHYSICAL_ADDRESS
+NTAPI
+MmGetPhysicalAddress(PVOID Address)
+{
+ PHYSICAL_ADDRESS PhysicalAddress;
+ MMPDE TempPde;
+ MMPTE TempPte;
+
+ /* Check if the PXE/PPE/PDE is valid */
+ if (
+#if (_MI_PAGING_LEVELS == 4)
+ (MiAddressToPxe(Address)->u.Hard.Valid) &&
#endif
+#if (_MI_PAGING_LEVELS >= 3)
+ (MiAddressToPpe(Address)->u.Hard.Valid) &&
+#endif
+ (MiAddressToPde(Address)->u.Hard.Valid))
+ {
+ /* Check for large pages */
+ TempPde = *MiAddressToPde(Address);
+ if (TempPde.u.Hard.LargePage)
+ {
+ /* Physical address is base page + large page offset */
+ PhysicalAddress.QuadPart = (ULONG64)TempPde.u.Hard.PageFrameNumber << PAGE_SHIFT;
+ PhysicalAddress.QuadPart += ((ULONG_PTR)Address & (PAGE_SIZE * PTE_PER_PAGE - 1));
+ return PhysicalAddress;
+ }
+
+ /* Check if the PTE is valid */
+ TempPte = *MiAddressToPte(Address);
+ if (TempPte.u.Hard.Valid)
+ {
+ /* Physical address is base page + page offset */
+ PhysicalAddress.QuadPart = (ULONG64)TempPte.u.Hard.PageFrameNumber << PAGE_SHIFT;
+ PhysicalAddress.QuadPart += ((ULONG_PTR)Address & (PAGE_SIZE - 1));
+ return PhysicalAddress;
+ }
+ }
+
+ KeRosDumpStackFrames(NULL, 20);
+ DPRINT1("MM:MmGetPhysicalAddressFailed base address was %p\n", Address);
+ PhysicalAddress.QuadPart = 0;
+ return PhysicalAddress;
+}
+
+
/* EOF */