*/
/* INCLUDES *******************************************************************/
+/* So long, and Thanks for All the Fish */
#include <ntoskrnl.h>
#define NDEBUG
IN ULONG NewAccessProtection,
OUT PULONG OldAccessProtection OPTIONAL);
+VOID
+NTAPI
+MiFlushTbAndCapture(IN PMMVAD FoundVad,
+ IN PMMPTE PointerPte,
+ IN ULONG ProtectionMask,
+ IN PMMPFN Pfn1,
+ IN BOOLEAN CaptureDirtyBit);
+
+
/* PRIVATE FUNCTIONS **********************************************************/
+/* Count how many pages are committed inside [StartingAddress, EndingAddress]
+   within the given VAD of the given process. For a MemCommit VAD the whole
+   range is assumed committed and explicitly decommitted PTEs are subtracted;
+   otherwise only PTEs proven committed are counted. */
+ULONG
+NTAPI
+MiCalculatePageCommitment(IN ULONG_PTR StartingAddress,
+                          IN ULONG_PTR EndingAddress,
+                          IN PMMVAD Vad,
+                          IN PEPROCESS Process)
+{
+    PMMPTE PointerPte, LastPte, PointerPde;
+    ULONG CommittedPages;
+
+    /* Compute starting and ending PTE and PDE addresses */
+    PointerPde = MiAddressToPde(StartingAddress);
+    PointerPte = MiAddressToPte(StartingAddress);
+    LastPte = MiAddressToPte(EndingAddress);
+
+    /* Handle committed pages first */
+    if (Vad->u.VadFlags.MemCommit == 1)
+    {
+        /* This is a committed VAD, so assume the whole range is committed */
+        CommittedPages = BYTES_TO_PAGES(EndingAddress - StartingAddress);
+
+        /* Is the PDE demand-zero? (on the self-mapped page tables, taking
+           MiAddressToPte of a PTE address yields that PTE's PDE) */
+        PointerPde = MiAddressToPte(PointerPte);
+        if (PointerPde->u.Long != 0)
+        {
+            /* It is not. Is it valid? */
+            if (PointerPde->u.Hard.Valid == 0)
+            {
+                /* Fault it in */
+                PointerPte = MiPteToAddress(PointerPde);
+                MiMakeSystemAddressValid(PointerPte, Process);
+            }
+        }
+        else
+        {
+            /* It is, skip it and move to the next PDE, unless we're done */
+            PointerPde++;
+            PointerPte = MiPteToAddress(PointerPde);
+            if (PointerPte > LastPte) return CommittedPages;
+        }
+
+        /* Now loop all the PTEs in the range */
+        while (PointerPte <= LastPte)
+        {
+            /* Have we crossed a PDE boundary? */
+            if (MiIsPteOnPdeBoundary(PointerPte))
+            {
+                /* Is this PDE demand zero? */
+                PointerPde = MiAddressToPte(PointerPte);
+                if (PointerPde->u.Long != 0)
+                {
+                    /* It isn't -- is it valid? */
+                    if (PointerPde->u.Hard.Valid == 0)
+                    {
+                        /* Nope, fault it in */
+                        PointerPte = MiPteToAddress(PointerPde);
+                        MiMakeSystemAddressValid(PointerPte, Process);
+                    }
+                }
+                else
+                {
+                    /* It is, skip it and move to the next PDE */
+                    PointerPde++;
+                    PointerPte = MiPteToAddress(PointerPde);
+                    continue;
+                }
+            }
+
+            /* Is this PTE demand zero? */
+            if (PointerPte->u.Long != 0)
+            {
+                /* It isn't -- is it a decommitted, invalid, or faulted PTE? */
+                if ((PointerPte->u.Soft.Protection == MM_DECOMMIT) &&
+                    (PointerPte->u.Hard.Valid == 0) &&
+                    ((PointerPte->u.Soft.Prototype == 0) ||
+                     (PointerPte->u.Soft.PageFileHigh == MI_PTE_LOOKUP_NEEDED)))
+                {
+                    /* It is, so remove it from the count of committed pages */
+                    CommittedPages--;
+                }
+            }
+
+            /* Move to the next PTE */
+            PointerPte++;
+        }
+
+        /* Return how many committed pages there still are */
+        return CommittedPages;
+    }
+
+    /* This is a non-committed VAD, so assume none of it is committed */
+    CommittedPages = 0;
+
+    /* Is the PDE demand-zero? */
+    PointerPde = MiAddressToPte(PointerPte);
+    if (PointerPde->u.Long != 0)
+    {
+        /* It isn't -- is it invalid? */
+        if (PointerPde->u.Hard.Valid == 0)
+        {
+            /* It is, so page it in */
+            PointerPte = MiPteToAddress(PointerPde);
+            MiMakeSystemAddressValid(PointerPte, Process);
+        }
+    }
+    else
+    {
+        /* It is, so skip it and move to the next PDE */
+        PointerPde++;
+        PointerPte = MiPteToAddress(PointerPde);
+        if (PointerPte > LastPte) return CommittedPages;
+    }
+
+    /* Loop all the PTEs in this PDE */
+    while (PointerPte <= LastPte)
+    {
+        /* Have we crossed a PDE boundary? */
+        if (MiIsPteOnPdeBoundary(PointerPte))
+        {
+            /* Is this new PDE demand-zero? */
+            PointerPde = MiAddressToPte(PointerPte);
+            if (PointerPde->u.Long != 0)
+            {
+                /* It isn't. Is it valid? */
+                if (PointerPde->u.Hard.Valid == 0)
+                {
+                    /* It isn't, so make it valid */
+                    PointerPte = MiPteToAddress(PointerPde);
+                    MiMakeSystemAddressValid(PointerPte, Process);
+                }
+            }
+            else
+            {
+                /* It is, so skip it and move to the next one */
+                PointerPde++;
+                PointerPte = MiPteToAddress(PointerPde);
+                continue;
+            }
+        }
+
+        /* Is this PTE demand-zero? */
+        if (PointerPte->u.Long != 0)
+        {
+            /* Nope. Is it a valid, non-decommitted, non-paged-out PTE? */
+            if ((PointerPte->u.Soft.Protection != MM_DECOMMIT) ||
+                (PointerPte->u.Hard.Valid == 1) ||
+                ((PointerPte->u.Soft.Prototype == 1) &&
+                 (PointerPte->u.Soft.PageFileHigh != MI_PTE_LOOKUP_NEEDED)))
+            {
+                /* It is! So we'll treat this as a committed page */
+                CommittedPages++;
+            }
+        }
+
+        /* Move to the next PTE */
+        PointerPte++;
+    }
+
+    /* Return how many committed pages we found in this VAD */
+    return CommittedPages;
+}
+
ULONG
NTAPI
MiMakeSystemAddressValid(IN PVOID PageTableVirtualAddress,
IN PEPROCESS CurrentProcess)
{
NTSTATUS Status;
- BOOLEAN LockChange = FALSE;
+ BOOLEAN WsShared = FALSE, WsSafe = FALSE, LockChange = FALSE;
+ PETHREAD CurrentThread = PsGetCurrentThread();
/* Must be a non-pool page table, since those are double-mapped already */
ASSERT(PageTableVirtualAddress > MM_HIGHEST_USER_ADDRESS);
/* Check if the page table is valid */
while (!MmIsAddressValid(PageTableVirtualAddress))
{
+ /* Release the working set lock */
+ MiUnlockProcessWorkingSetForFault(CurrentProcess,
+ CurrentThread,
+ &WsSafe,
+ &WsShared);
+
/* Fault it in */
Status = MmAccessFault(FALSE, PageTableVirtualAddress, KernelMode, NULL);
if (!NT_SUCCESS(Status))
(ULONG_PTR)PageTableVirtualAddress);
}
+ /* Lock the working set again */
+ MiLockProcessWorkingSetForFault(CurrentProcess,
+ CurrentThread,
+ WsSafe,
+ WsShared);
+
/* This flag will be useful later when we do better locking */
LockChange = TRUE;
}
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
/* Destroy the PTE */
- PointerPte->u.Long = 0;
+ MI_ERASE_PTE(PointerPte);
+ }
+ else
+ {
+ /*
+ * The only other ARM3 possibility is a demand zero page, which would
+ * mean freeing some of the paged pool pages that haven't even been
+ * touched yet, as part of a larger allocation.
+ *
+ * Right now, we shouldn't expect any page file information in the PTE
+ */
+ ASSERT(PointerPte->u.Soft.PageFileHigh == 0);
+
+ /* Destroy the PTE */
+ MI_ERASE_PTE(PointerPte);
}
/* Actual legitimate pages */
ActualPages++;
}
- else
- {
- /*
- * The only other ARM3 possibility is a demand zero page, which would
- * mean freeing some of the paged pool pages that haven't even been
- * touched yet, as part of a larger allocation.
- *
- * Right now, we shouldn't expect any page file information in the PTE
- */
- ASSERT(PointerPte->u.Soft.PageFileHigh == 0);
-
- /* Destroy the PTE */
- PointerPte->u.Long = 0;
- }
/* Keep going */
PointerPte++;
}
/* Destroy the PTE and flush the TLB */
- PointerPte->u.Long = 0;
+ MI_ERASE_PTE(PointerPte);
KeFlushCurrentTb();
}
KIRQL OldIrql;
BOOLEAN AddressGap = FALSE;
PSUBSECTION Subsection;
- PUSHORT UsedPageTableEntries;
/* Get out if this is a fake VAD, RosMm will free the marea pages */
if ((Vad) && (Vad->u.VadFlags.Spare == 1)) return;
/* Now we should have a valid PDE, mapped in, and still have some VA */
ASSERT(PointerPde->u.Hard.Valid == 1);
ASSERT(Va <= EndingAddress);
- UsedPageTableEntries = &MmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Va)];
/* Check if this is a section VAD with gaps in it */
if ((AddressGap) && (LastPrototypePte))
TempPte = *PointerPte;
if (TempPte.u.Long)
{
- DPRINT("Decrement used PTEs by address: %lx\n", Va);
- (*UsedPageTableEntries)--;
- ASSERT((*UsedPageTableEntries) < PTE_COUNT);
- DPRINT("Refs: %lx\n", (*UsedPageTableEntries));
+ MiDecrementPageTableReferences((PVOID)Va);
/* Check if the PTE is actually mapped in */
- if (TempPte.u.Long & 0xFFFFFC01)
+ if (MI_IS_MAPPED_PTE(&TempPte))
{
/* Are we dealing with section VAD? */
if ((LastPrototypePte) && (PrototypePte > LastPrototypePte))
(TempPte.u.Soft.Prototype == 1))
{
/* Just nuke it */
- PointerPte->u.Long = 0;
+ MI_ERASE_PTE(PointerPte);
}
else
{
else
{
/* The PTE was never mapped, just nuke it here */
- PointerPte->u.Long = 0;
+ MI_ERASE_PTE(PointerPte);
}
}
/* The PDE should still be valid at this point */
ASSERT(PointerPde->u.Hard.Valid == 1);
- DPRINT("Should check if handles for: %p are zero (PDE: %lx)\n", Va, PointerPde->u.Hard.PageFrameNumber);
- if (!(*UsedPageTableEntries))
+ /* Check remaining PTE count (go back 1 page due to above loop) */
+ if (MiQueryPageTableReferences((PVOID)(Va - PAGE_SIZE)) == 0)
{
- DPRINT("They are!\n");
if (PointerPde->u.Long != 0)
{
- DPRINT("PDE active: %lx in %16s\n", PointerPde->u.Hard.PageFrameNumber, CurrentProcess->ImageFileName);
-
/* Delete the PTE proper */
MiDeletePte(PointerPde,
MiPteToAddress(PointerPde),
//
// Return the error
//
- return STATUS_WORKING_SET_QUOTA;
+ _SEH2_YIELD(return STATUS_WORKING_SET_QUOTA);
}
//
//
// Check if we had allocated pool
//
- if (HavePoolAddress) ExFreePool(PoolAddress);
+ if (HavePoolAddress) ExFreePoolWithTag(PoolAddress, 'VmRw');
//
// Check if we failed during the probe
//
// Check if we had allocated pool
//
- if (HavePoolAddress) ExFreePool(PoolAddress);
+ if (HavePoolAddress) ExFreePoolWithTag(PoolAddress, 'VmRw');
//
// All bytes read
/* If we get here, the PTE is valid, so look up the page in PFN database */
Pfn = MiGetPfnEntry(TempPte.u.Hard.PageFrameNumber);
-
if (!Pfn->u3.e1.PrototypePte)
{
/* Return protection of the original pte */
+ ASSERT(Pfn->u4.AweAllocation == 0);
return MmProtectToValue[Pfn->OriginalPte.u.Soft.Protection];
}
- /* This is hardware PTE */
- UNIMPLEMENTED;
- ASSERT(FALSE);
-
- return PAGE_NOACCESS;
+ /* This is software PTE */
+ DPRINT1("Prototype PTE: %lx %p\n", TempPte.u.Hard.PageFrameNumber, Pfn);
+ DPRINT1("VA: %p\n", MiPteToAddress(&TempPte));
+ DPRINT1("Mask: %lx\n", TempPte.u.Soft.Protection);
+ DPRINT1("Mask2: %lx\n", Pfn->OriginalPte.u.Soft.Protection);
+ return MmProtectToValue[TempPte.u.Soft.Protection];
}
ULONG
OUT PVOID *NextVa)
{
- PMMPTE PointerPte;
+ PMMPTE PointerPte, ProtoPte;
PMMPDE PointerPde;
- MMPTE TempPte;
+ MMPTE TempPte, TempProtoPte;
BOOLEAN DemandZeroPte = TRUE, ValidPte = FALSE;
- ULONG State = MEM_RESERVE, Protect = 0, LockChange;
+ ULONG State = MEM_RESERVE, Protect = 0;
ASSERT((Vad->StartingVpn <= ((ULONG_PTR)Va >> PAGE_SHIFT)) &&
(Vad->EndingVpn >= ((ULONG_PTR)Va >> PAGE_SHIFT)));
/* Return the next range */
*NextVa = (PVOID)((ULONG_PTR)Va + PAGE_SIZE);
- /* Loop to make sure the PDE is valid */
- do
+ /* Is the PDE demand-zero? */
+ if (PointerPde->u.Long != 0)
{
- /* Try again */
- LockChange = 0;
-
- /* Is the PDE empty? */
- if (!PointerPde->u.Long)
- {
- /* No address in this range used yet, move to the next PDE range */
- *NextVa = MiPdeToAddress(PointerPde + 1);
- break;
- }
-
- /* The PDE is not empty, but is it faulted in? */
- if (!PointerPde->u.Hard.Valid)
+ /* It is not. Is it valid? */
+ if (PointerPde->u.Hard.Valid == 0)
{
- /* It isn't, go ahead and do the fault */
- LockChange = MiMakeSystemAddressValid(MiPdeToPte(PointerPde),
- TargetProcess);
+ /* Is isn't, fault it in */
+ PointerPte = MiPteToAddress(PointerPde);
+ MiMakeSystemAddressValid(PointerPte, TargetProcess);
+ ValidPte = TRUE;
}
-
- /* Check if the PDE was faulted in, making the PTE readable */
- if (!LockChange) ValidPte = TRUE;
- } while (LockChange);
+ }
+ else
+ {
+ /* It is, skip it and move to the next PDE */
+ *NextVa = MiPdeToAddress(PointerPde + 1);
+ }
/* Is it safe to try reading the PTE? */
if (ValidPte)
{
/* FIXME: watch out for large pages */
+ ASSERT(PointerPde->u.Hard.LargePage == FALSE);
/* Capture the PTE */
TempPte = *PointerPte;
- if (TempPte.u.Long)
+ if (TempPte.u.Long != 0)
{
/* The PTE is valid, so it's not zeroed out */
DemandZeroPte = FALSE;
- /* Check if it's valid or has a valid protection mask */
- ASSERT(TempPte.u.Soft.Prototype == 0);
- if ((TempPte.u.Soft.Protection != MM_DECOMMIT) ||
- (TempPte.u.Hard.Valid == 1))
+ /* Is it a decommited, invalid, or faulted PTE? */
+ if ((TempPte.u.Soft.Protection == MM_DECOMMIT) &&
+ (TempPte.u.Hard.Valid == 0) &&
+ ((TempPte.u.Soft.Prototype == 0) ||
+ (TempPte.u.Soft.PageFileHigh == MI_PTE_LOOKUP_NEEDED)))
+ {
+ /* Otherwise our defaults should hold */
+ ASSERT(Protect == 0);
+ ASSERT(State == MEM_RESERVE);
+ }
+ else
{
/* This means it's committed */
State = MEM_COMMIT;
+ /* We don't support these */
+ ASSERT(Vad->u.VadFlags.VadType != VadDevicePhysicalMemory);
+ ASSERT(Vad->u.VadFlags.VadType != VadRotatePhysical);
+ ASSERT(Vad->u.VadFlags.VadType != VadAwe);
+
/* Get protection state of this page */
Protect = MiGetPageProtection(PointerPte);
- }
- else
- {
- /* Otherwise our defaults should hold */
- ASSERT(Protect == 0);
- ASSERT(State == MEM_RESERVE);
+
+ /* Check if this is an image-backed VAD */
+ if ((TempPte.u.Soft.Valid == 0) &&
+ (TempPte.u.Soft.Prototype == 1) &&
+ (Vad->u.VadFlags.PrivateMemory == 0) &&
+ (Vad->ControlArea))
+ {
+ DPRINT1("Not supported\n");
+ ASSERT(FALSE);
+ }
}
}
}
/* Check if this was a demand-zero PTE, since we need to find the state */
if (DemandZeroPte)
{
- /* Check if the VAD is for committed memory */
- if (Vad->u.VadFlags.MemCommit)
+ /* Not yet handled */
+ ASSERT(Vad->u.VadFlags.VadType != VadDevicePhysicalMemory);
+ ASSERT(Vad->u.VadFlags.VadType != VadAwe);
+
+ /* Check if this is private commited memory, or an section-backed VAD */
+ if ((Vad->u.VadFlags.PrivateMemory == 0) && (Vad->ControlArea))
+ {
+ /* Tell caller about the next range */
+ *NextVa = (PVOID)((ULONG_PTR)Va + PAGE_SIZE);
+
+ /* Get the prototype PTE for this VAD */
+ ProtoPte = MI_GET_PROTOTYPE_PTE_FOR_VPN(Vad,
+ (ULONG_PTR)Va >> PAGE_SHIFT);
+ if (ProtoPte)
+ {
+ /* We should unlock the working set, but it's not being held! */
+
+ /* Is the prototype PTE actually valid (committed)? */
+ TempProtoPte = *ProtoPte;
+ if (TempProtoPte.u.Long)
+ {
+ /* Unless this is a memory-mapped file, handle it like private VAD */
+ State = MEM_COMMIT;
+ ASSERT(Vad->u.VadFlags.VadType != VadImageMap);
+ Protect = MmProtectToValue[Vad->u.VadFlags.Protection];
+ }
+
+ /* We should re-lock the working set */
+ }
+ }
+ else if (Vad->u.VadFlags.MemCommit)
{
/* This is committed memory */
State = MEM_COMMIT;
KeStackAttachProcess(&TargetProcess->Pcb, &ApcState);
}
+ /* Lock the address space and make sure the process isn't already dead */
+ MmLockAddressSpace(&TargetProcess->Vm);
+ if (TargetProcess->VmDeleted)
+ {
+ /* Unlock the address space of the process */
+ MmUnlockAddressSpace(&TargetProcess->Vm);
+
+ /* Check if we were attached */
+ if (ProcessHandle != NtCurrentProcess())
+ {
+ /* Detach and dereference the process */
+ KeUnstackDetachProcess(&ApcState);
+ ObDereferenceObject(TargetProcess);
+ }
+
+ /* Bail out */
+ DPRINT1("Process is dying\n");
+ return STATUS_PROCESS_IS_TERMINATING;
+ }
+
/* Loop the VADs */
ASSERT(TargetProcess->VadRoot.NumberGenericTableElements);
if (TargetProcess->VadRoot.NumberGenericTableElements)
MemoryInfo.RegionSize = (PCHAR)MM_HIGHEST_VAD_ADDRESS + 1 - (PCHAR)Address;
}
+ /* Unlock the address space of the process */
+ MmUnlockAddressSpace(&TargetProcess->Vm);
+
/* Check if we were attached */
if (ProcessHandle != NtCurrentProcess())
{
return Status;
}
- /* This must be a VM VAD */
- ASSERT(Vad->u.VadFlags.PrivateMemory);
-
- /* Lock the address space of the process */
- MmLockAddressSpace(&TargetProcess->Vm);
+ /* Set the correct memory type based on what kind of VAD this is */
+ if ((Vad->u.VadFlags.PrivateMemory) ||
+ (Vad->u.VadFlags.VadType == VadRotatePhysical))
+ {
+ MemoryInfo.Type = MEM_PRIVATE;
+ }
+ else if (Vad->u.VadFlags.VadType == VadImageMap)
+ {
+ MemoryInfo.Type = MEM_IMAGE;
+ }
+ else
+ {
+ MemoryInfo.Type = MEM_MAPPED;
+ }
/* Find the memory area the specified address belongs to */
MemoryArea = MmLocateMemoryAreaByAddress(&TargetProcess->Vm, BaseAddress);
if (MemoryArea->Type == MEMORY_AREA_SECTION_VIEW)
{
Status = MmQuerySectionView(MemoryArea, BaseAddress, &MemoryInfo, &ResultLength);
- ASSERT(NT_SUCCESS(Status));
+ if (!NT_SUCCESS(Status))
+ {
+ DPRINT1("MmQuerySectionView failed. MemoryArea=%p (%p-%p), BaseAddress=%p\n",
+ MemoryArea, MemoryArea->StartingAddress, MemoryArea->EndingAddress, BaseAddress);
+ NT_ASSERT(NT_SUCCESS(Status));
+ }
}
else
{
ObDereferenceObject(TargetProcess);
}
- /* Return the data, NtQueryInformation already probed it*/
+ /* Return the data, NtQueryInformation already probed it */
if (PreviousMode != KernelMode)
{
_SEH2_TRY
return Status;
}
+/* Returns TRUE if every page in [StartingAddress, EndingAddress] is
+   committed, either explicitly through its PTE or implicitly through a
+   MemCommit VAD; returns FALSE as soon as a hole or decommitted PTE is
+   found. */
+BOOLEAN
+NTAPI
+MiIsEntireRangeCommitted(IN ULONG_PTR StartingAddress,
+                         IN ULONG_PTR EndingAddress,
+                         IN PMMVAD Vad,
+                         IN PEPROCESS Process)
+{
+    PMMPTE PointerPte, LastPte, PointerPde;
+    BOOLEAN OnBoundary = TRUE;
+    PAGED_CODE();
+
+    /* Get the PDE and PTE addresses */
+    PointerPde = MiAddressToPde(StartingAddress);
+    PointerPte = MiAddressToPte(StartingAddress);
+    LastPte = MiAddressToPte(EndingAddress);
+
+    /* Loop all the PTEs */
+    while (PointerPte <= LastPte)
+    {
+        /* Check if we've hit a new PDE boundary */
+        if (OnBoundary)
+        {
+            /* Is this PDE demand zero? */
+            PointerPde = MiAddressToPte(PointerPte);
+            if (PointerPde->u.Long != 0)
+            {
+                /* It isn't -- is it valid? */
+                if (PointerPde->u.Hard.Valid == 0)
+                {
+                    /* Nope, fault it in */
+                    PointerPte = MiPteToAddress(PointerPde);
+                    MiMakeSystemAddressValid(PointerPte, Process);
+                }
+            }
+            else
+            {
+                /* The PDE is demand-zero, skip its whole PTE range */
+                PointerPde++;
+                PointerPte = MiPteToAddress(PointerPde);
+
+                /* Is the entire VAD committed? If not, fail */
+                if (!Vad->u.VadFlags.MemCommit) return FALSE;
+
+                /* Everything is committed so far past the range, return true */
+                if (PointerPte > LastPte) return TRUE;
+
+                /* Restart the loop: the new PTE sits on a PDE boundary, and
+                   its PDE must be checked (and possibly faulted in) before we
+                   dereference the PTE. OnBoundary is still TRUE here. */
+                continue;
+            }
+        }
+
+        /* Is the PTE demand zero? */
+        if (PointerPte->u.Long == 0)
+        {
+            /* Is the entire VAD committed? If not, fail */
+            if (!Vad->u.VadFlags.MemCommit) return FALSE;
+        }
+        else
+        {
+            /* It isn't -- is it a decommitted, invalid, or faulted PTE? */
+            if ((PointerPte->u.Soft.Protection == MM_DECOMMIT) &&
+                (PointerPte->u.Hard.Valid == 0) &&
+                ((PointerPte->u.Soft.Prototype == 0) ||
+                 (PointerPte->u.Soft.PageFileHigh == MI_PTE_LOOKUP_NEEDED)))
+            {
+                /* Then part of the range is decommitted, so fail */
+                return FALSE;
+            }
+        }
+
+        /* Move to the next PTE */
+        PointerPte++;
+        OnBoundary = MiIsPteOnPdeBoundary(PointerPte);
+    }
+
+    /* All PTEs seem valid, and no VAD checks failed, the range is okay */
+    return TRUE;
+}
+
+/* ReactOS-specific fallback: applies a protection change to a section-view
+   memory area through the legacy Mm path. Page-aligns the requested range
+   and returns the rounded values through the in/out parameters. */
+NTSTATUS
+NTAPI
+MiRosProtectVirtualMemory(IN PEPROCESS Process,
+                          IN OUT PVOID *BaseAddress,
+                          IN OUT PSIZE_T NumberOfBytesToProtect,
+                          IN ULONG NewAccessProtection,
+                          OUT PULONG OldAccessProtection OPTIONAL)
+{
+    PMEMORY_AREA MemoryArea;
+    PMMSUPPORT AddressSpace;
+    ULONG OldAccessProtection_;
+    NTSTATUS Status;
+
+    /* Round the end of the range up and the base down to page boundaries */
+    *NumberOfBytesToProtect = PAGE_ROUND_UP((ULONG_PTR)(*BaseAddress) + (*NumberOfBytesToProtect)) - PAGE_ROUND_DOWN(*BaseAddress);
+    *BaseAddress = (PVOID)PAGE_ROUND_DOWN(*BaseAddress);
+
+    AddressSpace = &Process->Vm;
+    MmLockAddressSpace(AddressSpace);
+    MemoryArea = MmLocateMemoryAreaByAddress(AddressSpace, *BaseAddress);
+    if (MemoryArea == NULL || MemoryArea->DeleteInProgress)
+    {
+        MmUnlockAddressSpace(AddressSpace);
+        return STATUS_UNSUCCESSFUL;
+    }
+
+    /* The caller may not care about the old protection; use a local then */
+    if (OldAccessProtection == NULL) OldAccessProtection = &OldAccessProtection_;
+
+    ASSERT(MemoryArea->Type == MEMORY_AREA_SECTION_VIEW);
+    Status = MmProtectSectionView(AddressSpace,
+                                  MemoryArea,
+                                  *BaseAddress,
+                                  *NumberOfBytesToProtect,
+                                  NewAccessProtection,
+                                  OldAccessProtection);
+
+    MmUnlockAddressSpace(AddressSpace);
+
+    return Status;
+}
+
NTSTATUS
NTAPI
MiProtectVirtualMemory(IN PEPROCESS Process,
OUT PULONG OldAccessProtection OPTIONAL)
{
PMEMORY_AREA MemoryArea;
+ PMMVAD Vad;
+ PMMSUPPORT AddressSpace;
+ ULONG_PTR StartingAddress, EndingAddress;
+ PMMPTE PointerPde, PointerPte, LastPte;
+ MMPTE PteContents;
+ PMMPFN Pfn1;
+ ULONG ProtectionMask, OldProtect;
+ BOOLEAN Committed;
+ NTSTATUS Status = STATUS_SUCCESS;
+ PETHREAD Thread = PsGetCurrentThread();
+
+ /* Calculate base address for the VAD */
+ StartingAddress = (ULONG_PTR)PAGE_ALIGN((*BaseAddress));
+ EndingAddress = (((ULONG_PTR)*BaseAddress + *NumberOfBytesToProtect - 1) | (PAGE_SIZE - 1));
+ /* Calculate the protection mask and make sure it's valid */
+ ProtectionMask = MiMakeProtectionMask(NewAccessProtection);
+ if (ProtectionMask == MM_INVALID_PROTECTION)
+ {
+ DPRINT1("Invalid protection mask\n");
+ return STATUS_INVALID_PAGE_PROTECTION;
+ }
+
+ /* Check for ROS specific memory area */
MemoryArea = MmLocateMemoryAreaByAddress(&Process->Vm, *BaseAddress);
if ((MemoryArea) && (MemoryArea->Type == MEMORY_AREA_SECTION_VIEW))
{
+ /* Evil hack */
return MiRosProtectVirtualMemory(Process,
BaseAddress,
NumberOfBytesToProtect,
OldAccessProtection);
}
- UNIMPLEMENTED;
- return STATUS_CONFLICTING_ADDRESSES;
+ /* Lock the address space and make sure the process isn't already dead */
+ AddressSpace = MmGetCurrentAddressSpace();
+ MmLockAddressSpace(AddressSpace);
+ if (Process->VmDeleted)
+ {
+ DPRINT1("Process is dying\n");
+ Status = STATUS_PROCESS_IS_TERMINATING;
+ goto FailPath;
+ }
+
+ /* Get the VAD for this address range, and make sure it exists */
+ Vad = (PMMVAD)MiCheckForConflictingNode(StartingAddress >> PAGE_SHIFT,
+ EndingAddress >> PAGE_SHIFT,
+ &Process->VadRoot);
+ if (!Vad)
+ {
+ DPRINT("Could not find a VAD for this allocation\n");
+ Status = STATUS_CONFLICTING_ADDRESSES;
+ goto FailPath;
+ }
+
+ /* Make sure the address is within this VAD's boundaries */
+ if ((((ULONG_PTR)StartingAddress >> PAGE_SHIFT) < Vad->StartingVpn) ||
+ (((ULONG_PTR)EndingAddress >> PAGE_SHIFT) > Vad->EndingVpn))
+ {
+ Status = STATUS_CONFLICTING_ADDRESSES;
+ goto FailPath;
+ }
+
+ /* These kinds of VADs are not supported atm */
+ if ((Vad->u.VadFlags.VadType == VadAwe) ||
+ (Vad->u.VadFlags.VadType == VadDevicePhysicalMemory) ||
+ (Vad->u.VadFlags.VadType == VadLargePages))
+ {
+ DPRINT1("Illegal VAD for attempting to set protection\n");
+ Status = STATUS_CONFLICTING_ADDRESSES;
+ goto FailPath;
+ }
+
+ /* Check for a VAD whose protection can't be changed */
+ if (Vad->u.VadFlags.NoChange == 1)
+ {
+ DPRINT1("Trying to change protection of a NoChange VAD\n");
+ Status = STATUS_INVALID_PAGE_PROTECTION;
+ goto FailPath;
+ }
+
+ /* Is this section, or private memory? */
+ if (Vad->u.VadFlags.PrivateMemory == 0)
+ {
+ /* Not yet supported */
+ if (Vad->u.VadFlags.VadType == VadLargePageSection)
+ {
+ DPRINT1("Illegal VAD for attempting to set protection\n");
+ Status = STATUS_CONFLICTING_ADDRESSES;
+ goto FailPath;
+ }
+
+ /* Rotate VADs are not yet supported */
+ if (Vad->u.VadFlags.VadType == VadRotatePhysical)
+ {
+ DPRINT1("Illegal VAD for attempting to set protection\n");
+ Status = STATUS_CONFLICTING_ADDRESSES;
+ goto FailPath;
+ }
+
+ /* Not valid on section files */
+ if (NewAccessProtection & (PAGE_NOCACHE | PAGE_WRITECOMBINE))
+ {
+ /* Fail */
+ DPRINT1("Invalid protection flags for section\n");
+ Status = STATUS_INVALID_PARAMETER_4;
+ goto FailPath;
+ }
+
+ /* Check if data or page file mapping protection PTE is compatible */
+ if (!Vad->ControlArea->u.Flags.Image)
+ {
+ /* Not yet */
+ DPRINT1("Fixme: Not checking for valid protection\n");
+ }
+
+ /* This is a section, and this is not yet supported */
+ DPRINT1("Section protection not yet supported\n");
+ OldProtect = 0;
+ }
+ else
+ {
+ /* Private memory, check protection flags */
+ if ((NewAccessProtection & PAGE_WRITECOPY) ||
+ (NewAccessProtection & PAGE_EXECUTE_WRITECOPY))
+ {
+ DPRINT1("Invalid protection flags for private memory\n");
+ Status = STATUS_INVALID_PARAMETER_4;
+ goto FailPath;
+ }
+
+ /* Lock the working set */
+ MiLockProcessWorkingSetUnsafe(Process, Thread);
+
+ /* Check if all pages in this range are committed */
+ Committed = MiIsEntireRangeCommitted(StartingAddress,
+ EndingAddress,
+ Vad,
+ Process);
+ if (!Committed)
+ {
+ /* Fail */
+ DPRINT1("The entire range is not committed\n");
+ Status = STATUS_NOT_COMMITTED;
+ MiUnlockProcessWorkingSetUnsafe(Process, Thread);
+ goto FailPath;
+ }
+
+ /* Compute starting and ending PTE and PDE addresses */
+ PointerPde = MiAddressToPde(StartingAddress);
+ PointerPte = MiAddressToPte(StartingAddress);
+ LastPte = MiAddressToPte(EndingAddress);
+
+ /* Make this PDE valid */
+ MiMakePdeExistAndMakeValid(PointerPde, Process, MM_NOIRQL);
+
+ /* Save protection of the first page */
+ if (PointerPte->u.Long != 0)
+ {
+ /* Capture the page protection and make the PDE valid */
+ OldProtect = MiGetPageProtection(PointerPte);
+ MiMakePdeExistAndMakeValid(PointerPde, Process, MM_NOIRQL);
+ }
+ else
+ {
+ /* Grab the old protection from the VAD itself */
+ OldProtect = MmProtectToValue[Vad->u.VadFlags.Protection];
+ }
+
+ /* Loop all the PTEs now */
+ while (PointerPte <= LastPte)
+ {
+ /* Check if we've crossed a PDE boundary and make the new PDE valid too */
+ if (MiIsPteOnPdeBoundary(PointerPte))
+ {
+ PointerPde = MiAddressToPte(PointerPte);
+ MiMakePdeExistAndMakeValid(PointerPde, Process, MM_NOIRQL);
+ }
+
+ /* Capture the PTE and check if it was empty */
+ PteContents = *PointerPte;
+ if (PteContents.u.Long == 0)
+ {
+ /* This used to be a zero PTE and it no longer is, so we must add a
+ reference to the pagetable. */
+ MiIncrementPageTableReferences(MiPteToAddress(PointerPte));
+ }
+
+ /* Check what kind of PTE we are dealing with */
+ if (PteContents.u.Hard.Valid == 1)
+ {
+ /* Get the PFN entry */
+ Pfn1 = MiGetPfnEntry(PFN_FROM_PTE(&PteContents));
+
+ /* We don't support this yet */
+ ASSERT(Pfn1->u3.e1.PrototypePte == 0);
+
+ /* Check if the page should not be accessible at all */
+ if ((NewAccessProtection & PAGE_NOACCESS) ||
+ (NewAccessProtection & PAGE_GUARD))
+ {
+ /* The page should be in the WS and we should make it transition now */
+ DPRINT1("Making valid page invalid is not yet supported!\n");
+ Status = STATUS_NOT_IMPLEMENTED;
+ /* Unlock the working set */
+ MiUnlockProcessWorkingSetUnsafe(Process, Thread);
+ goto FailPath;
+ }
+
+ /* Write the protection mask and write it with a TLB flush */
+ Pfn1->OriginalPte.u.Soft.Protection = ProtectionMask;
+ MiFlushTbAndCapture(Vad,
+ PointerPte,
+ ProtectionMask,
+ Pfn1,
+ TRUE);
+ }
+ else
+ {
+ /* We don't support these cases yet */
+ ASSERT(PteContents.u.Soft.Prototype == 0);
+ ASSERT(PteContents.u.Soft.Transition == 0);
+
+ /* The PTE is already demand-zero, just update the protection mask */
+ PteContents.u.Soft.Protection = ProtectionMask;
+ MI_WRITE_INVALID_PTE(PointerPte, PteContents);
+ ASSERT(PointerPte->u.Long != 0);
+ }
+
+ /* Move to the next PTE */
+ PointerPte++;
+ }
+
+ /* Unlock the working set */
+ MiUnlockProcessWorkingSetUnsafe(Process, Thread);
+ }
+
+ /* Unlock the address space */
+ MmUnlockAddressSpace(AddressSpace);
+
+ /* Return parameters and success */
+ *NumberOfBytesToProtect = EndingAddress - StartingAddress + 1;
+ *BaseAddress = (PVOID)StartingAddress;
+ *OldAccessProtection = OldProtect;
+ return STATUS_SUCCESS;
+
+FailPath:
+ /* Unlock the address space and return the failure code */
+ MmUnlockAddressSpace(AddressSpace);
+ return Status;
}
VOID
ULONG PteCount = 0;
PMMPFN Pfn1;
MMPTE PteContents;
- PUSHORT UsedPageTableEntries;
PETHREAD CurrentThread = PsGetCurrentThread();
//
PointerPde = MiAddressToPde(StartingAddress);
PointerPte = MiAddressToPte(StartingAddress);
if (Vad->u.VadFlags.MemCommit) CommitPte = MiAddressToPte(Vad->EndingVpn << PAGE_SHIFT);
- MiLockWorkingSet(CurrentThread, &Process->Vm);
+ MiLockProcessWorkingSetUnsafe(Process, CurrentThread);
//
// Make the PDE valid, and now loop through each page's worth of data
//
// Check if we've crossed a PDE boundary
//
- if ((((ULONG_PTR)PointerPte) & (SYSTEM_PD_SIZE - 1)) == 0)
+ if (MiIsPteOnPdeBoundary(PointerPte))
{
//
// Get the new PDE and flush the valid PTEs we had built up until
// This used to be a zero PTE and it no longer is, so we must add a
// reference to the pagetable.
//
- UsedPageTableEntries = &MmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(StartingAddress)];
- (*UsedPageTableEntries)++;
- ASSERT((*UsedPageTableEntries) <= PTE_COUNT);
+ MiIncrementPageTableReferences(StartingAddress);
//
// Next, we account for decommitted PTEs and make the PTE as such
// release the working set and return the commit reduction accounting.
//
if (PteCount) MiProcessValidPteList(ValidPteList, PteCount);
- MiUnlockWorkingSet(CurrentThread, &Process->Vm);
+ MiUnlockProcessWorkingSetUnsafe(Process, CurrentThread);
return CommitReduction;
}
}
_SEH2_END;
- //
- // Return status
- //
+ //
+ // Return status
+ //
+ return Status;
+}
+
+/* Returns TRUE if the page already carries the lock bit(s) requested by
+   LockType: MAP_PROCESS checks LockedInWs, MAP_SYSTEM checks
+   LockedInMemory. */
+FORCEINLINE
+BOOLEAN
+MI_IS_LOCKED_VA(
+    PMMPFN Pfn1,
+    ULONG LockType)
+{
+    // HACK until we have proper WSLIST support
+    PMMWSLE Wsle = &Pfn1->Wsle;
+
+    if ((LockType & MAP_PROCESS) && (Wsle->u1.e1.LockedInWs))
+        return TRUE;
+    if ((LockType & MAP_SYSTEM) && (Wsle->u1.e1.LockedInMemory))
+        return TRUE;
+
+    return FALSE;
+}
+
+/* Sets the requested lock bit(s) on the page's working-set list entry. The
+   first lock of either kind takes a PFN reference so the page stays resident
+   while locked (MI_UNLOCK_VA drops it again). */
+FORCEINLINE
+VOID
+MI_LOCK_VA(
+    PMMPFN Pfn1,
+    ULONG LockType)
+{
+    // HACK until we have proper WSLIST support
+    PMMWSLE Wsle = &Pfn1->Wsle;
+
+    /* Only take the reference on the unlocked -> locked transition */
+    if (!Wsle->u1.e1.LockedInWs &&
+        !Wsle->u1.e1.LockedInMemory)
+    {
+        MiReferenceProbedPageAndBumpLockCount(Pfn1);
+    }
+
+    if (LockType & MAP_PROCESS)
+        Wsle->u1.e1.LockedInWs = 1;
+    if (LockType & MAP_SYSTEM)
+        Wsle->u1.e1.LockedInMemory = 1;
+}
+
+/* Clears the requested lock bit(s) on the page and, once neither lock kind
+   remains, drops the PFN reference that MI_LOCK_VA took. */
+FORCEINLINE
+VOID
+MI_UNLOCK_VA(
+    PMMPFN Pfn1,
+    ULONG LockType)
+{
+    // HACK until we have proper WSLIST support
+    PMMWSLE Wsle = &Pfn1->Wsle;
+
+    if (LockType & MAP_PROCESS)
+        Wsle->u1.e1.LockedInWs = 0;
+    if (LockType & MAP_SYSTEM)
+        Wsle->u1.e1.LockedInMemory = 0;
+
+    /* Last lock of either kind released, undo the reference */
+    if (!Wsle->u1.e1.LockedInWs &&
+        !Wsle->u1.e1.LockedInMemory)
+    {
+        MiDereferencePfnAndDropLockCount(Pfn1);
+    }
+}
+
+/* Validates all VADs covering [*BaseAddress, *BaseAddress + *RegionSize)
+   before a lock/unlock operation. Page-aligns the range (returned through
+   the in/out parameters and *EndAddress). Fails with
+   STATUS_ACCESS_VIOLATION if part of the range has no VAD, and with
+   STATUS_INCOMPATIBLE_FILE_MAP (truncating the range to the offending VA)
+   if a VAD has a non-lockable type. */
+static
+NTSTATUS
+MiCheckVadsForLockOperation(
+    _Inout_ PVOID *BaseAddress,
+    _Inout_ PSIZE_T RegionSize,
+    _Inout_ PVOID *EndAddress)
+
+{
+    PMMVAD Vad;
+    PVOID CurrentVa;
+
+    /* Compute the end address and page-align both ends of the range */
+    *EndAddress = (PUCHAR)*BaseAddress + *RegionSize;
+    *EndAddress = ALIGN_UP_POINTER_BY(*EndAddress, PAGE_SIZE);
+    *BaseAddress = ALIGN_DOWN_POINTER_BY(*BaseAddress, PAGE_SIZE);
+
+    /* First loop and check all VADs */
+    CurrentVa = *BaseAddress;
+    while (CurrentVa < *EndAddress)
+    {
+        /* Get VAD */
+        Vad = MiLocateAddress(CurrentVa);
+        if (Vad == NULL)
+        {
+            /// FIXME: this might be a memory area for a section view...
+            return STATUS_ACCESS_VIOLATION;
+        }
+
+        /* Check VAD type */
+        if ((Vad->u.VadFlags.VadType != VadNone) &&
+            (Vad->u.VadFlags.VadType != VadImageMap) &&
+            (Vad->u.VadFlags.VadType != VadWriteWatch))
+        {
+            *EndAddress = CurrentVa;
+            *RegionSize = (PUCHAR)*EndAddress - (PUCHAR)*BaseAddress;
+            return STATUS_INCOMPATIBLE_FILE_MAP;
+        }
+
+        /* Jump to the first address past this VAD */
+        CurrentVa = (PVOID)((Vad->EndingVpn + 1) << PAGE_SHIFT);
+    }
+
+    *RegionSize = (PUCHAR)*EndAddress - (PUCHAR)*BaseAddress;
+    return STATUS_SUCCESS;
+}
+
+/* Worker for NtLockVirtualMemory: probes, faults in, and lock-bits every
+   page in the requested range of the current process. MapType is a mask of
+   MAP_PROCESS and/or MAP_SYSTEM. The page-aligned range is returned through
+   the in/out parameters. If any page was already locked, the remaining
+   pages are still locked and STATUS_WAS_LOCKED is returned. */
+static
+NTSTATUS
+MiLockVirtualMemory(
+    IN OUT PVOID *BaseAddress,
+    IN OUT PSIZE_T RegionSize,
+    IN ULONG MapType)
+{
+    PEPROCESS CurrentProcess;
+    PMMSUPPORT AddressSpace;
+    PVOID CurrentVa, EndAddress;
+    PMMPTE PointerPte, LastPte;
+    PMMPDE PointerPde;
+#if (_MI_PAGING_LEVELS >= 3)
+    PMMPDE PointerPpe;
+#endif
+#if (_MI_PAGING_LEVELS == 4)
+    PMMPDE PointerPxe;
+#endif
+    PMMPFN Pfn1;
+    NTSTATUS Status, TempStatus;
+
+    /* Lock the address space */
+    AddressSpace = MmGetCurrentAddressSpace();
+    MmLockAddressSpace(AddressSpace);
+
+    /* Make sure we still have an address space */
+    CurrentProcess = PsGetCurrentProcess();
+    if (CurrentProcess->VmDeleted)
+    {
+        Status = STATUS_PROCESS_IS_TERMINATING;
+        goto Cleanup;
+    }
+
+    /* Check the VADs in the requested range */
+    Status = MiCheckVadsForLockOperation(BaseAddress, RegionSize, &EndAddress);
+    if (!NT_SUCCESS(Status))
+    {
+        goto Cleanup;
+    }
+
+    /* Enter SEH for probing */
+    _SEH2_TRY
+    {
+        /* Loop all pages and probe them (read-probe only; writability is
+           not verified here) */
+        CurrentVa = *BaseAddress;
+        while (CurrentVa < EndAddress)
+        {
+            (void)(*(volatile CHAR*)CurrentVa);
+            CurrentVa = (PUCHAR)CurrentVa + PAGE_SIZE;
+        }
+    }
+    _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
+    {
+        Status = _SEH2_GetExceptionCode();
+        goto Cleanup;
+    }
+    _SEH2_END;
+
+    /* All pages were accessible, since we hold the address space lock, nothing
+       can be de-committed. Assume success for now. */
+    Status = STATUS_SUCCESS;
+
+    /* Get the PTE and PDE */
+    PointerPte = MiAddressToPte(*BaseAddress);
+    PointerPde = MiAddressToPde(*BaseAddress);
+#if (_MI_PAGING_LEVELS >= 3)
+    PointerPpe = MiAddressToPpe(*BaseAddress);
+#endif
+#if (_MI_PAGING_LEVELS == 4)
+    PointerPxe = MiAddressToPxe(*BaseAddress);
+#endif
+
+    /* Get the last PTE */
+    LastPte = MiAddressToPte((PVOID)((ULONG_PTR)EndAddress - 1));
+
+    /* Lock the process working set */
+    MiLockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
+
+    /* Loop the pages */
+    do
+    {
+        /* Check for a page that is not accessible */
+        while (
+#if (_MI_PAGING_LEVELS == 4)
+               (PointerPxe->u.Hard.Valid == 0) ||
+#endif
+#if (_MI_PAGING_LEVELS >= 3)
+               (PointerPpe->u.Hard.Valid == 0) ||
+#endif
+               (PointerPde->u.Hard.Valid == 0) ||
+               (PointerPte->u.Hard.Valid == 0))
+        {
+            /* Release process working set */
+            MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
+
+            /* Access the page */
+            CurrentVa = MiPteToAddress(PointerPte);
+
+            //HACK: Pass a placeholder TrapInformation so the fault handler knows we're unlocked
+            TempStatus = MmAccessFault(TRUE, CurrentVa, KernelMode, (PVOID)0xBADBADA3);
+            if (!NT_SUCCESS(TempStatus))
+            {
+                // This should only happen, when remote backing storage is not accessible
+                ASSERT(FALSE);
+                Status = TempStatus;
+                goto Cleanup;
+            }
+
+            /* Lock the process working set */
+            MiLockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
+        }
+
+        /* Get the PFN */
+        Pfn1 = MiGetPfnEntry(PFN_FROM_PTE(PointerPte));
+        ASSERT(Pfn1 != NULL);
+
+        /* Check the previous lock status; note we continue and lock anyway */
+        if (MI_IS_LOCKED_VA(Pfn1, MapType))
+        {
+            Status = STATUS_WAS_LOCKED;
+        }
+
+        /* Lock it */
+        MI_LOCK_VA(Pfn1, MapType);
+
+        /* Go to the next PTE */
+        PointerPte++;
+
+        /* Check if we're on a PDE boundary */
+        if (MiIsPteOnPdeBoundary(PointerPte)) PointerPde++;
+#if (_MI_PAGING_LEVELS >= 3)
+        if (MiIsPteOnPpeBoundary(PointerPte)) PointerPpe++;
+#endif
+#if (_MI_PAGING_LEVELS == 4)
+        if (MiIsPteOnPxeBoundary(PointerPte)) PointerPxe++;
+#endif
+    } while (PointerPte <= LastPte);
+
+    /* Release process working set */
+    MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
+
+Cleanup:
+    /* Unlock address space */
+    MmUnlockAddressSpace(AddressSpace);
+
+    return Status;
+}
}
//
- // Oops :(
+ // Call the internal function
//
- UNIMPLEMENTED;
+ Status = MiLockVirtualMemory(&CapturedBaseAddress,
+ &CapturedBytesToLock,
+ MapType);
//
// Detach if needed
// Return data to user
//
*BaseAddress = CapturedBaseAddress;
- *NumberOfBytesToLock = 0;
+ *NumberOfBytesToLock = CapturedBytesToLock;
}
_SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
{
//
// Return status
//
- return STATUS_SUCCESS;
+ return Status;
+}
+
+
+/* Internal worker for NtUnlockVirtualMemory: removes the requested lock
+   type(s) (MAP_PROCESS and/or MAP_SYSTEM in MapType) from every page in the
+   given range of the current process.
+
+   BaseAddress, RegionSize - in/out; presumably rounded/adjusted to the range
+                             actually covered by MiCheckVadsForLockOperation
+                             -- TODO confirm against that function.
+
+   The operation is all-or-nothing: a first pass verifies that every page in
+   the range is present and holds all of the requested locks; only if that
+   succeeds does a second pass clear the lock bits in the PFN entries.
+   Returns STATUS_SUCCESS on full success, STATUS_NOT_LOCKED if any page was
+   absent or not locked (nothing is unlocked in that case), or
+   STATUS_PROCESS_IS_TERMINATING if the address space is being deleted. */
+static
+NTSTATUS
+MiUnlockVirtualMemory(
+ IN OUT PVOID *BaseAddress,
+ IN OUT PSIZE_T RegionSize,
+ IN ULONG MapType)
+{
+ PEPROCESS CurrentProcess;
+ PMMSUPPORT AddressSpace;
+ PVOID EndAddress;
+ PMMPTE PointerPte, LastPte;
+ PMMPDE PointerPde;
+#if (_MI_PAGING_LEVELS >= 3)
+ PMMPDE PointerPpe;
+#endif
+#if (_MI_PAGING_LEVELS == 4)
+ PMMPDE PointerPxe;
+#endif
+ PMMPFN Pfn1;
+ NTSTATUS Status;
+
+ /* Lock the address space */
+ AddressSpace = MmGetCurrentAddressSpace();
+ MmLockAddressSpace(AddressSpace);
+
+ /* Make sure we still have an address space */
+ CurrentProcess = PsGetCurrentProcess();
+ if (CurrentProcess->VmDeleted)
+ {
+ Status = STATUS_PROCESS_IS_TERMINATING;
+ goto Cleanup;
+ }
+
+ /* Check the VADs in the requested range */
+ Status = MiCheckVadsForLockOperation(BaseAddress, RegionSize, &EndAddress);
+
+ /* Note: only bail out if we hit an area without a VAD. If we hit an
+ incompatible VAD we continue, like Windows does */
+ if (Status == STATUS_ACCESS_VIOLATION)
+ {
+ Status = STATUS_NOT_LOCKED;
+ goto Cleanup;
+ }
+
+ /* Get the PTE and PDE */
+ PointerPte = MiAddressToPte(*BaseAddress);
+ PointerPde = MiAddressToPde(*BaseAddress);
+#if (_MI_PAGING_LEVELS >= 3)
+ PointerPpe = MiAddressToPpe(*BaseAddress);
+#endif
+#if (_MI_PAGING_LEVELS == 4)
+ PointerPxe = MiAddressToPxe(*BaseAddress);
+#endif
+
+ /* Get the last PTE */
+ LastPte = MiAddressToPte((PVOID)((ULONG_PTR)EndAddress - 1));
+
+ /* Lock the process working set */
+ MiLockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
+
+ /* Loop the pages */
+ do
+ {
+ /* Check for a page that is not present */
+ if (
+#if (_MI_PAGING_LEVELS == 4)
+ (PointerPxe->u.Hard.Valid == 0) ||
+#endif
+#if (_MI_PAGING_LEVELS >= 3)
+ (PointerPpe->u.Hard.Valid == 0) ||
+#endif
+ (PointerPde->u.Hard.Valid == 0) ||
+ (PointerPte->u.Hard.Valid == 0))
+ {
+ /* Remember it, but keep going */
+ Status = STATUS_NOT_LOCKED;
+ }
+ else
+ {
+ /* Get the PFN */
+ Pfn1 = MiGetPfnEntry(PFN_FROM_PTE(PointerPte));
+ ASSERT(Pfn1 != NULL);
+
+ /* Check if all of the requested locks are present */
+ if (((MapType & MAP_SYSTEM) && !MI_IS_LOCKED_VA(Pfn1, MAP_SYSTEM)) ||
+ ((MapType & MAP_PROCESS) && !MI_IS_LOCKED_VA(Pfn1, MAP_PROCESS)))
+ {
+ /* Remember it, but keep going */
+ Status = STATUS_NOT_LOCKED;
+
+ /* Check if no lock is present */
+ if (!MI_IS_LOCKED_VA(Pfn1, MAP_PROCESS | MAP_SYSTEM))
+ {
+ DPRINT1("FIXME: Should remove the page from WS\n");
+ }
+ }
+ }
+
+ /* Go to the next PTE */
+ PointerPte++;
+
+ /* Check if we're on a PDE boundary */
+ if (MiIsPteOnPdeBoundary(PointerPte)) PointerPde++;
+#if (_MI_PAGING_LEVELS >= 3)
+ if (MiIsPteOnPpeBoundary(PointerPte)) PointerPpe++;
+#endif
+#if (_MI_PAGING_LEVELS == 4)
+ if (MiIsPteOnPxeBoundary(PointerPte)) PointerPxe++;
+#endif
+ } while (PointerPte <= LastPte);
+
+ /* Check if we hit a page that was not locked */
+ if (Status == STATUS_NOT_LOCKED)
+ {
+ goto CleanupWithWsLock;
+ }
+
+ /* All pages in the region were locked, so unlock them all */
+
+ /* Get the PTE and PDE */
+ PointerPte = MiAddressToPte(*BaseAddress);
+ PointerPde = MiAddressToPde(*BaseAddress);
+#if (_MI_PAGING_LEVELS >= 3)
+ PointerPpe = MiAddressToPpe(*BaseAddress);
+#endif
+#if (_MI_PAGING_LEVELS == 4)
+ PointerPxe = MiAddressToPxe(*BaseAddress);
+#endif
+
+ /* Loop the pages */
+ do
+ {
+ /* Unlock it */
+ Pfn1 = MiGetPfnEntry(PFN_FROM_PTE(PointerPte));
+ MI_UNLOCK_VA(Pfn1, MapType);
+
+ /* Go to the next PTE */
+ PointerPte++;
+
+ /* Check if we're on a PDE boundary */
+ if (MiIsPteOnPdeBoundary(PointerPte)) PointerPde++;
+#if (_MI_PAGING_LEVELS >= 3)
+ if (MiIsPteOnPpeBoundary(PointerPte)) PointerPpe++;
+#endif
+#if (_MI_PAGING_LEVELS == 4)
+ if (MiIsPteOnPxeBoundary(PointerPte)) PointerPxe++;
+#endif
+ } while (PointerPte <= LastPte);
+
+ /* Everything is done */
+ Status = STATUS_SUCCESS;
+
+CleanupWithWsLock:
+
+ /* Release process working set */
+ MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
+
+Cleanup:
+ /* Unlock address space */
+ MmUnlockAddressSpace(AddressSpace);
+
+ return Status;
}
+
NTSTATUS
NTAPI
NtUnlockVirtualMemory(IN HANDLE ProcessHandle,
}
//
- // Oops :(
+ // Call the internal function
//
- UNIMPLEMENTED;
+ Status = MiUnlockVirtualMemory(&CapturedBaseAddress,
+ &CapturedBytesToUnlock,
+ MapType);
//
// Detach if needed
//
// Return data to user
//
- *BaseAddress = PAGE_ALIGN(CapturedBaseAddress);
- *NumberOfBytesToUnlock = 0;
+ *BaseAddress = CapturedBaseAddress;
+ *NumberOfBytesToUnlock = CapturedBytesToUnlock;
}
_SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
{
//
// Catch illegal base address
//
- if (BaseAddress > MM_HIGHEST_USER_ADDRESS) return STATUS_INVALID_PARAMETER_2;
+ if (BaseAddress > MM_HIGHEST_USER_ADDRESS) _SEH2_YIELD(return STATUS_INVALID_PARAMETER_2);
//
// Catch illegal region size
//
// Fail
//
- return STATUS_INVALID_PARAMETER_3;
+ _SEH2_YIELD(return STATUS_INVALID_PARAMETER_3);
}
//
//
// Must have a count
//
- if (CapturedEntryCount == 0) return STATUS_INVALID_PARAMETER_5;
+ if (CapturedEntryCount == 0) _SEH2_YIELD(return STATUS_INVALID_PARAMETER_5);
//
// Can't be larger than the maximum
//
// Fail
//
- return STATUS_INVALID_PARAMETER_5;
+ _SEH2_YIELD(return STATUS_INVALID_PARAMETER_5);
}
//
PMEMORY_AREA MemoryArea;
PFN_NUMBER PageCount;
PMMVAD Vad, FoundVad;
- PUSHORT UsedPageTableEntries;
NTSTATUS Status;
PMMSUPPORT AddressSpace;
PVOID PBaseAddress;
KPROCESSOR_MODE PreviousMode = KeGetPreviousMode();
PETHREAD CurrentThread = PsGetCurrentThread();
KAPC_STATE ApcState;
- ULONG ProtectionMask;
+ ULONG ProtectionMask, QuotaCharge = 0, QuotaFree = 0;
BOOLEAN Attached = FALSE, ChangeProtection = FALSE;
MMPTE TempPte;
PMMPTE PointerPte, PointerPde, LastPte;
}
}
- //
- // Force PAGE_READWRITE for everything, for now
- //
- Protect = PAGE_READWRITE;
-
/* Calculate the protection mask and make sure it's valid */
ProtectionMask = MiMakeProtectionMask(Protect);
if (ProtectionMask == MM_INVALID_PROTECTION)
}
//
- // Assert on the things we don't yet support
+ // Fail on the things we don't yet support
//
- ASSERT(ZeroBits == 0);
- ASSERT((AllocationType & MEM_LARGE_PAGES) == 0);
- ASSERT((AllocationType & MEM_PHYSICAL) == 0);
- ASSERT((AllocationType & MEM_WRITE_WATCH) == 0);
- ASSERT((AllocationType & MEM_TOP_DOWN) == 0);
- ASSERT((AllocationType & MEM_RESET) == 0);
- ASSERT(Process->VmTopDown == 0);
+ if (ZeroBits != 0)
+ {
+ DPRINT1("Zero bits not supported\n");
+ Status = STATUS_INVALID_PARAMETER;
+ goto FailPathNoLock;
+ }
+ if ((AllocationType & MEM_LARGE_PAGES) == MEM_LARGE_PAGES)
+ {
+ DPRINT1("MEM_LARGE_PAGES not supported\n");
+ Status = STATUS_INVALID_PARAMETER;
+ goto FailPathNoLock;
+ }
+ if ((AllocationType & MEM_PHYSICAL) == MEM_PHYSICAL)
+ {
+ DPRINT1("MEM_PHYSICAL not supported\n");
+ Status = STATUS_INVALID_PARAMETER;
+ goto FailPathNoLock;
+ }
+ if ((AllocationType & MEM_WRITE_WATCH) == MEM_WRITE_WATCH)
+ {
+ DPRINT1("MEM_WRITE_WATCH not supported\n");
+ Status = STATUS_INVALID_PARAMETER;
+ goto FailPathNoLock;
+ }
+ if ((AllocationType & MEM_TOP_DOWN) == MEM_TOP_DOWN)
+ {
+ DPRINT1("MEM_TOP_DOWN not supported\n");
+ AllocationType &= ~MEM_TOP_DOWN;
+ }
+
+ if (Process->VmTopDown == 1)
+ {
+ DPRINT1("VmTopDown not supported\n");
+ Status = STATUS_INVALID_PARAMETER;
+ goto FailPathNoLock;
+ }
//
// Check if the caller is reserving memory, or committing memory and letting
//
// Lock the working set and insert the VAD into the process VAD tree
//
- MiLockProcessWorkingSet(Process, CurrentThread);
+ MiLockProcessWorkingSetUnsafe(Process, CurrentThread);
Vad->ControlArea = NULL; // For Memory-Area hack
MiInsertVad(Vad, Process);
- MiUnlockProcessWorkingSet(Process, CurrentThread);
+ MiUnlockProcessWorkingSetUnsafe(Process, CurrentThread);
//
// Update the virtual size of the process, and if this is now the highest
goto FailPath;
}
+ if ((AllocationType & MEM_RESET) == MEM_RESET)
+ {
+ /// @todo HACK: pretend success
+ DPRINT("MEM_RESET not supported\n");
+ Status = STATUS_SUCCESS;
+ goto FailPath;
+ }
+
//
// These kinds of VADs are illegal for this Windows function when trying to
// commit an existing range
//
// Make sure that this address range actually fits within the VAD for it
//
- if (((StartingAddress >> PAGE_SHIFT) < FoundVad->StartingVpn) &&
+ if (((StartingAddress >> PAGE_SHIFT) < FoundVad->StartingVpn) ||
((EndingAddress >> PAGE_SHIFT) > FoundVad->EndingVpn))
{
DPRINT1("Address range does not fit into the VAD\n");
}
//
- // If this is an existing section view, we call the old RosMm routine which
- // has the relevant code required to handle the section scenario. In the future
- // we will limit this even more so that there's almost nothing that the code
- // needs to do, and it will become part of section.c in RosMm
+ // Make sure this is an ARM3 section
//
MemoryArea = MmLocateMemoryAreaByAddress(AddressSpace, (PVOID)PAGE_ROUND_DOWN(PBaseAddress));
if (MemoryArea->Type != MEMORY_AREA_OWNED_BY_ARM3)
{
- return MiRosAllocateVirtualMemory(ProcessHandle,
- Process,
- MemoryArea,
- AddressSpace,
- UBaseAddress,
- Attached,
- URegionSize,
- AllocationType,
- Protect);
+ DPRINT1("Illegal commit of non-ARM3 section!\n");
+ Status = STATUS_ALREADY_COMMITTED;
+ goto FailPath;
+ }
+
+ //
+ // Is this a previously reserved section being committed? If so, enter the
+ // special section path
+ //
+ if (FoundVad->u.VadFlags.PrivateMemory == FALSE)
+ {
+ //
+ // You cannot commit large page sections through this API
+ //
+ if (FoundVad->u.VadFlags.VadType == VadLargePageSection)
+ {
+ DPRINT1("Large page sections cannot be VirtualAlloc'd\n");
+ Status = STATUS_INVALID_PAGE_PROTECTION;
+ goto FailPath;
+ }
+
+ //
+ // You can only use caching flags on a rotate VAD
+ //
+ if ((Protect & (PAGE_NOCACHE | PAGE_WRITECOMBINE)) &&
+ (FoundVad->u.VadFlags.VadType != VadRotatePhysical))
+ {
+ DPRINT1("Cannot use caching flags with anything but rotate VADs\n");
+ Status = STATUS_INVALID_PAGE_PROTECTION;
+ goto FailPath;
+ }
+
+ //
+ // We should make sure that the section's permissions aren't being
+ // messed with
+ //
+ if (FoundVad->u.VadFlags.NoChange)
+ {
+ //
+ // Make sure it's okay to touch it
+ //
+ Status = MiCheckSecuredVad(FoundVad,
+ PBaseAddress,
+ PRegionSize,
+ ProtectionMask);
+ if (!NT_SUCCESS(Status))
+ {
+ DPRINT1("Secured VAD being messed around with\n");
+ goto FailPath;
+ }
+ }
+
+ //
+ // ARM3 does not support file-backed sections, only shared memory
+ //
+ ASSERT(FoundVad->ControlArea->FilePointer == NULL);
+
+ //
+ // Rotate VADs cannot be guard pages or inaccessible, nor copy on write
+ //
+ if ((FoundVad->u.VadFlags.VadType == VadRotatePhysical) &&
+ (Protect & (PAGE_WRITECOPY | PAGE_EXECUTE_WRITECOPY | PAGE_NOACCESS | PAGE_GUARD)))
+ {
+ DPRINT1("Invalid page protection for rotate VAD\n");
+ Status = STATUS_INVALID_PAGE_PROTECTION;
+ goto FailPath;
+ }
+
+ //
+ // Compute PTE addresses and the quota charge, then grab the commit lock
+ //
+ PointerPte = MI_GET_PROTOTYPE_PTE_FOR_VPN(FoundVad, StartingAddress >> PAGE_SHIFT);
+ LastPte = MI_GET_PROTOTYPE_PTE_FOR_VPN(FoundVad, EndingAddress >> PAGE_SHIFT);
+ QuotaCharge = (ULONG)(LastPte - PointerPte + 1);
+ KeAcquireGuardedMutexUnsafe(&MmSectionCommitMutex);
+
+ //
+ // Get the segment template PTE and start looping each page
+ //
+ TempPte = FoundVad->ControlArea->Segment->SegmentPteTemplate;
+ ASSERT(TempPte.u.Long != 0);
+ while (PointerPte <= LastPte)
+ {
+ //
+ // For each non-already-committed page, write the invalid template PTE
+ //
+ if (PointerPte->u.Long == 0)
+ {
+ MI_WRITE_INVALID_PTE(PointerPte, TempPte);
+ }
+ else
+ {
+ QuotaFree++;
+ }
+ PointerPte++;
+ }
+
+ //
+ // Now do the commit accounting and release the lock
+ //
+ ASSERT(QuotaCharge >= QuotaFree);
+ QuotaCharge -= QuotaFree;
+ FoundVad->ControlArea->Segment->NumberOfCommittedPages += QuotaCharge;
+ KeReleaseGuardedMutexUnsafe(&MmSectionCommitMutex);
+
+ //
+ // We are done with committing the section pages
+ //
+ Status = STATUS_SUCCESS;
+ goto FailPath;
}
//
- // This is a specific ReactOS check because we do not support Section VADs
+ // This is a specific ReactOS check because we only use normal VADs
//
ASSERT(FoundVad->u.VadFlags.VadType == VadNone);
- ASSERT(FoundVad->u.VadFlags.PrivateMemory == TRUE);
//
// While this is an actual Windows check
//
TempPte.u.Long = 0;
TempPte.u.Soft.Protection = ProtectionMask;
+ NT_ASSERT(TempPte.u.Long != 0);
//
// Get the PTE, PDE and the last PTE for this address range
//
// Lock the working set while we play with user pages and page tables
//
- //MiLockWorkingSet(CurrentThread, AddressSpace);
+ MiLockProcessWorkingSetUnsafe(Process, CurrentThread);
//
// Make the current page table valid, and then loop each page within it
//
// Have we crossed into a new page table?
//
- if (!(((ULONG_PTR)PointerPte) & (SYSTEM_PD_SIZE - 1)))
+ if (MiIsPteOnPdeBoundary(PointerPte))
{
//
// Get the PDE and now make it valid too
// First increment the count of pages in the page table for this
// process
//
- UsedPageTableEntries = &MmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(MiPteToAddress(PointerPte))];
- (*UsedPageTableEntries)++;
- ASSERT((*UsedPageTableEntries) <= PTE_COUNT);
+ MiIncrementPageTableReferences(MiPteToAddress(PointerPte));
//
// And now write the invalid demand-zero PTE as requested
// There's a change in protection, remember this for later, but do
// not yet handle it.
//
- DPRINT1("Protection change to: 0x%lx not implemented\n", Protect);
ChangeProtection = TRUE;
}
PointerPte++;
}
- //
- // This path is not yet handled
- //
- ASSERT(ChangeProtection == FALSE);
-
//
// Release the working set lock, unlock the address space, and detach from
// the target process if it was not the current process. Also dereference the
// target process if this wasn't the case.
//
- //MiUnlockProcessWorkingSet(Process, CurrentThread);
+ MiUnlockProcessWorkingSetUnsafe(Process, CurrentThread);
Status = STATUS_SUCCESS;
FailPath:
MmUnlockAddressSpace(AddressSpace);
+
+ //
+ // Check if we need to update the protection
+ //
+ if (ChangeProtection)
+ {
+ PVOID ProtectBaseAddress = (PVOID)StartingAddress;
+ SIZE_T ProtectSize = PRegionSize;
+ ULONG OldProtection;
+
+ //
+ // Change the protection of the region
+ //
+ MiProtectVirtualMemory(Process,
+ &ProtectBaseAddress,
+ &ProtectSize,
+ Protect,
+ &OldProtection);
+ }
+
FailPathNoLock:
if (Attached) KeUnstackDetachProcess(&ApcState);
if (ProcessHandle != NtCurrentProcess()) ObDereferenceObject(Process);
IN ULONG FreeType)
{
PMEMORY_AREA MemoryArea;
- ULONG PRegionSize;
+ SIZE_T PRegionSize;
PVOID PBaseAddress;
- ULONG CommitReduction = 0;
+ LONG_PTR CommitReduction = 0;
ULONG_PTR StartingAddress, EndingAddress;
PMMVAD Vad;
NTSTATUS Status;
}
//
- // These ASSERTs are here because ReactOS ARM3 does not currently implement
- // any other kinds of VADs.
+ // Only private memory (except rotate VADs) can be freed through here
+ //
+ if ((!(Vad->u.VadFlags.PrivateMemory) &&
+ (Vad->u.VadFlags.VadType != VadRotatePhysical)) ||
+ (Vad->u.VadFlags.VadType == VadDevicePhysicalMemory))
+ {
+ DPRINT1("Attempt to free section memory\n");
+ Status = STATUS_UNABLE_TO_DELETE_SECTION;
+ goto FailPath;
+ }
+
+ //
+ // ARM3 does not yet handle protected VM
//
- ASSERT(Vad->u.VadFlags.PrivateMemory == 1);
ASSERT(Vad->u.VadFlags.NoChange == 0);
- ASSERT(Vad->u.VadFlags.VadType == VadNone);
//
// Finally, make sure there is a ReactOS Mm MEMORY_AREA for this allocation
//
if (FreeType & MEM_RELEASE)
{
+ //
+ // ARM3 only supports this VAD in this path
+ //
+ ASSERT(Vad->u.VadFlags.VadType == VadNone);
+
//
// Is the caller trying to remove the whole VAD, or remove only a portion
// of it? If no region size is specified, then the assumption is that the
//
// Finally lock the working set and remove the VAD from the VAD tree
//
- MiLockWorkingSet(CurrentThread, AddressSpace);
+ MiLockProcessWorkingSetUnsafe(Process, CurrentThread);
ASSERT(Process->VadRoot.NumberGenericTableElements >= 1);
MiRemoveNode((PMMADDRESS_NODE)Vad, &Process->VadRoot);
}
// the code path above when the caller sets a zero region size
// and the whole VAD is destroyed
//
- MiLockWorkingSet(CurrentThread, AddressSpace);
+ MiLockProcessWorkingSetUnsafe(Process, CurrentThread);
ASSERT(Process->VadRoot.NumberGenericTableElements >= 1);
MiRemoveNode((PMMADDRESS_NODE)Vad, &Process->VadRoot);
}
//
if ((EndingAddress >> PAGE_SHIFT) == Vad->EndingVpn)
{
+ PMEMORY_AREA MemoryArea;
+
//
// This is pretty easy and similar to case A. We compute the
// amount of pages to decommit, update the VAD's commit charge
// and then change the ending address of the VAD to be a bit
// smaller.
//
- // NOT YET IMPLEMENTED IN ARM3.
- //
- DPRINT1("Case C not handled\n");
- Status = STATUS_FREE_VM_NOT_AT_BASE;
- goto FailPath;
+ MiLockProcessWorkingSetUnsafe(Process, CurrentThread);
+ CommitReduction = MiCalculatePageCommitment(StartingAddress,
+ EndingAddress,
+ Vad,
+ Process);
+ Vad->u.VadFlags.CommitCharge -= CommitReduction;
+ // For ReactOS: shrink the corresponding memory area
+ MemoryArea = MmLocateMemoryAreaByAddress(AddressSpace, (PVOID)StartingAddress);
+ ASSERT(Vad->StartingVpn << PAGE_SHIFT == (ULONG_PTR)MemoryArea->StartingAddress);
+ ASSERT((Vad->EndingVpn + 1) << PAGE_SHIFT == (ULONG_PTR)MemoryArea->EndingAddress);
+ Vad->EndingVpn = ((ULONG_PTR)StartingAddress - 1) >> PAGE_SHIFT;
+ MemoryArea->EndingAddress = (PVOID)(((Vad->EndingVpn + 1) << PAGE_SHIFT) - 1);
}
else
{
// around with process pages.
//
MiDeleteVirtualAddresses(StartingAddress, EndingAddress, NULL);
- MiUnlockWorkingSet(CurrentThread, AddressSpace);
+ MiUnlockProcessWorkingSetUnsafe(Process, CurrentThread);
Status = STATUS_SUCCESS;
FinalPath:
return Status;
}
+
+/* Translates a virtual address into a physical address by walking the page
+   tables of the current address space directly (PXE/PPE/PDE/PTE depending on
+   _MI_PAGING_LEVELS). Handles both large pages (PDE with the LargePage bit
+   set) and normal 4K pages. Returns a PHYSICAL_ADDRESS with QuadPart == 0 if
+   any level of the translation is invalid. NOTE(review): assumes the mapping
+   cannot change underneath the walk -- confirm caller-side locking. */
+PHYSICAL_ADDRESS
+NTAPI
+MmGetPhysicalAddress(PVOID Address)
+{
+ PHYSICAL_ADDRESS PhysicalAddress;
+ MMPDE TempPde;
+ MMPTE TempPte;
+
+ /* Check if the PXE/PPE/PDE is valid */
+ if (
+#if (_MI_PAGING_LEVELS == 4)
+ (MiAddressToPxe(Address)->u.Hard.Valid) &&
+#endif
+#if (_MI_PAGING_LEVELS >= 3)
+ (MiAddressToPpe(Address)->u.Hard.Valid) &&
+#endif
+ (MiAddressToPde(Address)->u.Hard.Valid))
+ {
+ /* Check for large pages */
+ TempPde = *MiAddressToPde(Address);
+ if (TempPde.u.Hard.LargePage)
+ {
+ /* Physical address is base page + large page offset. Widen the PFN
+ to 64 bits BEFORE shifting: if the PageFrameNumber bitfield is
+ 32 bits wide (e.g. PAE), shifting first would truncate physical
+ addresses above 4GB */
+ PhysicalAddress.QuadPart = (ULONGLONG)TempPde.u.Hard.PageFrameNumber << PAGE_SHIFT;
+ PhysicalAddress.QuadPart += ((ULONG_PTR)Address & (PAGE_SIZE * PTE_PER_PAGE - 1));
+ return PhysicalAddress;
+ }
+
+ /* Check if the PTE is valid */
+ TempPte = *MiAddressToPte(Address);
+ if (TempPte.u.Hard.Valid)
+ {
+ /* Physical address is base page + page offset (same widening rule) */
+ PhysicalAddress.QuadPart = (ULONGLONG)TempPte.u.Hard.PageFrameNumber << PAGE_SHIFT;
+ PhysicalAddress.QuadPart += ((ULONG_PTR)Address & (PAGE_SIZE - 1));
+ return PhysicalAddress;
+ }
+ }
+
+ DPRINT1("MM:MmGetPhysicalAddress failed for base address %p\n", Address);
+ PhysicalAddress.QuadPart = 0;
+ return PhysicalAddress;
+}
+
+
/* EOF */