IN ULONG NewAccessProtection,
OUT PULONG OldAccessProtection OPTIONAL);
+VOID
+NTAPI
+MiFlushTbAndCapture(IN PMMVAD FoundVad,
+ IN PMMPTE PointerPte,
+ IN ULONG ProtectionMask,
+ IN PMMPFN Pfn1,
+ IN BOOLEAN CaptureDirtyBit);
+
+
/* PRIVATE FUNCTIONS **********************************************************/
+/* Counts how many pages in [StartingAddress, EndingAddress] are committed for
+ * the given VAD within the given process.
+ *
+ * For a MemCommit VAD the whole range is assumed committed and every explicitly
+ * decommitted PTE is subtracted; for a non-committed VAD only PTEs that are
+ * valid, faulted-in, or paged-out are counted as committed.
+ *
+ * NOTE(review): this relies on the self-map — MiAddressToPte(PointerPte)
+ * yields the PDE that maps that PTE page, and MiPteToAddress(PointerPde)
+ * yields the virtual address of the PTE page itself, which is then faulted in
+ * via MiMakeSystemAddressValid before being dereferenced. */
+ULONG
+NTAPI
+MiCalculatePageCommitment(IN ULONG_PTR StartingAddress,
+ IN ULONG_PTR EndingAddress,
+ IN PMMVAD Vad,
+ IN PEPROCESS Process)
+{
+ PMMPTE PointerPte, LastPte, PointerPde;
+ ULONG CommittedPages;
+
+ /* Compute starting and ending PTE and PDE addresses */
+ PointerPde = MiAddressToPde(StartingAddress);
+ PointerPte = MiAddressToPte(StartingAddress);
+ LastPte = MiAddressToPte(EndingAddress);
+
+ /* Handle committed pages first */
+ if (Vad->u.VadFlags.MemCommit == 1)
+ {
+ /* This is a committed VAD, so assume the whole range is committed */
+ CommittedPages = BYTES_TO_PAGES(EndingAddress - StartingAddress);
+
+ /* Is the PDE demand-zero? (self-map: the PTE of a PTE is its PDE) */
+ PointerPde = MiAddressToPte(PointerPte);
+ if (PointerPde->u.Long != 0)
+ {
+ /* It is not. Is it valid? */
+ if (PointerPde->u.Hard.Valid == 0)
+ {
+ /* Fault in the page table page so its PTEs can be read */
+ PointerPte = MiPteToAddress(PointerPde);
+ MiMakeSystemAddressValid(PointerPte, Process);
+ }
+ }
+ else
+ {
+ /* It is, skip it and move to the next PDE, unless we're done */
+ PointerPde++;
+ PointerPte = MiPteToAddress(PointerPde);
+ if (PointerPte > LastPte) return CommittedPages;
+ }
+
+ /* Now loop all the PTEs in the range */
+ while (PointerPte <= LastPte)
+ {
+ /* Have we crossed a PDE boundary? */
+ if (MiIsPteOnPdeBoundary(PointerPte))
+ {
+ /* Is this PDE demand zero? */
+ PointerPde = MiAddressToPte(PointerPte);
+ if (PointerPde->u.Long != 0)
+ {
+ /* It isn't -- is it valid? */
+ if (PointerPde->u.Hard.Valid == 0)
+ {
+ /* Nope, fault it in */
+ PointerPte = MiPteToAddress(PointerPde);
+ MiMakeSystemAddressValid(PointerPte, Process);
+ }
+ }
+ else
+ {
+ /* It is, skip it and move to the next PDE */
+ PointerPde++;
+ PointerPte = MiPteToAddress(PointerPde);
+ continue;
+ }
+ }
+
+ /* Is this PTE demand zero? */
+ if (PointerPte->u.Long != 0)
+ {
+ /* It isn't -- is it a decommitted, invalid, or faulted PTE? */
+ if ((PointerPte->u.Soft.Protection == MM_DECOMMIT) &&
+ (PointerPte->u.Hard.Valid == 0) &&
+ ((PointerPte->u.Soft.Prototype == 0) ||
+ (PointerPte->u.Soft.PageFileHigh == MI_PTE_LOOKUP_NEEDED)))
+ {
+ /* It is, so remove it from the count of committed pages */
+ CommittedPages--;
+ }
+ }
+
+ /* Move to the next PTE */
+ PointerPte++;
+ }
+
+ /* Return how many committed pages there still are */
+ return CommittedPages;
+ }
+
+ /* This is a non-committed VAD, so assume none of it is committed */
+ CommittedPages = 0;
+
+ /* Is the PDE demand-zero? */
+ PointerPde = MiAddressToPte(PointerPte);
+ if (PointerPde->u.Long != 0)
+ {
+ /* It isn't -- is it invalid? */
+ if (PointerPde->u.Hard.Valid == 0)
+ {
+ /* It is, so page it in */
+ PointerPte = MiPteToAddress(PointerPde);
+ MiMakeSystemAddressValid(PointerPte, Process);
+ }
+ }
+ else
+ {
+ /* It is, so skip it and move to the next PDE */
+ PointerPde++;
+ PointerPte = MiPteToAddress(PointerPde);
+ if (PointerPte > LastPte) return CommittedPages;
+ }
+
+ /* Loop all the PTEs in this PDE */
+ while (PointerPte <= LastPte)
+ {
+ /* Have we crossed a PDE boundary? */
+ if (MiIsPteOnPdeBoundary(PointerPte))
+ {
+ /* Is this new PDE demand-zero? */
+ PointerPde = MiAddressToPte(PointerPte);
+ if (PointerPde->u.Long != 0)
+ {
+ /* It isn't. Is it valid? */
+ if (PointerPde->u.Hard.Valid == 0)
+ {
+ /* It isn't, so make it valid */
+ PointerPte = MiPteToAddress(PointerPde);
+ MiMakeSystemAddressValid(PointerPte, Process);
+ }
+ }
+ else
+ {
+ /* It is, so skip it and move to the next one */
+ PointerPde++;
+ PointerPte = MiPteToAddress(PointerPde);
+ continue;
+ }
+ }
+
+ /* Is this PTE demand-zero? */
+ if (PointerPte->u.Long != 0)
+ {
+ /* Nope. Is it a valid, non-decommitted, non-paged out PTE?
+ (this is the logical negation of the decommit test above) */
+ if ((PointerPte->u.Soft.Protection != MM_DECOMMIT) ||
+ (PointerPte->u.Hard.Valid == 1) ||
+ ((PointerPte->u.Soft.Prototype == 1) &&
+ (PointerPte->u.Soft.PageFileHigh != MI_PTE_LOOKUP_NEEDED)))
+ {
+ /* It is! So we'll treat this as a committed page */
+ CommittedPages++;
+ }
+ }
+
+ /* Move to the next PTE */
+ PointerPte++;
+ }
+
+ /* Return how many committed pages we found in this VAD */
+ return CommittedPages;
+}
+
ULONG
NTAPI
MiMakeSystemAddressValid(IN PVOID PageTableVirtualAddress,
IN PEPROCESS CurrentProcess)
{
NTSTATUS Status;
- BOOLEAN LockChange = FALSE;
+ BOOLEAN WsShared = FALSE, WsSafe = FALSE, LockChange = FALSE;
+ PETHREAD CurrentThread = PsGetCurrentThread();
/* Must be a non-pool page table, since those are double-mapped already */
ASSERT(PageTableVirtualAddress > MM_HIGHEST_USER_ADDRESS);
/* Check if the page table is valid */
while (!MmIsAddressValid(PageTableVirtualAddress))
{
+ /* Release the working set lock */
+ MiUnlockProcessWorkingSetForFault(CurrentProcess,
+ CurrentThread,
+ WsSafe,
+ WsShared);
+
/* Fault it in */
Status = MmAccessFault(FALSE, PageTableVirtualAddress, KernelMode, NULL);
if (!NT_SUCCESS(Status))
(ULONG_PTR)PageTableVirtualAddress);
}
+ /* Lock the working set again */
+ MiLockProcessWorkingSetForFault(CurrentProcess,
+ CurrentThread,
+ WsSafe,
+ WsShared);
+
/* This flag will be useful later when we do better locking */
LockChange = TRUE;
}
{
PFN_COUNT ActualPages = 0;
PETHREAD CurrentThread = PsGetCurrentThread();
- PMMPFN Pfn1;
- //PMMPFN Pfn2;
+ PMMPFN Pfn1, Pfn2;
PFN_NUMBER PageFrameIndex, PageTableIndex;
KIRQL OldIrql;
ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
/* Get the page table entry */
PageTableIndex = Pfn1->u4.PteFrame;
- //Pfn2 = MiGetPfnEntry(PageTableIndex);
+ Pfn2 = MiGetPfnEntry(PageTableIndex);
/* Lock the PFN database */
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
MiDecrementShareCount(Pfn1, PageFrameIndex);
/* Decrement the page table too */
- DPRINT("FIXME: ARM3 should decrement the pool PDE refcount for: %p\n", PageTableIndex);
- #if 0 // ARM3: Dont't trust this yet
MiDecrementShareCount(Pfn2, PageTableIndex);
- #endif
/* Release the PFN database */
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
MiDecrementShareCount(Pfn1, PageFrameIndex);
/* Either a fork, or this is the shared user data page */
- if (PointerPte <= MiHighestUserPte)
+ if ((PointerPte <= MiHighestUserPte) && (PrototypePte != Pfn1->PteAddress))
{
/* If it's not the shared user page, then crash, since there's no fork() yet */
if ((PAGE_ALIGN(VirtualAddress) != (PVOID)USER_SHARED_DATA) ||
/* There should only be 1 shared reference count */
ASSERT(Pfn1->u2.ShareCount == 1);
- /* FIXME: Drop the reference on the page table. For now, leak it until RosMM is gone */
- //DPRINT1("Dropping a ref...\n");
+ /* Drop the reference on the page table. */
MiDecrementShareCount(MiGetPfnEntry(Pfn1->u4.PteFrame), Pfn1->u4.PteFrame);
/* Mark the PFN for deletion and dereference what should be the last ref */
TempPte = *PointerPte;
if (TempPte.u.Long)
{
- DPRINT("Decrement used PTEs by address: %lx\n", Va);
- (*UsedPageTableEntries)--;
+ *UsedPageTableEntries -= 1;
ASSERT((*UsedPageTableEntries) < PTE_COUNT);
- DPRINT("Refs: %lx\n", (*UsedPageTableEntries));
/* Check if the PTE is actually mapped in */
if (TempPte.u.Long & 0xFFFFFC01)
/* The PDE should still be valid at this point */
ASSERT(PointerPde->u.Hard.Valid == 1);
- DPRINT("Should check if handles for: %p are zero (PDE: %lx)\n", Va, PointerPde->u.Hard.PageFrameNumber);
- if (!(*UsedPageTableEntries))
+ if (*UsedPageTableEntries == 0)
{
- DPRINT("They are!\n");
if (PointerPde->u.Long != 0)
{
- DPRINT("PDE active: %lx in %16s\n", PointerPde->u.Hard.PageFrameNumber, CurrentProcess->ImageFileName);
-
/* Delete the PTE proper */
MiDeletePte(PointerPde,
MiPteToAddress(PointerPde),
//
// Check if we had allocated pool
//
- if (HavePoolAddress) ExFreePool(PoolAddress);
+ if (HavePoolAddress) ExFreePoolWithTag(PoolAddress, 'VmRw');
//
// Check if we failed during the probe
//
// Check if we had allocated pool
//
- if (HavePoolAddress) ExFreePool(PoolAddress);
+ if (HavePoolAddress) ExFreePoolWithTag(PoolAddress, 'VmRw');
//
// All bytes read
/* If we get here, the PTE is valid, so look up the page in PFN database */
Pfn = MiGetPfnEntry(TempPte.u.Hard.PageFrameNumber);
-
if (!Pfn->u3.e1.PrototypePte)
{
/* Return protection of the original pte */
+ ASSERT(Pfn->u4.AweAllocation == 0);
return MmProtectToValue[Pfn->OriginalPte.u.Soft.Protection];
}
- /* This is hardware PTE */
- UNIMPLEMENTED;
- ASSERT(FALSE);
-
- return PAGE_NOACCESS;
+ /* This is software PTE */
+ DPRINT1("Prototype PTE: %lx %p\n", TempPte.u.Hard.PageFrameNumber, Pfn);
+ DPRINT1("VA: %p\n", MiPteToAddress(&TempPte));
+ DPRINT1("Mask: %lx\n", TempPte.u.Soft.Protection);
+ DPRINT1("Mask2: %lx\n", Pfn->OriginalPte.u.Soft.Protection);
+ return MmProtectToValue[TempPte.u.Soft.Protection];
}
ULONG
OUT PVOID *NextVa)
{
- PMMPTE PointerPte;
+ PMMPTE PointerPte, ProtoPte;
PMMPDE PointerPde;
- MMPTE TempPte;
+ MMPTE TempPte, TempProtoPte;
BOOLEAN DemandZeroPte = TRUE, ValidPte = FALSE;
- ULONG State = MEM_RESERVE, Protect = 0, LockChange;
+ ULONG State = MEM_RESERVE, Protect = 0;
ASSERT((Vad->StartingVpn <= ((ULONG_PTR)Va >> PAGE_SHIFT)) &&
(Vad->EndingVpn >= ((ULONG_PTR)Va >> PAGE_SHIFT)));
/* Return the next range */
*NextVa = (PVOID)((ULONG_PTR)Va + PAGE_SIZE);
- /* Loop to make sure the PDE is valid */
- do
+ /* Is the PDE demand-zero? */
+ if (PointerPde->u.Long != 0)
{
- /* Try again */
- LockChange = 0;
-
- /* Is the PDE empty? */
- if (!PointerPde->u.Long)
- {
- /* No address in this range used yet, move to the next PDE range */
- *NextVa = MiPdeToAddress(PointerPde + 1);
- break;
- }
-
- /* The PDE is not empty, but is it faulted in? */
- if (!PointerPde->u.Hard.Valid)
+ /* It is not. Is it valid? */
+ if (PointerPde->u.Hard.Valid == 0)
{
- /* It isn't, go ahead and do the fault */
- LockChange = MiMakeSystemAddressValid(MiPdeToPte(PointerPde),
- TargetProcess);
+ /* It isn't, fault it in */
+ PointerPte = MiPteToAddress(PointerPde);
+ MiMakeSystemAddressValid(PointerPte, TargetProcess);
+ ValidPte = TRUE;
}
-
- /* Check if the PDE was faulted in, making the PTE readable */
- if (!LockChange) ValidPte = TRUE;
- } while (LockChange);
+ }
+ else
+ {
+ /* It is, skip it and move to the next PDE */
+ *NextVa = MiPdeToAddress(PointerPde + 1);
+ }
/* Is it safe to try reading the PTE? */
if (ValidPte)
{
/* FIXME: watch out for large pages */
+ ASSERT(PointerPde->u.Hard.LargePage == FALSE);
/* Capture the PTE */
TempPte = *PointerPte;
- if (TempPte.u.Long)
+ if (TempPte.u.Long != 0)
{
/* The PTE is valid, so it's not zeroed out */
DemandZeroPte = FALSE;
- /* Check if it's valid or has a valid protection mask */
- ASSERT(TempPte.u.Soft.Prototype == 0);
- if ((TempPte.u.Soft.Protection != MM_DECOMMIT) ||
- (TempPte.u.Hard.Valid == 1))
+ /* Is it a decommited, invalid, or faulted PTE? */
+ if ((TempPte.u.Soft.Protection == MM_DECOMMIT) &&
+ (TempPte.u.Hard.Valid == 0) &&
+ ((TempPte.u.Soft.Prototype == 0) ||
+ (TempPte.u.Soft.PageFileHigh == MI_PTE_LOOKUP_NEEDED)))
+ {
+ /* Otherwise our defaults should hold */
+ ASSERT(Protect == 0);
+ ASSERT(State == MEM_RESERVE);
+ }
+ else
{
/* This means it's committed */
State = MEM_COMMIT;
+ /* We don't support these */
+ ASSERT(Vad->u.VadFlags.VadType != VadDevicePhysicalMemory);
+ ASSERT(Vad->u.VadFlags.VadType != VadRotatePhysical);
+ ASSERT(Vad->u.VadFlags.VadType != VadAwe);
+
/* Get protection state of this page */
Protect = MiGetPageProtection(PointerPte);
- }
- else
- {
- /* Otherwise our defaults should hold */
- ASSERT(Protect == 0);
- ASSERT(State == MEM_RESERVE);
+
+ /* Check if this is an image-backed VAD */
+ if ((TempPte.u.Soft.Valid == 0) &&
+ (TempPte.u.Soft.Prototype == 1) &&
+ (Vad->u.VadFlags.PrivateMemory == 0) &&
+ (Vad->ControlArea))
+ {
+ DPRINT1("Not supported\n");
+ ASSERT(FALSE);
+ }
}
}
}
/* Check if this was a demand-zero PTE, since we need to find the state */
if (DemandZeroPte)
{
- /* Check if the VAD is for committed memory */
- if (Vad->u.VadFlags.MemCommit)
+ /* Not yet handled */
+ ASSERT(Vad->u.VadFlags.VadType != VadDevicePhysicalMemory);
+ ASSERT(Vad->u.VadFlags.VadType != VadAwe);
+
+ /* Check if this is private committed memory, or a section-backed VAD */
+ if ((Vad->u.VadFlags.PrivateMemory == 0) && (Vad->ControlArea))
+ {
+ /* Tell caller about the next range */
+ *NextVa = (PVOID)((ULONG_PTR)Va + PAGE_SIZE);
+
+ /* Get the prototype PTE for this VAD */
+ ProtoPte = MI_GET_PROTOTYPE_PTE_FOR_VPN(Vad,
+ (ULONG_PTR)Va >> PAGE_SHIFT);
+ if (ProtoPte)
+ {
+ /* We should unlock the working set, but it's not being held! */
+
+ /* Is the prototype PTE actually valid (committed)? */
+ TempProtoPte = *ProtoPte;
+ if (TempProtoPte.u.Long)
+ {
+ /* Unless this is a memory-mapped file, handle it like private VAD */
+ State = MEM_COMMIT;
+ ASSERT(Vad->u.VadFlags.VadType != VadImageMap);
+ Protect = MmProtectToValue[Vad->u.VadFlags.Protection];
+ }
+
+ /* We should re-lock the working set */
+ }
+ }
+ else if (Vad->u.VadFlags.MemCommit)
{
/* This is committed memory */
State = MEM_COMMIT;
return Status;
}
- /* This must be a VM VAD */
- ASSERT(Vad->u.VadFlags.PrivateMemory);
+ /* Set the correct memory type based on what kind of VAD this is */
+ if ((Vad->u.VadFlags.PrivateMemory) ||
+ (Vad->u.VadFlags.VadType == VadRotatePhysical))
+ {
+ MemoryInfo.Type = MEM_PRIVATE;
+ }
+ else if (Vad->u.VadFlags.VadType == VadImageMap)
+ {
+ MemoryInfo.Type = MEM_IMAGE;
+ }
+ else
+ {
+ MemoryInfo.Type = MEM_MAPPED;
+ }
/* Lock the address space of the process */
MmLockAddressSpace(&TargetProcess->Vm);
return Status;
}
+/* Checks whether every page in [StartingAddress, EndingAddress] is committed.
+ *
+ * Returns TRUE when the whole range is committed, FALSE as soon as a
+ * non-committed (demand-zero under a non-MemCommit VAD) or explicitly
+ * decommitted PTE is found. Return type is ULONG but only TRUE/FALSE are
+ * produced. Page table pages are faulted in as needed via the self-map
+ * (MiAddressToPte of a PTE address yields its PDE).
+ * NOTE(review): caller is expected to hold the appropriate working set
+ * protection — TODO confirm against call sites. */
+ULONG
+NTAPI
+MiIsEntireRangeCommitted(IN ULONG_PTR StartingAddress,
+ IN ULONG_PTR EndingAddress,
+ IN PMMVAD Vad,
+ IN PEPROCESS Process)
+{
+ PMMPTE PointerPte, LastPte, PointerPde;
+ BOOLEAN OnBoundary = TRUE;
+ PAGED_CODE();
+
+ /* Get the PDE and PTE addresses */
+ PointerPde = MiAddressToPde(StartingAddress);
+ PointerPte = MiAddressToPte(StartingAddress);
+ LastPte = MiAddressToPte(EndingAddress);
+
+ /* Loop all the PTEs */
+ while (PointerPte <= LastPte)
+ {
+ /* Check if we've hit a new PDE boundary */
+ if (OnBoundary)
+ {
+ /* Is this PDE demand zero? */
+ PointerPde = MiAddressToPte(PointerPte);
+ if (PointerPde->u.Long != 0)
+ {
+ /* It isn't -- is it valid? */
+ if (PointerPde->u.Hard.Valid == 0)
+ {
+ /* Nope, fault it in */
+ PointerPte = MiPteToAddress(PointerPde);
+ MiMakeSystemAddressValid(PointerPte, Process);
+ }
+ }
+ else
+ {
+ /* The PDE is demand-zero, so skip it and move to the next PDE */
+ PointerPde++;
+ PointerPte = MiPteToAddress(PointerPde);
+
+ /* Is the entire VAD committed? If not, fail */
+ if (!Vad->u.VadFlags.MemCommit) return FALSE;
+
+ /* Everything is committed so far past the range, return true */
+ if (PointerPte > LastPte) return TRUE;
+ }
+ }
+
+ /* Is the PTE demand zero? */
+ if (PointerPte->u.Long == 0)
+ {
+ /* Is the entire VAD committed? If not, fail */
+ if (!Vad->u.VadFlags.MemCommit) return FALSE;
+ }
+ else
+ {
+ /* It isn't -- is it a decommitted, invalid, or faulted PTE? */
+ if ((PointerPte->u.Soft.Protection == MM_DECOMMIT) &&
+ (PointerPte->u.Hard.Valid == 0) &&
+ ((PointerPte->u.Soft.Prototype == 0) ||
+ (PointerPte->u.Soft.PageFileHigh == MI_PTE_LOOKUP_NEEDED)))
+ {
+ /* Then part of the range is decommitted, so fail */
+ return FALSE;
+ }
+ }
+
+ /* Move to the next PTE */
+ PointerPte++;
+ OnBoundary = MiIsPteOnPdeBoundary(PointerPte);
+ }
+
+ /* All PTEs seem valid, and no VAD checks failed, the range is okay */
+ return TRUE;
+}
+
+/* ReactOS-specific protection path for section-view memory areas managed by
+ * the old RosMm layer. Rounds the request to page granularity (updating both
+ * in/out parameters), locates the memory area under the address-space lock,
+ * and delegates to MmProtectSectionView.
+ *
+ * BaseAddress / NumberOfBytesToProtect — in/out, rounded to page boundaries.
+ * OldAccessProtection — optional; when NULL a local dummy receives the value.
+ * Returns STATUS_UNSUCCESSFUL if no memory area exists at the address or it
+ * is being deleted; otherwise the status from MmProtectSectionView. */
+NTSTATUS
+NTAPI
+MiRosProtectVirtualMemory(IN PEPROCESS Process,
+ IN OUT PVOID *BaseAddress,
+ IN OUT PSIZE_T NumberOfBytesToProtect,
+ IN ULONG NewAccessProtection,
+ OUT PULONG OldAccessProtection OPTIONAL)
+{
+ PMEMORY_AREA MemoryArea;
+ PMMSUPPORT AddressSpace;
+ ULONG OldAccessProtection_;
+ NTSTATUS Status;
+
+ /* Expand the range outward to whole pages: round the end up, the base down */
+ *NumberOfBytesToProtect = PAGE_ROUND_UP((ULONG_PTR)(*BaseAddress) + (*NumberOfBytesToProtect)) - PAGE_ROUND_DOWN(*BaseAddress);
+ *BaseAddress = (PVOID)PAGE_ROUND_DOWN(*BaseAddress);
+
+ AddressSpace = &Process->Vm;
+ MmLockAddressSpace(AddressSpace);
+ MemoryArea = MmLocateMemoryAreaByAddress(AddressSpace, *BaseAddress);
+ if (MemoryArea == NULL || MemoryArea->DeleteInProgress)
+ {
+ MmUnlockAddressSpace(AddressSpace);
+ return STATUS_UNSUCCESSFUL;
+ }
+
+ /* Caller may not care about the old protection; use a local dummy then */
+ if (OldAccessProtection == NULL) OldAccessProtection = &OldAccessProtection_;
+
+ /* This path is only reached for section views (see MiProtectVirtualMemory) */
+ ASSERT(MemoryArea->Type == MEMORY_AREA_SECTION_VIEW);
+ Status = MmProtectSectionView(AddressSpace,
+ MemoryArea,
+ *BaseAddress,
+ *NumberOfBytesToProtect,
+ NewAccessProtection,
+ OldAccessProtection);
+
+ MmUnlockAddressSpace(AddressSpace);
+
+ return Status;
+}
+
NTSTATUS
NTAPI
MiProtectVirtualMemory(IN PEPROCESS Process,
OUT PULONG OldAccessProtection OPTIONAL)
{
PMEMORY_AREA MemoryArea;
+ PMMVAD Vad;
+ PMMSUPPORT AddressSpace;
+ ULONG_PTR StartingAddress, EndingAddress;
+ PMMPTE PointerPde, PointerPte, LastPte;
+ MMPTE PteContents;
+ PMMPFN Pfn1;
+ ULONG ProtectionMask, OldProtect;
+ BOOLEAN Committed;
+ NTSTATUS Status = STATUS_SUCCESS;
+ PETHREAD Thread = PsGetCurrentThread();
+ /* Calculate base address for the VAD */
+ StartingAddress = (ULONG_PTR)PAGE_ALIGN((*BaseAddress));
+ EndingAddress = (((ULONG_PTR)*BaseAddress + *NumberOfBytesToProtect - 1) | (PAGE_SIZE - 1));
+
+ /* Calculate the protection mask and make sure it's valid */
+ ProtectionMask = MiMakeProtectionMask(NewAccessProtection);
+ if (ProtectionMask == MM_INVALID_PROTECTION)
+ {
+ DPRINT1("Invalid protection mask\n");
+ return STATUS_INVALID_PAGE_PROTECTION;
+ }
+
+ /* Check for ROS specific memory area */
MemoryArea = MmLocateMemoryAreaByAddress(&Process->Vm, *BaseAddress);
if ((MemoryArea) && (MemoryArea->Type == MEMORY_AREA_SECTION_VIEW))
{
+ /* Evil hack */
return MiRosProtectVirtualMemory(Process,
BaseAddress,
NumberOfBytesToProtect,
OldAccessProtection);
}
- UNIMPLEMENTED;
- return STATUS_CONFLICTING_ADDRESSES;
+ /* Lock the address space and make sure the process isn't already dead */
+ AddressSpace = MmGetCurrentAddressSpace();
+ MmLockAddressSpace(AddressSpace);
+ if (Process->VmDeleted)
+ {
+ DPRINT1("Process is dying\n");
+ Status = STATUS_PROCESS_IS_TERMINATING;
+ goto FailPath;
+ }
+
+ /* Get the VAD for this address range, and make sure it exists */
+ Vad = (PMMVAD)MiCheckForConflictingNode(StartingAddress >> PAGE_SHIFT,
+ EndingAddress >> PAGE_SHIFT,
+ &Process->VadRoot);
+ if (!Vad)
+ {
+ DPRINT("Could not find a VAD for this allocation\n");
+ Status = STATUS_CONFLICTING_ADDRESSES;
+ goto FailPath;
+ }
+
+ /* Make sure the address is within this VAD's boundaries */
+ if ((((ULONG_PTR)StartingAddress >> PAGE_SHIFT) < Vad->StartingVpn) ||
+ (((ULONG_PTR)EndingAddress >> PAGE_SHIFT) > Vad->EndingVpn))
+ {
+ Status = STATUS_CONFLICTING_ADDRESSES;
+ goto FailPath;
+ }
+
+ /* These kinds of VADs are not supported atm */
+ if ((Vad->u.VadFlags.VadType == VadAwe) ||
+ (Vad->u.VadFlags.VadType == VadDevicePhysicalMemory) ||
+ (Vad->u.VadFlags.VadType == VadLargePages))
+ {
+ DPRINT1("Illegal VAD for attempting to set protection\n");
+ Status = STATUS_CONFLICTING_ADDRESSES;
+ goto FailPath;
+ }
+
+ /* Check for a VAD whose protection can't be changed */
+ if (Vad->u.VadFlags.NoChange == 1)
+ {
+ DPRINT1("Trying to change protection of a NoChange VAD\n");
+ Status = STATUS_INVALID_PAGE_PROTECTION;
+ goto FailPath;
+ }
+
+ /* Is this section, or private memory? */
+ if (Vad->u.VadFlags.PrivateMemory == 0)
+ {
+ /* Not yet supported */
+ if (Vad->u.VadFlags.VadType == VadLargePageSection)
+ {
+ DPRINT1("Illegal VAD for attempting to set protection\n");
+ Status = STATUS_CONFLICTING_ADDRESSES;
+ goto FailPath;
+ }
+
+ /* Rotate VADs are not yet supported */
+ if (Vad->u.VadFlags.VadType == VadRotatePhysical)
+ {
+ DPRINT1("Illegal VAD for attempting to set protection\n");
+ Status = STATUS_CONFLICTING_ADDRESSES;
+ goto FailPath;
+ }
+
+ /* Not valid on section files */
+ if (NewAccessProtection & (PAGE_NOCACHE | PAGE_WRITECOMBINE))
+ {
+ /* Fail */
+ DPRINT1("Invalid protection flags for section\n");
+ Status = STATUS_INVALID_PARAMETER_4;
+ goto FailPath;
+ }
+
+ /* Check if data or page file mapping protection PTE is compatible */
+ if (!Vad->ControlArea->u.Flags.Image)
+ {
+ /* Not yet */
+ DPRINT1("Fixme: Not checking for valid protection\n");
+ }
+
+ /* This is a section, and this is not yet supported */
+ DPRINT1("Section protection not yet supported\n");
+ OldProtect = 0;
+ }
+ else
+ {
+ /* Private memory, check protection flags */
+ if ((NewAccessProtection & PAGE_WRITECOPY) ||
+ (NewAccessProtection & PAGE_EXECUTE_WRITECOPY))
+ {
+ DPRINT1("Invalid protection flags for private memory\n");
+ Status = STATUS_INVALID_PARAMETER_4;
+ goto FailPath;
+ }
+
+ /* Lock the working set */
+ MiLockProcessWorkingSetUnsafe(Process, Thread);
+
+ /* Check if all pages in this range are committed */
+ Committed = MiIsEntireRangeCommitted(StartingAddress,
+ EndingAddress,
+ Vad,
+ Process);
+ if (!Committed)
+ {
+ /* Fail */
+ DPRINT1("The entire range is not committed\n");
+ Status = STATUS_NOT_COMMITTED;
+ MiUnlockProcessWorkingSetUnsafe(Process, Thread);
+ goto FailPath;
+ }
+
+ /* Compute starting and ending PTE and PDE addresses */
+ PointerPde = MiAddressToPde(StartingAddress);
+ PointerPte = MiAddressToPte(StartingAddress);
+ LastPte = MiAddressToPte(EndingAddress);
+
+ /* Make this PDE valid */
+ MiMakePdeExistAndMakeValid(PointerPde, Process, MM_NOIRQL);
+
+ /* Save protection of the first page */
+ if (PointerPte->u.Long != 0)
+ {
+ /* Capture the page protection and make the PDE valid */
+ OldProtect = MiGetPageProtection(PointerPte);
+ MiMakePdeExistAndMakeValid(PointerPde, Process, MM_NOIRQL);
+ }
+ else
+ {
+ /* Grab the old protection from the VAD itself */
+ OldProtect = MmProtectToValue[Vad->u.VadFlags.Protection];
+ }
+
+ /* Loop all the PTEs now */
+ while (PointerPte <= LastPte)
+ {
+ /* Check if we've crossed a PDE boundary and make the new PDE valid too */
+ if (MiIsPteOnPdeBoundary(PointerPte))
+ {
+ PointerPde = MiAddressToPte(PointerPte);
+ MiMakePdeExistAndMakeValid(PointerPde, Process, MM_NOIRQL);
+ }
+
+ /* Capture the PTE and check if it was empty */
+ PteContents = *PointerPte;
+ if (PteContents.u.Long == 0)
+ {
+ /* This used to be a zero PTE and it no longer is, so we must add a
+ reference to the pagetable. */
+ MiIncrementPageTableReferences(MiPteToAddress(PointerPte));
+ }
+
+ /* Check what kind of PTE we are dealing with */
+ if (PteContents.u.Hard.Valid == 1)
+ {
+ /* Get the PFN entry */
+ Pfn1 = MiGetPfnEntry(PFN_FROM_PTE(&PteContents));
+
+ /* We don't support this yet */
+ ASSERT(Pfn1->u3.e1.PrototypePte == 0);
+
+ /* Check if the page should not be accessible at all */
+ if ((NewAccessProtection & PAGE_NOACCESS) ||
+ (NewAccessProtection & PAGE_GUARD))
+ {
+ /* The page should be in the WS and we should make it transition now */
+ DPRINT1("Making valid page invalid is not yet supported!\n");
+ Status = STATUS_NOT_IMPLEMENTED;
+ /* Unlock the working set */
+ MiUnlockProcessWorkingSetUnsafe(Process, Thread);
+ goto FailPath;
+ }
+
+ /* Write the protection mask and write it with a TLB flush */
+ Pfn1->OriginalPte.u.Soft.Protection = ProtectionMask;
+ MiFlushTbAndCapture(Vad,
+ PointerPte,
+ ProtectionMask,
+ Pfn1,
+ TRUE);
+ }
+ else
+ {
+ /* We don't support these cases yet */
+ ASSERT(PteContents.u.Soft.Prototype == 0);
+ ASSERT(PteContents.u.Soft.Transition == 0);
+
+ /* The PTE is already demand-zero, just update the protection mask */
+ PointerPte->u.Soft.Protection = ProtectionMask;
+ ASSERT(PointerPte->u.Long != 0);
+ }
+
+ /* Move to the next PTE */
+ PointerPte++;
+ }
+
+ /* Unlock the working set */
+ MiUnlockProcessWorkingSetUnsafe(Process, Thread);
+ }
+
+ /* Unlock the address space */
+ MmUnlockAddressSpace(AddressSpace);
+
+ /* Return parameters and success */
+ *NumberOfBytesToProtect = EndingAddress - StartingAddress + 1;
+ *BaseAddress = (PVOID)StartingAddress;
+ *OldAccessProtection = OldProtect;
+ return STATUS_SUCCESS;
+
+FailPath:
+ /* Unlock the address space and return the failure code */
+ MmUnlockAddressSpace(AddressSpace);
+ return Status;
}
VOID
ULONG PteCount = 0;
PMMPFN Pfn1;
MMPTE PteContents;
- PUSHORT UsedPageTableEntries;
PETHREAD CurrentThread = PsGetCurrentThread();
//
PointerPde = MiAddressToPde(StartingAddress);
PointerPte = MiAddressToPte(StartingAddress);
if (Vad->u.VadFlags.MemCommit) CommitPte = MiAddressToPte(Vad->EndingVpn << PAGE_SHIFT);
- MiLockWorkingSet(CurrentThread, &Process->Vm);
+ MiLockProcessWorkingSetUnsafe(Process, CurrentThread);
//
// Make the PDE valid, and now loop through each page's worth of data
//
// Check if we've crossed a PDE boundary
//
- if ((((ULONG_PTR)PointerPte) & (SYSTEM_PD_SIZE - 1)) == 0)
+ if (MiIsPteOnPdeBoundary(PointerPte))
{
//
// Get the new PDE and flush the valid PTEs we had built up until
// This used to be a zero PTE and it no longer is, so we must add a
// reference to the pagetable.
//
- UsedPageTableEntries = &MmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(StartingAddress)];
- (*UsedPageTableEntries)++;
- ASSERT((*UsedPageTableEntries) <= PTE_COUNT);
+ MiIncrementPageTableReferences(StartingAddress);
//
// Next, we account for decommitted PTEs and make the PTE as such
// release the working set and return the commit reduction accounting.
//
if (PteCount) MiProcessValidPteList(ValidPteList, PteCount);
- MiUnlockWorkingSet(CurrentThread, &Process->Vm);
+ MiUnlockProcessWorkingSetUnsafe(Process, CurrentThread);
return CommitReduction;
}
PMEMORY_AREA MemoryArea;
PFN_NUMBER PageCount;
PMMVAD Vad, FoundVad;
- PUSHORT UsedPageTableEntries;
NTSTATUS Status;
PMMSUPPORT AddressSpace;
PVOID PBaseAddress;
KPROCESSOR_MODE PreviousMode = KeGetPreviousMode();
PETHREAD CurrentThread = PsGetCurrentThread();
KAPC_STATE ApcState;
- ULONG ProtectionMask;
+ ULONG ProtectionMask, QuotaCharge = 0, QuotaFree = 0;
BOOLEAN Attached = FALSE, ChangeProtection = FALSE;
MMPTE TempPte;
PMMPTE PointerPte, PointerPde, LastPte;
}
}
- //
- // Force PAGE_READWRITE for everything, for now
- //
- Protect = PAGE_READWRITE;
-
/* Calculate the protection mask and make sure it's valid */
ProtectionMask = MiMakeProtectionMask(Protect);
if (ProtectionMask == MM_INVALID_PROTECTION)
}
//
- // Assert on the things we don't yet support
+ // Fail on the things we don't yet support
//
- ASSERT(ZeroBits == 0);
- ASSERT((AllocationType & MEM_LARGE_PAGES) == 0);
- ASSERT((AllocationType & MEM_PHYSICAL) == 0);
- ASSERT((AllocationType & MEM_WRITE_WATCH) == 0);
- ASSERT((AllocationType & MEM_TOP_DOWN) == 0);
- ASSERT((AllocationType & MEM_RESET) == 0);
- ASSERT(Process->VmTopDown == 0);
+ if (ZeroBits != 0)
+ {
+ DPRINT1("Zero bits not supported\n");
+ Status = STATUS_INVALID_PARAMETER;
+ goto FailPathNoLock;
+ }
+ if ((AllocationType & MEM_LARGE_PAGES) == MEM_LARGE_PAGES)
+ {
+ DPRINT1("MEM_LARGE_PAGES not supported\n");
+ Status = STATUS_INVALID_PARAMETER;
+ goto FailPathNoLock;
+ }
+ if ((AllocationType & MEM_PHYSICAL) == MEM_PHYSICAL)
+ {
+ DPRINT1("MEM_PHYSICAL not supported\n");
+ Status = STATUS_INVALID_PARAMETER;
+ goto FailPathNoLock;
+ }
+ if ((AllocationType & MEM_WRITE_WATCH) == MEM_WRITE_WATCH)
+ {
+ DPRINT1("MEM_WRITE_WATCH not supported\n");
+ Status = STATUS_INVALID_PARAMETER;
+ goto FailPathNoLock;
+ }
+ if ((AllocationType & MEM_TOP_DOWN) == MEM_TOP_DOWN)
+ {
+ DPRINT1("MEM_TOP_DOWN not supported\n");
+ Status = STATUS_INVALID_PARAMETER;
+ goto FailPathNoLock;
+ }
+ if ((AllocationType & MEM_RESET) == MEM_RESET)
+ {
+ DPRINT1("MEM_RESET not supported\n");
+ Status = STATUS_INVALID_PARAMETER;
+ goto FailPathNoLock;
+ }
+ if (Process->VmTopDown == 1)
+ {
+ DPRINT1("VmTopDown not supported\n");
+ Status = STATUS_INVALID_PARAMETER;
+ goto FailPathNoLock;
+ }
//
// Check if the caller is reserving memory, or committing memory and letting
//
PRegionSize = ROUND_TO_PAGES(PRegionSize);
PageCount = BYTES_TO_PAGES(PRegionSize);
+ EndingAddress = 0;
+ StartingAddress = 0;
}
else
{
// which was passed in isn't already conflicting with an existing address
// range.
//
- EndingAddress = 0;
if (!PBaseAddress)
{
Status = MiFindEmptyAddressRangeInTree(PRegionSize,
&Process->VadRoot,
(PMMADDRESS_NODE*)&Process->VadFreeHint,
&StartingAddress);
- ASSERT(NT_SUCCESS(Status));
+ if (!NT_SUCCESS(Status)) goto FailPath;
+
+ //
+ // Now we know where the allocation ends. Make sure it doesn't end up
+ // somewhere in kernel mode.
+ //
+ EndingAddress = ((ULONG_PTR)StartingAddress + PRegionSize - 1) | (PAGE_SIZE - 1);
+ if ((PVOID)EndingAddress > MM_HIGHEST_VAD_ADDRESS)
+ {
+ Status = STATUS_NO_MEMORY;
+ goto FailPath;
+ }
}
else if (MiCheckForConflictingNode(StartingAddress >> PAGE_SHIFT,
EndingAddress >> PAGE_SHIFT,
goto FailPath;
}
- //
- // Now we know where the allocation ends. Make sure it doesn't end up
- // somewhere in kernel mode.
- //
- EndingAddress = ((ULONG_PTR)StartingAddress + PRegionSize - 1) | (PAGE_SIZE - 1);
- if ((PVOID)EndingAddress > MM_HIGHEST_VAD_ADDRESS)
- {
- Status = STATUS_NO_MEMORY;
- goto FailPath;
- }
-
//
// Write out the VAD fields for this allocation
//
//
// Lock the working set and insert the VAD into the process VAD tree
//
- MiLockProcessWorkingSet(Process, CurrentThread);
+ MiLockProcessWorkingSetUnsafe(Process, CurrentThread);
Vad->ControlArea = NULL; // For Memory-Area hack
MiInsertVad(Vad, Process);
- MiUnlockProcessWorkingSet(Process, CurrentThread);
+ MiUnlockProcessWorkingSetUnsafe(Process, CurrentThread);
//
// Update the virtual size of the process, and if this is now the highest
}
//
- // If this is an existing section view, we call the old RosMm routine which
- // has the relevant code required to handle the section scenario. In the future
- // we will limit this even more so that there's almost nothing that the code
- // needs to do, and it will become part of section.c in RosMm
+ // Make sure this is an ARM3 section
//
MemoryArea = MmLocateMemoryAreaByAddress(AddressSpace, (PVOID)PAGE_ROUND_DOWN(PBaseAddress));
- if (MemoryArea->Type != MEMORY_AREA_OWNED_BY_ARM3)
+ ASSERT(MemoryArea->Type == MEMORY_AREA_OWNED_BY_ARM3);
+
+ // Is this a previously reserved section being committed? If so, enter the
+ // special section path
+ //
+ if (FoundVad->u.VadFlags.PrivateMemory == FALSE)
{
- return MiRosAllocateVirtualMemory(ProcessHandle,
- Process,
- MemoryArea,
- AddressSpace,
- UBaseAddress,
- Attached,
- URegionSize,
- AllocationType,
- Protect);
+ //
+ // You cannot commit large page sections through this API
+ //
+ if (FoundVad->u.VadFlags.VadType == VadLargePageSection)
+ {
+ DPRINT1("Large page sections cannot be VirtualAlloc'd\n");
+ Status = STATUS_INVALID_PAGE_PROTECTION;
+ goto FailPath;
+ }
+
+ //
+ // You can only use caching flags on a rotate VAD
+ //
+ if ((Protect & (PAGE_NOCACHE | PAGE_WRITECOMBINE)) &&
+ (FoundVad->u.VadFlags.VadType != VadRotatePhysical))
+ {
+ DPRINT1("Cannot use caching flags with anything but rotate VADs\n");
+ Status = STATUS_INVALID_PAGE_PROTECTION;
+ goto FailPath;
+ }
+
+ //
+ // We should make sure that the section's permissions aren't being
+ // messed with
+ //
+ if (FoundVad->u.VadFlags.NoChange)
+ {
+ //
+ // Make sure it's okay to touch it
+ //
+ Status = MiCheckSecuredVad(FoundVad,
+ PBaseAddress,
+ PRegionSize,
+ ProtectionMask);
+ if (!NT_SUCCESS(Status))
+ {
+ DPRINT1("Secured VAD being messed around with\n");
+ goto FailPath;
+ }
+ }
+
+ //
+ // ARM3 does not support file-backed sections, only shared memory
+ //
+ ASSERT(FoundVad->ControlArea->FilePointer == NULL);
+
+ //
+ // Rotate VADs cannot be guard pages or inaccessible, nor copy on write
+ //
+ if ((FoundVad->u.VadFlags.VadType == VadRotatePhysical) &&
+ (Protect & (PAGE_WRITECOPY | PAGE_EXECUTE_WRITECOPY | PAGE_NOACCESS | PAGE_GUARD)))
+ {
+ DPRINT1("Invalid page protection for rotate VAD\n");
+ Status = STATUS_INVALID_PAGE_PROTECTION;
+ goto FailPath;
+ }
+
+ //
+ // Compute PTE addresses and the quota charge, then grab the commit lock
+ //
+ PointerPte = MI_GET_PROTOTYPE_PTE_FOR_VPN(FoundVad, StartingAddress >> PAGE_SHIFT);
+ LastPte = MI_GET_PROTOTYPE_PTE_FOR_VPN(FoundVad, EndingAddress >> PAGE_SHIFT);
+ QuotaCharge = (ULONG)(LastPte - PointerPte + 1);
+ KeAcquireGuardedMutexUnsafe(&MmSectionCommitMutex);
+
+ //
+ // Get the segment template PTE and start looping each page
+ //
+ TempPte = FoundVad->ControlArea->Segment->SegmentPteTemplate;
+ ASSERT(TempPte.u.Long != 0);
+ while (PointerPte <= LastPte)
+ {
+ //
+ // For each non-already-committed page, write the invalid template PTE
+ //
+ if (PointerPte->u.Long == 0)
+ {
+ MI_WRITE_INVALID_PTE(PointerPte, TempPte);
+ }
+ else
+ {
+ QuotaFree++;
+ }
+ PointerPte++;
+ }
+
+ //
+ // Now do the commit accounting and release the lock
+ //
+ ASSERT(QuotaCharge >= QuotaFree);
+ QuotaCharge -= QuotaFree;
+ FoundVad->ControlArea->Segment->NumberOfCommittedPages += QuotaCharge;
+ KeReleaseGuardedMutexUnsafe(&MmSectionCommitMutex);
+
+ //
+ // We are done with committing the section pages
+ //
+ Status = STATUS_SUCCESS;
+ goto FailPath;
}
//
- // This is a specific ReactOS check because we do not support Section VADs
+ // This is a specific ReactOS check because we only use normal VADs
//
ASSERT(FoundVad->u.VadFlags.VadType == VadNone);
- ASSERT(FoundVad->u.VadFlags.PrivateMemory == TRUE);
//
// While this is an actual Windows check
//
// Lock the working set while we play with user pages and page tables
//
- //MiLockWorkingSet(CurrentThread, AddressSpace);
+ MiLockProcessWorkingSetUnsafe(Process, CurrentThread);
//
// Make the current page table valid, and then loop each page within it
//
// Have we crossed into a new page table?
//
- if (!(((ULONG_PTR)PointerPte) & (SYSTEM_PD_SIZE - 1)))
+ if (MiIsPteOnPdeBoundary(PointerPte))
{
//
// Get the PDE and now make it valid too
// First increment the count of pages in the page table for this
// process
//
- UsedPageTableEntries = &MmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(MiPteToAddress(PointerPte))];
- (*UsedPageTableEntries)++;
- ASSERT((*UsedPageTableEntries) <= PTE_COUNT);
+ MiIncrementPageTableReferences(MiPteToAddress(PointerPte));
//
// And now write the invalid demand-zero PTE as requested
// the target process if it was not the current process. Also dereference the
// target process if this wasn't the case.
//
- //MiUnlockProcessWorkingSet(Process, CurrentThread);
+ MiUnlockProcessWorkingSetUnsafe(Process, CurrentThread);
Status = STATUS_SUCCESS;
FailPath:
MmUnlockAddressSpace(AddressSpace);
IN ULONG FreeType)
{
PMEMORY_AREA MemoryArea;
- ULONG PRegionSize;
+ SIZE_T PRegionSize;
PVOID PBaseAddress;
- ULONG CommitReduction = 0;
+ ULONG_PTR CommitReduction = 0;
ULONG_PTR StartingAddress, EndingAddress;
PMMVAD Vad;
NTSTATUS Status;
}
//
- // These ASSERTs are here because ReactOS ARM3 does not currently implement
- // any other kinds of VADs.
+ // Only private memory (except rotate VADs) can be freed through here
+ //
+ if ((!(Vad->u.VadFlags.PrivateMemory) &&
+ (Vad->u.VadFlags.VadType != VadRotatePhysical)) ||
+ (Vad->u.VadFlags.VadType == VadDevicePhysicalMemory))
+ {
+ DPRINT1("Attempt to free section memory\n");
+ Status = STATUS_UNABLE_TO_DELETE_SECTION;
+ goto FailPath;
+ }
+
+ //
+ // ARM3 does not yet handle protected VM
//
- ASSERT(Vad->u.VadFlags.PrivateMemory == 1);
ASSERT(Vad->u.VadFlags.NoChange == 0);
- ASSERT(Vad->u.VadFlags.VadType == VadNone);
//
// Finally, make sure there is a ReactOS Mm MEMORY_AREA for this allocation
//
if (FreeType & MEM_RELEASE)
{
+ //
+ // ARM3 only supports this VAD in this path
+ //
+ ASSERT(Vad->u.VadFlags.VadType == VadNone);
+
//
// Is the caller trying to remove the whole VAD, or remove only a portion
// of it? If no region size is specified, then the assumption is that the
//
// Finally lock the working set and remove the VAD from the VAD tree
//
- MiLockWorkingSet(CurrentThread, AddressSpace);
+ MiLockProcessWorkingSetUnsafe(Process, CurrentThread);
ASSERT(Process->VadRoot.NumberGenericTableElements >= 1);
MiRemoveNode((PMMADDRESS_NODE)Vad, &Process->VadRoot);
}
// the code path above when the caller sets a zero region size
// and the whole VAD is destroyed
//
- MiLockWorkingSet(CurrentThread, AddressSpace);
+ MiLockProcessWorkingSetUnsafe(Process, CurrentThread);
ASSERT(Process->VadRoot.NumberGenericTableElements >= 1);
MiRemoveNode((PMMADDRESS_NODE)Vad, &Process->VadRoot);
}
//
if ((EndingAddress >> PAGE_SHIFT) == Vad->EndingVpn)
{
+ PMEMORY_AREA MemoryArea;
+
//
// This is pretty easy and similar to case A. We compute the
// amount of pages to decommit, update the VAD's commit charge
// and then change the ending address of the VAD to be a bit
// smaller.
//
- // NOT YET IMPLEMENTED IN ARM3.
- //
- DPRINT1("Case C not handled\n");
- Status = STATUS_FREE_VM_NOT_AT_BASE;
- goto FailPath;
+ MiLockProcessWorkingSetUnsafe(Process, CurrentThread);
+ CommitReduction = MiCalculatePageCommitment(StartingAddress,
+ EndingAddress,
+ Vad,
+ Process);
+ Vad->u.VadFlags.CommitCharge -= CommitReduction;
+ // For ReactOS: shrink the corresponding memory area
+ MemoryArea = MmLocateMemoryAreaByAddress(AddressSpace, (PVOID)StartingAddress);
+ ASSERT(Vad->StartingVpn << PAGE_SHIFT == (ULONG_PTR)MemoryArea->StartingAddress);
+ ASSERT((Vad->EndingVpn + 1) << PAGE_SHIFT == (ULONG_PTR)MemoryArea->EndingAddress);
+ Vad->EndingVpn = ((ULONG_PTR)StartingAddress - 1) >> PAGE_SHIFT;
+ MemoryArea->EndingAddress = (PVOID)(((Vad->EndingVpn + 1) << PAGE_SHIFT) - 1);
}
else
{
// around with process pages.
//
MiDeleteVirtualAddresses(StartingAddress, EndingAddress, NULL);
- MiUnlockWorkingSet(CurrentThread, AddressSpace);
+ MiUnlockProcessWorkingSetUnsafe(Process, CurrentThread);
Status = STATUS_SUCCESS;
FinalPath:
// return whatever failure code was sent.
//
FailPath:
- MiUnlockWorkingSet(CurrentThread, AddressSpace);
+ MmUnlockAddressSpace(AddressSpace);
if (Attached) KeUnstackDetachProcess(&ApcState);
if (ProcessHandle != NtCurrentProcess()) ObDereferenceObject(Process);
return Status;