*/
/* INCLUDES *******************************************************************/
-/* So long, and Thanks for All the Fish */
#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>
#define MODULE_INVOLVED_IN_ARM3
-#include "../ARM3/miarm.h"
+#include <mm/ARM3/miarm.h>
#define MI_MAPPED_COPY_PAGES 14
#define MI_POOL_COPY_BYTES 512
IN PMMVAD Vad,
IN PEPROCESS Process)
{
- PMMPTE PointerPte, LastPte, PointerPde;
+ PMMPTE PointerPte, LastPte;
+ PMMPDE PointerPde;
ULONG CommittedPages;
/* Compute starting and ending PTE and PDE addresses */
CommittedPages = (ULONG)BYTES_TO_PAGES(EndingAddress - StartingAddress);
/* Is the PDE demand-zero? */
- PointerPde = MiAddressToPte(PointerPte);
+ PointerPde = MiPteToPde(PointerPte);
if (PointerPde->u.Long != 0)
{
/* It is not. Is it valid? */
if (MiIsPteOnPdeBoundary(PointerPte))
{
/* Is this PDE demand zero? */
- PointerPde = MiAddressToPte(PointerPte);
+ PointerPde = MiPteToPde(PointerPte);
if (PointerPde->u.Long != 0)
{
/* It isn't -- is it valid? */
CommittedPages = 0;
/* Is the PDE demand-zero? */
- PointerPde = MiAddressToPte(PointerPte);
+ PointerPde = MiPteToPde(PointerPte);
if (PointerPde->u.Long != 0)
{
/* It isn't -- is it invalid? */
if (MiIsPteOnPdeBoundary(PointerPte))
{
/* Is this new PDE demand-zero? */
- PointerPde = MiAddressToPte(PointerPte);
+ PointerPde = MiPteToPde(PointerPte);
if (PointerPde->u.Long != 0)
{
/* It isn't. Is it valid? */
while (!MmIsAddressValid(VirtualAddress))
{
/* Release the PFN database */
- KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
+ MiReleasePfnLock(OldIrql);
/* Fault it in */
Status = MmAccessFault(FALSE, VirtualAddress, KernelMode, NULL);
LockChange = TRUE;
/* Lock the PFN database */
- OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
+ OldIrql = MiAcquirePfnLock();
}
/* Let caller know what the lock state is */
Pfn2 = MiGetPfnEntry(PageTableIndex);
/* Lock the PFN database */
- OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
+ OldIrql = MiAcquirePfnLock();
/* Delete it the page */
MI_SET_PFN_DELETED(Pfn1);
MiDecrementShareCount(Pfn2, PageTableIndex);
/* Release the PFN database */
- KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
+ MiReleasePfnLock(OldIrql);
/* Destroy the PTE */
MI_ERASE_PTE(PointerPte);
PMMPDE PointerPde;
/* PFN lock must be held */
- ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
+ MI_ASSERT_PFN_LOCK_HELD();
/* Capture the PTE */
TempPte = *PointerPte;
/* See if the PTE is valid */
if (TempPte.u.Hard.Valid == 0)
{
- /* Prototype PTEs not supported yet */
+ /* Prototype and paged out PTEs not supported yet */
ASSERT(TempPte.u.Soft.Prototype == 0);
+ ASSERT((TempPte.u.Soft.PageFileHigh == 0) || (TempPte.u.Soft.Transition == 1));
+
if (TempPte.u.Soft.Transition)
{
/* Get the PFN entry */
PageFrameIndex = PFN_FROM_PTE(&TempPte);
Pfn1 = MiGetPfnEntry(PageFrameIndex);
- DPRINT1("Pte %p is transitional!\n", PointerPte);
+ DPRINT("Pte %p is transitional!\n", PointerPte);
+
+ /* Make sure the saved PTE address is valid */
+ ASSERT((PMMPTE)((ULONG_PTR)Pfn1->PteAddress & ~0x1) == PointerPte);
/* Destroy the PTE */
MI_ERASE_PTE(PointerPte);
ASSERT(Pfn1->u3.e1.PrototypePte == 0);
/* Make the page free. For prototypes, it will be made free when deleting the section object */
- if (Pfn1->u2.ShareCount == 0)
+ if (Pfn1->u3.e2.ReferenceCount == 0)
{
- NT_ASSERT(Pfn1->u3.e2.ReferenceCount == 0);
-
/* And it should be in standby or modified list */
ASSERT((Pfn1->u3.e1.PageLocation == ModifiedPageList) || (Pfn1->u3.e1.PageLocation == StandbyPageList));
- /* Unlink it and put it back in free list */
+ /* Unlink it and set its reference count to one */
MiUnlinkPageFromList(Pfn1);
- Pfn1->u3.e1.PageLocation = ActiveAndValid;
+ Pfn1->u3.e2.ReferenceCount++;
- /* Bring it back into the free list */
- MiInsertPageInFreeList(PageFrameIndex);
+ /* This will put it back in free list and clean properly up */
+ MI_SET_PFN_DELETED(Pfn1);
+ MiDecrementReferenceCount(Pfn1, PageFrameIndex);
}
return;
}
#if (_MI_PAGING_LEVELS == 2)
}
#endif
+ /* Drop the share count on the page table */
+ PointerPde = MiPteToPde(PointerPte);
+ MiDecrementShareCount(MiGetPfnEntry(PointerPde->u.Hard.PageFrameNumber),
+ PointerPde->u.Hard.PageFrameNumber);
+
/* Drop the share count */
MiDecrementShareCount(Pfn1, PageFrameIndex);
}
/* Lock the PFN Database while we delete the PTEs */
- OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
+ OldIrql = MiAcquirePfnLock();
do
{
/* Capture the PDE and make sure it exists */
}
/* Release the lock and get out if we're done */
- KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
+ MiReleasePfnLock(OldIrql);
if (Va > EndingAddress) return;
/* Otherwise, we exited because we hit a new PDE boundary, so start over */
PFN_NUMBER MdlBuffer[(sizeof(MDL) / sizeof(PFN_NUMBER)) + MI_MAPPED_COPY_PAGES + 1];
PMDL Mdl = (PMDL)MdlBuffer;
SIZE_T TotalSize, CurrentSize, RemainingSize;
- volatile BOOLEAN FailedInProbe = FALSE, FailedInMapping = FALSE, FailedInMoving;
- volatile BOOLEAN PagesLocked;
+ volatile BOOLEAN FailedInProbe = FALSE;
+ volatile BOOLEAN PagesLocked = FALSE;
PVOID CurrentAddress = SourceAddress, CurrentTargetAddress = TargetAddress;
- volatile PVOID MdlAddress;
+ volatile PVOID MdlAddress = NULL;
KAPC_STATE ApcState;
BOOLEAN HaveBadAddress;
ULONG_PTR BadAddress;
KeStackAttachProcess(&SourceProcess->Pcb, &ApcState);
//
- // Reset state for this pass
+ // Check state for this pass
//
- MdlAddress = NULL;
- PagesLocked = FALSE;
- FailedInMoving = FALSE;
+ ASSERT(MdlAddress == NULL);
+ ASSERT(PagesLocked == FALSE);
ASSERT(FailedInProbe == FALSE);
//
MmInitializeMdl(Mdl, CurrentAddress, CurrentSize);
MmProbeAndLockPages(Mdl, PreviousMode, IoReadAccess);
PagesLocked = TRUE;
+ }
+ _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
+ {
+ Status = _SEH2_GetExceptionCode();
+ }
+ _SEH2_END
- //
- // Now map the pages
- //
- MdlAddress = MmMapLockedPagesSpecifyCache(Mdl,
- KernelMode,
- MmCached,
- NULL,
- FALSE,
- HighPagePriority);
- if (!MdlAddress)
- {
- //
- // Use our SEH handler to pick this up
- //
- FailedInMapping = TRUE;
- ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
- }
+ /* Detach from source process */
+ KeUnstackDetachProcess(&ApcState);
- //
- // Now let go of the source and grab to the target process
- //
- KeUnstackDetachProcess(&ApcState);
- KeStackAttachProcess(&TargetProcess->Pcb, &ApcState);
+ if (Status != STATUS_SUCCESS)
+ {
+ goto Exit;
+ }
+
+ //
+ // Now map the pages
+ //
+ MdlAddress = MmMapLockedPagesSpecifyCache(Mdl,
+ KernelMode,
+ MmCached,
+ NULL,
+ FALSE,
+ HighPagePriority);
+ if (!MdlAddress)
+ {
+ Status = STATUS_INSUFFICIENT_RESOURCES;
+ goto Exit;
+ }
+ //
+ // Grab to the target process
+ //
+ KeStackAttachProcess(&TargetProcess->Pcb, &ApcState);
+
+ _SEH2_TRY
+ {
//
// Check if this is our first time through
//
- if ((CurrentAddress == SourceAddress) && (PreviousMode != KernelMode))
+ if ((CurrentTargetAddress == TargetAddress) && (PreviousMode != KernelMode))
{
//
// Catch a failure here
//
// Now do the actual move
//
- FailedInMoving = TRUE;
RtlCopyMemory(CurrentTargetAddress, MdlAddress, CurrentSize);
}
_SEH2_EXCEPT(MiGetExceptionInfo(_SEH2_GetExceptionInformation(),
&HaveBadAddress,
&BadAddress))
{
+ *ReturnSize = BufferSize - RemainingSize;
//
- // Detach from whoever we may be attached to
- //
- KeUnstackDetachProcess(&ApcState);
-
- //
- // Check if we had mapped the pages
- //
- if (MdlAddress) MmUnmapLockedPages(MdlAddress, Mdl);
-
- //
- // Check if we had locked the pages
- //
- if (PagesLocked) MmUnlockPages(Mdl);
-
- //
- // Check if we hit working set quota
- //
- if (_SEH2_GetExceptionCode() == STATUS_WORKING_SET_QUOTA)
- {
- //
- // Return the error
- //
- _SEH2_YIELD(return STATUS_WORKING_SET_QUOTA);
- }
-
- //
- // Check if we failed during the probe or mapping
+ // Check if we failed during the probe
//
- if ((FailedInProbe) || (FailedInMapping))
+ if (FailedInProbe)
{
//
// Exit
//
Status = _SEH2_GetExceptionCode();
- _SEH2_YIELD(return Status);
}
-
- //
- // Otherwise, we failed probably during the move
- //
- *ReturnSize = BufferSize - RemainingSize;
- if (FailedInMoving)
+ else
{
//
+ // Otherwise we failed during the move.
// Check if we know exactly where we stopped copying
//
if (HaveBadAddress)
//
*ReturnSize = BadAddress - (ULONG_PTR)SourceAddress;
}
+ //
+ // Return partial copy
+ //
+ Status = STATUS_PARTIAL_COPY;
}
-
- //
- // Return partial copy
- //
- Status = STATUS_PARTIAL_COPY;
}
_SEH2_END;
- //
- // Check for SEH status
- //
- if (Status != STATUS_SUCCESS) return Status;
+ /* Detach from target process */
+ KeUnstackDetachProcess(&ApcState);
//
- // Detach from target
+ // Check for SEH status
//
- KeUnstackDetachProcess(&ApcState);
+ if (Status != STATUS_SUCCESS)
+ {
+ goto Exit;
+ }
//
// Unmap and unlock
//
MmUnmapLockedPages(MdlAddress, Mdl);
+ MdlAddress = NULL;
MmUnlockPages(Mdl);
+ PagesLocked = FALSE;
//
// Update location and size
CurrentTargetAddress = (PVOID)((ULONG_PTR)CurrentTargetAddress + CurrentSize);
}
+Exit:
+ if (MdlAddress != NULL)
+ MmUnmapLockedPages(MdlAddress, Mdl);
+ if (PagesLocked)
+ MmUnlockPages(Mdl);
+
//
// All bytes read
//
- *ReturnSize = BufferSize;
- return STATUS_SUCCESS;
+ if (Status == STATUS_SUCCESS)
+ *ReturnSize = BufferSize;
+ return Status;
}
NTSTATUS
{
UCHAR StackBuffer[MI_POOL_COPY_BYTES];
SIZE_T TotalSize, CurrentSize, RemainingSize;
- volatile BOOLEAN FailedInProbe = FALSE, FailedInMoving, HavePoolAddress = FALSE;
+ volatile BOOLEAN FailedInProbe = FALSE, HavePoolAddress = FALSE;
PVOID CurrentAddress = SourceAddress, CurrentTargetAddress = TargetAddress;
PVOID PoolAddress;
KAPC_STATE ApcState;
NTSTATUS Status = STATUS_SUCCESS;
PAGED_CODE();
+ DPRINT("Copying %Iu bytes from process %p (address %p) to process %p (Address %p)\n",
+ BufferSize, SourceProcess, SourceAddress, TargetProcess, TargetAddress);
+
//
// Calculate the maximum amount of data to move
//
//
KeStackAttachProcess(&SourceProcess->Pcb, &ApcState);
- //
- // Reset state for this pass
- //
- FailedInMoving = FALSE;
+ /* Check that state is sane */
ASSERT(FailedInProbe == FALSE);
+ ASSERT(Status == STATUS_SUCCESS);
//
// Protect user-mode copy
// Do the copy
//
RtlCopyMemory(PoolAddress, CurrentAddress, CurrentSize);
+ }
+ _SEH2_EXCEPT(MiGetExceptionInfo(_SEH2_GetExceptionInformation(),
+ &HaveBadAddress,
+ &BadAddress))
+ {
+ *ReturnSize = BufferSize - RemainingSize;
//
- // Now let go of the source and grab to the target process
+ // Check if we failed during the probe
//
- KeUnstackDetachProcess(&ApcState);
- KeStackAttachProcess(&TargetProcess->Pcb, &ApcState);
+ if (FailedInProbe)
+ {
+ //
+ // Exit
+ //
+ Status = _SEH2_GetExceptionCode();
+ }
+ else
+ {
+ //
+ // We failed during the move.
+ // Check if we know exactly where we stopped copying
+ //
+ if (HaveBadAddress)
+ {
+ //
+ // Return the exact number of bytes copied
+ //
+ *ReturnSize = BadAddress - (ULONG_PTR)SourceAddress;
+ }
+ //
+ // Return partial copy
+ //
+ Status = STATUS_PARTIAL_COPY;
+ }
+ }
+ _SEH2_END
+ /* Let go of the source */
+ KeUnstackDetachProcess(&ApcState);
+
+ if (Status != STATUS_SUCCESS)
+ {
+ goto Exit;
+ }
+
+ /* Grab the target process */
+ KeStackAttachProcess(&TargetProcess->Pcb, &ApcState);
+
+ _SEH2_TRY
+ {
//
// Check if this is our first time through
//
- if ((CurrentAddress == SourceAddress) && (PreviousMode != KernelMode))
+ if ((CurrentTargetAddress == TargetAddress) && (PreviousMode != KernelMode))
{
//
// Catch a failure here
//
// Now do the actual move
//
- FailedInMoving = TRUE;
RtlCopyMemory(CurrentTargetAddress, PoolAddress, CurrentSize);
}
_SEH2_EXCEPT(MiGetExceptionInfo(_SEH2_GetExceptionInformation(),
&HaveBadAddress,
&BadAddress))
{
- //
- // Detach from whoever we may be attached to
- //
- KeUnstackDetachProcess(&ApcState);
-
- //
- // Check if we had allocated pool
- //
- if (HavePoolAddress) ExFreePoolWithTag(PoolAddress, 'VmRw');
-
+ *ReturnSize = BufferSize - RemainingSize;
//
// Check if we failed during the probe
//
// Exit
//
Status = _SEH2_GetExceptionCode();
- _SEH2_YIELD(return Status);
}
-
- //
- // Otherwise, we failed, probably during the move
- //
- *ReturnSize = BufferSize - RemainingSize;
- if (FailedInMoving)
+ else
{
//
+ // Otherwise we failed during the move.
// Check if we know exactly where we stopped copying
//
if (HaveBadAddress)
//
*ReturnSize = BadAddress - (ULONG_PTR)SourceAddress;
}
+ //
+ // Return partial copy
+ //
+ Status = STATUS_PARTIAL_COPY;
}
-
- //
- // Return partial copy
- //
- Status = STATUS_PARTIAL_COPY;
}
_SEH2_END;
//
- // Check for SEH status
+ // Detach from target
//
- if (Status != STATUS_SUCCESS) return Status;
+ KeUnstackDetachProcess(&ApcState);
//
- // Detach from target
+ // Check for SEH status
//
- KeUnstackDetachProcess(&ApcState);
+ if (Status != STATUS_SUCCESS)
+ {
+ goto Exit;
+ }
//
// Update location and size
CurrentSize);
}
+Exit:
//
// Check if we had allocated pool
//
- if (HavePoolAddress) ExFreePoolWithTag(PoolAddress, 'VmRw');
+ if (HavePoolAddress)
+ ExFreePoolWithTag(PoolAddress, 'VmRw');
//
// All bytes read
//
- *ReturnSize = BufferSize;
- return STATUS_SUCCESS;
+ if (Status == STATUS_SUCCESS)
+ *ReturnSize = BufferSize;
+ return Status;
}
NTSTATUS
{
/* The PTE is valid, so we might need to get the protection from
the PFN. Lock the PFN database */
- OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
+ OldIrql = MiAcquirePfnLock();
/* Check if the PDE is still valid */
if (MiAddressToPte(PointerPte)->u.Hard.Valid == 0)
}
/* Release the PFN database */
- KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
+ MiReleasePfnLock(OldIrql);
}
/* Lock the working set again */
if (!NT_SUCCESS(Status))
{
DPRINT1("MmQuerySectionView failed. MemoryArea=%p (%p-%p), BaseAddress=%p\n",
- MemoryArea, MemoryArea->StartingAddress, MemoryArea->EndingAddress, BaseAddress);
- NT_ASSERT(NT_SUCCESS(Status));
+ MemoryArea, MA_GetStartingAddress(MemoryArea), MA_GetEndingAddress(MemoryArea), BaseAddress);
+ ASSERT(NT_SUCCESS(Status));
}
}
else
IN PMMVAD Vad,
IN PEPROCESS Process)
{
- PMMPTE PointerPte, LastPte, PointerPde;
+ PMMPTE PointerPte, LastPte;
+ PMMPDE PointerPde;
BOOLEAN OnBoundary = TRUE;
PAGED_CODE();
if (OnBoundary)
{
/* Is this PDE demand zero? */
- PointerPde = MiAddressToPte(PointerPte);
+ PointerPde = MiPteToPde(PointerPte);
if (PointerPde->u.Long != 0)
{
/* It isn't -- is it valid? */
if (PointerPde->u.Hard.Valid == 0)
{
/* Nope, fault it in */
- PointerPte = MiPteToAddress(PointerPde);
MiMakeSystemAddressValid(PointerPte, Process);
}
}
{
/* The PTE was already valid, so move to the next one */
PointerPde++;
- PointerPte = MiPteToAddress(PointerPde);
+ PointerPte = MiPdeToPte(PointerPde);
/* Is the entire VAD committed? If not, fail */
if (!Vad->u.VadFlags.MemCommit) return FALSE;
- /* Everything is committed so far past the range, return true */
- if (PointerPte > LastPte) return TRUE;
+ /* New loop iteration with our new, on-boundary PTE. */
+ continue;
}
}
PMMVAD Vad;
PMMSUPPORT AddressSpace;
ULONG_PTR StartingAddress, EndingAddress;
- PMMPTE PointerPde, PointerPte, LastPte;
+ PMMPTE PointerPte, LastPte;
+ PMMPDE PointerPde;
MMPTE PteContents;
PMMPFN Pfn1;
ULONG ProtectionMask, OldProtect;
/* Check if we've crossed a PDE boundary and make the new PDE valid too */
if (MiIsPteOnPdeBoundary(PointerPte))
{
- PointerPde = MiAddressToPte(PointerPte);
+ PointerPde = MiPteToPde(PointerPte);
MiMakePdeExistAndMakeValid(PointerPde, Process, MM_NOIRQL);
}
if ((NewAccessProtection & PAGE_NOACCESS) ||
(NewAccessProtection & PAGE_GUARD))
{
- KIRQL OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
+ KIRQL OldIrql = MiAcquirePfnLock();
/* Mark the PTE as transition and change its protection */
PteContents.u.Hard.Valid = 0;
KeInvalidateTlbEntry(MiPteToAddress(PointerPte));
/* We are done for this PTE */
- KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
+ MiReleasePfnLock(OldIrql);
}
else
{
VOID
NTAPI
-MiMakePdeExistAndMakeValid(IN PMMPTE PointerPde,
+MiMakePdeExistAndMakeValid(IN PMMPDE PointerPde,
IN PEPROCESS TargetProcess,
IN KIRQL OldIrql)
{
//
// Acquire the PFN lock and loop all the PTEs in the list
//
- OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
+ OldIrql = MiAcquirePfnLock();
for (i = 0; i != Count; i++)
{
//
// and then release the PFN lock
//
KeFlushCurrentTb();
- KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
+ MiReleasePfnLock(OldIrql);
}
ULONG
IN PEPROCESS Process,
IN PMMVAD Vad)
{
- PMMPTE PointerPde, PointerPte, CommitPte = NULL;
+ PMMPTE PointerPte, CommitPte = NULL;
+ PMMPDE PointerPde;
ULONG CommitReduction = 0;
PMMPTE ValidPteList[256];
ULONG PteCount = 0;
IN SIZE_T Length,
IN ULONG Mode)
{
- static BOOLEAN Warn; if (!Warn++) UNIMPLEMENTED;
+ static ULONG Warn; if (!Warn++) UNIMPLEMENTED;
return Address;
}
NTAPI
MmUnsecureVirtualMemory(IN PVOID SecureMem)
{
- static BOOLEAN Warn; if (!Warn++) UNIMPLEMENTED;
+ static ULONG Warn; if (!Warn++) UNIMPLEMENTED;
}
/* SYSTEM CALLS ***************************************************************/
return Status;
}
+/**
+ * NtFlushInstructionCache
+ *
+ * System call: flushes the instruction cache for a range of addresses,
+ * optionally in another process (attached to via KeStackAttachProcess).
+ *
+ * ProcessHandle - Target process handle, or NtCurrentProcess() for the
+ *                 caller's own process (no attach/reference in that case).
+ * BaseAddress   - Optional start of the range to flush. If NULL, the range
+ *                 checks are skipped and the address/size are passed through
+ *                 to KeSweepICache as-is (presumably a full sweep — confirm
+ *                 against KeSweepICache's contract).
+ * FlushSize     - Size in bytes of the range to flush; 0 with a non-NULL
+ *                 base is a successful no-op.
+ *
+ * Returns STATUS_SUCCESS, STATUS_ACCESS_VIOLATION for a user-mode caller
+ * passing a kernel-space base address, or the failure status from
+ * ObReferenceObjectByHandle when the target process cannot be referenced.
+ */
+NTSTATUS
+NTAPI
+NtFlushInstructionCache(_In_ HANDLE ProcessHandle,
+ _In_opt_ PVOID BaseAddress,
+ _In_ SIZE_T FlushSize)
+{
+ KAPC_STATE ApcState;
+ PKPROCESS Process;
+ NTSTATUS Status;
+ PAGED_CODE();
+
+ /* Is a base address given? */
+ if (BaseAddress != NULL)
+ {
+ /* If the requested size is 0, there is nothing to do */
+ if (FlushSize == 0)
+ {
+ return STATUS_SUCCESS;
+ }
+
+ /* Is this a user mode call? */
+ if (ExGetPreviousMode() != KernelMode)
+ {
+ /* Make sure the base address is in user space */
+ /* NOTE(review): only the base is validated here; BaseAddress + FlushSize
+ may still extend past MmHighestUserAddress — confirm this is intended */
+ if (BaseAddress > MmHighestUserAddress)
+ {
+ DPRINT1("Invalid BaseAddress 0x%p\n", BaseAddress);
+ return STATUS_ACCESS_VIOLATION;
+ }
+ }
+ }
+
+ /* Is another process requested? */
+ if (ProcessHandle != NtCurrentProcess())
+ {
+ /* Reference the process */
+ Status = ObReferenceObjectByHandle(ProcessHandle,
+ PROCESS_VM_WRITE,
+ PsProcessType,
+ ExGetPreviousMode(),
+ (PVOID*)&Process,
+ NULL);
+ if (!NT_SUCCESS(Status))
+ {
+ DPRINT1("Failed to reference the process %p\n", ProcessHandle);
+ return Status;
+ }
+
+ /* Attach to the process */
+ KeStackAttachProcess(Process, &ApcState);
+ }
+
+ /* Forward to Ke */
+ KeSweepICache(BaseAddress, FlushSize);
+
+ /* Check if we attached */
+ if (ProcessHandle != NtCurrentProcess())
+ {
+ /* Detach from the process and dereference it */
+ KeUnstackDetachProcess(&ApcState);
+ ObDereferenceObject(Process);
+ }
+
+ /* All done, return to caller */
+ return STATUS_SUCCESS;
+}
+
NTSTATUS
NTAPI
NtProtectVirtualMemory(IN HANDLE ProcessHandle,
{
PEPROCESS Process;
PMEMORY_AREA MemoryArea;
- PFN_NUMBER PageCount;
PMMVAD Vad = NULL, FoundVad;
NTSTATUS Status;
PMMSUPPORT AddressSpace;
PVOID PBaseAddress;
- ULONG_PTR PRegionSize, StartingAddress, EndingAddress, HighestAddress;
+ ULONG_PTR PRegionSize, StartingAddress, EndingAddress;
+ ULONG_PTR HighestAddress = (ULONG_PTR)MM_HIGHEST_VAD_ADDRESS;
PEPROCESS CurrentProcess = PsGetCurrentProcess();
KPROCESSOR_MODE PreviousMode = KeGetPreviousMode();
PETHREAD CurrentThread = PsGetCurrentThread();
ULONG ProtectionMask, QuotaCharge = 0, QuotaFree = 0;
BOOLEAN Attached = FALSE, ChangeProtection = FALSE;
MMPTE TempPte;
- PMMPTE PointerPte, PointerPde, LastPte;
+ PMMPTE PointerPte, LastPte;
+ PMMPDE PointerPde;
TABLE_SEARCH_RESULT Result;
- PMMADDRESS_NODE Parent;
PAGED_CODE();
/* Check for valid Zero bits */
// This is a blind commit, all we need is the region size
//
PRegionSize = ROUND_TO_PAGES(PRegionSize);
- PageCount = BYTES_TO_PAGES(PRegionSize);
EndingAddress = 0;
StartingAddress = 0;
goto FailPathNoLock;
}
}
- else
- {
- //
- // Use the highest VAD address as maximum
- //
- HighestAddress = (ULONG_PTR)MM_HIGHEST_VAD_ADDRESS;
- }
}
else
{
// fall based on the aligned address and the passed in region size
//
EndingAddress = ((ULONG_PTR)PBaseAddress + PRegionSize - 1) | (PAGE_SIZE - 1);
- StartingAddress = ROUND_DOWN((ULONG_PTR)PBaseAddress, _64K);
- PageCount = BYTES_TO_PAGES(EndingAddress - StartingAddress);
+ PRegionSize = EndingAddress + 1 - ROUND_DOWN((ULONG_PTR)PBaseAddress, _64K);
+ StartingAddress = (ULONG_PTR)PBaseAddress;
}
//
goto FailPathNoLock;
}
- Vad->u.LongFlags = 0;
+ RtlZeroMemory(Vad, sizeof(MMVAD_LONG));
if (AllocationType & MEM_COMMIT) Vad->u.VadFlags.MemCommit = 1;
Vad->u.VadFlags.Protection = ProtectionMask;
Vad->u.VadFlags.PrivateMemory = 1;
- Vad->u.VadFlags.CommitCharge = AllocationType & MEM_COMMIT ? PageCount : 0;
-
- //
- // Lock the address space and make sure the process isn't already dead
- //
- AddressSpace = MmGetCurrentAddressSpace();
- MmLockAddressSpace(AddressSpace);
- if (Process->VmDeleted)
- {
- Status = STATUS_PROCESS_IS_TERMINATING;
- goto FailPath;
- }
-
- //
- // Did we have a base address? If no, find a valid address that is 64KB
- // aligned in the VAD tree. Otherwise, make sure that the address range
- // which was passed in isn't already conflicting with an existing address
- // range.
- //
- if (!PBaseAddress)
- {
- /* Which way should we search? */
- if ((AllocationType & MEM_TOP_DOWN) || Process->VmTopDown)
- {
- /* Find an address top-down */
- Result = MiFindEmptyAddressRangeDownTree(PRegionSize,
- HighestAddress,
- _64K,
- &Process->VadRoot,
- &StartingAddress,
- &Parent);
- }
- else
- {
- /* Find an address bottom-up */
- Result = MiFindEmptyAddressRangeInTree(PRegionSize,
- _64K,
- &Process->VadRoot,
- &Parent,
- &StartingAddress);
- }
-
- if (Result == TableFoundNode)
- {
- Status = STATUS_NO_MEMORY;
- goto FailPath;
- }
-
- //
- // Now we know where the allocation ends. Make sure it doesn't end up
- // somewhere in kernel mode.
- //
- ASSERT(StartingAddress != 0);
- ASSERT(StartingAddress < (ULONG_PTR)MM_HIGHEST_USER_ADDRESS);
- EndingAddress = (StartingAddress + PRegionSize - 1) | (PAGE_SIZE - 1);
- ASSERT(EndingAddress > StartingAddress);
- if (EndingAddress > HighestAddress)
- {
- Status = STATUS_NO_MEMORY;
- goto FailPath;
- }
- }
- else
- {
- /* Make sure it doesn't conflict with an existing allocation */
- Result = MiCheckForConflictingNode(StartingAddress >> PAGE_SHIFT,
- EndingAddress >> PAGE_SHIFT,
- &Process->VadRoot,
- &Parent);
- if (Result == TableFoundNode)
- {
- //
- // The address specified is in conflict!
- //
- Status = STATUS_CONFLICTING_ADDRESSES;
- goto FailPath;
- }
- }
-
- //
- // Write out the VAD fields for this allocation
- //
- Vad->StartingVpn = StartingAddress >> PAGE_SHIFT;
- Vad->EndingVpn = EndingAddress >> PAGE_SHIFT;
-
- //
- // FIXME: Should setup VAD bitmap
- //
- Status = STATUS_SUCCESS;
-
- //
- // Lock the working set and insert the VAD into the process VAD tree
- //
- MiLockProcessWorkingSetUnsafe(Process, CurrentThread);
Vad->ControlArea = NULL; // For Memory-Area hack
- Process->VadRoot.NodeHint = Vad;
- MiInsertNode(&Process->VadRoot, (PVOID)Vad, Parent, Result);
- MiUnlockProcessWorkingSetUnsafe(Process, CurrentThread);
//
- // Make sure the actual region size is at least as big as the
- // requested region size and update the value
- //
- ASSERT(PRegionSize <= (EndingAddress + 1 - StartingAddress));
- PRegionSize = (EndingAddress + 1 - StartingAddress);
-
+ // Insert the VAD
//
- // Update the virtual size of the process, and if this is now the highest
- // virtual size we have ever seen, update the peak virtual size to reflect
- // this.
- //
- Process->VirtualSize += PRegionSize;
- if (Process->VirtualSize > Process->PeakVirtualSize)
+ Status = MiInsertVadEx(Vad,
+ &StartingAddress,
+ PRegionSize,
+ HighestAddress,
+ MM_VIRTMEM_GRANULARITY,
+ AllocationType);
+ if (!NT_SUCCESS(Status))
{
- Process->PeakVirtualSize = Process->VirtualSize;
+ DPRINT1("Failed to insert the VAD!\n");
+ goto FailPathNoLock;
}
//
- // Release address space and detach and dereference the target process if
+ // Detach and dereference the target process if
// it was different from the current process
//
- MmUnlockAddressSpace(AddressSpace);
if (Attached) KeUnstackDetachProcess(&ApcState);
if (ProcessHandle != NtCurrentProcess()) ObDereferenceObject(Process);
//
TempPte.u.Long = 0;
TempPte.u.Soft.Protection = ProtectionMask;
- NT_ASSERT(TempPte.u.Long != 0);
+ ASSERT(TempPte.u.Long != 0);
//
// Get the PTE, PDE and the last PTE for this address range
//
// Get the PDE and now make it valid too
//
- PointerPde = MiAddressToPte(PointerPte);
+ PointerPde = MiPteToPde(PointerPte);
MiMakePdeExistAndMakeValid(PointerPde, Process, MM_NOIRQL);
}
PMEMORY_AREA MemoryArea;
SIZE_T PRegionSize;
PVOID PBaseAddress;
- LONG_PTR CommitReduction = 0;
+ LONG_PTR AlreadyDecommitted, CommitReduction = 0;
ULONG_PTR StartingAddress, EndingAddress;
PMMVAD Vad;
NTSTATUS Status;
PAGED_CODE();
//
- // Only two flags are supported
+ // Exactly one of MEM_RELEASE or MEM_DECOMMIT must be set.
//
- if (!(FreeType & (MEM_RELEASE | MEM_DECOMMIT)))
+ if (FreeType != MEM_RELEASE && FreeType != MEM_DECOMMIT)
{
- DPRINT1("Invalid FreeType\n");
- return STATUS_INVALID_PARAMETER_4;
- }
-
- //
- // Check if no flag was used, or if both flags were used
- //
- if (!((FreeType & (MEM_DECOMMIT | MEM_RELEASE))) ||
- ((FreeType & (MEM_DECOMMIT | MEM_RELEASE)) == (MEM_DECOMMIT | MEM_RELEASE)))
- {
- DPRINT1("Invalid FreeType combination\n");
+ DPRINT1("Invalid FreeType (0x%08lx)\n", FreeType);
return STATUS_INVALID_PARAMETER_4;
}
}
}
- DPRINT("NtFreeVirtualMemory: Process 0x%p, Adress 0x%p, size 0x%x, FreeType %x.\n",
- Process, PBaseAddress, PRegionSize, FreeType);
+ DPRINT("NtFreeVirtualMemory: Process 0x%p, Address 0x%p, Size 0x%Ix, FreeType 0x%08lx\n",
+ Process, PBaseAddress, PRegionSize, FreeType);
//
// Lock the address space
Vad->u.VadFlags.CommitCharge -= CommitReduction;
// For ReactOS: shrink the corresponding memory area
MemoryArea = MmLocateMemoryAreaByAddress(AddressSpace, (PVOID)StartingAddress);
- ASSERT(Vad->StartingVpn << PAGE_SHIFT == (ULONG_PTR)MemoryArea->StartingAddress);
- ASSERT((Vad->EndingVpn + 1) << PAGE_SHIFT == (ULONG_PTR)MemoryArea->EndingAddress);
- Vad->EndingVpn = ((ULONG_PTR)StartingAddress - 1) >> PAGE_SHIFT;
- MemoryArea->EndingAddress = (PVOID)(StartingAddress);
+ ASSERT(Vad->StartingVpn == MemoryArea->VadNode.StartingVpn);
+ ASSERT(Vad->EndingVpn == MemoryArea->VadNode.EndingVpn);
+ Vad->EndingVpn = (StartingAddress - 1) >> PAGE_SHIFT;
+ MemoryArea->VadNode.EndingVpn = Vad->EndingVpn;
}
else
{
// Decommit the PTEs for the range plus the actual backing pages for the
// range, then reduce that amount from the commit charge in the VAD
//
+ AlreadyDecommitted = MiDecommitPages((PVOID)StartingAddress,
+ MiAddressToPte(EndingAddress),
+ Process,
+ Vad);
CommitReduction = MiAddressToPte(EndingAddress) -
MiAddressToPte(StartingAddress) +
1 -
- MiDecommitPages((PVOID)StartingAddress,
- MiAddressToPte(EndingAddress),
- Process,
- Vad);
+ AlreadyDecommitted;
+
ASSERT(CommitReduction >= 0);
Vad->u.VadFlags.CommitCharge -= CommitReduction;
ASSERT(Vad->u.VadFlags.CommitCharge >= 0);