BOOLEAN UserPdeFault = FALSE;
#endif
-LONG MmSystemLockPagesCount;
-
/* PRIVATE FUNCTIONS **********************************************************/
+NTSTATUS
+NTAPI
+MiCheckForUserStackOverflow(IN PVOID Address,
+ IN PVOID TrapInformation)
+{
+ PETHREAD CurrentThread = PsGetCurrentThread();
+ PTEB Teb = CurrentThread->Tcb.Teb;
+ PVOID StackBase, DeallocationStack, NextStackAddress;
+ SIZE_T GuranteedSize;
+ NTSTATUS Status;
+
+ /* Do we own the address space lock? */
+ if (CurrentThread->AddressSpaceOwner == 1)
+ {
+ /* This isn't valid */
+ DPRINT1("Process owns address space lock\n");
+ ASSERT(KeAreAllApcsDisabled() == TRUE);
+ return STATUS_GUARD_PAGE_VIOLATION;
+ }
+
+ /* Are we attached? */
+ if (KeIsAttachedProcess())
+ {
+ /* This isn't valid */
+ DPRINT1("Process is attached\n");
+ return STATUS_GUARD_PAGE_VIOLATION;
+ }
+
+ /* Read the current settings */
+ StackBase = Teb->NtTib.StackBase;
+ DeallocationStack = Teb->DeallocationStack;
+ GuranteedSize = Teb->GuaranteedStackBytes;
+ DPRINT("Handling guard page fault with Stacks Addresses 0x%p and 0x%p, guarantee: %lx\n",
+ StackBase, DeallocationStack, GuranteedSize);
+
+ /* Guarantees make this code harder, for now, assume there aren't any */
+ ASSERT(GuranteedSize == 0);
+
+ /* So allocate only the minimum guard page size */
+ GuranteedSize = PAGE_SIZE;
+
+ /* Does this faulting stack address actually exist in the stack? */
+ if ((Address >= StackBase) || (Address < DeallocationStack))
+ {
+ /* That's odd... */
+ DPRINT1("Faulting address outside of stack bounds. Address=%p, StackBase=%p, DeallocationStack=%p\n",
+ Address, StackBase, DeallocationStack);
+ return STATUS_GUARD_PAGE_VIOLATION;
+ }
+
+ /* This is where the stack will start now */
+ NextStackAddress = (PVOID)((ULONG_PTR)PAGE_ALIGN(Address) - GuranteedSize);
+
+ /* Do we have at least one page between here and the end of the stack? */
+ if (((ULONG_PTR)NextStackAddress - PAGE_SIZE) <= (ULONG_PTR)DeallocationStack)
+ {
+ /* We don't -- Windows would try to make this guard page valid now */
+ DPRINT1("Close to our death...\n");
+ ASSERT(FALSE);
+ return STATUS_STACK_OVERFLOW;
+ }
+
+ /* Don't handle this flag yet */
+ ASSERT((PsGetCurrentProcess()->Peb->NtGlobalFlag & FLG_DISABLE_STACK_EXTENSION) == 0);
+
+ /* Update the stack limit */
+ Teb->NtTib.StackLimit = (PVOID)((ULONG_PTR)NextStackAddress + GuranteedSize);
+
+ /* Now move the guard page to the next page */
+ Status = ZwAllocateVirtualMemory(NtCurrentProcess(),
+ &NextStackAddress,
+ 0,
+ &GuranteedSize,
+ MEM_COMMIT,
+ PAGE_READWRITE | PAGE_GUARD);
+ if ((NT_SUCCESS(Status) || (Status == STATUS_ALREADY_COMMITTED)))
+ {
+ /* We did it! */
+ DPRINT("Guard page handled successfully for %p\n", Address);
+ return STATUS_PAGE_FAULT_GUARD_PAGE;
+ }
+
+ /* Fail, we couldn't move the guard page */
+ DPRINT1("Guard page failure: %lx\n", Status);
+ ASSERT(FALSE);
+ return STATUS_STACK_OVERFLOW;
+}
+
+FORCEINLINE
+BOOLEAN
+MiIsAccessAllowed(
+ _In_ ULONG ProtectionMask,
+ _In_ BOOLEAN Write,
+ _In_ BOOLEAN Execute)
+{
+ #define _BYTE_MASK(Bit0, Bit1, Bit2, Bit3, Bit4, Bit5, Bit6, Bit7) \
+ (Bit0) | ((Bit1) << 1) | ((Bit2) << 2) | ((Bit3) << 3) | \
+ ((Bit4) << 4) | ((Bit5) << 5) | ((Bit6) << 6) | ((Bit7) << 7)
+ static const UCHAR AccessAllowedMask[2][2] =
+ {
+ { // Protect 0 1 2 3 4 5 6 7
+ _BYTE_MASK(0, 1, 1, 1, 1, 1, 1, 1), // READ
+ _BYTE_MASK(0, 0, 1, 1, 0, 0, 1, 1), // EXECUTE READ
+ },
+ {
+ _BYTE_MASK(0, 0, 0, 0, 1, 1, 1, 1), // WRITE
+ _BYTE_MASK(0, 0, 0, 0, 0, 0, 1, 1), // EXECUTE WRITE
+ }
+ };
+
+ /* We want only the lower access bits */
+ ProtectionMask &= MM_PROTECT_ACCESS;
+
+ /* Look it up in the table */
+ return (AccessAllowedMask[Write != 0][Execute != 0] >> ProtectionMask) & 1;
+}
+
+NTSTATUS
+NTAPI
+MiAccessCheck(IN PMMPTE PointerPte,
+ IN BOOLEAN StoreInstruction,
+ IN KPROCESSOR_MODE PreviousMode,
+ IN ULONG_PTR ProtectionMask,
+ IN PVOID TrapFrame,
+ IN BOOLEAN LockHeld)
+{
+ MMPTE TempPte;
+
+ /* Check for invalid user-mode access */
+ if ((PreviousMode == UserMode) && (PointerPte > MiHighestUserPte))
+ {
+ return STATUS_ACCESS_VIOLATION;
+ }
+
+ /* Capture the PTE -- is it valid? */
+ TempPte = *PointerPte;
+ if (TempPte.u.Hard.Valid)
+ {
+ /* Was someone trying to write to it? */
+ if (StoreInstruction)
+ {
+ /* Is it writable?*/
+ if ((TempPte.u.Hard.Write) || (TempPte.u.Hard.CopyOnWrite))
+ {
+ /* Then there's nothing to worry about */
+ return STATUS_SUCCESS;
+ }
+
+ /* Oops! This isn't allowed */
+ return STATUS_ACCESS_VIOLATION;
+ }
+
+ /* Someone was trying to read from a valid PTE, that's fine too */
+ return STATUS_SUCCESS;
+ }
+
+ /* Check if the protection on the page allows what is being attempted */
+ if (!MiIsAccessAllowed(ProtectionMask, StoreInstruction, FALSE))
+ {
+ return STATUS_ACCESS_VIOLATION;
+ }
+
+ /* Check if this is a guard page */
+ if ((ProtectionMask & MM_PROTECT_SPECIAL) == MM_GUARDPAGE)
+ {
+ NT_ASSERT(ProtectionMask != MM_DECOMMIT);
+
+ /* Attached processes can't expand their stack */
+ if (KeIsAttachedProcess()) return STATUS_ACCESS_VIOLATION;
+
+ /* No support for transition PTEs yet */
+ ASSERT(((TempPte.u.Soft.Transition == 1) &&
+ (TempPte.u.Soft.Prototype == 0)) == FALSE);
+
+ /* Remove the guard page bit, and return a guard page violation */
+ TempPte.u.Soft.Protection = ProtectionMask & ~MM_GUARDPAGE;
+ NT_ASSERT(TempPte.u.Long != 0);
+ MI_WRITE_INVALID_PTE(PointerPte, TempPte);
+ return STATUS_GUARD_PAGE_VIOLATION;
+ }
+
+ /* Nothing to do */
+ return STATUS_SUCCESS;
+}
+
PMMPTE
NTAPI
MiCheckVirtualAddress(IN PVOID VirtualAddress,
}
#if (_MI_PAGING_LEVELS == 2)
-BOOLEAN
FORCEINLINE
+BOOLEAN
MiSynchronizeSystemPde(PMMPDE PointerPde)
{
MMPDE SystemPde;
FASTCALL
MiCheckPdeForSessionSpace(IN PVOID Address)
{
- /* Code not yet tested */
- ASSERT(FALSE);
- return STATUS_NOT_IMPLEMENTED;
+ MMPTE TempPde;
+ PMMPTE PointerPde;
+ PVOID SessionAddress;
+ ULONG Index;
+
+ /* Is this a session PTE? */
+ if (MI_IS_SESSION_PTE(Address))
+ {
+ /* Make sure the PDE for session space is valid */
+ PointerPde = MiAddressToPde(MmSessionSpace);
+ if (!PointerPde->u.Hard.Valid)
+ {
+ /* This means there's no valid session, bail out */
+ DbgPrint("MiCheckPdeForSessionSpace: No current session for PTE %p\n",
+ Address);
+ DbgBreakPoint();
+ return STATUS_ACCESS_VIOLATION;
+ }
+
+ /* Now get the session-specific page table for this address */
+ SessionAddress = MiPteToAddress(Address);
+ PointerPde = MiAddressToPte(Address);
+ if (PointerPde->u.Hard.Valid) return STATUS_WAIT_1;
+
+ /* It's not valid, so find it in the page table array */
+ Index = ((ULONG_PTR)SessionAddress - (ULONG_PTR)MmSessionBase) >> 22;
+ TempPde.u.Long = MmSessionSpace->PageTables[Index].u.Long;
+ if (TempPde.u.Hard.Valid)
+ {
+ /* The copy is valid, so swap it in */
+ InterlockedExchange((PLONG)PointerPde, TempPde.u.Long);
+ return STATUS_WAIT_1;
+ }
+
+ /* We don't seem to have allocated a page table for this address yet? */
+ DbgPrint("MiCheckPdeForSessionSpace: No Session PDE for PTE %p, %p\n",
+ PointerPde->u.Long, SessionAddress);
+ DbgBreakPoint();
+ return STATUS_ACCESS_VIOLATION;
+ }
+
+ /* Is the address also a session address? If not, we're done */
+ if (!MI_IS_SESSION_ADDRESS(Address)) return STATUS_SUCCESS;
+
+ /* It is, so again get the PDE for session space */
+ PointerPde = MiAddressToPde(MmSessionSpace);
+ if (!PointerPde->u.Hard.Valid)
+ {
+ /* This means there's no valid session, bail out */
+ DbgPrint("MiCheckPdeForSessionSpace: No current session for VA %p\n",
+ Address);
+ DbgBreakPoint();
+ return STATUS_ACCESS_VIOLATION;
+ }
+
+ /* Now get the PDE for the address itself */
+ PointerPde = MiAddressToPde(Address);
+ if (!PointerPde->u.Hard.Valid)
+ {
+ /* Do the swap, we should be good to go */
+ Index = ((ULONG_PTR)Address - (ULONG_PTR)MmSessionBase) >> 22;
+ PointerPde->u.Long = MmSessionSpace->PageTables[Index].u.Long;
+ if (PointerPde->u.Hard.Valid) return STATUS_WAIT_1;
+
+ /* We had not allocated a page table for this session address yet, fail! */
+ DbgPrint("MiCheckPdeForSessionSpace: No Session PDE for VA %p, %p\n",
+ PointerPde->u.Long, Address);
+ DbgBreakPoint();
+ return STATUS_ACCESS_VIOLATION;
+ }
+
+ /* It's valid, so there's nothing to do */
+ return STATUS_SUCCESS;
}
NTSTATUS
/* Check if we need a zero page */
NeedZero = (OldIrql != MM_NOIRQL);
- /* Get the next system page color */
- Color = MI_GET_NEXT_COLOR();
+ /* Session-backed image views must be zeroed */
+ if ((Process == HYDRA_PROCESS) &&
+ ((MI_IS_SESSION_IMAGE_ADDRESS(Address)) ||
+ ((Address >= MiSessionViewStart) && (Address < MiSessionSpaceWs))))
+ {
+ NeedZero = TRUE;
+ }
+
+ /* Hardcode unknown color */
+ Color = 0xFFFFFFFF;
}
/* Check if the PFN database should be acquired */
if (!Process) MI_SET_PROCESS2("Kernel Demand 0");
/* Do we need a zero page? */
- if ((NeedZero) && (Process))
+ if (Color != 0xFFFFFFFF)
{
/* Try to get one, if we couldn't grab a free page and zero it */
PageFrameNumber = MiRemoveZeroPageSafe(Color);
- if (PageFrameNumber)
- {
- /* We got a genuine zero page, stop worrying about it */
- NeedZero = FALSE;
- }
- else
+ if (!PageFrameNumber)
{
/* We'll need a free page and zero it manually */
PageFrameNumber = MiRemoveAnyPage(Color);
+ NeedZero = TRUE;
}
}
- else if (!NeedZero)
- {
- /* Process or system doesn't want a zero page, grab anything */
- PageFrameNumber = MiRemoveAnyPage(Color);
- }
else
{
- /* System wants a zero page, obtain one */
- PageFrameNumber = MiRemoveZeroPage(Color);
- NeedZero = FALSE;
+ /* Get a color, and see if we should grab a zero or non-zero page */
+ Color = MI_GET_NEXT_COLOR();
+ if (!NeedZero)
+ {
+ /* Process or system doesn't want a zero page, grab anything */
+ PageFrameNumber = MiRemoveAnyPage(Color);
+ }
+ else
+ {
+ /* System wants a zero page, obtain one */
+ PageFrameNumber = MiRemoveZeroPage(Color);
+ }
}
/* Initialize it */
IN PMMPTE PointerPte,
IN PMMPTE PointerProtoPte,
IN KIRQL OldIrql,
- IN PMMPFN* LockedPfn)
+ IN PMMPFN* LockedProtoPfn)
{
MMPTE TempPte;
PMMPTE OriginalPte, PageTablePte;
ULONG_PTR Protection;
PFN_NUMBER PageFrameIndex;
PMMPFN Pfn1, Pfn2;
+ BOOLEAN OriginalProtection, DirtyPage;
 /* Must be called with a valid prototype PTE, with the PFN lock held */
ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
PageTablePte = MiAddressToPte(PointerPte);
Pfn2 = MiGetPfnEntry(PageTablePte->u.Hard.PageFrameNumber);
//Pfn2->u2.ShareCount++;
+ DBG_UNREFERENCED_LOCAL_VARIABLE(Pfn2);
/* Check where we should be getting the protection information from */
if (PointerPte->u.Soft.PageFileHigh == MI_PTE_LOOKUP_NEEDED)
{
/* Get the protection from the PTE, there's no real Proto PTE data */
Protection = PointerPte->u.Soft.Protection;
+
+ /* Remember that we did not use the proto protection */
+ OriginalProtection = FALSE;
}
else
{
/* Get the protection from the original PTE link */
OriginalPte = &Pfn1->OriginalPte;
Protection = OriginalPte->u.Soft.Protection;
+
+ /* Remember that we used the original protection */
+ OriginalProtection = TRUE;
+
+ /* Check if this was a write on a read only proto */
+ if ((StoreInstruction) && !(Protection & MM_READWRITE))
+ {
+ /* Clear the flag */
+ StoreInstruction = 0;
+ }
+ }
+
+ /* Check if this was a write on a non-COW page */
+ DirtyPage = FALSE;
+ if ((StoreInstruction) && ((Protection & MM_WRITECOPY) != MM_WRITECOPY))
+ {
+ /* Then the page should be marked dirty */
+ DirtyPage = TRUE;
+
+ /* ReactOS check */
+ ASSERT(Pfn1->OriginalPte.u.Soft.Prototype != 0);
+ }
+
+ /* Did we get a locked incoming PFN? */
+ if (*LockedProtoPfn)
+ {
+ /* Drop a reference */
+ ASSERT((*LockedProtoPfn)->u3.e2.ReferenceCount >= 1);
+ MiDereferencePfnAndDropLockCount(*LockedProtoPfn);
+ *LockedProtoPfn = NULL;
}
/* Release the PFN lock */
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
- /* Remove caching bits */
- Protection &= ~(MM_NOCACHE | MM_NOACCESS);
+ /* Remove special/caching bits */
+ Protection &= ~MM_PROTECT_SPECIAL;
+
+ /* Setup caching */
+ if (Pfn1->u3.e1.CacheAttribute == MiWriteCombined)
+ {
+ /* Write combining, no caching */
+ MI_PAGE_DISABLE_CACHE(&TempPte);
+ MI_PAGE_WRITE_COMBINED(&TempPte);
+ }
+ else if (Pfn1->u3.e1.CacheAttribute == MiNonCached)
+ {
+ /* Write through, no caching */
+ MI_PAGE_DISABLE_CACHE(&TempPte);
+ MI_PAGE_WRITE_THROUGH(&TempPte);
+ }
/* Check if this is a kernel or user address */
if (Address < MmSystemRangeStart)
MI_MAKE_HARDWARE_PTE(&TempPte, PointerPte, Protection, PageFrameIndex);
}
+ /* Set the dirty flag if needed */
+ if (DirtyPage) TempPte.u.Hard.Dirty = TRUE;
+
/* Write the PTE */
MI_WRITE_VALID_PTE(PointerPte, TempPte);
+ /* Reset the protection if needed */
+ if (OriginalProtection) Protection = MM_ZERO_ACCESS;
+
/* Return success */
+ ASSERT(PointerPte == MiAddressToPte(Address));
return STATUS_SUCCESS;
}
PMMPFN Pfn1;
MMPTE TempPte;
PMMPTE PointerToPteForProtoPage;
- USHORT NewRefCount;
- DPRINT1("Transition fault on 0x%p with PTE 0x%lx in process %s\n", FaultingAddress, PointerPte, CurrentProcess->ImageFileName);
+ DPRINT1("Transition fault on 0x%p with PTE 0x%p in process %s\n",
+ FaultingAddress, PointerPte, CurrentProcess->ImageFileName);
 /* Windows does this check */
ASSERT(*InPageBlock == NULL);
ASSERT(Pfn1->u3.e1.ReadInProgress == 0);
/* Windows checks there's some free pages and this isn't an in-page error */
- ASSERT(MmAvailablePages >= 0);
+ ASSERT(MmAvailablePages > 0);
ASSERT(Pfn1->u4.InPageError == 0);
/* ReactOS checks for this */
/* Otherwise, the page is removed from its list */
DPRINT1("Transition page in free/zero list\n");
MiUnlinkPageFromList(Pfn1);
-
- /* Windows does these checks -- perhaps a macro? */
- ASSERT(Pfn1->u2.ShareCount == 0);
- ASSERT(Pfn1->u2.ShareCount == 0);
- ASSERT(Pfn1->u3.e1.PageLocation != ActiveAndValid);
-
- /* Check if this was a prototype PTE */
- if ((Pfn1->u3.e1.PrototypePte == 1) &&
- (Pfn1->OriginalPte.u.Soft.Prototype == 1))
- {
- DPRINT1("Prototype floating page not yet supported\n");
- ASSERT(FALSE);
- }
-
- /* Update counter */
- InterlockedIncrementSizeT(&MmSystemLockPagesCount);
-
- /* We must be the first reference */
- NewRefCount = InterlockedIncrement16((PSHORT)&Pfn1->u3.e2.ReferenceCount);
- ASSERT(NewRefCount == 1);
+ MiReferenceUnusedPageAndBumpLockCount(Pfn1);
}
/* At this point, there should no longer be any in-page errors */
ASSERT(Pfn1->u4.InPageError == 0);
/* Check if this was a PFN with no more share references */
- if (Pfn1->u2.ShareCount == 0)
- {
- /* Windows checks for these... maybe a macro? */
- ASSERT(Pfn1->u3.e2.ReferenceCount != 0);
- ASSERT(Pfn1->u2.ShareCount == 0);
-
- /* Was this the last active reference to it */
- DPRINT1("Page share count is zero\n");
- if (Pfn1->u3.e2.ReferenceCount == 1)
- {
- /* The page should be leaking somewhere on the free/zero list */
- DPRINT1("Page reference count is one\n");
- ASSERT(Pfn1->u3.e1.PageLocation != ActiveAndValid);
- if ((Pfn1->u3.e1.PrototypePte == 1) &&
- (Pfn1->OriginalPte.u.Soft.Prototype == 1))
- {
- /* Do extra processing if it was a prototype page */
- DPRINT1("Prototype floating page not yet supported\n");
- ASSERT(FALSE);
- }
-
- /* Update counter */
- InterlockedDecrementSizeT(&MmSystemLockPagesCount);
- }
- }
+ if (Pfn1->u2.ShareCount == 0) MiDropLockCount(Pfn1);
/* Bump the share count and make the page valid */
Pfn1->u2.ShareCount++;
PointerPte,
PointerProtoPte,
OldIrql,
- NULL);
+ OutPfn);
}
/* Make sure there's some protection mask */
return STATUS_ACCESS_VIOLATION;
}
+ /* There is no such thing as a decommitted prototype PTE */
+ NT_ASSERT(TempPte.u.Long != MmDecommittedPte.u.Long);
+
/* Check for access rights on the PTE proper */
PteContents = *PointerPte;
if (PteContents.u.Soft.PageFileHigh != MI_PTE_LOOKUP_NEEDED)
{
if (!PteContents.u.Proto.ReadOnly)
{
- /* FIXME: CHECK FOR ACCESS */
+ /* Check for page access in software */
+ Status = MiAccessCheck(PointerProtoPte,
+ StoreInstruction,
+ KernelMode,
+ TempPte.u.Soft.Protection,
+ TrapInformation,
+ TRUE);
+ ASSERT(Status == STATUS_SUCCESS);
/* Check for copy on write page */
if ((TempPte.u.Soft.Protection & MM_WRITECOPY) == MM_WRITECOPY)
PointerPte,
PointerProtoPte,
OldIrql,
- NULL);
+ OutPfn);
}
NTSTATUS
IN BOOLEAN Recursive,
IN PEPROCESS Process,
IN PVOID TrapInformation,
- IN PVOID Vad)
+ IN PMMVAD Vad)
{
MMPTE TempPte;
KIRQL OldIrql, LockIrql;
NTSTATUS Status;
PMMPTE SuperProtoPte;
+ PMMPFN Pfn1, OutPfn = NULL;
+ PFN_NUMBER PageFrameIndex;
+ PFN_COUNT PteCount, ProcessedPtes;
DPRINT("ARM3 Page Fault Dispatcher for address: %p in process: %p\n",
Address,
Process);
/* Has the PTE been made valid yet? */
if (!SuperProtoPte->u.Hard.Valid)
{
- UNIMPLEMENTED;
- while (TRUE);
+ ASSERT(FALSE);
}
- else
+ else if (PointerPte->u.Hard.Valid == 1)
{
- /* Resolve the fault -- this will release the PFN lock */
- ASSERT(PointerPte->u.Hard.Valid == 0);
- Status = MiResolveProtoPteFault(StoreInstruction,
- Address,
- PointerPte,
- PointerProtoPte,
- NULL,
- NULL,
- NULL,
- Process,
- LockIrql,
- TrapInformation);
- ASSERT(Status == STATUS_SUCCESS);
-
- /* Complete this as a transition fault */
- ASSERT(OldIrql == KeGetCurrentIrql());
- ASSERT(OldIrql <= APC_LEVEL);
- ASSERT(KeAreAllApcsDisabled() == TRUE);
- return Status;
+ ASSERT(FALSE);
}
+
+ /* Resolve the fault -- this will release the PFN lock */
+ Status = MiResolveProtoPteFault(StoreInstruction,
+ Address,
+ PointerPte,
+ PointerProtoPte,
+ &OutPfn,
+ NULL,
+ NULL,
+ Process,
+ LockIrql,
+ TrapInformation);
+ ASSERT(Status == STATUS_SUCCESS);
+
+ /* Complete this as a transition fault */
+ ASSERT(OldIrql == KeGetCurrentIrql());
+ ASSERT(OldIrql <= APC_LEVEL);
+ ASSERT(KeAreAllApcsDisabled() == TRUE);
+ return Status;
}
else
{
- /* We currently only handle very limited paths */
- ASSERT(PointerPte->u.Soft.Prototype == 1);
+ /* We only handle the lookup path */
ASSERT(PointerPte->u.Soft.PageFileHigh == MI_PTE_LOOKUP_NEEDED);
+ /* Is there a non-image VAD? */
+ if ((Vad) &&
+ (Vad->u.VadFlags.VadType != VadImageMap) &&
+ !(Vad->u2.VadFlags2.ExtendableFile))
+ {
+ /* One day, ReactOS will cluster faults */
+ ASSERT(Address <= MM_HIGHEST_USER_ADDRESS);
+ DPRINT("Should cluster fault, but won't\n");
+ }
+
+ /* Only one PTE to handle for now */
+ PteCount = 1;
+ ProcessedPtes = 0;
+
/* Lock the PFN database */
LockIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
- /* For our current usage, this should be true */
+ /* We only handle the valid path */
ASSERT(SuperProtoPte->u.Hard.Valid == 1);
- ASSERT(TempPte.u.Hard.Valid == 0);
+
+ /* Capture the PTE */
+ TempPte = *PointerProtoPte;
+
+ /* Loop to handle future case of clustered faults */
+ while (TRUE)
+ {
+ /* For our current usage, this should be true */
+ if (TempPte.u.Hard.Valid == 1)
+ {
+ /* Bump the share count on the PTE */
+ PageFrameIndex = PFN_FROM_PTE(&TempPte);
+ Pfn1 = MI_PFN_ELEMENT(PageFrameIndex);
+ Pfn1->u2.ShareCount++;
+ }
+ else if ((TempPte.u.Soft.Prototype == 0) &&
+ (TempPte.u.Soft.Transition == 1))
+ {
+ /* This is a standby page, bring it back from the cache */
+ PageFrameIndex = TempPte.u.Trans.PageFrameNumber;
+ DPRINT("oooh, shiny, a soft fault! 0x%lx\n", PageFrameIndex);
+ Pfn1 = MI_PFN_ELEMENT(PageFrameIndex);
+ ASSERT(Pfn1->u3.e1.PageLocation != ActiveAndValid);
+
+ /* Should not yet happen in ReactOS */
+ ASSERT(Pfn1->u3.e1.ReadInProgress == 0);
+ ASSERT(Pfn1->u4.InPageError == 0);
+
+ /* Get the page */
+ MiUnlinkPageFromList(Pfn1);
+
+ /* Bump its reference count */
+ ASSERT(Pfn1->u2.ShareCount == 0);
+ InterlockedIncrement16((PSHORT)&Pfn1->u3.e2.ReferenceCount);
+ Pfn1->u2.ShareCount++;
+
+ /* Make it valid again */
+ /* This looks like another macro.... */
+ Pfn1->u3.e1.PageLocation = ActiveAndValid;
+ ASSERT(PointerProtoPte->u.Hard.Valid == 0);
+ ASSERT(PointerProtoPte->u.Trans.Prototype == 0);
+ ASSERT(PointerProtoPte->u.Trans.Transition == 1);
+ TempPte.u.Long = (PointerProtoPte->u.Long & ~0xFFF) |
+ MmProtectToPteMask[PointerProtoPte->u.Trans.Protection];
+ TempPte.u.Hard.Valid = 1;
+ TempPte.u.Hard.Accessed = 1;
+
+ /* Is the PTE writeable? */
+ if (((Pfn1->u3.e1.Modified) && (TempPte.u.Hard.Write)) &&
+ (TempPte.u.Hard.CopyOnWrite == 0))
+ {
+ /* Make it dirty */
+ TempPte.u.Hard.Dirty = TRUE;
+ }
+ else
+ {
+ /* Make it clean */
+ TempPte.u.Hard.Dirty = FALSE;
+ }
+
+ /* Write the valid PTE */
+ MI_WRITE_VALID_PTE(PointerProtoPte, TempPte);
+ ASSERT(PointerPte->u.Hard.Valid == 0);
+ }
+ else
+ {
+ /* Page is invalid, get out of the loop */
+ break;
+ }
+
+ /* One more done, was it the last? */
+ if (++ProcessedPtes == PteCount)
+ {
+ /* Complete the fault */
+ MiCompleteProtoPteFault(StoreInstruction,
+ Address,
+ PointerPte,
+ PointerProtoPte,
+ LockIrql,
+ &OutPfn);
+
+ /* THIS RELEASES THE PFN LOCK! */
+ break;
+ }
+
+ /* No clustered faults yet */
+ ASSERT(FALSE);
+ }
+
+ /* Did we resolve the fault? */
+ if (ProcessedPtes)
+ {
+ /* Bump the transition count */
+ InterlockedExchangeAddSizeT(&KeGetCurrentPrcb()->MmTransitionCount, ProcessedPtes);
+ ProcessedPtes--;
+
+ /* Loop all the processing we did */
+ ASSERT(ProcessedPtes == 0);
+
+ /* Complete this as a transition fault */
+ ASSERT(OldIrql == KeGetCurrentIrql());
+ ASSERT(OldIrql <= APC_LEVEL);
+ ASSERT(KeAreAllApcsDisabled() == TRUE);
+ return STATUS_PAGE_FAULT_TRANSITION;
+ }
+
+ /* We did not -- PFN lock is still held, prepare to resolve prototype PTE fault */
+ OutPfn = MI_PFN_ELEMENT(SuperProtoPte->u.Hard.PageFrameNumber);
+ MiReferenceUsedPageAndBumpLockCount(OutPfn);
+ ASSERT(OutPfn->u3.e2.ReferenceCount > 1);
+ ASSERT(PointerPte->u.Hard.Valid == 0);
/* Resolve the fault -- this will release the PFN lock */
Status = MiResolveProtoPteFault(StoreInstruction,
Address,
PointerPte,
PointerProtoPte,
- NULL,
+ &OutPfn,
NULL,
NULL,
Process,
LockIrql,
TrapInformation);
- ASSERT(Status == STATUS_SUCCESS);
+ //ASSERT(Status != STATUS_ISSUE_PAGING_IO);
+ //ASSERT(Status != STATUS_REFAULT);
+ //ASSERT(Status != STATUS_PTE_CHANGED);
+
+ /* Did the routine clean out the PFN or should we? */
+ if (OutPfn)
+ {
+ /* We had a locked PFN, so acquire the PFN lock to dereference it */
+ ASSERT(PointerProtoPte != NULL);
+ OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
+
+ /* Dereference the locked PFN */
+ MiDereferencePfnAndDropLockCount(OutPfn);
+ ASSERT(OutPfn->u3.e2.ReferenceCount >= 1);
+
+ /* And now release the lock */
+ KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
+ }
/* Complete this as a transition fault */
ASSERT(OldIrql == KeGetCurrentIrql());
ASSERT(OldIrql <= APC_LEVEL);
ASSERT(KeAreAllApcsDisabled() == TRUE);
- return STATUS_PAGE_FAULT_TRANSITION;
+ return Status;
}
}
+ /* Is this a transition PTE */
+ if (TempPte.u.Soft.Transition)
+ {
+ PVOID InPageBlock = NULL;
+ /* Lock the PFN database */
+ LockIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
+
+ /* Resolve */
+ Status = MiResolveTransitionFault(Address, PointerPte, Process, LockIrql, &InPageBlock);
+
+ NT_ASSERT(NT_SUCCESS(Status));
+
+ /* And now release the lock and leave*/
+ KeReleaseQueuedSpinLock(LockQueuePfnLock, LockIrql);
+
+ ASSERT(OldIrql == KeGetCurrentIrql());
+ ASSERT(OldIrql <= APC_LEVEL);
+ ASSERT(KeAreAllApcsDisabled() == TRUE);
+ return Status;
+ }
+
//
// The PTE must be invalid but not completely empty. It must also not be a
- // prototype PTE as that scenario should've been handled above. These are
- // all Windows checks
+ // prototype or transition PTE as those scenarios should've been handled above.
+ // These are all Windows checks
//
ASSERT(TempPte.u.Hard.Valid == 0);
ASSERT(TempPte.u.Soft.Prototype == 0);
+ ASSERT(TempPte.u.Soft.Transition == 0);
ASSERT(TempPte.u.Long != 0);
//
- // No transition or page file software PTEs in ARM3 yet, so this must be a
- // demand zero page. These are all ReactOS checks
+ // No page file software PTEs in ARM3 yet, so this must be a
+ // demand zero page. This is a ReactOS check.
//
- ASSERT(TempPte.u.Soft.Transition == 0);
ASSERT(TempPte.u.Soft.PageFileHigh == 0);
//
#if (_MI_PAGING_LEVELS >= 3)
(PointerPpe->u.Hard.Valid == 0) ||
#endif
- (PointerPde->u.Hard.Valid == 0))
+ (PointerPde->u.Hard.Valid == 0) ||
+ (PointerPte->u.Hard.Valid == 0))
{
- /* This fault is not valid, printf out some debugging help */
+ /* This fault is not valid, print out some debugging help */
DbgPrint("MM:***PAGE FAULT AT IRQL > 1 Va %p, IRQL %lx\n",
Address,
OldIrql);
if (TrapInformation)
{
PKTRAP_FRAME TrapFrame = TrapInformation;
+#ifdef _M_IX86
DbgPrint("MM:***EIP %p, EFL %p\n", TrapFrame->Eip, TrapFrame->EFlags);
DbgPrint("MM:***EAX %p, ECX %p EDX %p\n", TrapFrame->Eax, TrapFrame->Ecx, TrapFrame->Edx);
DbgPrint("MM:***EBX %p, ESI %p EDI %p\n", TrapFrame->Ebx, TrapFrame->Esi, TrapFrame->Edi);
+#elif defined(_M_AMD64)
+ DbgPrint("MM:***RIP %p, EFL %p\n", TrapFrame->Rip, TrapFrame->EFlags);
+ DbgPrint("MM:***RAX %p, RCX %p RDX %p\n", TrapFrame->Rax, TrapFrame->Rcx, TrapFrame->Rdx);
+ DbgPrint("MM:***RBX %p, RSI %p RDI %p\n", TrapFrame->Rbx, TrapFrame->Rsi, TrapFrame->Rdi);
+#endif
}
/* Tell the trap handler to fail */
}
/* Nothing is actually wrong */
- DPRINT1("Fault at IRQL1 is ok\n");
+ DPRINT1("Fault at IRQL %u is ok (%p)\n", OldIrql, Address);
return STATUS_SUCCESS;
}
/* Bail out, if the fault came from user mode */
if (Mode == UserMode) return STATUS_ACCESS_VIOLATION;
-#if (_MI_PAGING_LEVELS == 4)
- /* AMD64 system, check if PXE is invalid */
- if (PointerPxe->u.Hard.Valid == 0)
- {
- KeBugCheckEx(PAGE_FAULT_IN_NONPAGED_AREA,
- (ULONG_PTR)Address,
- StoreInstruction,
- (ULONG_PTR)TrapInformation,
- 7);
- }
-#endif
-#if (_MI_PAGING_LEVELS == 4)
- /* PAE/AMD64 system, check if PPE is invalid */
- if (PointerPpe->u.Hard.Valid == 0)
- {
- KeBugCheckEx(PAGE_FAULT_IN_NONPAGED_AREA,
- (ULONG_PTR)Address,
- StoreInstruction,
- (ULONG_PTR)TrapInformation,
- 5);
- }
-#endif
#if (_MI_PAGING_LEVELS == 2)
if (MI_IS_SYSTEM_PAGE_TABLE_ADDRESS(Address)) MiSynchronizeSystemPde((PMMPDE)PointerPte);
MiCheckPdeForPagedPool(Address);
#endif
- /* Check if the PDE is invalid */
- if (PointerPde->u.Hard.Valid == 0)
+ /* Check if the higher page table entries are invalid */
+ if (
+#if (_MI_PAGING_LEVELS == 4)
+ /* AMD64 system, check if PXE is invalid */
+ (PointerPxe->u.Hard.Valid == 0) ||
+#endif
+#if (_MI_PAGING_LEVELS >= 3)
+ /* PAE/AMD64 system, check if PPE is invalid */
+ (PointerPpe->u.Hard.Valid == 0) ||
+#endif
+ /* Always check if the PDE is valid */
+ (PointerPde->u.Hard.Valid == 0))
{
- /* PDE (still) not valid, kill the system */
+ /* PXE/PPE/PDE (still) not valid, kill the system */
KeBugCheckEx(PAGE_FAULT_IN_NONPAGED_AREA,
(ULONG_PTR)Address,
StoreInstruction,
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
return STATUS_SUCCESS;
}
- else
- {
- /* Not yet handled */
- ASSERT(FALSE);
- }
}
-
+#if (_MI_PAGING_LEVELS == 2)
/* Check if this was a session PTE that needs to remap the session PDE */
if (MI_IS_SESSION_PTE(Address))
{
- /* Not yet handled */
- ASSERT(FALSE);
+ /* Do the remapping */
+ Status = MiCheckPdeForSessionSpace(Address);
+ if (!NT_SUCCESS(Status))
+ {
+ /* It failed, this address is invalid */
+ KeBugCheckEx(PAGE_FAULT_IN_NONPAGED_AREA,
+ (ULONG_PTR)Address,
+ StoreInstruction,
+ (ULONG_PTR)TrapInformation,
+ 6);
+ }
}
+#else
+
+_WARN("Session space stuff is not implemented yet!")
+
+#endif
/* Check for a fault on the page table or hyperspace */
if (MI_IS_PAGE_TABLE_OR_HYPER_ADDRESS(Address))
}
else
{
- /* Not yet handled */
- ASSERT(FALSE);
+ /* Use the session process and working set */
+ CurrentProcess = HYDRA_PROCESS;
+ WorkingSet = &MmSessionSpace->GlobalVirtualAddress->Vm;
+
+ /* Make sure we don't have a recursive working set lock */
+ if ((CurrentThread->OwnsSessionWorkingSetExclusive) ||
+ (CurrentThread->OwnsSessionWorkingSetShared))
+ {
+ /* Fail */
+ return STATUS_IN_PAGE_ERROR | 0x10000000;
+ }
}
/* Acquire the working set lock */
}
}
- /* Case not yet handled */
- ASSERT(!IsSessionAddress);
+ /* Check for read-only write in session space */
+ if ((IsSessionAddress) &&
+ (StoreInstruction) &&
+ !(TempPte.u.Hard.Write))
+ {
+ /* Sanity check */
+ ASSERT(MI_IS_SESSION_IMAGE_ADDRESS(Address));
+
+ /* Was this COW? */
+ if (TempPte.u.Hard.CopyOnWrite == 0)
+ {
+ /* Then this is not allowed */
+ KeBugCheckEx(ATTEMPTED_WRITE_TO_READONLY_MEMORY,
+ (ULONG_PTR)Address,
+ (ULONG_PTR)TempPte.u.Long,
+ (ULONG_PTR)TrapInformation,
+ 13);
+ }
+
+ /* Otherwise, handle COW */
+ ASSERT(FALSE);
+ }
/* Release the working set */
MiUnlockWorkingSet(CurrentThread, WorkingSet);
/* Get the prototype PTE! */
ProtoPte = MiProtoPteToPte(&TempPte);
- /* Case not yet handled */
- ASSERT(!IsSessionAddress);
+ /* Do we need to locate the prototype PTE in session space? */
+ if ((IsSessionAddress) &&
+ (TempPte.u.Soft.PageFileHigh == MI_PTE_LOOKUP_NEEDED))
+ {
+ /* Yep, go find it as well as the VAD for it */
+ ProtoPte = MiCheckVirtualAddress(Address,
+ &ProtectionCode,
+ &Vad);
+ ASSERT(ProtoPte != NULL);
+ }
}
else
{
ASSERT(MI_IS_PAGE_LARGE(PointerPde) == FALSE);
}
- /* Now capture the PTE. Ignore virtual faults for now */
+ /* Now capture the PTE. */
TempPte = *PointerPte;
- ASSERT(TempPte.u.Hard.Valid == 0);
+
+ /* Check if the PTE is valid */
+ if (TempPte.u.Hard.Valid)
+ {
+ /* Check if this is a write on a readonly PTE */
+ if (StoreInstruction)
+ {
+ /* Is this a copy on write PTE? */
+ if (TempPte.u.Hard.CopyOnWrite)
+ {
+ /* Not supported yet */
+ ASSERT(FALSE);
+ }
+
+ /* Is this a read-only PTE? */
+ if (!TempPte.u.Hard.Write)
+ {
+ /* Return the status */
+ MiUnlockProcessWorkingSet(CurrentProcess, CurrentThread);
+ return STATUS_ACCESS_VIOLATION;
+ }
+ }
+
+ /* FIXME: Execution is ignored for now, since we don't have no-execute pages yet */
+
+ /* The fault has already been resolved by a different thread */
+ MiUnlockProcessWorkingSet(CurrentProcess, CurrentThread);
+ return STATUS_SUCCESS;
+ }
/* Quick check for demand-zero */
if (TempPte.u.Long == (MM_READWRITE << MM_PTE_SOFTWARE_PROTECTION_BITS))
return Status;
}
- /* No guard page support yet */
- ASSERT((ProtectionCode & MM_DECOMMIT) == 0);
-
/*
* Check if this is a real user-mode address or actually a kernel-mode
* page table for a user mode address
if (Address <= MM_HIGHEST_USER_ADDRESS)
{
/* Add an additional page table reference */
- MmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Address)]++;
- ASSERT(MmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Address)] <= PTE_COUNT);
+ MiIncrementPageTableReferences(Address);
+ }
+
+ /* Is this a guard page? */
+ if ((ProtectionCode & MM_PROTECT_SPECIAL) == MM_GUARDPAGE)
+ {
+ /* The VAD protection cannot be MM_DECOMMIT! */
+ NT_ASSERT(ProtectionCode != MM_DECOMMIT);
+
+ /* Remove the bit */
+ TempPte.u.Soft.Protection = ProtectionCode & ~MM_GUARDPAGE;
+ MI_WRITE_INVALID_PTE(PointerPte, TempPte);
+
+ /* Not supported */
+ ASSERT(ProtoPte == NULL);
+ ASSERT(CurrentThread->ApcNeeded == 0);
+
+ /* Drop the working set lock */
+ MiUnlockProcessWorkingSet(CurrentProcess, CurrentThread);
+ ASSERT(KeGetCurrentIrql() == OldIrql);
+
+ /* Handle stack expansion */
+ return MiCheckForUserStackOverflow(Address, TrapInformation);
}
/* Did we get a prototype PTE back? */
else
{
/* No, create a new PTE. First, write the protection */
- PointerPte->u.Soft.Protection = ProtectionCode;
+ TempPte.u.Soft.Protection = ProtectionCode;
+ MI_WRITE_INVALID_PTE(PointerPte, TempPte);
}
/* Lock the PFN database since we're going to grab a page */
return STATUS_PAGE_FAULT_DEMAND_ZERO;
}
- /* No guard page support yet */
- ASSERT((ProtectionCode & MM_DECOMMIT) == 0);
+ /* We should have a valid protection here */
ASSERT(ProtectionCode != 0x100);
/* Write the prototype PTE */
TempPte = PrototypePte;
TempPte.u.Soft.Protection = ProtectionCode;
+ NT_ASSERT(TempPte.u.Long != 0);
MI_WRITE_INVALID_PTE(PointerPte, TempPte);
}
else
{
- /* This path is not yet supported */
- ASSERT(FALSE);
+ /* Get the protection code and check if this is a proto PTE */
+ ProtectionCode = (ULONG)TempPte.u.Soft.Protection;
+ if (TempPte.u.Soft.Prototype)
+ {
+ /* Do we need to go find the real PTE? */
+ if (TempPte.u.Soft.PageFileHigh == MI_PTE_LOOKUP_NEEDED)
+ {
+ /* Get the prototype pte and VAD for it */
+ ProtoPte = MiCheckVirtualAddress(Address,
+ &ProtectionCode,
+ &Vad);
+ if (!ProtoPte)
+ {
+ ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
+ MiUnlockProcessWorkingSet(CurrentProcess, CurrentThread);
+ return STATUS_ACCESS_VIOLATION;
+ }
+ }
+ else
+ {
+ /* Get the prototype PTE! */
+ ProtoPte = MiProtoPteToPte(&TempPte);
+
+ /* Is it read-only */
+ if (TempPte.u.Proto.ReadOnly)
+ {
+ /* Set read-only code */
+ ProtectionCode = MM_READONLY;
+ }
+ else
+ {
+ /* Set unknown protection */
+ ProtectionCode = 0x100;
+ ASSERT(CurrentProcess->CloneRoot != NULL);
+ }
+ }
+ }
}
- /* FIXME: Run MiAccessCheck */
+ /* Do we have a valid protection code? */
+ if (ProtectionCode != 0x100)
+ {
+ /* Run a software access check first, including to detect guard pages */
+ Status = MiAccessCheck(PointerPte,
+ StoreInstruction,
+ Mode,
+ ProtectionCode,
+ TrapInformation,
+ FALSE);
+ if (Status != STATUS_SUCCESS)
+ {
+ /* Not supported */
+ ASSERT(CurrentThread->ApcNeeded == 0);
+
+ /* Drop the working set lock */
+ MiUnlockProcessWorkingSet(CurrentProcess, CurrentThread);
+ ASSERT(KeGetCurrentIrql() == OldIrql);
+
+ /* Did we hit a guard page? */
+ if (Status == STATUS_GUARD_PAGE_VIOLATION)
+ {
+ /* Handle stack expansion */
+ return MiCheckForUserStackOverflow(Address, TrapInformation);
+ }
+
+ /* Otherwise, fail back to the caller directly */
+ return Status;
+ }
+ }
/* Dispatch the fault */
Status = MiDispatchFault(StoreInstruction,
Vad);
/* Return the status */
- ASSERT(NT_SUCCESS(Status));
ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
MiUnlockProcessWorkingSet(CurrentProcess, CurrentThread);
return Status;