* PROGRAMMERS: ReactOS Portable Systems Group
*/
-#ifndef _M_AMD64
+#pragma once
-#define MI_MIN_PAGES_FOR_NONPAGED_POOL_TUNING ((255 * _1MB) >> PAGE_SHIFT)
-#define MI_MIN_PAGES_FOR_SYSPTE_TUNING ((19 * _1MB) >> PAGE_SHIFT)
-#define MI_MIN_PAGES_FOR_SYSPTE_BOOST ((32 * _1MB) >> PAGE_SHIFT)
-#define MI_MIN_PAGES_FOR_SYSPTE_BOOST_BOOST ((256 * _1MB) >> PAGE_SHIFT)
-#define MI_MAX_INIT_NONPAGED_POOL_SIZE (128 * _1MB)
-#define MI_MAX_NONPAGED_POOL_SIZE (128 * _1MB)
-#define MI_MAX_FREE_PAGE_LISTS 4
-
-#define MI_MIN_INIT_PAGED_POOLSIZE (32 * _1MB)
-
-#define MI_SESSION_VIEW_SIZE (20 * _1MB)
-#define MI_SESSION_POOL_SIZE (16 * _1MB)
-#define MI_SESSION_IMAGE_SIZE (8 * _1MB)
-#define MI_SESSION_WORKING_SET_SIZE (4 * _1MB)
-#define MI_SESSION_SIZE (MI_SESSION_VIEW_SIZE + \
- MI_SESSION_POOL_SIZE + \
- MI_SESSION_IMAGE_SIZE + \
- MI_SESSION_WORKING_SET_SIZE)
-
-#define MI_SYSTEM_VIEW_SIZE (32 * _1MB)
-
-#define MI_HIGHEST_USER_ADDRESS (PVOID)0x7FFEFFFF
-#define MI_USER_PROBE_ADDRESS (PVOID)0x7FFF0000
-#define MI_DEFAULT_SYSTEM_RANGE_START (PVOID)0x80000000
-#define MI_SYSTEM_CACHE_WS_START (PVOID)0xC0C00000
-#define MI_PAGED_POOL_START (PVOID)0xE1000000
-#define MI_NONPAGED_POOL_END (PVOID)0xFFBE0000
-#define MI_DEBUG_MAPPING (PVOID)0xFFBFF000
-
-#define MI_SYSTEM_PTE_BASE (PVOID)MiAddressToPte(NULL)
-
-#define MI_MIN_SECONDARY_COLORS 8
-#define MI_SECONDARY_COLORS 64
-#define MI_MAX_SECONDARY_COLORS 1024
-
-#define MI_MIN_ALLOCATION_FRAGMENT (4 * _1KB)
-#define MI_ALLOCATION_FRAGMENT (64 * _1KB)
-#define MI_MAX_ALLOCATION_FRAGMENT (2 * _1MB)
-
-#define MM_HIGHEST_VAD_ADDRESS \
- (PVOID)((ULONG_PTR)MM_HIGHEST_USER_ADDRESS - (16 * PAGE_SIZE))
#define MI_LOWEST_VAD_ADDRESS (PVOID)MM_LOWEST_USER_ADDRESS
-#define MI_DEFAULT_SYSTEM_PTE_COUNT 50000
-
-#endif /* !_M_AMD64 */
-
/* Make the code cleaner with some definitions for size multiples */
#define _1KB (1024u)
#define _1MB (1024 * _1KB)
/* Size of a page directory */
#define PD_SIZE (PDE_COUNT * sizeof(MMPDE))
+/* Stop using these! */
+#define PD_COUNT PPE_PER_PAGE
+#define PDE_COUNT PDE_PER_PAGE
+#define PTE_COUNT PTE_PER_PAGE
+
/* Size of all page directories for a process */
#define SYSTEM_PD_SIZE (PD_COUNT * PD_SIZE)
-
-/* Architecture specific count of PDEs in a directory, and count of PTEs in a PT */
#ifdef _M_IX86
-#define PD_COUNT 1
-#define PDE_COUNT 1024
-#define PTE_COUNT 1024
C_ASSERT(SYSTEM_PD_SIZE == PAGE_SIZE);
-#define MiIsPteOnPdeBoundary(PointerPte) \
- ((((ULONG_PTR)PointerPte) & (PAGE_SIZE - 1)) == 0)
-#elif _M_ARM
-#define PD_COUNT 1
-#define PDE_COUNT 4096
-#define PTE_COUNT 256
-#else
-#define PD_COUNT PPE_PER_PAGE
-#define PDE_COUNT PDE_PER_PAGE
-#define PTE_COUNT PTE_PER_PAGE
#endif
//
// Protection Bits part of the internal memory manager Protection Mask, from:
// http://reactos.org/wiki/Techwiki:Memory_management_in_the_Windows_XP_kernel
+// https://www.reactos.org/wiki/Techwiki:Memory_Protection_constants
// and public assertions.
//
#define MM_ZERO_ACCESS 0
#define MM_WRITECOPY 5
#define MM_EXECUTE_READWRITE 6
#define MM_EXECUTE_WRITECOPY 7
-#define MM_NOCACHE 8
-#define MM_DECOMMIT 0x10
-#define MM_NOACCESS (MM_DECOMMIT | MM_NOCACHE)
+#define MM_PROTECT_ACCESS 7
+
+//
+// These are flags on top of the actual protection mask
+//
+#define MM_NOCACHE 0x08
+#define MM_GUARDPAGE 0x10
+#define MM_WRITECOMBINE 0x18
+#define MM_PROTECT_SPECIAL 0x18
+
+//
+// These are special cases
+//
+#define MM_DECOMMIT (MM_ZERO_ACCESS | MM_GUARDPAGE)
+#define MM_NOACCESS (MM_ZERO_ACCESS | MM_WRITECOMBINE)
+#define MM_OUTSWAPPED_KSTACK (MM_EXECUTE_WRITECOPY | MM_WRITECOMBINE)
#define MM_INVALID_PROTECTION 0xFFFFFFFF
//
#define MI_IS_PAGE_TABLE_OR_HYPER_ADDRESS(Address) \
(((PVOID)(Address) >= (PVOID)PTE_BASE) && ((PVOID)(Address) <= (PVOID)MmHyperSpaceEnd))
-//
-// Corresponds to MMPTE_SOFTWARE.Protection
-//
-#ifdef _M_IX86
-#define MM_PTE_SOFTWARE_PROTECTION_BITS 5
-#elif _M_ARM
-#define MM_PTE_SOFTWARE_PROTECTION_BITS 6
-#elif _M_AMD64
-#define MM_PTE_SOFTWARE_PROTECTION_BITS 5
-#else
-#error Define these please!
-#endif
-
//
// Creates a software PTE with the given protection
//
//
// Special values for LoadedImports
//
+#ifdef _WIN64
+#define MM_SYSLDR_NO_IMPORTS (PVOID)0xFFFFFFFFFFFFFFFEULL
+#define MM_SYSLDR_BOOT_LOADED (PVOID)0xFFFFFFFFFFFFFFFFULL
+#else
#define MM_SYSLDR_NO_IMPORTS (PVOID)0xFFFFFFFE
#define MM_SYSLDR_BOOT_LOADED (PVOID)0xFFFFFFFF
+#endif
#define MM_SYSLDR_SINGLE_ENTRY 0x1
//
#define MI_GET_NEXT_COLOR() (MI_GET_PAGE_COLOR(++MmSystemPageColor))
#define MI_GET_NEXT_PROCESS_COLOR(x) (MI_GET_PAGE_COLOR(++(x)->NextPageColor))
-#ifndef _M_AMD64
-//
-// Decodes a Prototype PTE into the underlying PTE
-//
-#define MiProtoPteToPte(x) \
- (PMMPTE)((ULONG_PTR)MmPagedPoolStart + \
- (((x)->u.Proto.ProtoAddressHigh << 9) | (x)->u.Proto.ProtoAddressLow << 2))
-
-//
-// Decodes a Prototype PTE into the underlying PTE
-//
-#define MiSubsectionPteToSubsection(x) \
- ((x)->u.Subsect.WhichPool == PagedPool) ? \
- (PMMPTE)((ULONG_PTR)MmSubsectionBase + \
- (((x)->u.Subsect.SubsectionAddressHigh << 7) | \
- (x)->u.Subsect.SubsectionAddressLow << 3)) : \
- (PMMPTE)((ULONG_PTR)MmNonPagedPoolEnd - \
- (((x)->u.Subsect.SubsectionAddressHigh << 7) | \
- (x)->u.Subsect.SubsectionAddressLow << 3))
-#endif
-
//
// Prototype PTEs that don't yet have a pagefile association
//
-#ifdef _M_AMD64
+#ifdef _WIN64
#define MI_PTE_LOOKUP_NEEDED 0xffffffffULL
#else
#define MI_PTE_LOOKUP_NEEDED 0xFFFFF
#endif
-//
-// Number of session lists in the MM_SESSIONS_SPACE structure
-//
-#if defined(_M_AMD64)
-#define SESSION_POOL_LOOKASIDES 21
-#elif defined(_M_IX86)
-#define SESSION_POOL_LOOKASIDES 26
-#else
-#error Not Defined!
-#endif
-
//
// Number of session data and tag pages
//
//
// FIXFIX: These should go in ex.h after the pool merge
//
-#ifdef _M_AMD64
+#ifdef _WIN64
#define POOL_BLOCK_SIZE 16
#else
#define POOL_BLOCK_SIZE 8
{
struct
{
-#ifdef _M_AMD64
+#ifdef _WIN64
USHORT PreviousSize:8;
USHORT PoolIndex:8;
USHORT BlockSize:8;
};
ULONG Ulong1;
};
-#ifdef _M_AMD64
+#ifdef _WIN64
ULONG PoolTag;
#endif
union
{
-#ifdef _M_AMD64
+#ifdef _WIN64
PEPROCESS ProcessBilled;
#else
ULONG PoolTag;
SIZE_T CommittedPages;
PVOID PagedPoolStart;
PVOID PagedPoolEnd;
- PMMPTE PagedPoolBasePde;
+ PMMPDE PagedPoolBasePde;
ULONG Color;
LONG ResidentProcessCount;
ULONG SessionPoolAllocationFailures[4];
PDRIVER_UNLOAD Win32KDriverUnload;
POOL_DESCRIPTOR PagedPool;
#if defined (_M_AMD64)
- MMPTE PageDirectory;
+ MMPDE PageDirectory;
#else
- PMMPTE PageTables;
+ PMMPDE PageTables;
#endif
#if defined (_M_AMD64)
PMMPTE SpecialPoolFirstPte;
extern PFN_NUMBER MmMaximumNonPagedPoolInPages;
extern PFN_NUMBER MmSizeOfPagedPoolInPages;
extern PVOID MmNonPagedSystemStart;
-extern SIZE_T MiNonPagedSystemSize;
extern PVOID MmNonPagedPoolStart;
extern PVOID MmNonPagedPoolExpansionStart;
extern PVOID MmNonPagedPoolEnd;
extern SIZE_T MmSystemLockPagesCount;
extern ULONG_PTR MmSubsectionBase;
extern LARGE_INTEGER MmCriticalSectionTimeout;
+extern LIST_ENTRY MmWorkingSetExpansionHead;
+extern KSPIN_LOCK MmExpansionLock;
+extern PETHREAD MiExpansionLockOwner;
-BOOLEAN
FORCEINLINE
+BOOLEAN
MiIsMemoryTypeFree(TYPE_OF_MEMORY MemoryType)
{
return ((MemoryType == LoaderFree) ||
(MemoryType == LoaderOsloaderStack));
}
-BOOLEAN
FORCEINLINE
+BOOLEAN
MiIsMemoryTypeInvisible(TYPE_OF_MEMORY MemoryType)
{
return ((MemoryType == LoaderFirmwarePermanent) ||
}
#ifdef _M_AMD64
-BOOLEAN
FORCEINLINE
+BOOLEAN
MiIsUserPxe(PVOID Address)
{
return ((ULONG_PTR)Address >> 7) == 0x1FFFFEDF6FB7DA0ULL;
}
-BOOLEAN
FORCEINLINE
+BOOLEAN
MiIsUserPpe(PVOID Address)
{
return ((ULONG_PTR)Address >> 16) == 0xFFFFF6FB7DA0ULL;
}
-BOOLEAN
FORCEINLINE
+BOOLEAN
MiIsUserPde(PVOID Address)
{
return ((ULONG_PTR)Address >> 25) == 0x7FFFFB7DA0ULL;
}
-BOOLEAN
FORCEINLINE
+BOOLEAN
MiIsUserPte(PVOID Address)
{
return ((ULONG_PTR)Address >> 34) == 0x3FFFFDA0ULL;
}
#else
-BOOLEAN
FORCEINLINE
+BOOLEAN
MiIsUserPde(PVOID Address)
{
return ((Address >= (PVOID)MiAddressToPde(NULL)) &&
(Address <= (PVOID)MiHighestUserPde));
}
-BOOLEAN
FORCEINLINE
+BOOLEAN
MiIsUserPte(PVOID Address)
{
return (Address <= (PVOID)MiHighestUserPte);
//
// Figures out the hardware bits for a PTE
//
-ULONG_PTR
FORCEINLINE
+ULONG_PTR
MiDetermineUserGlobalPteMask(IN PVOID PointerPte)
{
MMPTE TempPte;
ASSERT(MappingPte <= MiHighestUserPte);
/* Start fresh */
- *NewPte = ValidKernelPte;
+ NewPte->u.Long = 0;
/* Set the protection and page */
+ NewPte->u.Hard.Valid = TRUE;
NewPte->u.Hard.Owner = TRUE;
NewPte->u.Hard.PageFrameNumber = PageFrameNumber;
NewPte->u.Long |= MmProtectToPteMask[ProtectionMask];
NewPte->u.Subsect.SubsectionAddressHigh = (Offset & 0xFFFFF80) >> 7;
}
+FORCEINLINE
+BOOLEAN
+MI_IS_MAPPED_PTE(PMMPTE PointerPte)
+{
+ /// \todo Make this reasonable code, this is UGLY!
+ return ((PointerPte->u.Long & 0xFFFFFC01) != 0);
+}
+
#endif
+FORCEINLINE
+VOID
+MI_MAKE_TRANSITION_PTE(_Out_ PMMPTE NewPte,
+ _In_ PFN_NUMBER Page,
+ _In_ ULONG Protection)
+{
+ NewPte->u.Long = 0;
+ NewPte->u.Trans.Transition = 1;
+ NewPte->u.Trans.Protection = Protection;
+ NewPte->u.Trans.PageFrameNumber = Page;
+}
+
//
// Returns if the page is physically resident (ie: a large page)
// FIXFIX: CISC/x86 only?
//
// Writes a valid PTE
//
-VOID
FORCEINLINE
+VOID
MI_WRITE_VALID_PTE(IN PMMPTE PointerPte,
IN MMPTE TempPte)
{
}
//
-// Writes an invalid PTE
+// Updates a valid PTE
//
+FORCEINLINE
VOID
+MI_UPDATE_VALID_PTE(IN PMMPTE PointerPte,
+ IN MMPTE TempPte)
+{
+ /* Write the valid PTE */
+ ASSERT(PointerPte->u.Hard.Valid == 1);
+ ASSERT(TempPte.u.Hard.Valid == 1);
+ ASSERT(PointerPte->u.Hard.PageFrameNumber == TempPte.u.Hard.PageFrameNumber);
+ *PointerPte = TempPte;
+}
+
+//
+// Writes an invalid PTE
+//
FORCEINLINE
+VOID
MI_WRITE_INVALID_PTE(IN PMMPTE PointerPte,
IN MMPTE InvalidPte)
{
/* Write the invalid PTE */
ASSERT(InvalidPte.u.Hard.Valid == 0);
+ ASSERT(InvalidPte.u.Long != 0);
*PointerPte = InvalidPte;
}
//
-// Writes a valid PDE
+// Erase the PTE completely
//
+FORCEINLINE
VOID
+MI_ERASE_PTE(IN PMMPTE PointerPte)
+{
+ /* Zero out the PTE */
+ ASSERT(PointerPte->u.Long != 0);
+ PointerPte->u.Long = 0;
+}
+
+//
+// Writes a valid PDE
+//
FORCEINLINE
+VOID
MI_WRITE_VALID_PDE(IN PMMPDE PointerPde,
IN MMPDE TempPde)
{
//
// Writes an invalid PDE
//
-VOID
FORCEINLINE
+VOID
MI_WRITE_INVALID_PDE(IN PMMPDE PointerPde,
IN MMPDE InvalidPde)
{
/* Write the invalid PDE */
ASSERT(InvalidPde.u.Hard.Valid == 0);
+ ASSERT(InvalidPde.u.Long != 0);
*PointerPde = InvalidPde;
}
/* Check if this process is the owner, and that the thread owns the WS */
if (PsGetCurrentThread()->OwnsProcessWorkingSetExclusive == 0)
{
- DPRINT1("Thread: %p is not an owner\n", PsGetCurrentThread());
+ DPRINT("Thread: %p is not an owner\n", PsGetCurrentThread());
}
if (KeGetCurrentThread()->ApcState.Process != &Process->Pcb)
{
- DPRINT1("Current thread %p is attached to another process %p\n", PsGetCurrentThread(), Process);
+ DPRINT("Current thread %p is attached to another process %p\n", PsGetCurrentThread(), Process);
}
return ((KeGetCurrentThread()->ApcState.Process == &Process->Pcb) &&
((PsGetCurrentThread()->OwnsProcessWorkingSetExclusive) ||
//
// New ARM3<->RosMM PAGE Architecture
//
-BOOLEAN
FORCEINLINE
+BOOLEAN
MiIsRosSectionObject(IN PVOID Section)
{
PROS_SECTION_OBJECT RosSection = Section;
return FALSE;
}
-#ifdef _WIN64
-// HACK ON TOP OF HACK ALERT!!!
-#define MI_GET_ROS_DATA(x) \
- (((x)->RosMmData == 0) ? NULL : ((PMMROSPFN)((ULONG64)(ULONG)((x)->RosMmData) | \
- ((ULONG64)MmNonPagedPoolStart & 0xffffffff00000000ULL))))
-#else
-#define MI_GET_ROS_DATA(x) ((PMMROSPFN)(x->RosMmData))
-#endif
-#define MI_IS_ROS_PFN(x) (((x)->u4.AweAllocation == TRUE) && (MI_GET_ROS_DATA(x) != NULL))
-#define ASSERT_IS_ROS_PFN(x) ASSERT(MI_IS_ROS_PFN(x) == TRUE);
-typedef struct _MMROSPFN
-{
- PMM_RMAP_ENTRY RmapListHead;
- SWAPENTRY SwapEntry;
-} MMROSPFN, *PMMROSPFN;
-
-#define RosMmData AweReferenceCount
+#define MI_IS_ROS_PFN(x) ((x)->u4.AweAllocation == TRUE)
VOID
NTAPI
KeLeaveGuardedRegion();
}
+//
+// Unlocks the working set for the given process
+//
+FORCEINLINE
+VOID
+MiUnlockProcessWorkingSetShared(IN PEPROCESS Process,
+ IN PETHREAD Thread)
+{
+ /* Make sure we are the owner of a safe acquisition (because shared) */
+ ASSERT(MI_WS_OWNER(Process));
+ ASSERT(!MI_IS_WS_UNSAFE(Process));
+
+ /* Ensure we are in a shared acquisition */
+ ASSERT(Thread->OwnsProcessWorkingSetShared == TRUE);
+ ASSERT(Thread->OwnsProcessWorkingSetExclusive == FALSE);
+
+    /* Don't claim the lock any longer */
+ Thread->OwnsProcessWorkingSetShared = FALSE;
+
+ /* Release the lock and re-enable APCs */
+ ExReleasePushLockShared(&Process->Vm.WorkingSetMutex);
+ KeLeaveGuardedRegion();
+}
+
//
// Unlocks the working set for the given process
//
else
{
/* Owner is shared (implies safe), release normally */
- ASSERT(FALSE);
+ MiUnlockProcessWorkingSetShared(Process, Thread);
*Safe = TRUE;
*Shared = TRUE;
}
IN BOOLEAN Safe,
IN BOOLEAN Shared)
{
- ASSERT(Shared == FALSE);
-
/* Check if this was a safe lock or not */
if (Safe)
{
- /* Reacquire safely */
- MiLockProcessWorkingSet(Process, Thread);
+ if (Shared)
+ {
+ /* Reacquire safely & shared */
+ MiLockProcessWorkingSetShared(Process, Thread);
+ }
+ else
+ {
+ /* Reacquire safely */
+ MiLockProcessWorkingSet(Process, Thread);
+ }
}
else
{
+ /* Unsafe lock cannot be shared */
+ ASSERT(Shared == FALSE);
/* Reacquire unsafely */
MiLockProcessWorkingSetUnsafe(Process, Thread);
}
}
+FORCEINLINE
+KIRQL
+MiAcquireExpansionLock(VOID)
+{
+ KIRQL OldIrql;
+
+ ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
+ KeAcquireSpinLock(&MmExpansionLock, &OldIrql);
+ ASSERT(MiExpansionLockOwner == NULL);
+ MiExpansionLockOwner = PsGetCurrentThread();
+ return OldIrql;
+}
+
+FORCEINLINE
+VOID
+MiReleaseExpansionLock(KIRQL OldIrql)
+{
+ ASSERT(MiExpansionLockOwner == PsGetCurrentThread());
+ MiExpansionLockOwner = NULL;
+ KeReleaseSpinLock(&MmExpansionLock, OldIrql);
+ ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
+}
+
//
// Returns the ProtoPTE inside a VAD for the given VPN
//
VOID
NTAPI
-MiInitializeSessionSpaceLayout();
+MiInitializeSessionSpaceLayout(VOID);
NTSTATUS
NTAPI
IN PLOADER_PARAMETER_BLOCK LoaderBlock
);
+VOID
+NTAPI
+MiInitializeSessionWsSupport(
+ VOID
+);
+
VOID
NTAPI
MiInitializeSessionIds(
IN ULONG Flags
);
-PVOID
-NTAPI
-MiMapLockedPagesInUserSpace(
- IN PMDL Mdl,
- IN PVOID BaseVa,
- IN MEMORY_CACHING_TYPE CacheType,
- IN PVOID BaseAddress
-);
-
-VOID
-NTAPI
-MiUnmapLockedPagesInUserSpace(
- IN PVOID BaseAddress,
- IN PMDL Mdl
-);
-
VOID
NTAPI
MiInsertPageInList(
IN PMMPFN Pfn
);
-PFN_NUMBER
-NTAPI
-MiAllocatePfn(
- IN PMMPTE PointerPte,
- IN ULONG Protection
-);
-
VOID
NTAPI
MiInitializePfn(
NTAPI
MiInitializeAndChargePfn(
OUT PPFN_NUMBER PageFrameIndex,
- IN PMMPTE PointerPde,
+ IN PMMPDE PointerPde,
IN PFN_NUMBER ContainingPageFrame,
IN BOOLEAN SessionAllocation
);
NTAPI
MiInitializePfnForOtherProcess(
IN PFN_NUMBER PageFrameIndex,
- IN PMMPTE PointerPte,
+ IN PVOID PteAddress,
IN PFN_NUMBER PteFrame
);
IN PVOID VirtualAddress
);
-PMMADDRESS_NODE
+TABLE_SEARCH_RESULT
NTAPI
MiCheckForConflictingNode(
IN ULONG_PTR StartVpn,
IN ULONG_PTR EndVpn,
- IN PMM_AVL_TABLE Table
+ IN PMM_AVL_TABLE Table,
+ OUT PMMADDRESS_NODE *NodeOrParent
);
TABLE_SEARCH_RESULT
OUT PULONG_PTR Base
);
-NTSTATUS
+TABLE_SEARCH_RESULT
NTAPI
MiFindEmptyAddressRangeInTree(
IN SIZE_T Length,
VOID
NTAPI
MiInsertVad(
- IN PMMVAD Vad,
- IN PEPROCESS Process
-);
+ _Inout_ PMMVAD Vad,
+ _Inout_ PMM_AVL_TABLE VadRoot);
+
+NTSTATUS
+NTAPI
+MiInsertVadEx(
+ _Inout_ PMMVAD Vad,
+ _In_ ULONG_PTR *BaseAddress,
+ _In_ SIZE_T ViewSize,
+ _In_ ULONG_PTR HighestAddress,
+ _In_ ULONG_PTR Alignment,
+ _In_ ULONG AllocationType);
VOID
NTAPI
MiRosUnmapViewOfSection(
IN PEPROCESS Process,
IN PVOID BaseAddress,
- IN ULONG Flags
+ IN BOOLEAN SkipDebuggerNotify
);
VOID
IN PMMVAD Vad
);
+VOID
+NTAPI
+MiDeletePte(
+ IN PMMPTE PointerPte,
+ IN PVOID VirtualAddress,
+ IN PEPROCESS CurrentProcess,
+ IN PMMPTE PrototypePte
+);
+
ULONG
NTAPI
MiMakeSystemAddressValid(
IN ULONG_PTR Vpn
);
+VOID
+NTAPI
+MiDeleteARM3Section(
+ PVOID ObjectBody
+);
+
NTSTATUS
NTAPI
MiQueryMemorySectionName(
VOID
NTAPI
MiMakePdeExistAndMakeValid(
- IN PMMPTE PointerPde,
+ IN PMMPDE PointerPde,
IN PEPROCESS TargetProcess,
IN KIRQL OldIrql
);
// then we'd like to have our own code to grab a free page and zero it out, by
// using MiRemoveAnyPage. This macro implements this.
//
-PFN_NUMBER
FORCEINLINE
+PFN_NUMBER
MiRemoveZeroPageSafe(IN ULONG Color)
{
if (MmFreePagesByColor[ZeroedPageList][Color].Flink != LIST_HEAD) return MiRemoveZeroPage(Color);
return 0;
}
+#if (_MI_PAGING_LEVELS == 2)
+FORCEINLINE
+BOOLEAN
+MiSynchronizeSystemPde(PMMPDE PointerPde)
+{
+ MMPDE SystemPde;
+ ULONG Index;
+
+ /* Get the Index from the PDE */
+ Index = ((ULONG_PTR)PointerPde & (SYSTEM_PD_SIZE - 1)) / sizeof(MMPTE);
+
+ /* Copy the PDE from the double-mapped system page directory */
+ SystemPde = MmSystemPagePtes[Index];
+ *PointerPde = SystemPde;
+
+ /* Make sure we re-read the PDE and PTE */
+ KeMemoryBarrierWithoutFence();
+
+ /* Return, if we had success */
+ return SystemPde.u.Hard.Valid != 0;
+}
+#endif
+
/* EOF */