#ifndef _M_AMD64
-#define MI_MIN_PAGES_FOR_NONPAGED_POOL_TUNING ((255*1024*1024) >> PAGE_SHIFT)
-#define MI_MIN_PAGES_FOR_SYSPTE_TUNING ((19*1024*1024) >> PAGE_SHIFT)
-#define MI_MIN_PAGES_FOR_SYSPTE_BOOST ((32*1024*1024) >> PAGE_SHIFT)
-#define MI_MAX_INIT_NONPAGED_POOL_SIZE (128 * 1024 * 1024)
-#define MI_MAX_NONPAGED_POOL_SIZE (128 * 1024 * 1024)
-#define MI_MAX_FREE_PAGE_LISTS 4
-
-#define MI_MIN_INIT_PAGED_POOLSIZE (32 * 1024 * 1024)
-
-#define MI_SESSION_VIEW_SIZE (20 * 1024 * 1024)
-#define MI_SESSION_POOL_SIZE (16 * 1024 * 1024)
-#define MI_SESSION_IMAGE_SIZE (8 * 1024 * 1024)
-#define MI_SESSION_WORKING_SET_SIZE (4 * 1024 * 1024)
-#define MI_SESSION_SIZE (MI_SESSION_VIEW_SIZE + \
- MI_SESSION_POOL_SIZE + \
- MI_SESSION_IMAGE_SIZE + \
- MI_SESSION_WORKING_SET_SIZE)
-
-#define MI_SYSTEM_VIEW_SIZE (16 * 1024 * 1024)
-
-#define MI_SYSTEM_CACHE_WS_START (PVOID)0xC0C00000
-#define MI_PAGED_POOL_START (PVOID)0xE1000000
-#define MI_NONPAGED_POOL_END (PVOID)0xFFBE0000
-#define MI_DEBUG_MAPPING (PVOID)0xFFBFF000
+#define MI_MIN_PAGES_FOR_NONPAGED_POOL_TUNING ((255 * _1MB) >> PAGE_SHIFT)
+#define MI_MIN_PAGES_FOR_SYSPTE_TUNING ((19 * _1MB) >> PAGE_SHIFT)
+#define MI_MIN_PAGES_FOR_SYSPTE_BOOST ((32 * _1MB) >> PAGE_SHIFT)
+#define MI_MAX_INIT_NONPAGED_POOL_SIZE (128 * _1MB)
+#define MI_MAX_NONPAGED_POOL_SIZE (128 * _1MB)
+#define MI_MAX_FREE_PAGE_LISTS 4
+
+#define MI_MIN_INIT_PAGED_POOLSIZE (32 * _1MB)
+
+#define MI_SESSION_VIEW_SIZE (20 * _1MB)
+#define MI_SESSION_POOL_SIZE (16 * _1MB)
+#define MI_SESSION_IMAGE_SIZE (8 * _1MB)
+#define MI_SESSION_WORKING_SET_SIZE (4 * _1MB)
+#define MI_SESSION_SIZE (MI_SESSION_VIEW_SIZE + \
+ MI_SESSION_POOL_SIZE + \
+ MI_SESSION_IMAGE_SIZE + \
+ MI_SESSION_WORKING_SET_SIZE)
+
+#define MI_SYSTEM_VIEW_SIZE (16 * _1MB)
+
+#define MI_SYSTEM_CACHE_WS_START (PVOID)0xC0C00000
+#define MI_PAGED_POOL_START (PVOID)0xE1000000
+#define MI_NONPAGED_POOL_END (PVOID)0xFFBE0000
+#define MI_DEBUG_MAPPING (PVOID)0xFFBFF000
+
+#define MI_SYSTEM_PTE_BASE (PVOID)MiAddressToPte(NULL)
#define MI_MIN_SECONDARY_COLORS 8
#define MI_SECONDARY_COLORS 64
#define MI_MAX_SECONDARY_COLORS 1024
+#define MI_MIN_ALLOCATION_FRAGMENT (4 * _1KB)
+#define MI_ALLOCATION_FRAGMENT (64 * _1KB)
+#define MI_MAX_ALLOCATION_FRAGMENT (2 * _1MB)
+
#define MM_HIGHEST_VAD_ADDRESS \
(PVOID)((ULONG_PTR)MM_HIGHEST_USER_ADDRESS - (16 * PAGE_SIZE))
+#define MI_LOWEST_VAD_ADDRESS (PVOID)MM_LOWEST_USER_ADDRESS
#endif /* !_M_AMD64 */
/* Make the code cleaner with some definitions for size multiples */
-#define _1KB (1024)
+#define _1KB (1024u)
#define _1MB (1024 * _1KB)
+#define _1GB (1024 * _1MB)
+
+/* Everyone loves 64K */
+#define _64K (64 * _1KB)
/* Area mapped by a PDE */
#define PDE_MAPPED_VA (PTE_COUNT * PAGE_SIZE)
/* Size of a page table */
#define PT_SIZE (PTE_COUNT * sizeof(MMPTE))
+/* Size of a page directory */
+#define PD_SIZE (PDE_COUNT * sizeof(MMPDE))
+
+/* Size of all page directories for a process */
+#define SYSTEM_PD_SIZE (PD_COUNT * PD_SIZE)
+
/* Architecture specific count of PDEs in a directory, and count of PTEs in a PT */
#ifdef _M_IX86
#define PD_COUNT 1
#define PDE_COUNT 1024
#define PTE_COUNT 1024
+C_ASSERT(SYSTEM_PD_SIZE == PAGE_SIZE);
#elif _M_ARM
#define PD_COUNT 1
#define PDE_COUNT 4096
#define MM_NOCACHE 8
#define MM_DECOMMIT 0x10
#define MM_NOACCESS (MM_DECOMMIT | MM_NOCACHE)
+#define MM_INVALID_PROTECTION 0xFFFFFFFF
//
// Specific PTE Definitions that map to the Memory Manager's Protection Mask Bits
#define PTE_WRITECOPY 0x200
#define PTE_EXECUTE_READWRITE 0x0
#define PTE_EXECUTE_WRITECOPY 0x200
+#define PTE_PROTOTYPE 0x400
//
// Cache flags
//
#else
#error Define these please!
#endif
-static const
-ULONG
-MmProtectToPteMask[32] =
-{
- //
- // These are the base MM_ protection flags
- //
- 0,
- PTE_READONLY | PTE_ENABLE_CACHE,
- PTE_EXECUTE | PTE_ENABLE_CACHE,
- PTE_EXECUTE_READ | PTE_ENABLE_CACHE,
- PTE_READWRITE | PTE_ENABLE_CACHE,
- PTE_WRITECOPY | PTE_ENABLE_CACHE,
- PTE_EXECUTE_READWRITE | PTE_ENABLE_CACHE,
- PTE_EXECUTE_WRITECOPY | PTE_ENABLE_CACHE,
- //
- // These OR in the MM_NOCACHE flag
- //
- 0,
- PTE_READONLY | PTE_DISABLE_CACHE,
- PTE_EXECUTE | PTE_DISABLE_CACHE,
- PTE_EXECUTE_READ | PTE_DISABLE_CACHE,
- PTE_READWRITE | PTE_DISABLE_CACHE,
- PTE_WRITECOPY | PTE_DISABLE_CACHE,
- PTE_EXECUTE_READWRITE | PTE_DISABLE_CACHE,
- PTE_EXECUTE_WRITECOPY | PTE_DISABLE_CACHE,
- //
- // These OR in the MM_DECOMMIT flag, which doesn't seem supported on x86/64/ARM
- //
- 0,
- PTE_READONLY | PTE_ENABLE_CACHE,
- PTE_EXECUTE | PTE_ENABLE_CACHE,
- PTE_EXECUTE_READ | PTE_ENABLE_CACHE,
- PTE_READWRITE | PTE_ENABLE_CACHE,
- PTE_WRITECOPY | PTE_ENABLE_CACHE,
- PTE_EXECUTE_READWRITE | PTE_ENABLE_CACHE,
- PTE_EXECUTE_WRITECOPY | PTE_ENABLE_CACHE,
- //
- // These OR in the MM_NOACCESS flag, which seems to enable WriteCombining?
- //
- 0,
- PTE_READONLY | PTE_WRITECOMBINED_CACHE,
- PTE_EXECUTE | PTE_WRITECOMBINED_CACHE,
- PTE_EXECUTE_READ | PTE_WRITECOMBINED_CACHE,
- PTE_READWRITE | PTE_WRITECOMBINED_CACHE,
- PTE_WRITECOPY | PTE_WRITECOMBINED_CACHE,
- PTE_EXECUTE_READWRITE | PTE_WRITECOMBINED_CACHE,
- PTE_EXECUTE_WRITECOPY | PTE_WRITECOMBINED_CACHE,
-};
-
+
+extern const ULONG MmProtectToPteMask[32];
+extern const ULONG MmProtectToValue[32];
+
//
// Assertions for session images, addresses, and PTEs
//
#define MI_IS_SESSION_PTE(Pte) \
((((PMMPTE)Pte) >= MiSessionBasePte) && (((PMMPTE)Pte) < MiSessionLastPte))
+#define MI_IS_PAGE_TABLE_ADDRESS(Address) \
+ (((PVOID)(Address) >= (PVOID)PTE_BASE) && ((PVOID)(Address) <= (PVOID)PTE_TOP))
+
+#define MI_IS_SYSTEM_PAGE_TABLE_ADDRESS(Address) \
+ (((Address) >= (PVOID)MiAddressToPte(MmSystemRangeStart)) && ((Address) <= (PVOID)PTE_TOP))
+
+#define MI_IS_PAGE_TABLE_OR_HYPER_ADDRESS(Address) \
+ (((PVOID)(Address) >= (PVOID)PTE_BASE) && ((PVOID)(Address) <= (PVOID)MmHyperSpaceEnd))
+
//
// Corresponds to MMPTE_SOFTWARE.Protection
//
#define MM_SYSLDR_BOOT_LOADED (PVOID)0xFFFFFFFF
#define MM_SYSLDR_SINGLE_ENTRY 0x1
+#if defined(_M_IX86) || defined(_M_ARM)
//
// PFN List Sentinel
//
#define LIST_HEAD 0xFFFFFFFF
+//
+// Because GCC cannot automatically downcast 0xFFFFFFFF to fields of lesser width,
+// we need a manual definition suited to the number of bits in the PteFrame.
+// This is used as a LIST_HEAD for the colored list
+//
+#define COLORED_LIST_HEAD ((1 << 25) - 1) // 0x1FFFFFF
+#elif defined(_M_AMD64)
+#define LIST_HEAD 0xFFFFFFFFFFFFFFFFLL
+#define COLORED_LIST_HEAD ((1ULL << 57) - 1) // 0x1FFFFFFFFFFFFFFLL
+#else
+#error Define these please!
+#endif
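+//
+// Illustrative arithmetic (bit widths implied by the values above): the PteFrame
+// field holds 25 bits on x86/ARM, so its all-ones sentinel is (1 << 25) - 1 ==
+// 0x1FFFFFF, while the 57-bit field on AMD64 gives (1ULL << 57) - 1 == 0x1FFFFFFFFFFFFFF.
+//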
+
//
// Special IRQL value (found in assertions)
//
#define MM_NOIRQL (KIRQL)0xFFFFFFFF
+//
+// Returns the color of a page
+//
+#define MI_GET_PAGE_COLOR(x) ((x) & MmSecondaryColorMask)
+#define MI_GET_NEXT_COLOR(x) (MI_GET_PAGE_COLOR(++MmSystemPageColor))
+#define MI_GET_NEXT_PROCESS_COLOR(x) (MI_GET_PAGE_COLOR(++(x)->NextPageColor))
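+//
+// Illustrative example (assuming MmSecondaryColorMask == MI_SECONDARY_COLORS - 1,
+// i.e. 0x3F for the default 64 colors): MI_GET_PAGE_COLOR(Pfn) reduces any page
+// number to a color index in 0..63, and the NEXT variants advance the respective
+// color seed before masking.
+//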
+
+#ifdef _M_IX86
+//
+// Decodes a Prototype PTE into the underlying PTE
+//
+#define MiProtoPteToPte(x) \
+ (PMMPTE)((ULONG_PTR)MmPagedPoolStart + \
+ (((x)->u.Proto.ProtoAddressHigh << 7) | (x)->u.Proto.ProtoAddressLow))
+#endif
+
+//
+// Prototype PTEs that don't yet have a pagefile association
+//
+#define MI_PTE_LOOKUP_NEEDED 0xFFFFF
+
+//
+// System views are binned into 64K chunks
+//
+#define MI_SYSTEM_VIEW_BUCKET_SIZE _64K
+
//
// FIXFIX: These should go in ex.h after the pool merge
//
PFN_NUMBER LastFrame;
} MI_LARGE_PAGE_RANGES, *PMI_LARGE_PAGE_RANGES;
+typedef struct _MMVIEW
+{
+ ULONG_PTR Entry;
+ PCONTROL_AREA ControlArea;
+} MMVIEW, *PMMVIEW;
+
+typedef struct _MMSESSION
+{
+ KGUARDED_MUTEX SystemSpaceViewLock;
+ PKGUARDED_MUTEX SystemSpaceViewLockPointer;
+ PCHAR SystemSpaceViewStart;
+ PMMVIEW SystemSpaceViewTable;
+ ULONG SystemSpaceHashSize;
+ ULONG SystemSpaceHashEntries;
+ ULONG SystemSpaceHashKey;
+ ULONG BitmapFailures;
+ PRTL_BITMAP SystemSpaceBitMap;
+} MMSESSION, *PMMSESSION;
+
extern MMPTE HyperTemplatePte;
extern MMPDE ValidKernelPde;
extern MMPTE ValidKernelPte;
+extern MMPDE DemandZeroPde;
+extern MMPTE DemandZeroPte;
+extern MMPTE PrototypePte;
extern BOOLEAN MmLargeSystemCache;
extern BOOLEAN MmZeroPageFile;
extern BOOLEAN MmProtectFreedNonPagedPool;
extern BOOLEAN MmMirroring;
extern BOOLEAN MmMakeLowMemory;
extern BOOLEAN MmEnforceWriteProtection;
-extern ULONG MmAllocationFragment;
+extern SIZE_T MmAllocationFragment;
extern ULONG MmConsumedPoolPercentage;
extern ULONG MmVerifyDriverBufferType;
extern ULONG MmVerifyDriverLevel;
extern PMMPTE MiHighestUserPte;
extern PMMPDE MiHighestUserPde;
extern PFN_NUMBER MmSystemPageDirectory[PD_COUNT];
+extern PMMPTE MmSharedUserDataPte;
+extern LIST_ENTRY MmProcessList;
+extern BOOLEAN MmZeroingPageThreadActive;
+extern KEVENT MmZeroingPageEvent;
+extern ULONG MmSystemPageColor;
+extern ULONG MmProcessColorSeed;
+extern PMMWSL MmWorkingSetList;
-#define MI_PFN_TO_PFNENTRY(x) (&MmPfnDatabase[1][x])
-#define MI_PFNENTRY_TO_PFN(x) (x - MmPfnDatabase[1])
+//
+// Figures out the hardware bits for a PTE
+//
+ULONG
+FORCEINLINE
+MiDetermineUserGlobalPteMask(IN PMMPTE PointerPte)
+{
+ MMPTE TempPte;
+
+ /* Start fresh */
+ TempPte.u.Long = 0;
+
+ /* Make it valid and accessed */
+ TempPte.u.Hard.Valid = TRUE;
+ TempPte.u.Hard.Accessed = TRUE;
+
+ /* Is this for user-mode? */
+ if ((PointerPte <= MiHighestUserPte) ||
+ ((PointerPte >= MiAddressToPde(NULL)) && (PointerPte <= MiHighestUserPde)))
+ {
+ /* Set the owner bit */
+ TempPte.u.Hard.Owner = TRUE;
+ }
+
+ /* FIXME: We should also set the global bit */
+
+ /* Return the protection */
+ return TempPte.u.Long;
+}
//
// Creates a valid kernel PTE with the given protection
//
FORCEINLINE
VOID
+MI_MAKE_HARDWARE_PTE_KERNEL(IN PMMPTE NewPte,
+ IN PMMPTE MappingPte,
+ IN ULONG ProtectionMask,
+ IN PFN_NUMBER PageFrameNumber)
+{
+ /* Only valid for kernel, non-session PTEs */
+ ASSERT(MappingPte > MiHighestUserPte);
+ ASSERT(!MI_IS_SESSION_PTE(MappingPte));
+ ASSERT((MappingPte < (PMMPTE)PDE_BASE) || (MappingPte > (PMMPTE)PDE_TOP));
+
+ /* Start fresh */
+ *NewPte = ValidKernelPte;
+
+ /* Set the protection and page */
+ NewPte->u.Hard.PageFrameNumber = PageFrameNumber;
+ NewPte->u.Long |= MmProtectToPteMask[ProtectionMask];
+}
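+//
+// Illustrative usage sketch (local names are hypothetical; MM_READWRITE is the
+// usual MM_ protection index, not defined in this header excerpt):
+//
+//     MMPTE TempPte;
+//     MI_MAKE_HARDWARE_PTE_KERNEL(&TempPte, PointerPte, MM_READWRITE, PageFrameIndex);
+//     *PointerPte = TempPte;
+//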
+
+//
+// Creates a valid PTE with the given protection
+//
+FORCEINLINE
+VOID
MI_MAKE_HARDWARE_PTE(IN PMMPTE NewPte,
IN PMMPTE MappingPte,
IN ULONG ProtectionMask,
IN PFN_NUMBER PageFrameNumber)
+{
+ /* Set the protection and page */
+ NewPte->u.Long = MiDetermineUserGlobalPteMask(MappingPte);
+ NewPte->u.Long |= MmProtectToPteMask[ProtectionMask];
+ NewPte->u.Hard.PageFrameNumber = PageFrameNumber;
+}
+
+//
+// Creates a valid user PTE with the given protection
+//
+FORCEINLINE
+VOID
+MI_MAKE_HARDWARE_PTE_USER(IN PMMPTE NewPte,
+ IN PMMPTE MappingPte,
+ IN ULONG ProtectionMask,
+ IN PFN_NUMBER PageFrameNumber)
{
- /* Only valid for kernel, non-session PTEs */
+ /* Only valid for user-mode PTEs */
- ASSERT(MappingPte > MiHighestUserPte);
- ASSERT(!MI_IS_SESSION_PTE(MappingPte));
- ASSERT((MappingPte < (PMMPTE)PDE_BASE) || (MappingPte > (PMMPTE)PDE_TOP));
+ ASSERT(MappingPte <= MiHighestUserPte);
/* Start fresh */
*NewPte = ValidKernelPte;
/* Set the protection and page */
+ NewPte->u.Hard.Owner = TRUE;
NewPte->u.Hard.PageFrameNumber = PageFrameNumber;
NewPte->u.Long |= MmProtectToPteMask[ProtectionMask];
}
+#ifdef _M_IX86
+//
+// Builds a Prototype PTE for the address of the PTE
+//
+FORCEINLINE
+VOID
+MI_MAKE_PROTOTYPE_PTE(IN PMMPTE NewPte,
+ IN PMMPTE PointerPte)
+{
+ ULONG_PTR Offset;
+
+ /* Mark this as a prototype */
+ NewPte->u.Long = 0;
+ NewPte->u.Proto.Prototype = 1;
+
+ /*
+ * Prototype PTEs are only valid in paged pool by design, this little trick
+ * lets us only use 28 bits for the address of the PTE
+ */
+ Offset = (ULONG_PTR)PointerPte - (ULONG_PTR)MmPagedPoolStart;
+
+ /* 7 bits go in the "low", and the other 21 bits go in the "high" */
+ NewPte->u.Proto.ProtoAddressLow = Offset & 0x7F;
+ NewPte->u.Proto.ProtoAddressHigh = (Offset & 0xFFFFFF80) >> 7;
+ ASSERT(MiProtoPteToPte(NewPte) == PointerPte);
+}
+#endif
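+//
+// Worked example of the encoding above (illustrative numbers): if PointerPte is
+// MmPagedPoolStart + 0x1234, then Offset = 0x1234, ProtoAddressLow = 0x34 and
+// ProtoAddressHigh = 0x24, so MiProtoPteToPte() rebuilds
+// MmPagedPoolStart + ((0x24 << 7) | 0x34) == MmPagedPoolStart + 0x1234.
+//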
+
//
// Returns if the page is physically resident (ie: a large page)
// FIXFIX: CISC/x86 only?
*PointerPte = InvalidPte;
}
-NTSTATUS
+//
+// Checks if the thread already owns a working set
+//
+FORCEINLINE
+BOOLEAN
+MM_ANY_WS_LOCK_HELD(IN PETHREAD Thread)
+{
+ /* If any of these are held, return TRUE */
+ return ((Thread->OwnsProcessWorkingSetExclusive) ||
+ (Thread->OwnsProcessWorkingSetShared) ||
+ (Thread->OwnsSystemWorkingSetExclusive) ||
+ (Thread->OwnsSystemWorkingSetShared) ||
+ (Thread->OwnsSessionWorkingSetExclusive) ||
+ (Thread->OwnsSessionWorkingSetShared));
+}
+
+//
+// Checks if the process owns the working set lock
+//
+FORCEINLINE
+BOOLEAN
+MI_WS_OWNER(IN PEPROCESS Process)
+{
+ /* Check if this process is the owner, and that the thread owns the WS */
+ return ((KeGetCurrentThread()->ApcState.Process == &Process->Pcb) &&
+ ((PsGetCurrentThread()->OwnsProcessWorkingSetExclusive) ||
+ (PsGetCurrentThread()->OwnsProcessWorkingSetShared)));
+}
+
+//
+// Locks the working set for the given process
+//
+FORCEINLINE
+VOID
+MiLockProcessWorkingSet(IN PEPROCESS Process,
+ IN PETHREAD Thread)
+{
+ /* Shouldn't already be owning the process working set */
+ ASSERT(Thread->OwnsProcessWorkingSetShared == FALSE);
+ ASSERT(Thread->OwnsProcessWorkingSetExclusive == FALSE);
+
+ /* Block APCs, make sure that still nothing is already held */
+ KeEnterGuardedRegion();
+ ASSERT(!MM_ANY_WS_LOCK_HELD(Thread));
+
+ /* FIXME: Actually lock it (we can't because Vm is used by MAREAs) */
+
+ /* FIXME: This also can't be checked because Vm is used by MAREAs */
+ //ASSERT(Process->Vm.Flags.AcquiredUnsafe == 0);
+
+ /* Okay, now we can own it exclusively */
+ ASSERT(Thread->OwnsProcessWorkingSetExclusive == FALSE);
+ Thread->OwnsProcessWorkingSetExclusive = TRUE;
+}
+
+//
+// Unlocks the working set for the given process
+//
+FORCEINLINE
+VOID
+MiUnlockProcessWorkingSet(IN PEPROCESS Process,
+ IN PETHREAD Thread)
+{
+ /* Make sure this process really is owner, and it was a safe acquisition */
+ ASSERT(MI_WS_OWNER(Process));
+ /* This can't be checked because Vm is used by MAREAs */
+ //ASSERT(Process->Vm.Flags.AcquiredUnsafe == 0);
+
+ /* The thread doesn't own it anymore */
+ ASSERT(Thread->OwnsProcessWorkingSetExclusive == TRUE);
+ Thread->OwnsProcessWorkingSetExclusive = FALSE;
+
+ /* FIXME: Actually release it (we can't because Vm is used by MAREAs) */
+
+ /* Unblock APCs */
+ KeLeaveGuardedRegion();
+}
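+//
+// Illustrative pairing (hypothetical caller, not part of this header):
+//
+//     Thread = PsGetCurrentThread();
+//     MiLockProcessWorkingSet(Process, Thread);
+//     /* ... manipulate the process working set ... */
+//     MiUnlockProcessWorkingSet(Process, Thread);
+//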
+
+//
+// Locks the working set
+//
+FORCEINLINE
+VOID
+MiLockWorkingSet(IN PETHREAD Thread,
+ IN PMMSUPPORT WorkingSet)
+{
+ /* Block APCs */
+ KeEnterGuardedRegion();
+
+ /* Working set should be in global memory */
+ ASSERT(MI_IS_SESSION_ADDRESS((PVOID)WorkingSet) == FALSE);
+
+ /* Thread shouldn't already be owning something */
+ ASSERT(!MM_ANY_WS_LOCK_HELD(Thread));
+
+ /* FIXME: Actually lock it (we can't because Vm is used by MAREAs) */
+
+ /* Which working set is this? */
+ if (WorkingSet == &MmSystemCacheWs)
+ {
+ /* Own the system working set */
+ ASSERT((Thread->OwnsSystemWorkingSetExclusive == FALSE) &&
+ (Thread->OwnsSystemWorkingSetShared == FALSE));
+ Thread->OwnsSystemWorkingSetExclusive = TRUE;
+ }
+ else if (WorkingSet->Flags.SessionSpace)
+ {
+ /* We don't implement this yet */
+ UNIMPLEMENTED;
+ while (TRUE);
+ }
+ else
+ {
+ /* Own the process working set */
+ ASSERT((Thread->OwnsProcessWorkingSetExclusive == FALSE) &&
+ (Thread->OwnsProcessWorkingSetShared == FALSE));
+ Thread->OwnsProcessWorkingSetExclusive = TRUE;
+ }
+}
+
+//
+// Unlocks the working set
+//
+FORCEINLINE
+VOID
+MiUnlockWorkingSet(IN PETHREAD Thread,
+ IN PMMSUPPORT WorkingSet)
+{
+ /* Working set should be in global memory */
+ ASSERT(MI_IS_SESSION_ADDRESS((PVOID)WorkingSet) == FALSE);
+
+ /* Which working set is this? */
+ if (WorkingSet == &MmSystemCacheWs)
+ {
+ /* Release the system working set */
+ ASSERT((Thread->OwnsSystemWorkingSetExclusive == TRUE) ||
+ (Thread->OwnsSystemWorkingSetShared == TRUE));
+ Thread->OwnsSystemWorkingSetExclusive = FALSE;
+ }
+ else if (WorkingSet->Flags.SessionSpace)
+ {
+ /* We don't implement this yet */
+ UNIMPLEMENTED;
+ while (TRUE);
+ }
+ else
+ {
+ /* Release the process working set */
+ ASSERT((Thread->OwnsProcessWorkingSetExclusive) ||
+ (Thread->OwnsProcessWorkingSetShared));
+ Thread->OwnsProcessWorkingSetExclusive = FALSE;
+ }
+
+ /* FIXME: Actually release it (we can't because Vm is used by MAREAs) */
+
+ /* Unblock APCs */
+ KeLeaveGuardedRegion();
+}
+
+//
+// Returns the ProtoPTE inside a VAD for the given VPN
+//
+FORCEINLINE
+PMMPTE
+MI_GET_PROTOTYPE_PTE_FOR_VPN(IN PMMVAD Vad,
+ IN ULONG_PTR Vpn)
+{
+ PMMPTE ProtoPte;
+
+ /* Find the offset within the VAD's prototype PTEs */
+ ProtoPte = Vad->FirstPrototypePte + (Vpn - Vad->StartingVpn);
+ ASSERT(ProtoPte <= Vad->LastContiguousPte);
+ return ProtoPte;
+}
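+//
+// Illustrative usage (hypothetical caller): for a faulting Address covered by Vad,
+//
+//     ProtoPte = MI_GET_PROTOTYPE_PTE_FOR_VPN(Vad, (ULONG_PTR)Address >> PAGE_SHIFT);
+//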
+
+//
+// Returns the PFN Database entry for the given page number
+// Warning: This is not necessarily a valid PFN database entry!
+//
+FORCEINLINE
+PMMPFN
+MI_PFN_ELEMENT(IN PFN_NUMBER Pfn)
+{
+ /* Get the entry */
+ return &MmPfnDatabase[Pfn];
+}
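+//
+// Illustrative usage (and the apparent replacement for the MI_PFN_TO_PFNENTRY
+// macro removed earlier in this patch):
+//
+//     PMMPFN Pfn1 = MI_PFN_ELEMENT(PageFrameIndex);
+//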
+
+BOOLEAN
NTAPI
MmArmInitSystem(
IN ULONG Phase,
VOID
NTAPI
-MiInsertInListTail(
+MiInsertPageInList(
IN PMMPFNLIST ListHead,
- IN PMMPFN Entry
-);
-
-VOID
-NTAPI
-MiInsertZeroListAtBack(
- IN PFN_NUMBER PageIndex
+ IN PFN_NUMBER PageFrameIndex
);
VOID
IN PMMPFN Entry
);
-PMMPFN
-NTAPI
-MiRemoveHeadList(
- IN PMMPFNLIST ListHead
-);
-
PFN_NUMBER
NTAPI
MiAllocatePfn(
IN PFN_NUMBER PageFrameIndex
);
+VOID
+NTAPI
+MiDecrementReferenceCount(
+ IN PMMPFN Pfn1,
+ IN PFN_NUMBER PageFrameIndex
+);
+
PFN_NUMBER
NTAPI
MiRemoveAnyPage(
IN ULONG Color
);
+VOID
+NTAPI
+MiZeroPhysicalPage(
+ IN PFN_NUMBER PageFrameIndex
+);
+
VOID
NTAPI
MiInsertPageInFreeList(
IN PMMPFN Pfn1
);
+PMMVAD
+NTAPI
+MiLocateAddress(
+ IN PVOID VirtualAddress
+);
+
+PMMADDRESS_NODE
+NTAPI
+MiCheckForConflictingNode(
+ IN ULONG_PTR StartVpn,
+ IN ULONG_PTR EndVpn,
+ IN PMM_AVL_TABLE Table
+);
+
+TABLE_SEARCH_RESULT
+NTAPI
+MiFindEmptyAddressRangeDownTree(
+ IN SIZE_T Length,
+ IN ULONG_PTR BoundaryAddress,
+ IN ULONG_PTR Alignment,
+ IN PMM_AVL_TABLE Table,
+ OUT PULONG_PTR Base,
+ OUT PMMADDRESS_NODE *Parent
+);
+
+NTSTATUS
+NTAPI
+MiFindEmptyAddressRangeInTree(
+ IN SIZE_T Length,
+ IN ULONG_PTR Alignment,
+ IN PMM_AVL_TABLE Table,
+ OUT PMMADDRESS_NODE *PreviousVad,
+ OUT PULONG_PTR Base
+);
+
+VOID
+NTAPI
+MiInsertVad(
+ IN PMMVAD Vad,
+ IN PEPROCESS Process
+);
+
+VOID
+NTAPI
+MiInsertNode(
+ IN PMM_AVL_TABLE Table,
+ IN PMMADDRESS_NODE NewNode,
+ PMMADDRESS_NODE Parent,
+ TABLE_SEARCH_RESULT Result
+);
+
+VOID
+NTAPI
+MiRemoveNode(
+ IN PMMADDRESS_NODE Node,
+ IN PMM_AVL_TABLE Table
+);
+
+PMMADDRESS_NODE
+NTAPI
+MiGetPreviousNode(
+ IN PMMADDRESS_NODE Node
+);
+
+PMMADDRESS_NODE
+NTAPI
+MiGetNextNode(
+ IN PMMADDRESS_NODE Node
+);
+
+BOOLEAN
+NTAPI
+MiInitializeSystemSpaceMap(
+ IN PVOID InputSession OPTIONAL
+);
+
+ULONG
+NTAPI
+MiMakeProtectionMask(
+ IN ULONG Protect
+);
+
+VOID
+NTAPI
+MiDeleteVirtualAddresses(
+ IN ULONG_PTR Va,
+ IN ULONG_PTR EndingAddress,
+ IN PMMVAD Vad
+);
+
+ULONG
+NTAPI
+MiMakeSystemAddressValid(
+ IN PVOID PageTableVirtualAddress,
+ IN PEPROCESS CurrentProcess
+);
+
+ULONG
+NTAPI
+MiMakeSystemAddressValidPfn(
+ IN PVOID VirtualAddress,
+ IN KIRQL OldIrql
+);
+
+VOID
+NTAPI
+MiRemoveMappedView(
+ IN PEPROCESS CurrentProcess,
+ IN PMMVAD Vad
+);
+
+PSUBSECTION
+NTAPI
+MiLocateSubsection(
+ IN PMMVAD Vad,
+ IN ULONG_PTR Vpn
+);
+
+//
+// MiRemoveZeroPage will use inline code to zero out the page manually if only
+// free pages are available. In some scenarios, we don't/can't run that piece of
+// code and would rather only have a real zero page. If we can't have a zero page,
+// then we'd like to have our own code to grab a free page and zero it out, by
+// using MiRemoveAnyPage. This function implements this.
+//
+PFN_NUMBER
+FORCEINLINE
+MiRemoveZeroPageSafe(IN ULONG Color)
+{
+ if (MmFreePagesByColor[ZeroedPageList][Color].Flink != LIST_HEAD) return MiRemoveZeroPage(Color);
+ return 0;
+}
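+//
+// Illustrative fallback sketch for callers (hypothetical, not part of this header),
+// using MiRemoveAnyPage and MiZeroPhysicalPage declared above:
+//
+//     PageFrameIndex = MiRemoveZeroPageSafe(Color);
+//     if (!PageFrameIndex)
+//     {
+//         /* No zeroed page of this color -- grab any page and zero it ourselves */
+//         PageFrameIndex = MiRemoveAnyPage(Color);
+//         MiZeroPhysicalPage(PageFrameIndex);
+//     }
+//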
+
+//
+// New ARM3<->RosMM PAGE Architecture
+//
+#define MI_GET_ROS_DATA(x) ((PMMROSPFN)(x->RosMmData))
+#define MI_IS_ROS_PFN(x) (((x)->u4.AweAllocation == TRUE) && (MI_GET_ROS_DATA(x) != NULL))
+#define ASSERT_IS_ROS_PFN(x) ASSERT(MI_IS_ROS_PFN(x) == TRUE);
+typedef struct _MMROSPFN
+{
+ PMM_RMAP_ENTRY RmapListHead;
+ SWAPENTRY SwapEntry;
+} MMROSPFN, *PMMROSPFN;
+
+#define RosMmData AweReferenceCount
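+//
+// In other words (as implied by the definitions above): the legacy ReactOS
+// per-page data (rmap list head and swap entry) hangs off the PFN entry by
+// reusing its AweReferenceCount field as a PMMROSPFN pointer.
+//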
+
/* EOF */