#define MI_NONPAGED_POOL_END (PVOID)0xFFBE0000
#define MI_DEBUG_MAPPING (PVOID)0xFFBFF000
-// on AMD64 this would be MiAddressToPte(MM_KSEG0_BASE)
#define MI_SYSTEM_PTE_BASE (PVOID)MiAddressToPte(NULL)
#define MI_MIN_SECONDARY_COLORS 8
#define MM_HIGHEST_VAD_ADDRESS \
(PVOID)((ULONG_PTR)MM_HIGHEST_USER_ADDRESS - (16 * PAGE_SIZE))
-
-/* The range 0x10000->0x7FEFFFFF is reserved for the ROSMM MAREA Allocator */
-#define MI_LOWEST_VAD_ADDRESS (PVOID)0x7FF00000
+#define MI_LOWEST_VAD_ADDRESS (PVOID)MM_LOWEST_USER_ADDRESS
#endif /* !_M_AMD64 */
#define _1MB (1024 * _1KB)
#define _1GB (1024 * _1MB)
+/* Everyone loves 64K */
+#define _64K (64 * _1KB)
+
/* Area mapped by a PDE */
#define PDE_MAPPED_VA (PTE_COUNT * PAGE_SIZE)
/* Size of a page table */
#define PT_SIZE (PTE_COUNT * sizeof(MMPTE))
+/* Size of a page directory */
+#define PD_SIZE (PDE_COUNT * sizeof(MMPDE))
+
+/* Size of all page directories for a process */
+#define SYSTEM_PD_SIZE (PD_COUNT * PD_SIZE)
+
/* Architecture specific count of PDEs in a directory, and count of PTEs in a PT */
#ifdef _M_IX86
#define PD_COUNT 1
#define PDE_COUNT 1024
#define PTE_COUNT 1024
+C_ASSERT(SYSTEM_PD_SIZE == PAGE_SIZE);
#elif _M_ARM
#define PD_COUNT 1
#define PDE_COUNT 4096
#define MM_NOCACHE 8
#define MM_DECOMMIT 0x10
#define MM_NOACCESS (MM_DECOMMIT | MM_NOCACHE)
+#define MM_INVALID_PROTECTION 0xFFFFFFFF
//
// Specific PTE Definitions that map to the Memory Manager's Protection Mask Bits
#define PTE_WRITECOPY 0x200
#define PTE_EXECUTE_READWRITE 0x0
#define PTE_EXECUTE_WRITECOPY 0x200
+#define PTE_PROTOTYPE 0x400
//
// Cache flags
//
#else
#error Define these please!
#endif
-static const
-ULONG
-MmProtectToPteMask[32] =
-{
- //
- // These are the base MM_ protection flags
- //
- 0,
- PTE_READONLY | PTE_ENABLE_CACHE,
- PTE_EXECUTE | PTE_ENABLE_CACHE,
- PTE_EXECUTE_READ | PTE_ENABLE_CACHE,
- PTE_READWRITE | PTE_ENABLE_CACHE,
- PTE_WRITECOPY | PTE_ENABLE_CACHE,
- PTE_EXECUTE_READWRITE | PTE_ENABLE_CACHE,
- PTE_EXECUTE_WRITECOPY | PTE_ENABLE_CACHE,
- //
- // These OR in the MM_NOCACHE flag
- //
- 0,
- PTE_READONLY | PTE_DISABLE_CACHE,
- PTE_EXECUTE | PTE_DISABLE_CACHE,
- PTE_EXECUTE_READ | PTE_DISABLE_CACHE,
- PTE_READWRITE | PTE_DISABLE_CACHE,
- PTE_WRITECOPY | PTE_DISABLE_CACHE,
- PTE_EXECUTE_READWRITE | PTE_DISABLE_CACHE,
- PTE_EXECUTE_WRITECOPY | PTE_DISABLE_CACHE,
- //
- // These OR in the MM_DECOMMIT flag, which doesn't seem supported on x86/64/ARM
- //
- 0,
- PTE_READONLY | PTE_ENABLE_CACHE,
- PTE_EXECUTE | PTE_ENABLE_CACHE,
- PTE_EXECUTE_READ | PTE_ENABLE_CACHE,
- PTE_READWRITE | PTE_ENABLE_CACHE,
- PTE_WRITECOPY | PTE_ENABLE_CACHE,
- PTE_EXECUTE_READWRITE | PTE_ENABLE_CACHE,
- PTE_EXECUTE_WRITECOPY | PTE_ENABLE_CACHE,
- //
- // These OR in the MM_NOACCESS flag, which seems to enable WriteCombining?
- //
- 0,
- PTE_READONLY | PTE_WRITECOMBINED_CACHE,
- PTE_EXECUTE | PTE_WRITECOMBINED_CACHE,
- PTE_EXECUTE_READ | PTE_WRITECOMBINED_CACHE,
- PTE_READWRITE | PTE_WRITECOMBINED_CACHE,
- PTE_WRITECOPY | PTE_WRITECOMBINED_CACHE,
- PTE_EXECUTE_READWRITE | PTE_WRITECOMBINED_CACHE,
- PTE_EXECUTE_WRITECOPY | PTE_WRITECOMBINED_CACHE,
-};
-
+
+extern const ULONG MmProtectToPteMask[32];
+extern const ULONG MmProtectToValue[32];
+
//
// Assertions for session images, addresses, and PTEs
//
#define MM_SYSLDR_BOOT_LOADED (PVOID)0xFFFFFFFF
#define MM_SYSLDR_SINGLE_ENTRY 0x1
+#if defined(_M_IX86) || defined(_M_ARM)
//
// PFN List Sentinel
//
#define LIST_HEAD 0xFFFFFFFF
+//
+// Because GCC cannot automatically downcast 0xFFFFFFFF to lesser-width bits,
+// we need a manual definition suited to the number of bits in the PteFrame.
+// This is used as a LIST_HEAD for the colored list
+//
+#define COLORED_LIST_HEAD ((1 << 25) - 1) // 0x1FFFFFF
+#elif defined(_M_AMD64)
+#define LIST_HEAD 0xFFFFFFFFFFFFFFFFULL
+//
+// The shift must be performed on an unsigned 64-bit operand: (1 << 57) would
+// shift a 32-bit int past its width, which is undefined behavior in C.
+//
+#define COLORED_LIST_HEAD ((1ULL << 57) - 1) // 0x1FFFFFFFFFFFFFFULL
+#else
+#error Define these please!
+#endif
+
//
// Special IRQL value (found in assertions)
//
#define MM_NOIRQL (KIRQL)0xFFFFFFFF
+//
+// Returns the color of a page (low bits of the page number, masked by the
+// global secondary-color mask)
+//
+#define MI_GET_PAGE_COLOR(x) ((x) & MmSecondaryColorMask)
+// NOTE(review): the argument of MI_GET_NEXT_COLOR is ignored; the macro always
+// pre-increments the global MmSystemPageColor -- confirm this is intentional
+#define MI_GET_NEXT_COLOR(x) (MI_GET_PAGE_COLOR(++MmSystemPageColor))
+// Advances and returns the next page color of the given process object
+#define MI_GET_NEXT_PROCESS_COLOR(x) (MI_GET_PAGE_COLOR(++(x)->NextPageColor))
+
+#ifdef _M_IX86
+//
+// Decodes a Prototype PTE into the underlying PTE.
+// The whole expansion is wrapped in parentheses so the result can safely be
+// dereferenced or member-accessed at the use site; without the outer parens a
+// trailing "->" would bind to the inner expression before the PMMPTE cast.
+//
+#define MiProtoPteToPte(x)                   \
+    ((PMMPTE)((ULONG_PTR)MmPagedPoolStart +  \
+              (((x)->u.Proto.ProtoAddressHigh << 7) | (x)->u.Proto.ProtoAddressLow)))
+#endif
+
+//
+// Prototype PTEs that don't yet have a pagefile association
+//
+#define MI_PTE_LOOKUP_NEEDED 0xFFFFF
+
+//
+// System views are binned into 64K chunks
+//
+#define MI_SYSTEM_VIEW_BUCKET_SIZE _64K
+
//
// FIXFIX: These should go in ex.h after the pool merge
//
PFN_NUMBER LastFrame;
} MI_LARGE_PAGE_RANGES, *PMI_LARGE_PAGE_RANGES;
+//
+// An entry in the system-space view hash table (see SystemSpaceViewTable in
+// MMSESSION)
+//
+typedef struct _MMVIEW
+{
+    ULONG_PTR Entry;           // Hash bucket value -- presumably encodes the
+                               // view's base/size; TODO confirm against users
+    PCONTROL_AREA ControlArea; // Control area of the section backing this view
+} MMVIEW, *PMMVIEW;
+
+//
+// Per-session bookkeeping for system-space section views.
+// NOTE(review): field comments below are inferences from names and from the
+// 64K view-bucket definitions earlier in this file -- confirm against the
+// view-mapping code before relying on them.
+//
+typedef struct _MMSESSION
+{
+    KGUARDED_MUTEX SystemSpaceViewLock;         // Guards the view state below
+    PKGUARDED_MUTEX SystemSpaceViewLockPointer; // The lock actually in use
+    PCHAR SystemSpaceViewStart;                 // Base of the view region
+    PMMVIEW SystemSpaceViewTable;               // Hash table of mapped views
+    ULONG SystemSpaceHashSize;                  // Bucket count of the table
+    ULONG SystemSpaceHashEntries;               // Buckets currently in use
+    ULONG SystemSpaceHashKey;                   // Hash key/mask -- TODO confirm
+    ULONG BitmapFailures;                       // Failed bitmap searches
+    PRTL_BITMAP SystemSpaceBitMap;              // Tracks 64K view buckets
+} MMSESSION, *PMMSESSION;
+
extern MMPTE HyperTemplatePte;
extern MMPDE ValidKernelPde;
extern MMPTE ValidKernelPte;
extern MMPDE DemandZeroPde;
+extern MMPTE DemandZeroPte;
+extern MMPTE PrototypePte;
extern BOOLEAN MmLargeSystemCache;
extern BOOLEAN MmZeroPageFile;
extern BOOLEAN MmProtectFreedNonPagedPool;
extern BOOLEAN MmMirroring;
extern BOOLEAN MmMakeLowMemory;
extern BOOLEAN MmEnforceWriteProtection;
-extern ULONG MmAllocationFragment;
+extern SIZE_T MmAllocationFragment;
extern ULONG MmConsumedPoolPercentage;
extern ULONG MmVerifyDriverBufferType;
extern ULONG MmVerifyDriverLevel;
extern PMMPTE MiHighestUserPte;
extern PMMPDE MiHighestUserPde;
extern PFN_NUMBER MmSystemPageDirectory[PD_COUNT];
-
-#define MI_PFN_TO_PFNENTRY(x) (&MmPfnDatabase[1][x])
-#define MI_PFNENTRY_TO_PFN(x) (x - MmPfnDatabase[1])
+extern PMMPTE MmSharedUserDataPte;
+extern LIST_ENTRY MmProcessList;
+extern BOOLEAN MmZeroingPageThreadActive;
+extern KEVENT MmZeroingPageEvent;
+extern ULONG MmSystemPageColor;
+extern ULONG MmProcessColorSeed;
+extern PMMWSL MmWorkingSetList;
//
// Figures out the hardware bits for a PTE
//
ULONG
FORCEINLINE
-MiDetermineUserGlobalPteMask(IN PMMPTE PointerPte)
+MiDetermineUserGlobalPteMask(IN PVOID PointerPte)
{
MMPTE TempPte;
/* Make it valid and accessed */
TempPte.u.Hard.Valid = TRUE;
- TempPte.u.Hard.Accessed = TRUE;
+ MI_MAKE_ACCESSED_PAGE(&TempPte);
/* Is this for user-mode? */
- if ((PointerPte <= MiHighestUserPte) ||
- ((PointerPte >= MiAddressToPde(NULL)) && (PointerPte <= MiHighestUserPde)))
+ if ((PointerPte <= (PVOID)MiHighestUserPte) ||
+ ((PointerPte >= (PVOID)MiAddressToPde(NULL)) &&
+ (PointerPte <= (PVOID)MiHighestUserPde)))
{
/* Set the owner bit */
- TempPte.u.Hard.Owner = TRUE;
+ MI_MAKE_OWNER_PAGE(&TempPte);
}
/* FIXME: We should also set the global bit */
NewPte->u.Long |= MmProtectToPteMask[ProtectionMask];
}
+#ifdef _M_IX86
+//
+// Builds a Prototype PTE for the address of the PTE
+//
+FORCEINLINE
+VOID
+MI_MAKE_PROTOTYPE_PTE(IN PMMPTE NewPte,
+                      IN PMMPTE PointerPte)
+{
+    ULONG_PTR Offset;
+
+    /* Mark this as a prototype */
+    NewPte->u.Long = 0;
+    NewPte->u.Proto.Prototype = 1;
+
+    /*
+     * Prototype PTEs are only valid in paged pool by design, this little trick
+     * lets us only use 28 bits for the address of the PTE
+     */
+    Offset = (ULONG_PTR)PointerPte - (ULONG_PTR)MmPagedPoolStart;
+
+    /* 7 bits go in the "low", and the other 21 bits go in the "high" */
+    /* NOTE(review): the 0xFFFFFF80 mask actually keeps bits 7-31 (25 bits),
+       not 21; the ASSERT below is what catches an offset too large to encode
+       in the bit-field -- confirm the ProtoAddressHigh field width */
+    NewPte->u.Proto.ProtoAddressLow = Offset & 0x7F;
+    NewPte->u.Proto.ProtoAddressHigh = (Offset & 0xFFFFFF80) >> 7;
+    ASSERT(MiProtoPteToPte(NewPte) == PointerPte);
+}
+#endif
+
//
// Returns if the page is physically resident (ie: a large page)
// FIXFIX: CISC/x86 only?
KeLeaveGuardedRegion();
}
-NTSTATUS
+//
+// Returns the ProtoPTE inside a VAD for the given VPN
+//
+FORCEINLINE
+PMMPTE
+MI_GET_PROTOTYPE_PTE_FOR_VPN(IN PMMVAD Vad,
+                             IN ULONG_PTR Vpn)
+{
+    PMMPTE ProtoPte;
+
+    /* Find the offset within the VAD's prototype PTEs */
+    ProtoPte = Vad->FirstPrototypePte + (Vpn - Vad->StartingVpn);
+    /* NOTE(review): only the upper bound is asserted; a Vpn below
+       Vad->StartingVpn would wrap the subtraction unnoticed -- callers must
+       guarantee Vpn >= Vad->StartingVpn */
+    ASSERT(ProtoPte <= Vad->LastContiguousPte);
+    return ProtoPte;
+}
+
+//
+// Returns the PFN Database entry for the given page number
+// Warning: This is not necessarily a valid PFN database entry!
+//
+FORCEINLINE
+PMMPFN
+MI_PFN_ELEMENT(IN PFN_NUMBER Pfn)
+{
+    /* Get the entry */
+    return &MmPfnDatabase[Pfn];
+}
+
+BOOLEAN
NTAPI
MmArmInitSystem(
IN ULONG Phase,
VOID
NTAPI
-MiInsertInListTail(
+MiInsertPageInList(
IN PMMPFNLIST ListHead,
- IN PMMPFN Entry
-);
-
-VOID
-NTAPI
-MiInsertZeroListAtBack(
- IN PFN_NUMBER PageIndex
+ IN PFN_NUMBER PageFrameIndex
);
VOID
IN PMMPFN Entry
);
-PMMPFN
-NTAPI
-MiRemoveHeadList(
- IN PMMPFNLIST ListHead
-);
-
PFN_NUMBER
NTAPI
MiAllocatePfn(
IN PFN_NUMBER PageFrameIndex
);
+VOID
+NTAPI
+MiDecrementReferenceCount(
+ IN PMMPFN Pfn1,
+ IN PFN_NUMBER PageFrameIndex
+);
+
PFN_NUMBER
NTAPI
MiRemoveAnyPage(
IN ULONG Color
);
+VOID
+NTAPI
+MiZeroPhysicalPage(
+ IN PFN_NUMBER PageFrameIndex
+);
+
VOID
NTAPI
MiInsertPageInFreeList(
IN PMM_AVL_TABLE Table
);
-NTSTATUS
+TABLE_SEARCH_RESULT
NTAPI
MiFindEmptyAddressRangeDownTree(
IN SIZE_T Length,
IN ULONG_PTR BoundaryAddress,
IN ULONG_PTR Alignment,
IN PMM_AVL_TABLE Table,
+ OUT PULONG_PTR Base,
+ OUT PMMADDRESS_NODE *Parent
+);
+
+NTSTATUS
+NTAPI
+MiFindEmptyAddressRangeInTree(
+ IN SIZE_T Length,
+ IN ULONG_PTR Alignment,
+ IN PMM_AVL_TABLE Table,
+ OUT PMMADDRESS_NODE *PreviousVad,
OUT PULONG_PTR Base
);
+VOID
+NTAPI
+MiInsertVad(
+ IN PMMVAD Vad,
+ IN PEPROCESS Process
+);
+
VOID
NTAPI
MiInsertNode(
+ IN PMM_AVL_TABLE Table,
IN PMMADDRESS_NODE NewNode,
+ PMMADDRESS_NODE Parent,
+ TABLE_SEARCH_RESULT Result
+);
+
+VOID
+NTAPI
+MiRemoveNode(
+ IN PMMADDRESS_NODE Node,
IN PMM_AVL_TABLE Table
);
+PMMADDRESS_NODE
+NTAPI
+MiGetPreviousNode(
+ IN PMMADDRESS_NODE Node
+);
+
+PMMADDRESS_NODE
+NTAPI
+MiGetNextNode(
+ IN PMMADDRESS_NODE Node
+);
+
+BOOLEAN
+NTAPI
+MiInitializeSystemSpaceMap(
+ IN PVOID InputSession OPTIONAL
+);
+
+ULONG
+NTAPI
+MiMakeProtectionMask(
+ IN ULONG Protect
+);
+
+VOID
+NTAPI
+MiDeleteVirtualAddresses(
+ IN ULONG_PTR Va,
+ IN ULONG_PTR EndingAddress,
+ IN PMMVAD Vad
+);
+
+ULONG
+NTAPI
+MiMakeSystemAddressValid(
+ IN PVOID PageTableVirtualAddress,
+ IN PEPROCESS CurrentProcess
+);
+
+ULONG
+NTAPI
+MiMakeSystemAddressValidPfn(
+ IN PVOID VirtualAddress,
+ IN KIRQL OldIrql
+);
+
+VOID
+NTAPI
+MiRemoveMappedView(
+ IN PEPROCESS CurrentProcess,
+ IN PMMVAD Vad
+);
+
+PSUBSECTION
+NTAPI
+MiLocateSubsection(
+ IN PMMVAD Vad,
+ IN ULONG_PTR Vpn
+);
+
+//
+// MiRemoveZeroPage will use inline code to zero out the page manually if only
+// free pages are available. In some scenarios, we don't/can't run that piece of
+// code and would rather only have a real zero page. If we can't have a zero page,
+// then we'd like to have our own code to grab a free page and zero it out, by
+// using MiRemoveAnyPage. This macro implements this.
+//
+PFN_NUMBER
+FORCEINLINE
+MiRemoveZeroPageSafe(IN ULONG Color)
+{
+    /* Is there a ready-zeroed page on this color's list? */
+    if (MmFreePagesByColor[ZeroedPageList][Color].Flink != LIST_HEAD)
+    {
+        /* Yes -- take it, no manual zeroing required */
+        return MiRemoveZeroPage(Color);
+    }
+
+    /* No zeroed page of this color; caller falls back to MiRemoveAnyPage */
+    return 0;
+}
+
+//
+// New ARM3<->RosMM PAGE Architecture
+//
+// MI_GET_ROS_DATA parenthesizes its argument so that expressions such as
+// MI_GET_ROS_DATA(Pfn + 1) expand correctly
+//
+#define MI_GET_ROS_DATA(x)   ((PMMROSPFN)((x)->RosMmData))
+#define MI_IS_ROS_PFN(x)     (((x)->u4.AweAllocation == TRUE) && (MI_GET_ROS_DATA(x) != NULL))
+#define ASSERT_IS_ROS_PFN(x) ASSERT(MI_IS_ROS_PFN(x) == TRUE);
+
+//
+// Legacy ReactOS-specific per-page data, hung off the ARM3 PFN entry
+//
+typedef struct _MMROSPFN
+{
+    PMM_RMAP_ENTRY RmapListHead; // Reverse-mapping list for this page
+    SWAPENTRY SwapEntry;         // Swap/pagefile location -- TODO confirm
+} MMROSPFN, *PMMROSPFN;
+
+// The ROS data pointer is stashed in MMPFN's AweReferenceCount field
+#define RosMmData AweReferenceCount
+
/* EOF */