* PROGRAMMERS: ReactOS Portable Systems Group
*/
-#ifndef _M_AMD64
+#pragma once
-#define MI_MIN_PAGES_FOR_NONPAGED_POOL_TUNING ((255 * _1MB) >> PAGE_SHIFT)
-#define MI_MIN_PAGES_FOR_SYSPTE_TUNING ((19 * _1MB) >> PAGE_SHIFT)
-#define MI_MIN_PAGES_FOR_SYSPTE_BOOST ((32 * _1MB) >> PAGE_SHIFT)
-#define MI_MAX_INIT_NONPAGED_POOL_SIZE (128 * _1MB)
-#define MI_MAX_NONPAGED_POOL_SIZE (128 * _1MB)
-#define MI_MAX_FREE_PAGE_LISTS 4
-
-#define MI_MIN_INIT_PAGED_POOLSIZE (32 * _1MB)
-
-#define MI_SESSION_VIEW_SIZE (20 * _1MB)
-#define MI_SESSION_POOL_SIZE (16 * _1MB)
-#define MI_SESSION_IMAGE_SIZE (8 * _1MB)
-#define MI_SESSION_WORKING_SET_SIZE (4 * _1MB)
-#define MI_SESSION_SIZE (MI_SESSION_VIEW_SIZE + \
- MI_SESSION_POOL_SIZE + \
- MI_SESSION_IMAGE_SIZE + \
- MI_SESSION_WORKING_SET_SIZE)
-
-#define MI_SYSTEM_VIEW_SIZE (16 * _1MB)
-
-#define MI_HIGHEST_USER_ADDRESS (PVOID)0x7FFEFFFF
-#define MI_USER_PROBE_ADDRESS (PVOID)0x7FFF0000
-#define MI_DEFAULT_SYSTEM_RANGE_START (PVOID)0x80000000
-#define MI_SYSTEM_CACHE_WS_START (PVOID)0xC0C00000
-#define MI_PAGED_POOL_START (PVOID)0xE1000000
-#define MI_NONPAGED_POOL_END (PVOID)0xFFBE0000
-#define MI_DEBUG_MAPPING (PVOID)0xFFBFF000
-
-#define MI_SYSTEM_PTE_BASE (PVOID)MiAddressToPte(NULL)
-
-#define MI_MIN_SECONDARY_COLORS 8
-#define MI_SECONDARY_COLORS 64
-#define MI_MAX_SECONDARY_COLORS 1024
-
-#define MI_MIN_ALLOCATION_FRAGMENT (4 * _1KB)
-#define MI_ALLOCATION_FRAGMENT (64 * _1KB)
-#define MI_MAX_ALLOCATION_FRAGMENT (2 * _1MB)
-
-#define MM_HIGHEST_VAD_ADDRESS \
- (PVOID)((ULONG_PTR)MM_HIGHEST_USER_ADDRESS - (16 * PAGE_SIZE))
#define MI_LOWEST_VAD_ADDRESS (PVOID)MM_LOWEST_USER_ADDRESS
-#endif /* !_M_AMD64 */
-
/* Make the code cleaner with some definitions for size multiples */
#define _1KB (1024u)
#define _1MB (1024 * _1KB)
/* Size of a page directory */
#define PD_SIZE (PDE_COUNT * sizeof(MMPDE))
+/* Stop using these! */
+#define PD_COUNT PPE_PER_PAGE
+#define PDE_COUNT PDE_PER_PAGE
+#define PTE_COUNT PTE_PER_PAGE
+
/* Size of all page directories for a process */
#define SYSTEM_PD_SIZE (PD_COUNT * PD_SIZE)
-
-/* Architecture specific count of PDEs in a directory, and count of PTEs in a PT */
#ifdef _M_IX86
-#define PD_COUNT 1
-#define PDE_COUNT 1024
-#define PTE_COUNT 1024
C_ASSERT(SYSTEM_PD_SIZE == PAGE_SIZE);
-#elif _M_ARM
-#define PD_COUNT 1
-#define PDE_COUNT 4096
-#define PTE_COUNT 256
-#else
-#define PD_COUNT PPE_PER_PAGE
-#define PDE_COUNT PDE_PER_PAGE
-#define PTE_COUNT PTE_PER_PAGE
#endif
//
-// Protection Bits part of the internal memory manager Protection Mask
-// Taken from http://www.reactos.org/wiki/Techwiki:Memory_management_in_the_Windows_XP_kernel
+// Protection Bits part of the internal memory manager Protection Mask, from:
+// http://reactos.org/wiki/Techwiki:Memory_management_in_the_Windows_XP_kernel
+// https://www.reactos.org/wiki/Techwiki:Memory_Protection_constants
// and public assertions.
//
#define MM_ZERO_ACCESS 0
#define MM_WRITECOPY 5
#define MM_EXECUTE_READWRITE 6
#define MM_EXECUTE_WRITECOPY 7
-#define MM_NOCACHE 8
-#define MM_DECOMMIT 0x10
-#define MM_NOACCESS (MM_DECOMMIT | MM_NOCACHE)
+#define MM_PROTECT_ACCESS 7
+
+//
+// These are flags on top of the actual protection mask
+//
+#define MM_NOCACHE 0x08
+#define MM_GUARDPAGE 0x10
+#define MM_WRITECOMBINE 0x18
+#define MM_PROTECT_SPECIAL 0x18
+
+//
+// These are special cases
+//
+#define MM_DECOMMIT (MM_ZERO_ACCESS | MM_GUARDPAGE)
+#define MM_NOACCESS (MM_ZERO_ACCESS | MM_WRITECOMBINE)
+#define MM_OUTSWAPPED_KSTACK (MM_EXECUTE_WRITECOPY | MM_WRITECOMBINE)
#define MM_INVALID_PROTECTION 0xFFFFFFFF
//
#error Define these please!
#endif
-extern const ULONG MmProtectToPteMask[32];
+extern const ULONG_PTR MmProtectToPteMask[32];
extern const ULONG MmProtectToValue[32];
//
#define MI_IS_PAGE_TABLE_OR_HYPER_ADDRESS(Address) \
(((PVOID)(Address) >= (PVOID)PTE_BASE) && ((PVOID)(Address) <= (PVOID)MmHyperSpaceEnd))
-//
-// Corresponds to MMPTE_SOFTWARE.Protection
-//
-#ifdef _M_IX86
-#define MM_PTE_SOFTWARE_PROTECTION_BITS 5
-#elif _M_ARM
-#define MM_PTE_SOFTWARE_PROTECTION_BITS 6
-#elif _M_AMD64
-#define MM_PTE_SOFTWARE_PROTECTION_BITS 5
-#else
-#error Define these please!
-#endif
-
//
// Creates a software PTE with the given protection
//
//
// Special values for LoadedImports
//
+#ifdef _WIN64
+#define MM_SYSLDR_NO_IMPORTS (PVOID)0xFFFFFFFFFFFFFFFEULL
+#define MM_SYSLDR_BOOT_LOADED (PVOID)0xFFFFFFFFFFFFFFFFULL
+#else
#define MM_SYSLDR_NO_IMPORTS (PVOID)0xFFFFFFFE
#define MM_SYSLDR_BOOT_LOADED (PVOID)0xFFFFFFFF
+#endif
#define MM_SYSLDR_SINGLE_ENTRY 0x1
+//
+// Number of initial session IDs
+//
+#define MI_INITIAL_SESSION_IDS 64
+
#if defined(_M_IX86) || defined(_M_ARM)
//
// PFN List Sentinel
#define MI_GET_NEXT_COLOR() (MI_GET_PAGE_COLOR(++MmSystemPageColor))
#define MI_GET_NEXT_PROCESS_COLOR(x) (MI_GET_PAGE_COLOR(++(x)->NextPageColor))
-#ifndef _M_AMD64
//
-// Decodes a Prototype PTE into the underlying PTE
+// Prototype PTEs that don't yet have a pagefile association
//
-#define MiProtoPteToPte(x) \
- (PMMPTE)((ULONG_PTR)MmPagedPoolStart + \
- (((x)->u.Proto.ProtoAddressHigh << 7) | (x)->u.Proto.ProtoAddressLow))
+#ifdef _WIN64
+#define MI_PTE_LOOKUP_NEEDED 0xffffffffULL
+#else
+#define MI_PTE_LOOKUP_NEEDED 0xFFFFF
#endif
//
-// Prototype PTEs that don't yet have a pagefile association
+// Number of session data and tag pages
//
-#define MI_PTE_LOOKUP_NEEDED 0xFFFFF
+#define MI_SESSION_DATA_PAGES_MAXIMUM (MM_ALLOCATION_GRANULARITY / PAGE_SIZE)
+#define MI_SESSION_TAG_PAGES_MAXIMUM (MM_ALLOCATION_GRANULARITY / PAGE_SIZE)
+
+//
+// Used by MiCheckSecuredVad
+//
+#define MM_READ_WRITE_ALLOWED 11
+#define MM_READ_ONLY_ALLOWED 10
+#define MM_NO_ACCESS_ALLOWED 01
+#define MM_DELETE_CHECK 85
//
// System views are binned into 64K chunks
//
// FIXFIX: These should go in ex.h after the pool merge
//
-#ifdef _M_AMD64
+#ifdef _WIN64
#define POOL_BLOCK_SIZE 16
#else
#define POOL_BLOCK_SIZE 8
#define BASE_POOL_TYPE_MASK 1
#define POOL_MAX_ALLOC (PAGE_SIZE - (sizeof(POOL_HEADER) + POOL_BLOCK_SIZE))
+//
+// Pool debugging/analysis/tracing flags
+//
+#define POOL_FLAG_CHECK_TIMERS 0x1
+#define POOL_FLAG_CHECK_WORKERS 0x2
+#define POOL_FLAG_CHECK_RESOURCES 0x4
+#define POOL_FLAG_VERIFIER 0x8
+#define POOL_FLAG_CHECK_DEADLOCK 0x10
+#define POOL_FLAG_SPECIAL_POOL 0x20
+#define POOL_FLAG_DBGPRINT_ON_FAILURE 0x40
+#define POOL_FLAG_CRASH_ON_FAILURE 0x80
+
+//
+// BAD_POOL_HEADER codes during pool bugcheck
+//
+#define POOL_CORRUPTED_LIST 3
+#define POOL_SIZE_OR_INDEX_MISMATCH 5
+#define POOL_ENTRIES_NOT_ALIGNED_PREVIOUS 6
+#define POOL_HEADER_NOT_ALIGNED 7
+#define POOL_HEADER_IS_ZERO 8
+#define POOL_ENTRIES_NOT_ALIGNED_NEXT 9
+#define POOL_ENTRY_NOT_FOUND 10
+
+//
+// BAD_POOL_CALLER codes during pool bugcheck
+//
+#define POOL_ENTRY_CORRUPTED 1
+#define POOL_ENTRY_ALREADY_FREE 6
+#define POOL_ENTRY_NOT_ALLOCATED 7
+#define POOL_ALLOC_IRQL_INVALID 8
+#define POOL_FREE_IRQL_INVALID 9
+#define POOL_BILLED_PROCESS_INVALID 13
+#define POOL_HEADER_SIZE_INVALID 32
+
typedef struct _POOL_DESCRIPTOR
{
POOL_TYPE PoolType;
{
struct
{
-#ifdef _M_AMD64
+#ifdef _WIN64
USHORT PreviousSize:8;
USHORT PoolIndex:8;
USHORT BlockSize:8;
};
ULONG Ulong1;
};
-#ifdef _M_AMD64
+#ifdef _WIN64
ULONG PoolTag;
#endif
union
{
-#ifdef _M_AMD64
+#ifdef _WIN64
PEPROCESS ProcessBilled;
#else
ULONG PoolTag;
C_ASSERT(sizeof(POOL_HEADER) == POOL_BLOCK_SIZE);
C_ASSERT(POOL_BLOCK_SIZE == sizeof(LIST_ENTRY));
+typedef struct _POOL_TRACKER_TABLE
+{
+ ULONG Key;
+ LONG NonPagedAllocs;
+ LONG NonPagedFrees;
+ SIZE_T NonPagedBytes;
+ LONG PagedAllocs;
+ LONG PagedFrees;
+ SIZE_T PagedBytes;
+} POOL_TRACKER_TABLE, *PPOOL_TRACKER_TABLE;
+
+typedef struct _POOL_TRACKER_BIG_PAGES
+{
+ PVOID Va;
+ ULONG Key;
+ ULONG NumberOfPages;
+ PVOID QuotaObject;
+} POOL_TRACKER_BIG_PAGES, *PPOOL_TRACKER_BIG_PAGES;
+
extern ULONG ExpNumberOfPagedPools;
extern POOL_DESCRIPTOR NonPagedPoolDescriptor;
extern PPOOL_DESCRIPTOR ExpPagedPoolDescriptor[16 + 1];
-extern PVOID PoolTrackTable;
+extern PPOOL_TRACKER_TABLE PoolTrackTable;
//
// END FIXFIX
PRTL_BITMAP SystemSpaceBitMap;
} MMSESSION, *PMMSESSION;
+typedef struct _MM_SESSION_SPACE_FLAGS
+{
+ ULONG Initialized:1;
+ ULONG DeletePending:1;
+ ULONG Filler:30;
+} MM_SESSION_SPACE_FLAGS;
+
+typedef struct _MM_SESSION_SPACE
+{
+ struct _MM_SESSION_SPACE *GlobalVirtualAddress;
+ LONG ReferenceCount;
+ union
+ {
+ ULONG LongFlags;
+ MM_SESSION_SPACE_FLAGS Flags;
+ } u;
+ ULONG SessionId;
+ LIST_ENTRY ProcessList;
+ LARGE_INTEGER LastProcessSwappedOutTime;
+ PFN_NUMBER SessionPageDirectoryIndex;
+ SIZE_T NonPageablePages;
+ SIZE_T CommittedPages;
+ PVOID PagedPoolStart;
+ PVOID PagedPoolEnd;
+ PMMPDE PagedPoolBasePde;
+ ULONG Color;
+ LONG ResidentProcessCount;
+ ULONG SessionPoolAllocationFailures[4];
+ LIST_ENTRY ImageList;
+ LCID LocaleId;
+ ULONG AttachCount;
+ KEVENT AttachEvent;
+ PEPROCESS LastProcess;
+ LONG ProcessReferenceToSession;
+ LIST_ENTRY WsListEntry;
+ GENERAL_LOOKASIDE Lookaside[SESSION_POOL_LOOKASIDES];
+ MMSESSION Session;
+ KGUARDED_MUTEX PagedPoolMutex;
+ MM_PAGED_POOL_INFO PagedPoolInfo;
+ MMSUPPORT Vm;
+ PMMWSLE Wsle;
+ PDRIVER_UNLOAD Win32KDriverUnload;
+ POOL_DESCRIPTOR PagedPool;
+#if defined (_M_AMD64)
+ MMPDE PageDirectory;
+#else
+ PMMPDE PageTables;
+#endif
+#if defined (_M_AMD64)
+ PMMPTE SpecialPoolFirstPte;
+ PMMPTE SpecialPoolLastPte;
+ PMMPTE NextPdeForSpecialPoolExpansion;
+ PMMPTE LastPdeForSpecialPoolExpansion;
+ PFN_NUMBER SpecialPagesInUse;
+#endif
+ LONG ImageLoadingCount;
+} MM_SESSION_SPACE, *PMM_SESSION_SPACE;
+
+extern PMM_SESSION_SPACE MmSessionSpace;
extern MMPTE HyperTemplatePte;
extern MMPDE ValidKernelPde;
extern MMPTE ValidKernelPte;
+extern MMPDE ValidKernelPdeLocal;
+extern MMPTE ValidKernelPteLocal;
extern MMPDE DemandZeroPde;
extern MMPTE DemandZeroPte;
extern MMPTE PrototypePte;
+extern MMPTE MmDecommittedPte;
extern BOOLEAN MmLargeSystemCache;
extern BOOLEAN MmZeroPageFile;
extern BOOLEAN MmProtectFreedNonPagedPool;
extern MM_PAGED_POOL_INFO MmPagedPoolInfo;
extern RTL_BITMAP MiPfnBitMap;
extern KGUARDED_MUTEX MmPagedPoolMutex;
+extern KGUARDED_MUTEX MmSectionCommitMutex;
extern PVOID MmPagedPoolStart;
extern PVOID MmPagedPoolEnd;
extern PVOID MmNonPagedSystemStart;
extern PVOID MmSystemCacheEnd;
extern MMSUPPORT MmSystemCacheWs;
extern SIZE_T MmAllocatedNonPagedPool;
-extern ULONG_PTR MmSubsectionBase;
extern ULONG MmSpecialPoolTag;
extern PVOID MmHyperSpaceEnd;
extern PMMWSL MmSystemCacheWorkingSetList;
extern ULONG MmMaximumNonPagedPoolPercent;
extern ULONG MmLargeStackSize;
extern PMMCOLOR_TABLES MmFreePagesByColor[FreePageList + 1];
+extern MMPFNLIST MmStandbyPageListByPriority[8];
extern ULONG MmProductType;
extern MM_SYSTEMSIZE MmSystemSize;
extern PKEVENT MiLowMemoryEvent;
extern PFN_NUMBER MiHighNonPagedPoolThreshold;
extern PFN_NUMBER MmMinimumFreePages;
extern PFN_NUMBER MmPlentyFreePages;
+extern SIZE_T MmMinimumStackCommitInBytes;
extern PFN_COUNT MiExpansionPoolPagesInitialCharge;
extern PFN_NUMBER MmResidentAvailablePages;
extern PFN_NUMBER MmResidentAvailableAtInit;
extern ULONG MmTotalFreeSystemPtes[MaximumPtePoolTypes];
extern PFN_NUMBER MmTotalSystemDriverPages;
+extern ULONG MmCritsectTimeoutSeconds;
extern PVOID MiSessionImageStart;
extern PVOID MiSessionImageEnd;
extern PMMPTE MiHighestUserPte;
extern PVOID MiSessionPoolEnd; // 0xBE000000
extern PVOID MiSessionPoolStart; // 0xBD000000
extern PVOID MiSessionViewStart; // 0xBE000000
+extern PVOID MiSessionSpaceWs;
+extern ULONG MmMaximumDeadKernelStacks;
+extern SLIST_HEADER MmDeadStackSListHead;
+extern MM_AVL_TABLE MmSectionBasedRoot;
+extern KGUARDED_MUTEX MmSectionBasedMutex;
+extern PVOID MmHighSectionBase;
+extern SIZE_T MmSystemLockPagesCount;
+extern ULONG_PTR MmSubsectionBase;
+extern LARGE_INTEGER MmCriticalSectionTimeout;
+extern LIST_ENTRY MmWorkingSetExpansionHead;
+extern KSPIN_LOCK MmExpansionLock;
+extern PETHREAD MiExpansionLockOwner;
-BOOLEAN
FORCEINLINE
+BOOLEAN
MiIsMemoryTypeFree(TYPE_OF_MEMORY MemoryType)
{
return ((MemoryType == LoaderFree) ||
(MemoryType == LoaderOsloaderStack));
}
-BOOLEAN
FORCEINLINE
+BOOLEAN
MiIsMemoryTypeInvisible(TYPE_OF_MEMORY MemoryType)
{
return ((MemoryType == LoaderFirmwarePermanent) ||
(MemoryType == LoaderBBTMemory));
}
+#ifdef _M_AMD64
+FORCEINLINE
+BOOLEAN
+MiIsUserPxe(PVOID Address)
+{
+ return ((ULONG_PTR)Address >> 7) == 0x1FFFFEDF6FB7DA0ULL;
+}
+
+FORCEINLINE
+BOOLEAN
+MiIsUserPpe(PVOID Address)
+{
+ return ((ULONG_PTR)Address >> 16) == 0xFFFFF6FB7DA0ULL;
+}
+
+FORCEINLINE
+BOOLEAN
+MiIsUserPde(PVOID Address)
+{
+ return ((ULONG_PTR)Address >> 25) == 0x7FFFFB7DA0ULL;
+}
+
+FORCEINLINE
+BOOLEAN
+MiIsUserPte(PVOID Address)
+{
+ return ((ULONG_PTR)Address >> 34) == 0x3FFFFDA0ULL;
+}
+#else
+FORCEINLINE
+BOOLEAN
+MiIsUserPde(PVOID Address)
+{
+ return ((Address >= (PVOID)MiAddressToPde(NULL)) &&
+ (Address <= (PVOID)MiHighestUserPde));
+}
+
+FORCEINLINE
+BOOLEAN
+MiIsUserPte(PVOID Address)
+{
+ return (Address <= (PVOID)MiHighestUserPte);
+}
+#endif
//
// Figures out the hardware bits for a PTE
//
-ULONG_PTR
FORCEINLINE
+ULONG_PTR
MiDetermineUserGlobalPteMask(IN PVOID PointerPte)
{
MMPTE TempPte;
MI_MAKE_ACCESSED_PAGE(&TempPte);
/* Is this for user-mode? */
- if ((PointerPte <= (PVOID)MiHighestUserPte) ||
- ((PointerPte >= (PVOID)MiAddressToPde(NULL)) &&
- (PointerPte <= (PVOID)MiHighestUserPde)))
+ if (
+#if (_MI_PAGING_LEVELS == 4)
+ MiIsUserPxe(PointerPte) ||
+#endif
+#if (_MI_PAGING_LEVELS >= 3)
+ MiIsUserPpe(PointerPte) ||
+#endif
+ MiIsUserPde(PointerPte) ||
+ MiIsUserPte(PointerPte))
{
/* Set the owner bit */
MI_MAKE_OWNER_PAGE(&TempPte);
ASSERT(MappingPte <= MiHighestUserPte);
/* Start fresh */
- *NewPte = ValidKernelPte;
+ NewPte->u.Long = 0;
/* Set the protection and page */
+ NewPte->u.Hard.Valid = TRUE;
NewPte->u.Hard.Owner = TRUE;
NewPte->u.Hard.PageFrameNumber = PageFrameNumber;
NewPte->u.Long |= MmProtectToPteMask[ProtectionMask];
/*
* Prototype PTEs are only valid in paged pool by design, this little trick
- * lets us only use 28 bits for the adress of the PTE
+ * lets us only use 30 bits for the address of the PTE, as long as the area
+ * stays 1024MB at most.
*/
Offset = (ULONG_PTR)PointerPte - (ULONG_PTR)MmPagedPoolStart;
- /* 7 bits go in the "low", and the other 21 bits go in the "high" */
- NewPte->u.Proto.ProtoAddressLow = Offset & 0x7F;
- NewPte->u.Proto.ProtoAddressHigh = (Offset & 0xFFFFFF80) >> 7;
- ASSERT(MiProtoPteToPte(NewPte) == PointerPte);
+ /*
+ * 7 bits go in the "low" (but we assume the bottom 2 are zero)
+ * and the other 21 bits go in the "high"
+ */
+ NewPte->u.Proto.ProtoAddressLow = (Offset & 0x1FC) >> 2;
+ NewPte->u.Proto.ProtoAddressHigh = (Offset & 0x3FFFFE00) >> 9;
+}
+
+//
+// Builds a Subsection PTE for the address of the Segment
+//
+FORCEINLINE
+VOID
+MI_MAKE_SUBSECTION_PTE(IN PMMPTE NewPte,
+ IN PVOID Segment)
+{
+ ULONG_PTR Offset;
+
+ /* Mark this as a prototype */
+ NewPte->u.Long = 0;
+ NewPte->u.Subsect.Prototype = 1;
+
+ /*
+ * Segments are only valid in nonpaged pool. We store the 20 bit
+ * difference either from the top or bottom of nonpaged pool, giving a
+ * maximum of 128MB to each delta, meaning nonpaged pool cannot exceed
+ * 256MB.
+ */
+ if ((ULONG_PTR)Segment < ((ULONG_PTR)MmSubsectionBase + (128 * _1MB)))
+ {
+ Offset = (ULONG_PTR)Segment - (ULONG_PTR)MmSubsectionBase;
+ NewPte->u.Subsect.WhichPool = PagedPool;
+ }
+ else
+ {
+ Offset = (ULONG_PTR)MmNonPagedPoolEnd - (ULONG_PTR)Segment;
+ NewPte->u.Subsect.WhichPool = NonPagedPool;
+ }
+
+ /*
+ * 4 bits go in the "low" (but we assume the bottom 3 are zero)
+ * and the other 20 bits go in the "high"
+ */
+ NewPte->u.Subsect.SubsectionAddressLow = (Offset & 0x78) >> 3;
+ NewPte->u.Subsect.SubsectionAddressHigh = (Offset & 0xFFFFF80) >> 7;
+}
+
+FORCEINLINE
+BOOLEAN
+MI_IS_MAPPED_PTE(PMMPTE PointerPte)
+{
+ /// \todo Make this reasonable code, this is UGLY!
+ return ((PointerPte->u.Long & 0xFFFFFC01) != 0);
}
+
#endif
+FORCEINLINE
+VOID
+MI_MAKE_TRANSITION_PTE(_Out_ PMMPTE NewPte,
+ _In_ PFN_NUMBER Page,
+ _In_ ULONG Protection)
+{
+ NewPte->u.Long = 0;
+ NewPte->u.Trans.Transition = 1;
+ NewPte->u.Trans.Protection = Protection;
+ NewPte->u.Trans.PageFrameNumber = Page;
+}
+
//
// Returns if the page is physically resident (ie: a large page)
// FIXFIX: CISC/x86 only?
//
// Writes a valid PTE
//
-VOID
FORCEINLINE
+VOID
MI_WRITE_VALID_PTE(IN PMMPTE PointerPte,
IN MMPTE TempPte)
{
}
//
-// Writes an invalid PTE
+// Updates a valid PTE
//
+FORCEINLINE
VOID
+MI_UPDATE_VALID_PTE(IN PMMPTE PointerPte,
+ IN MMPTE TempPte)
+{
+ /* Write the valid PTE */
+ ASSERT(PointerPte->u.Hard.Valid == 1);
+ ASSERT(TempPte.u.Hard.Valid == 1);
+ ASSERT(PointerPte->u.Hard.PageFrameNumber == TempPte.u.Hard.PageFrameNumber);
+ *PointerPte = TempPte;
+}
+
+//
+// Writes an invalid PTE
+//
FORCEINLINE
+VOID
MI_WRITE_INVALID_PTE(IN PMMPTE PointerPte,
IN MMPTE InvalidPte)
{
/* Write the invalid PTE */
ASSERT(InvalidPte.u.Hard.Valid == 0);
+ ASSERT(InvalidPte.u.Long != 0);
*PointerPte = InvalidPte;
}
//
-// Writes a valid PDE
+// Erase the PTE completely
//
+FORCEINLINE
VOID
+MI_ERASE_PTE(IN PMMPTE PointerPte)
+{
+ /* Zero out the PTE */
+ ASSERT(PointerPte->u.Long != 0);
+ PointerPte->u.Long = 0;
+}
+
+//
+// Writes a valid PDE
+//
FORCEINLINE
+VOID
MI_WRITE_VALID_PDE(IN PMMPDE PointerPde,
IN MMPDE TempPde)
{
//
// Writes an invalid PDE
//
-VOID
FORCEINLINE
+VOID
MI_WRITE_INVALID_PDE(IN PMMPDE PointerPde,
IN MMPDE InvalidPde)
{
/* Write the invalid PDE */
ASSERT(InvalidPde.u.Hard.Valid == 0);
+ ASSERT(InvalidPde.u.Long != 0);
*PointerPde = InvalidPde;
}
MI_WS_OWNER(IN PEPROCESS Process)
{
/* Check if this process is the owner, and that the thread owns the WS */
+ if (PsGetCurrentThread()->OwnsProcessWorkingSetExclusive == 0)
+ {
+ DPRINT("Thread: %p is not an owner\n", PsGetCurrentThread());
+ }
+ if (KeGetCurrentThread()->ApcState.Process != &Process->Pcb)
+ {
+ DPRINT("Current thread %p is attached to another process %p\n", PsGetCurrentThread(), Process);
+ }
return ((KeGetCurrentThread()->ApcState.Process == &Process->Pcb) &&
((PsGetCurrentThread()->OwnsProcessWorkingSetExclusive) ||
(PsGetCurrentThread()->OwnsProcessWorkingSetShared)));
}
+//
+// New ARM3<->RosMM PAGE Architecture
+//
+FORCEINLINE
+BOOLEAN
+MiIsRosSectionObject(IN PVOID Section)
+{
+ PROS_SECTION_OBJECT RosSection = Section;
+ if ((RosSection->Type == 'SC') && (RosSection->Size == 'TN')) return TRUE;
+ return FALSE;
+}
+
+#define MI_IS_ROS_PFN(x) ((x)->u4.AweAllocation == TRUE)
+
+VOID
+NTAPI
+MiDecrementReferenceCount(
+ IN PMMPFN Pfn1,
+ IN PFN_NUMBER PageFrameIndex
+);
+
+FORCEINLINE
+BOOLEAN
+MI_IS_WS_UNSAFE(IN PEPROCESS Process)
+{
+ return (Process->Vm.Flags.AcquiredUnsafe == TRUE);
+}
+
//
// Locks the working set for the given process
//
KeEnterGuardedRegion();
ASSERT(!MM_ANY_WS_LOCK_HELD(Thread));
- /* FIXME: Actually lock it (we can't because Vm is used by MAREAs) */
+ /* Lock the working set */
+ ExAcquirePushLockExclusive(&Process->Vm.WorkingSetMutex);
+
+ /* Now claim that we own the lock */
+ ASSERT(!MI_IS_WS_UNSAFE(Process));
+ ASSERT(Thread->OwnsProcessWorkingSetExclusive == FALSE);
+ Thread->OwnsProcessWorkingSetExclusive = TRUE;
+}
+
+FORCEINLINE
+VOID
+MiLockProcessWorkingSetShared(IN PEPROCESS Process,
+ IN PETHREAD Thread)
+{
+ /* Shouldn't already be owning the process working set */
+ ASSERT(Thread->OwnsProcessWorkingSetShared == FALSE);
+ ASSERT(Thread->OwnsProcessWorkingSetExclusive == FALSE);
+
+ /* Block APCs, make sure that still nothing is already held */
+ KeEnterGuardedRegion();
+ ASSERT(!MM_ANY_WS_LOCK_HELD(Thread));
+
+ /* Lock the working set */
+ ExAcquirePushLockShared(&Process->Vm.WorkingSetMutex);
+
+ /* Now claim that we own the lock */
+ ASSERT(!MI_IS_WS_UNSAFE(Process));
+ ASSERT(Thread->OwnsProcessWorkingSetShared == FALSE);
+ ASSERT(Thread->OwnsProcessWorkingSetExclusive == FALSE);
+ Thread->OwnsProcessWorkingSetShared = TRUE;
+}
+
+FORCEINLINE
+VOID
+MiLockProcessWorkingSetUnsafe(IN PEPROCESS Process,
+ IN PETHREAD Thread)
+{
+ /* Shouldn't already be owning the process working set */
+ ASSERT(Thread->OwnsProcessWorkingSetExclusive == FALSE);
+
+ /* APCs must be blocked, make sure that still nothing is already held */
+ ASSERT(KeAreAllApcsDisabled() == TRUE);
+ ASSERT(!MM_ANY_WS_LOCK_HELD(Thread));
- /* FIXME: This also can't be checked because Vm is used by MAREAs) */
- //ASSERT(Process->Vm.Flags.AcquiredUnsafe == 0);
+ /* Lock the working set */
+ ExAcquirePushLockExclusive(&Process->Vm.WorkingSetMutex);
- /* Okay, now we can own it exclusively */
+ /* Now claim that we own the lock */
+ ASSERT(!MI_IS_WS_UNSAFE(Process));
+ Process->Vm.Flags.AcquiredUnsafe = 1;
ASSERT(Thread->OwnsProcessWorkingSetExclusive == FALSE);
Thread->OwnsProcessWorkingSetExclusive = TRUE;
}
MiUnlockProcessWorkingSet(IN PEPROCESS Process,
IN PETHREAD Thread)
{
- /* Make sure this process really is owner, and it was a safe acquisition */
+ /* Make sure we are the owner of a safe acquisition */
ASSERT(MI_WS_OWNER(Process));
- /* This can't be checked because Vm is used by MAREAs) */
- //ASSERT(Process->Vm.Flags.AcquiredUnsafe == 0);
+ ASSERT(!MI_IS_WS_UNSAFE(Process));
/* The thread doesn't own it anymore */
ASSERT(Thread->OwnsProcessWorkingSetExclusive == TRUE);
Thread->OwnsProcessWorkingSetExclusive = FALSE;
- /* FIXME: Actually release it (we can't because Vm is used by MAREAs) */
+ /* Release the lock and re-enable APCs */
+ ExReleasePushLockExclusive(&Process->Vm.WorkingSetMutex);
+ KeLeaveGuardedRegion();
+}
- /* Unblock APCs */
+//
+// Unlocks the working set for the given process
+//
+FORCEINLINE
+VOID
+MiUnlockProcessWorkingSetShared(IN PEPROCESS Process,
+ IN PETHREAD Thread)
+{
+ /* Make sure we are the owner of a safe acquisition (because shared) */
+ ASSERT(MI_WS_OWNER(Process));
+ ASSERT(!MI_IS_WS_UNSAFE(Process));
+
+ /* Ensure we are in a shared acquisition */
+ ASSERT(Thread->OwnsProcessWorkingSetShared == TRUE);
+ ASSERT(Thread->OwnsProcessWorkingSetExclusive == FALSE);
+
+ /* Don't claim the lock any longer */
+ Thread->OwnsProcessWorkingSetShared = FALSE;
+
+ /* Release the lock and re-enable APCs */
+ ExReleasePushLockShared(&Process->Vm.WorkingSetMutex);
KeLeaveGuardedRegion();
}
+//
+// Unlocks the working set for the given process
+//
+FORCEINLINE
+VOID
+MiUnlockProcessWorkingSetUnsafe(IN PEPROCESS Process,
+ IN PETHREAD Thread)
+{
+ /* Make sure we are the owner of an unsafe acquisition */
+ ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
+ ASSERT(KeAreAllApcsDisabled() == TRUE);
+ ASSERT(MI_WS_OWNER(Process));
+ ASSERT(MI_IS_WS_UNSAFE(Process));
+
+ /* No longer unsafe */
+ Process->Vm.Flags.AcquiredUnsafe = 0;
+
+ /* The thread doesn't own it anymore */
+ ASSERT(Thread->OwnsProcessWorkingSetExclusive == TRUE);
+ Thread->OwnsProcessWorkingSetExclusive = FALSE;
+
+ /* Release the lock but don't touch APC state */
+ ExReleasePushLockExclusive(&Process->Vm.WorkingSetMutex);
+ ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
+}
+
//
// Locks the working set
//
/* Thread shouldn't already be owning something */
ASSERT(!MM_ANY_WS_LOCK_HELD(Thread));
- /* FIXME: Actually lock it (we can't because Vm is used by MAREAs) */
+ /* Lock this working set */
+ ExAcquirePushLockExclusive(&WorkingSet->WorkingSetMutex);
/* Which working set is this? */
if (WorkingSet == &MmSystemCacheWs)
}
else if (WorkingSet->Flags.SessionSpace)
{
- /* We don't implement this yet */
- UNIMPLEMENTED;
- while (TRUE);
+ /* Own the session working set */
+ ASSERT((Thread->OwnsSessionWorkingSetExclusive == FALSE) &&
+ (Thread->OwnsSessionWorkingSetShared == FALSE));
+ Thread->OwnsSessionWorkingSetExclusive = TRUE;
}
else
{
}
else if (WorkingSet->Flags.SessionSpace)
{
- /* We don't implement this yet */
- UNIMPLEMENTED;
- while (TRUE);
+ /* Release the session working set */
+ ASSERT((Thread->OwnsSessionWorkingSetExclusive == TRUE) ||
+ (Thread->OwnsSessionWorkingSetShared == TRUE));
+ Thread->OwnsSessionWorkingSetExclusive = 0;
}
else
{
Thread->OwnsProcessWorkingSetExclusive = FALSE;
}
- /* FIXME: Actually release it (we can't because Vm is used by MAREAs) */
+ /* Release the working set lock */
+ ExReleasePushLockExclusive(&WorkingSet->WorkingSetMutex);
/* Unblock APCs */
KeLeaveGuardedRegion();
}
-//
-// Returns the ProtoPTE inside a VAD for the given VPN
-//
FORCEINLINE
-PMMPTE
-MI_GET_PROTOTYPE_PTE_FOR_VPN(IN PMMVAD Vad,
- IN ULONG_PTR Vpn)
+VOID
+MiUnlockProcessWorkingSetForFault(IN PEPROCESS Process,
+ IN PETHREAD Thread,
+ OUT PBOOLEAN Safe,
+ OUT PBOOLEAN Shared)
+{
+ ASSERT(MI_WS_OWNER(Process));
+
+ /* Check if the current owner is unsafe */
+ if (MI_IS_WS_UNSAFE(Process))
+ {
+ /* Release unsafely */
+ MiUnlockProcessWorkingSetUnsafe(Process, Thread);
+ *Safe = FALSE;
+ *Shared = FALSE;
+ }
+ else if (Thread->OwnsProcessWorkingSetExclusive == 1)
+ {
+ /* Owner is safe and exclusive, release normally */
+ MiUnlockProcessWorkingSet(Process, Thread);
+ *Safe = TRUE;
+ *Shared = FALSE;
+ }
+ else
+ {
+ /* Owner is shared (implies safe), release normally */
+ MiUnlockProcessWorkingSetShared(Process, Thread);
+ *Safe = TRUE;
+ *Shared = TRUE;
+ }
+}
+
+FORCEINLINE
+VOID
+MiLockProcessWorkingSetForFault(IN PEPROCESS Process,
+ IN PETHREAD Thread,
+ IN BOOLEAN Safe,
+ IN BOOLEAN Shared)
+{
+ /* Check if this was a safe lock or not */
+ if (Safe)
+ {
+ if (Shared)
+ {
+ /* Reacquire safely & shared */
+ MiLockProcessWorkingSetShared(Process, Thread);
+ }
+ else
+ {
+ /* Reacquire safely */
+ MiLockProcessWorkingSet(Process, Thread);
+ }
+ }
+ else
+ {
+ /* Unsafe lock cannot be shared */
+ ASSERT(Shared == FALSE);
+ /* Reacquire unsafely */
+ MiLockProcessWorkingSetUnsafe(Process, Thread);
+ }
+}
+
+FORCEINLINE
+KIRQL
+MiAcquireExpansionLock(VOID)
+{
+ KIRQL OldIrql;
+
+ ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
+ KeAcquireSpinLock(&MmExpansionLock, &OldIrql);
+ ASSERT(MiExpansionLockOwner == NULL);
+ MiExpansionLockOwner = PsGetCurrentThread();
+ return OldIrql;
+}
+
+FORCEINLINE
+VOID
+MiReleaseExpansionLock(KIRQL OldIrql)
+{
+ ASSERT(MiExpansionLockOwner == PsGetCurrentThread());
+ MiExpansionLockOwner = NULL;
+ KeReleaseSpinLock(&MmExpansionLock, OldIrql);
+ ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
+}
+
+//
+// Returns the ProtoPTE inside a VAD for the given VPN
+//
+FORCEINLINE
+PMMPTE
+MI_GET_PROTOTYPE_PTE_FOR_VPN(IN PMMVAD Vad,
+ IN ULONG_PTR Vpn)
{
PMMPTE ProtoPte;
return &MmPfnDatabase[Pfn];
};
+//
+// Drops a locked page without dereferencing it
+//
+FORCEINLINE
+VOID
+MiDropLockCount(IN PMMPFN Pfn1)
+{
+ /* This page shouldn't be locked, but it should be valid */
+ ASSERT(Pfn1->u3.e2.ReferenceCount != 0);
+ ASSERT(Pfn1->u2.ShareCount == 0);
+
+ /* Is this the last reference to the page */
+ if (Pfn1->u3.e2.ReferenceCount == 1)
+ {
+ /* It better not be valid */
+ ASSERT(Pfn1->u3.e1.PageLocation != ActiveAndValid);
+
+ /* Is it a prototype PTE? */
+ if ((Pfn1->u3.e1.PrototypePte == 1) &&
+ (Pfn1->OriginalPte.u.Soft.Prototype == 1))
+ {
+ /* FIXME: We should return commit */
+ DPRINT1("Not returning commit for prototype PTE\n");
+ }
+
+ /* Update the counter */
+ InterlockedDecrementSizeT(&MmSystemLockPagesCount);
+ }
+}
+
+//
+// Drops a locked page and dereferences it
+//
+FORCEINLINE
+VOID
+MiDereferencePfnAndDropLockCount(IN PMMPFN Pfn1)
+{
+ USHORT RefCount, OldRefCount;
+ PFN_NUMBER PageFrameIndex;
+
+ /* Loop while we decrement the page successfully */
+ do
+ {
+ /* There should be at least one reference */
+ OldRefCount = Pfn1->u3.e2.ReferenceCount;
+ ASSERT(OldRefCount != 0);
+
+ /* Are we the last one */
+ if (OldRefCount == 1)
+ {
+ /* The page shouldn't be shared nor active at this point */
+ ASSERT(Pfn1->u3.e2.ReferenceCount == 1);
+ ASSERT(Pfn1->u3.e1.PageLocation != ActiveAndValid);
+ ASSERT(Pfn1->u2.ShareCount == 0);
+
+ /* Is it a prototype PTE? */
+ if ((Pfn1->u3.e1.PrototypePte == 1) &&
+ (Pfn1->OriginalPte.u.Soft.Prototype == 1))
+ {
+ /* FIXME: We should return commit */
+ DPRINT1("Not returning commit for prototype PTE\n");
+ }
+
+ /* Update the counter, and drop a reference the long way */
+ InterlockedDecrementSizeT(&MmSystemLockPagesCount);
+ PageFrameIndex = MiGetPfnEntryIndex(Pfn1);
+ MiDecrementReferenceCount(Pfn1, PageFrameIndex);
+ return;
+ }
+
+ /* Drop a reference the short way, and that's it */
+ RefCount = InterlockedCompareExchange16((PSHORT)&Pfn1->u3.e2.ReferenceCount,
+ OldRefCount - 1,
+ OldRefCount);
+ ASSERT(RefCount != 0);
+ } while (OldRefCount != RefCount);
+
+ /* If we got here, there should be more than one reference */
+ ASSERT(RefCount > 1);
+ if (RefCount == 2)
+ {
+ /* Is it still being shared? */
+ if (Pfn1->u2.ShareCount >= 1)
+ {
+ /* Then it should be valid */
+ ASSERT(Pfn1->u3.e1.PageLocation == ActiveAndValid);
+
+ /* Is it a prototype PTE? */
+ if ((Pfn1->u3.e1.PrototypePte == 1) &&
+ (Pfn1->OriginalPte.u.Soft.Prototype == 1))
+ {
+ /* We don't handle this */
+ ASSERT(FALSE);
+ }
+
+ /* Update the counter */
+ InterlockedDecrementSizeT(&MmSystemLockPagesCount);
+ }
+ }
+}
+
+//
+// References a locked page and updates the counter
+// Used in MmProbeAndLockPages to handle different edge cases
+//
+FORCEINLINE
+VOID
+MiReferenceProbedPageAndBumpLockCount(IN PMMPFN Pfn1)
+{
+ USHORT RefCount, OldRefCount;
+
+ /* Sanity check */
+ ASSERT(Pfn1->u3.e2.ReferenceCount != 0);
+
+ /* Does ARM3 own the page? */
+ if (MI_IS_ROS_PFN(Pfn1))
+ {
+ /* ReactOS Mm doesn't track share count */
+ ASSERT(Pfn1->u3.e1.PageLocation == ActiveAndValid);
+ }
+ else
+ {
+ /* On ARM3 pages, we should see a valid share count */
+ ASSERT((Pfn1->u2.ShareCount != 0) && (Pfn1->u3.e1.PageLocation == ActiveAndValid));
+
+ /* Is it a prototype PTE? */
+ if ((Pfn1->u3.e1.PrototypePte == 1) &&
+ (Pfn1->OriginalPte.u.Soft.Prototype == 1))
+ {
+ /* FIXME: We should charge commit */
+ DPRINT1("Not charging commit for prototype PTE\n");
+ }
+ }
+
+ /* More locked pages! */
+ InterlockedIncrementSizeT(&MmSystemLockPagesCount);
+
+ /* Loop trying to update the reference count */
+ do
+ {
+ /* Get the current reference count, make sure it's valid */
+ OldRefCount = Pfn1->u3.e2.ReferenceCount;
+ ASSERT(OldRefCount != 0);
+ ASSERT(OldRefCount < 2500);
+
+ /* Bump it up by one */
+ RefCount = InterlockedCompareExchange16((PSHORT)&Pfn1->u3.e2.ReferenceCount,
+ OldRefCount + 1,
+ OldRefCount);
+ ASSERT(RefCount != 0);
+ } while (OldRefCount != RefCount);
+
+ /* Was this the first lock attempt? If not, undo our bump */
+ if (OldRefCount != 1) InterlockedDecrementSizeT(&MmSystemLockPagesCount);
+}
+
+//
+// References a locked page and updates the counter
+// Used in all other cases except MmProbeAndLockPages
+//
+FORCEINLINE
+VOID
+MiReferenceUsedPageAndBumpLockCount(IN PMMPFN Pfn1)
+{
+ USHORT NewRefCount;
+
+ /* Is it a prototype PTE? */
+ if ((Pfn1->u3.e1.PrototypePte == 1) &&
+ (Pfn1->OriginalPte.u.Soft.Prototype == 1))
+ {
+ /* FIXME: We should charge commit */
+ DPRINT1("Not charging commit for prototype PTE\n");
+ }
+
+ /* More locked pages! */
+ InterlockedIncrementSizeT(&MmSystemLockPagesCount);
+
+ /* Update the reference count */
+ NewRefCount = InterlockedIncrement16((PSHORT)&Pfn1->u3.e2.ReferenceCount);
+ if (NewRefCount == 2)
+ {
+ /* Is it locked or shared? */
+ if (Pfn1->u2.ShareCount)
+ {
+ /* It's shared, so make sure it's active */
+ ASSERT(Pfn1->u3.e1.PageLocation == ActiveAndValid);
+ }
+ else
+ {
+ /* It's locked, so we shouldn't lock again */
+ InterlockedDecrementSizeT(&MmSystemLockPagesCount);
+ }
+ }
+ else
+ {
+ /* Someone had already locked the page, so undo our bump */
+ ASSERT(NewRefCount < 2500);
+ InterlockedDecrementSizeT(&MmSystemLockPagesCount);
+ }
+}
+
+//
+// References a page that is not in use yet (no share count, not active)
+// and updates the system locked-pages counter
+//
+FORCEINLINE
+VOID
+MiReferenceUnusedPageAndBumpLockCount(IN PMMPFN Pfn1)
+{
+ USHORT NewRefCount;
+
+ /* Make sure the page isn't used yet */
+ ASSERT(Pfn1->u2.ShareCount == 0);
+ ASSERT(Pfn1->u3.e1.PageLocation != ActiveAndValid);
+
+ /* Is it a prototype PTE? */
+ if ((Pfn1->u3.e1.PrototypePte == 1) &&
+ (Pfn1->OriginalPte.u.Soft.Prototype == 1))
+ {
+ /* FIXME: We should charge commit */
+ DPRINT1("Not charging commit for prototype PTE\n");
+ }
+
+ /* More locked pages! */
+ InterlockedIncrementSizeT(&MmSystemLockPagesCount);
+
+ /* Update the reference count */
+ NewRefCount = InterlockedIncrement16((PSHORT)&Pfn1->u3.e2.ReferenceCount);
+ if (NewRefCount != 1) // someone else referenced the page before we did
+ {
+ /* Someone had already locked the page, so undo our bump */
+ ASSERT(NewRefCount < 2500); // sanity cap on the reference count
+ InterlockedDecrementSizeT(&MmSystemLockPagesCount);
+ }
+}
+
+FORCEINLINE
+VOID
+MiIncrementPageTableReferences(IN PVOID Address) // bumps the used-PTE count of the page table mapping Address
+{
+ PUSHORT RefCount;
+
+ RefCount = &MmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Address)]; // per-PDE counter in the working set list
+
+ *RefCount += 1; // NOTE(review): non-atomic -- presumably serialized by the working set lock, confirm
+ ASSERT(*RefCount <= PTE_PER_PAGE); // a page table holds at most PTE_PER_PAGE entries
+}
+
+FORCEINLINE
+VOID
+MiDecrementPageTableReferences(IN PVOID Address) // drops the used-PTE count of the page table mapping Address
+{
+ PUSHORT RefCount;
+
+ RefCount = &MmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Address)]; // per-PDE counter in the working set list
+
+ *RefCount -= 1; // NOTE(review): non-atomic -- presumably serialized by the working set lock, confirm
+ ASSERT(*RefCount < PTE_PER_PAGE); // also catches USHORT underflow (0 - 1 wraps past PTE_PER_PAGE)
+}
+
+FORCEINLINE
+USHORT
+MiQueryPageTableReferences(IN PVOID Address) // returns the used-PTE count of the page table mapping Address
+{
+ PUSHORT RefCount;
+
+ RefCount = &MmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Address)]; // per-PDE counter in the working set list
+
+ return *RefCount;
+}
+
BOOLEAN
NTAPI
MmArmInitSystem(
VOID
NTAPI
-MiInitializeSessionSpaceLayout();
+MiInitializeSessionSpaceLayout(VOID);
NTSTATUS
NTAPI
IN PLOADER_PARAMETER_BLOCK LoaderBlock
);
+VOID
+NTAPI
+MiInitializeSessionWsSupport(
+ VOID
+);
+
+VOID
+NTAPI
+MiInitializeSessionIds(
+ VOID
+);
+
BOOLEAN
NTAPI
MiInitializeMemoryEvents(
IN PVOID AddressEnd
);
+NTSTATUS
+NTAPI
+MiRosProtectVirtualMemory(
+ IN PEPROCESS Process,
+ IN OUT PVOID *BaseAddress,
+ IN OUT PSIZE_T NumberOfBytesToProtect,
+ IN ULONG NewAccessProtection,
+ OUT PULONG OldAccessProtection OPTIONAL
+);
+
NTSTATUS
NTAPI
MmArmAccessFault(
IN ULONG Threshold //
); //
+// FIXFIX: THIS ONE TOO
+VOID
+NTAPI
+INIT_FUNCTION
+ExInitializePoolDescriptor(
+ IN PPOOL_DESCRIPTOR PoolDescriptor,
+ IN POOL_TYPE PoolType,
+ IN ULONG PoolIndex,
+ IN ULONG Threshold,
+ IN PVOID PoolLock
+);
+
+NTSTATUS
+NTAPI
+MiInitializeSessionPool(
+ VOID
+);
+
VOID
NTAPI
MiInitializeSystemPtes(
IN ULONG Flags
);
-PVOID
-NTAPI
-MiMapLockedPagesInUserSpace(
- IN PMDL Mdl,
- IN PVOID BaseVa,
- IN MEMORY_CACHING_TYPE CacheType,
- IN PVOID BaseAddress
-);
-
-VOID
-NTAPI
-MiUnmapLockedPagesInUserSpace(
- IN PVOID BaseAddress,
- IN PMDL Mdl
-);
-
VOID
NTAPI
MiInsertPageInList(
IN PMMPFN Entry
);
-PFN_NUMBER
+VOID
NTAPI
-MiAllocatePfn(
- IN PMMPTE PointerPte,
- IN ULONG Protection
+MiUnlinkPageFromList(
+ IN PMMPFN Pfn
);
VOID
IN BOOLEAN Modified
);
+NTSTATUS
+NTAPI
+MiInitializeAndChargePfn(
+ OUT PPFN_NUMBER PageFrameIndex,
+ IN PMMPDE PointerPde,
+ IN PFN_NUMBER ContainingPageFrame,
+ IN BOOLEAN SessionAllocation
+);
+
VOID
NTAPI
MiInitializePfnAndMakePteValid(
NTAPI
MiInitializePfnForOtherProcess(
IN PFN_NUMBER PageFrameIndex,
- IN PMMPTE PointerPte,
+ IN PVOID PteAddress,
IN PFN_NUMBER PteFrame
);
IN PFN_NUMBER PageFrameIndex
);
-VOID
-NTAPI
-MiDecrementReferenceCount(
- IN PMMPFN Pfn1,
- IN PFN_NUMBER PageFrameIndex
-);
-
PFN_NUMBER
NTAPI
MiRemoveAnyPage(
OUT PPFN_NUMBER ValidPages
);
+ULONG
+NTAPI
+MiGetPageProtection(
+ IN PMMPTE PointerPte
+);
+
PLDR_DATA_TABLE_ENTRY
NTAPI
MiLookupDataTableEntry(
IN PVOID VirtualAddress
);
-PMMADDRESS_NODE
+TABLE_SEARCH_RESULT
NTAPI
MiCheckForConflictingNode(
IN ULONG_PTR StartVpn,
IN ULONG_PTR EndVpn,
- IN PMM_AVL_TABLE Table
+ IN PMM_AVL_TABLE Table,
+ OUT PMMADDRESS_NODE *NodeOrParent
);
TABLE_SEARCH_RESULT
NTSTATUS
NTAPI
+MiFindEmptyAddressRangeDownBasedTree(
+ IN SIZE_T Length,
+ IN ULONG_PTR BoundaryAddress,
+ IN ULONG_PTR Alignment,
+ IN PMM_AVL_TABLE Table,
+ OUT PULONG_PTR Base
+);
+
+TABLE_SEARCH_RESULT
+NTAPI
MiFindEmptyAddressRangeInTree(
IN SIZE_T Length,
IN ULONG_PTR Alignment,
OUT PULONG_PTR Base
);
+NTSTATUS
+NTAPI
+MiCheckSecuredVad(
+ IN PMMVAD Vad,
+ IN PVOID Base,
+ IN SIZE_T Size,
+ IN ULONG ProtectionMask
+);
+
VOID
NTAPI
MiInsertVad(
- IN PMMVAD Vad,
- IN PEPROCESS Process
+ _Inout_ PMMVAD Vad,
+ _Inout_ PMM_AVL_TABLE VadRoot);
+
+NTSTATUS
+NTAPI
+MiInsertVadEx(
+ _Inout_ PMMVAD Vad,
+ _In_ ULONG_PTR *BaseAddress,
+ _In_ SIZE_T ViewSize,
+ _In_ ULONG_PTR HighestAddress,
+ _In_ ULONG_PTR Alignment,
+ _In_ ULONG AllocationType);
+
+VOID
+NTAPI
+MiInsertBasedSection(
+ IN PSECTION Section
+);
+
+NTSTATUS
+NTAPI
+MiUnmapViewOfSection(
+ IN PEPROCESS Process,
+ IN PVOID BaseAddress,
+ IN ULONG Flags
+);
+
+NTSTATUS
+NTAPI
+MiRosUnmapViewOfSection(
+ IN PEPROCESS Process,
+ IN PVOID BaseAddress,
+ IN BOOLEAN SkipDebuggerNotify
);
VOID
BOOLEAN
NTAPI
MiInitializeSystemSpaceMap(
- IN PVOID InputSession OPTIONAL
+ IN PMMSESSION InputSession OPTIONAL
+);
+
+VOID
+NTAPI
+MiSessionRemoveProcess(
+ VOID
+);
+
+VOID
+NTAPI
+MiReleaseProcessReferenceToSessionDataPage(
+ IN PMM_SESSION_SPACE SessionGlobal
+);
+
+VOID
+NTAPI
+MiSessionAddProcess(
+ IN PEPROCESS NewProcess
+);
+
+NTSTATUS
+NTAPI
+MiSessionCommitPageTables(
+ IN PVOID StartVa,
+ IN PVOID EndVa
);
ULONG
IN PMMVAD Vad
);
+VOID
+NTAPI
+MiDeletePte(
+ IN PMMPTE PointerPte,
+ IN PVOID VirtualAddress,
+ IN PEPROCESS CurrentProcess,
+ IN PMMPTE PrototypePte
+);
+
ULONG
NTAPI
MiMakeSystemAddressValid(
IN ULONG_PTR Vpn
);
+VOID
+NTAPI
+MiDeleteARM3Section(
+ PVOID ObjectBody
+);
+
+NTSTATUS
+NTAPI
+MiQueryMemorySectionName(
+ IN HANDLE ProcessHandle,
+ IN PVOID BaseAddress,
+ OUT PVOID MemoryInformation,
+ IN SIZE_T MemoryInformationLength,
+ OUT PSIZE_T ReturnLength
+);
+
+NTSTATUS
+NTAPI
+MiRosUnmapViewInSystemSpace(
+ IN PVOID MappedBase
+);
+
+POOL_TYPE
+NTAPI
+MmDeterminePoolType(
+ IN PVOID PoolAddress
+);
+
+VOID
+NTAPI
+MiMakePdeExistAndMakeValid(
+ IN PMMPDE PointerPde,
+ IN PEPROCESS TargetProcess,
+ IN KIRQL OldIrql
+);
+
//
// MiRemoveZeroPage will use inline code to zero out the page manually if only
// free pages are available. In some scenarios, we don't/can't run that piece of
// then we'd like to have our own code to grab a free page and zero it out, by
// using MiRemoveAnyPage. This macro implements this.
//
-PFN_NUMBER
FORCEINLINE
+PFN_NUMBER
MiRemoveZeroPageSafe(IN ULONG Color)
{
if (MmFreePagesByColor[ZeroedPageList][Color].Flink != LIST_HEAD) return MiRemoveZeroPage(Color);
return 0;
}
-//
-// New ARM3<->RosMM PAGE Architecture
-//
-#define MI_GET_ROS_DATA(x) ((PMMROSPFN)(x->RosMmData))
-#define MI_IS_ROS_PFN(x) (((x)->u4.AweAllocation == TRUE) && (MI_GET_ROS_DATA(x) != NULL))
-#define ASSERT_IS_ROS_PFN(x) ASSERT(MI_IS_ROS_PFN(x) == TRUE);
-typedef struct _MMROSPFN
+#if (_MI_PAGING_LEVELS == 2)
+FORCEINLINE
+BOOLEAN
+MiSynchronizeSystemPde(PMMPDE PointerPde) // copies the system page directory's entry into this page directory slot
{
- PMM_RMAP_ENTRY RmapListHead;
- SWAPENTRY SwapEntry;
-} MMROSPFN, *PMMROSPFN;
+ MMPDE SystemPde;
+ ULONG Index;
+
+ /* Get the Index from the PDE */
+ Index = ((ULONG_PTR)PointerPde & (SYSTEM_PD_SIZE - 1)) / sizeof(MMPTE); // byte offset within the page directory, in PDE units
-#define RosMmData AweReferenceCount
+ /* Copy the PDE from the double-mapped system page directory */
+ SystemPde = MmSystemPagePtes[Index];
+ *PointerPde = SystemPde;
+
+ /* Make sure we re-read the PDE and PTE */
+ KeMemoryBarrierWithoutFence();
+
+ /* Return, if we had success */
+ return SystemPde.u.Hard.Valid != 0; // TRUE if the copied PDE was valid
+}
+#endif
/* EOF */