ULONG MmSizeOfNonPagedPoolInBytes;
ULONG MmMaximumNonPagedPoolInBytes;
+/* Some of the same values, in pages */
+PFN_NUMBER MmMaximumNonPagedPoolInPages;
+
//
// These numbers describe the discrete equation components of the nonpaged
// pool sizing algorithm.
ULONG MmSessionPoolSize;
ULONG MmSessionImageSize;
+/*
+ * These are the PTE addresses of the boundaries carved out above
+ */
+PMMPTE MiSessionImagePteStart;
+PMMPTE MiSessionImagePteEnd;
+PMMPTE MiSessionBasePte;
+PMMPTE MiSessionLastPte;
+
//
// The system view space, on the other hand, is where sections that are memory
// mapped into "system space" end up.
// map paged pool PDEs into external processes when they fault on a paged pool
// address.
//
-PFN_NUMBER MmSystemPageDirectory;
+PFN_NUMBER MmSystemPageDirectory[PD_COUNT];
+/* NOTE(review): the code below asserts PD_COUNT == 1, so only index 0 is populated for now */
PMMPTE MmSystemPagePtes;
//
PVOID MmHighestUserAddress;
PVOID MmSystemRangeStart;
+/* And these store the respective highest PTE/PDE address */
+PMMPTE MiHighestUserPte;
+PMMPDE MiHighestUserPde;
+
+/* These variables define the system cache address space */
PVOID MmSystemCacheStart;
PVOID MmSystemCacheEnd;
MMSUPPORT MmSystemCacheWs;
C_ASSERT(FreePageList == 1);
PMMCOLOR_TABLES MmFreePagesByColor[FreePageList + 1];
+/* An event used in Phase 0 before the rest of the system is ready to go */
+KEVENT MiTempEvent;
+
+/* All the events used for memory threshold notifications */
+PKEVENT MiLowMemoryEvent;
+PKEVENT MiHighMemoryEvent;
+PKEVENT MiLowPagedPoolEvent;
+PKEVENT MiHighPagedPoolEvent;
+PKEVENT MiLowNonPagedPoolEvent;
+PKEVENT MiHighNonPagedPoolEvent;
+
+/* The actual thresholds themselves, in page numbers */
+PFN_NUMBER MmLowMemoryThreshold;
+PFN_NUMBER MmHighMemoryThreshold;
+PFN_NUMBER MiLowPagedPoolThreshold;
+PFN_NUMBER MiHighPagedPoolThreshold;
+PFN_NUMBER MiLowNonPagedPoolThreshold;
+PFN_NUMBER MiHighNonPagedPoolThreshold;
+
+/*
+ * This number determines how many free pages must exist, at minimum, until we
+ * start trimming working sets and flushing modified pages to obtain more free
+ * pages.
+ *
+ * This number changes if the system detects that this is a server product
+ */
+PFN_NUMBER MmMinimumFreePages = 26;
+
+/*
+ * This number indicates how many pages we consider to be a low limit of having
+ * "plenty" of free memory.
+ *
+ * It is doubled on systems that have more than 63MB of memory
+ */
+PFN_NUMBER MmPlentyFreePages = 400;
+
+/* These values store the type of system this is (small, med, large) and if server */
+ULONG MmProductType;
+MM_SYSTEMSIZE MmSystemSize;
+
+/*
+ * These values store the cache working set minimums and maximums, in pages
+ *
+ * The minimum value is boosted on systems with more than 24MB of RAM, and cut
+ * down to only 32 pages on embedded (<24MB RAM) systems.
+ *
+ * An extra boost of 2MB is given on systems with more than 33MB of RAM.
+ */
+PFN_NUMBER MmSystemCacheWsMinimum = 288;
+PFN_NUMBER MmSystemCacheWsMaximum = 350;
+
+/* FIXME: Move to cache/working set code later */
+BOOLEAN MmLargeSystemCache;
+
/* PRIVATE FUNCTIONS **********************************************************/
//
MMPTE TempPte = ValidKernelPte;
/* The color table starts after the ARM3 PFN database */
- MmFreePagesByColor[0] = (PMMCOLOR_TABLES)&MmPfnDatabase[1][MmHighestPhysicalPage + 1];
+ MmFreePagesByColor[0] = (PMMCOLOR_TABLES)&MmPfnDatabase[MmHighestPhysicalPage + 1];
/* Loop the PTEs. We have two color tables for each secondary color */
PointerPte = MiAddressToPte(&MmFreePagesByColor[0][0]);
}
/* Get the PTEs for this range */
- PointerPte = MiAddressToPte(&MmPfnDatabase[0][BasePage]);
- LastPte = MiAddressToPte(((ULONG_PTR)&MmPfnDatabase[0][BasePage + PageCount]) - 1);
- DPRINT("MD Type: %lx Base: %lx Count: %lx\n", MdBlock->MemoryType, BasePage, PageCount);
-
- /* Loop them */
- while (PointerPte <= LastPte)
- {
- /* We'll only touch PTEs that aren't already valid */
- if (PointerPte->u.Hard.Valid == 0)
- {
- /* Use the next free page */
- TempPte.u.Hard.PageFrameNumber = FreePage;
- ASSERT(FreePageCount != 0);
-
- /* Consume free pages */
- FreePage++;
- FreePageCount--;
- if (!FreePageCount)
- {
- /* Out of memory */
- KeBugCheckEx(INSTALL_MORE_MEMORY,
- MmNumberOfPhysicalPages,
- FreePageCount,
- MxOldFreeDescriptor.PageCount,
- 1);
- }
-
- /* Write out this PTE */
- PagesLeft++;
- ASSERT(PointerPte->u.Hard.Valid == 0);
- ASSERT(TempPte.u.Hard.Valid == 1);
- *PointerPte = TempPte;
-
- /* Zero this page */
- RtlZeroMemory(MiPteToAddress(PointerPte), PAGE_SIZE);
- }
-
- /* Next! */
- PointerPte++;
- }
-
- /* Get the PTEs for this range */
- PointerPte = MiAddressToPte(&MmPfnDatabase[1][BasePage]);
- LastPte = MiAddressToPte(((ULONG_PTR)&MmPfnDatabase[1][BasePage + PageCount]) - 1);
+ PointerPte = MiAddressToPte(&MmPfnDatabase[BasePage]);
+ LastPte = MiAddressToPte(((ULONG_PTR)&MmPfnDatabase[BasePage + PageCount]) - 1);
DPRINT("MD Type: %lx Base: %lx Count: %lx\n", MdBlock->MemoryType, BasePage, PageCount);
/* Loop them */
/* Next! */
PointerPte++;
}
-
+
/* Do the next address range */
NextEntry = MdBlock->ListEntry.Flink;
}
if (MiIsRegularMemory(LoaderBlock, PageFrameIndex))
{
/* Yes we do, set it up */
- Pfn1 = MI_PFN_TO_PFNENTRY(PageFrameIndex);
+ Pfn1 = MiGetPfnEntry(PageFrameIndex);
Pfn1->u4.PteFrame = StartupPdIndex;
Pfn1->PteAddress = PointerPde;
Pfn1->u2.ShareCount++;
MmSizeOfNonPagedPoolInBytes)))
{
/* Get the PFN entry and make sure it too is valid */
- Pfn2 = MI_PFN_TO_PFNENTRY(PtePageIndex);
+ Pfn2 = MiGetPfnEntry(PtePageIndex);
if ((MmIsAddressValid(Pfn2)) &&
(MmIsAddressValid(Pfn2 + 1)))
{
else
{
/* Next PDE mapped address */
- BaseAddress += PTE_COUNT * PAGE_SIZE;
+ BaseAddress += PDE_MAPPED_VA;
}
/* Next PTE */
PMMPDE PointerPde;
/* Grab the lowest page and check if it has no real references */
- Pfn1 = MI_PFN_TO_PFNENTRY(MmLowestPhysicalPage);
+ Pfn1 = MiGetPfnEntry(MmLowestPhysicalPage);
if (!(MmLowestPhysicalPage) && !(Pfn1->u3.e2.ReferenceCount))
{
/* Make it a bogus page to catch errors */
PMMPFN Pfn1;
PMMPTE PointerPte;
PMMPDE PointerPde;
+ KIRQL OldIrql;
/* Now loop through the descriptors */
NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
/* Get the last page of this descriptor. Note we loop backwards */
PageFrameIndex += PageCount - 1;
- Pfn1 = MI_PFN_TO_PFNENTRY(PageFrameIndex);
+ Pfn1 = MiGetPfnEntry(PageFrameIndex);
+
+ /* Lock the PFN Database */
+ OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
while (PageCount--)
{
/* If the page really has no references, mark it as free */
if (!Pfn1->u3.e2.ReferenceCount)
{
+ /* Add it to the free list */
Pfn1->u3.e1.CacheAttribute = MiNonCached;
- //MiInsertPageInFreeList(PageFrameIndex);
+ MiInsertPageInFreeList(PageFrameIndex);
}
/* Go to the next page */
PageFrameIndex--;
}
+ /* Release PFN database */
+ KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
+
/* Done with this block */
break;
/* Map these pages with the KSEG0 mapping that adds 0x80000000 */
PointerPte = MiAddressToPte(KSEG0_BASE + (PageFrameIndex << PAGE_SHIFT));
- Pfn1 = MI_PFN_TO_PFNENTRY(PageFrameIndex);
+ Pfn1 = MiGetPfnEntry(PageFrameIndex);
while (PageCount--)
{
/* Check if the page is really unused */
PMMPFN Pfn1;
/* Loop the PFN database page */
- PointerPte = MiAddressToPte(MI_PFN_TO_PFNENTRY(MmLowestPhysicalPage));
- LastPte = MiAddressToPte(MI_PFN_TO_PFNENTRY(MmHighestPhysicalPage));
+ PointerPte = MiAddressToPte(MiGetPfnEntry(MmLowestPhysicalPage));
+ LastPte = MiAddressToPte(MiGetPfnEntry(MmHighestPhysicalPage));
while (PointerPte <= LastPte)
{
/* Make sure the page is valid */
if (PointerPte->u.Hard.Valid == 1)
{
/* Get the PFN entry and just mark it referenced */
- Pfn1 = MI_PFN_TO_PFNENTRY(PointerPte->u.Hard.PageFrameNumber);
+ Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
Pfn1->u2.ShareCount = 1;
Pfn1->u3.e2.ReferenceCount = 1;
}
MiBuildPfnDatabaseSelf();
}
+/*
+ * MiAdjustWorkingSetManagerParameters
+ *
+ * Tunes memory-manager thresholds based on the size of the system.
+ *
+ * Client: TRUE when this is a client (workstation) product.
+ *         NOTE(review): currently unused -- only MmPlentyFreePages is tuned
+ *         here, regardless of product type.
+ */
+VOID
+NTAPI
+MiAdjustWorkingSetManagerParameters(IN BOOLEAN Client)
+{
+    /* This function needs to do more work, for now, we tune page minimums */
+
+    /* Check for a system with around 64MB RAM or more */
+    if (MmNumberOfPhysicalPages >= (63 * _1MB) / PAGE_SIZE)
+    {
+        /* Double the minimum amount of pages we consider for a "plenty free" scenario */
+        MmPlentyFreePages *= 2;
+    }
+}
+
+/*
+ * MiNotifyMemoryEvents
+ *
+ * Re-synchronizes the low/high memory notification events with the current
+ * number of available pages:
+ *   - below MmLowMemoryThreshold:     low event set, high event clear
+ *   - between the two thresholds:     both events clear
+ *   - at/above MmHighMemoryThreshold: high event set, low event clear
+ *
+ * Each event's state is read first so it is only signaled/cleared when it
+ * actually needs to change.
+ */
+VOID
+NTAPI
+MiNotifyMemoryEvents(VOID)
+{
+    /* Are we in a low-memory situation? */
+    if (MmAvailablePages < MmLowMemoryThreshold)
+    {
+        /* Clear high, set low */
+        if (KeReadStateEvent(MiHighMemoryEvent)) KeClearEvent(MiHighMemoryEvent);
+        if (!KeReadStateEvent(MiLowMemoryEvent)) KeSetEvent(MiLowMemoryEvent, 0, FALSE);
+    }
+    else if (MmAvailablePages < MmHighMemoryThreshold)
+    {
+        /* We are in between, clear both */
+        if (KeReadStateEvent(MiHighMemoryEvent)) KeClearEvent(MiHighMemoryEvent);
+        if (KeReadStateEvent(MiLowMemoryEvent)) KeClearEvent(MiLowMemoryEvent);
+    }
+    else
+    {
+        /* Clear low, set high */
+        if (KeReadStateEvent(MiLowMemoryEvent)) KeClearEvent(MiLowMemoryEvent);
+        if (!KeReadStateEvent(MiHighMemoryEvent)) KeSetEvent(MiHighMemoryEvent, 0, FALSE);
+    }
+}
+
+/*
+ * MiCreateMemoryEvent
+ *
+ * Creates a named, permanent notification event used for memory threshold
+ * notifications, protected by a DACL that grants:
+ *   - Everyone:                        SYNCHRONIZE, EVENT_QUERY_STATE, READ_CONTROL
+ *   - Administrators and Local System: EVENT_ALL_ACCESS
+ *
+ * Name:  fully qualified object-manager path for the event.
+ * Event: on success, receives a referenced pointer to the event object;
+ *        the temporary handle used to create it is closed here.
+ *
+ * Returns an NTSTATUS code. The DACL allocation is freed on every path,
+ * success and failure alike (the success path falls through to CleanUp).
+ */
+NTSTATUS
+NTAPI
+MiCreateMemoryEvent(IN PUNICODE_STRING Name,
+                    OUT PKEVENT *Event)
+{
+    PACL Dacl;
+    HANDLE EventHandle;
+    ULONG DaclLength;
+    NTSTATUS Status;
+    OBJECT_ATTRIBUTES ObjectAttributes;
+    SECURITY_DESCRIPTOR SecurityDescriptor;
+
+    /* Create the SD */
+    Status = RtlCreateSecurityDescriptor(&SecurityDescriptor,
+                                         SECURITY_DESCRIPTOR_REVISION);
+    if (!NT_SUCCESS(Status)) return Status;
+
+    /* One ACL with 3 ACEs, containing each one SID */
+    DaclLength = sizeof(ACL) +
+                 3 * sizeof(ACCESS_ALLOWED_ACE) +
+                 RtlLengthSid(SeLocalSystemSid) +
+                 RtlLengthSid(SeAliasAdminsSid) +
+                 RtlLengthSid(SeWorldSid);
+
+    /* Allocate space for the DACL */
+    Dacl = ExAllocatePoolWithTag(PagedPool, DaclLength, 'lcaD');
+    if (!Dacl) return STATUS_INSUFFICIENT_RESOURCES;
+
+    /* Setup the ACL inside it */
+    Status = RtlCreateAcl(Dacl, DaclLength, ACL_REVISION);
+    if (!NT_SUCCESS(Status)) goto CleanUp;
+
+    /* Add query rights for everyone */
+    Status = RtlAddAccessAllowedAce(Dacl,
+                                    ACL_REVISION,
+                                    SYNCHRONIZE | EVENT_QUERY_STATE | READ_CONTROL,
+                                    SeWorldSid);
+    if (!NT_SUCCESS(Status)) goto CleanUp;
+
+    /* Full rights for the admin */
+    Status = RtlAddAccessAllowedAce(Dacl,
+                                    ACL_REVISION,
+                                    EVENT_ALL_ACCESS,
+                                    SeAliasAdminsSid);
+    if (!NT_SUCCESS(Status)) goto CleanUp;
+
+    /* As well as full rights for the system */
+    Status = RtlAddAccessAllowedAce(Dacl,
+                                    ACL_REVISION,
+                                    EVENT_ALL_ACCESS,
+                                    SeLocalSystemSid);
+    if (!NT_SUCCESS(Status)) goto CleanUp;
+
+    /* Set this DACL inside the SD */
+    Status = RtlSetDaclSecurityDescriptor(&SecurityDescriptor,
+                                          TRUE,
+                                          Dacl,
+                                          FALSE);
+    if (!NT_SUCCESS(Status)) goto CleanUp;
+
+    /* Setup the event attributes, making sure it's a permanent one */
+    InitializeObjectAttributes(&ObjectAttributes,
+                               Name,
+                               OBJ_KERNEL_HANDLE | OBJ_PERMANENT,
+                               NULL,
+                               &SecurityDescriptor);
+
+    /* Create the event */
+    Status = ZwCreateEvent(&EventHandle,
+                           EVENT_ALL_ACCESS,
+                           &ObjectAttributes,
+                           NotificationEvent,
+                           FALSE);
+CleanUp:
+    /* Free the DACL */
+    ExFreePool(Dacl);
+
+    /* Check if this is the success path */
+    if (NT_SUCCESS(Status))
+    {
+        /* Add a reference to the object, then close the handle we had */
+        Status = ObReferenceObjectByHandle(EventHandle,
+                                           EVENT_MODIFY_STATE,
+                                           ExEventObjectType,
+                                           KernelMode,
+                                           (PVOID*)Event,
+                                           NULL);
+        /* NOTE(review): if the reference fails, the permanent named event
+           object created above is left alive -- confirm this is intended */
+        ZwClose (EventHandle);
+    }
+
+    /* Return status */
+    return Status;
+}
+
+/*
+ * MiInitializeMemoryEvents
+ *
+ * Computes the low/high available-memory thresholds (stored in pages),
+ * honoring registry overrides (which arrive in MB and are converted to
+ * pages here), then creates the six \KernelObjects memory notification
+ * events, initializes the pool threshold events, and publishes the
+ * initial event states.
+ *
+ * Returns TRUE on success, FALSE if any event could not be created.
+ */
+BOOLEAN
+NTAPI
+MiInitializeMemoryEvents(VOID)
+{
+    UNICODE_STRING LowString = RTL_CONSTANT_STRING(L"\\KernelObjects\\LowMemoryCondition");
+    UNICODE_STRING HighString = RTL_CONSTANT_STRING(L"\\KernelObjects\\HighMemoryCondition");
+    UNICODE_STRING LowPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\LowPagedPoolCondition");
+    UNICODE_STRING HighPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\HighPagedPoolCondition");
+    UNICODE_STRING LowNonPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\LowNonPagedPoolCondition");
+    UNICODE_STRING HighNonPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\HighNonPagedPoolCondition");
+    NTSTATUS Status;
+
+    /* Check if we have a registry setting */
+    if (MmLowMemoryThreshold)
+    {
+        /* Convert it to pages */
+        MmLowMemoryThreshold *= (_1MB / PAGE_SIZE);
+    }
+    else
+    {
+        /* The low memory threshold is hit when we don't consider that we have "plenty" of free pages anymore */
+        MmLowMemoryThreshold = MmPlentyFreePages;
+
+        /* More than one GB of memory? */
+        if (MmNumberOfPhysicalPages > 0x40000)
+        {
+            /* Start at 32MB, and add another 16MB for each GB */
+            MmLowMemoryThreshold = (32 * _1MB) / PAGE_SIZE;
+            MmLowMemoryThreshold += ((MmNumberOfPhysicalPages - 0x40000) >> 7);
+            /* NOTE(review): >> 7 adds one page per 128 pages, i.e. ~8MB per
+               extra GB with 4KB pages -- the comment above says 16MB; verify
+               which was intended */
+        }
+        else if (MmNumberOfPhysicalPages > 0x8000)
+        {
+            /* For systems with > 128MB RAM, add another 4MB for each 128MB */
+            MmLowMemoryThreshold += ((MmNumberOfPhysicalPages - 0x8000) >> 5);
+        }
+
+        /* Don't let the minimum threshold go past 64MB */
+        MmLowMemoryThreshold = min(MmLowMemoryThreshold, (64 * _1MB) / PAGE_SIZE);
+    }
+
+    /* Check if we have a registry setting */
+    if (MmHighMemoryThreshold)
+    {
+        /* Convert it into pages */
+        MmHighMemoryThreshold *= (_1MB / PAGE_SIZE);
+    }
+    else
+    {
+        /* Otherwise, the default is three times the low memory threshold */
+        MmHighMemoryThreshold = 3 * MmLowMemoryThreshold;
+        ASSERT(MmHighMemoryThreshold > MmLowMemoryThreshold);
+    }
+
+    /* Make sure high threshold is actually higher than the low */
+    MmHighMemoryThreshold = max(MmHighMemoryThreshold, MmLowMemoryThreshold);
+
+    /* Create the memory events for all the thresholds */
+    Status = MiCreateMemoryEvent(&LowString, &MiLowMemoryEvent);
+    if (!NT_SUCCESS(Status)) return FALSE;
+    Status = MiCreateMemoryEvent(&HighString, &MiHighMemoryEvent);
+    if (!NT_SUCCESS(Status)) return FALSE;
+    Status = MiCreateMemoryEvent(&LowPagedPoolString, &MiLowPagedPoolEvent);
+    if (!NT_SUCCESS(Status)) return FALSE;
+    Status = MiCreateMemoryEvent(&HighPagedPoolString, &MiHighPagedPoolEvent);
+    if (!NT_SUCCESS(Status)) return FALSE;
+    Status = MiCreateMemoryEvent(&LowNonPagedPoolString, &MiLowNonPagedPoolEvent);
+    if (!NT_SUCCESS(Status)) return FALSE;
+    Status = MiCreateMemoryEvent(&HighNonPagedPoolString, &MiHighNonPagedPoolEvent);
+    if (!NT_SUCCESS(Status)) return FALSE;
+
+    /* Now setup the pool events */
+    MiInitializePoolEvents();
+
+    /* Set the initial event state */
+    MiNotifyMemoryEvents();
+    return TRUE;
+}
+
+/*
+ * MiAddHalIoMappings
+ *
+ * Scans the HAL heap region (0xFFC00000 up to the top of the address
+ * space) for valid, non-large-page PDE mappings created by the HAL. Any
+ * mapped page whose frame number has no PFN database entry is device/IO
+ * memory; such mappings are only reported for now, since PAT cache
+ * attribute tracking for them is not yet implemented.
+ *
+ * NOTE(review): the hard-coded 0xFFC00000 base is x86-specific --
+ * confirm this path is only compiled for x86.
+ */
+VOID
+NTAPI
+MiAddHalIoMappings(VOID)
+{
+    PVOID BaseAddress;
+    PMMPTE PointerPde;
+    PMMPTE PointerPte;
+    ULONG i, j, PdeCount;
+    PFN_NUMBER PageFrameIndex;
+
+    /* HAL Heap address -- should be on a PDE boundary */
+    BaseAddress = (PVOID)0xFFC00000;
+    ASSERT(MiAddressToPteOffset(BaseAddress) == 0);
+
+    /* Check how many PDEs the heap has */
+    PointerPde = MiAddressToPde(BaseAddress);
+    PdeCount = PDE_COUNT - ADDR_TO_PDE_OFFSET(BaseAddress);
+    for (i = 0; i < PdeCount; i++)
+    {
+        /* Does the HAL own this mapping? */
+        if ((PointerPde->u.Hard.Valid == 1) &&
+            (PointerPde->u.Hard.LargePage == 0))
+        {
+            /* Get the PTE for it and scan each page */
+            PointerPte = MiAddressToPte(BaseAddress);
+            for (j = 0 ; j < PTE_COUNT; j++)
+            {
+                /* Does the HAL own this page? */
+                if (PointerPte->u.Hard.Valid == 1)
+                {
+                    /* Is the HAL using it for device or I/O mapped memory? */
+                    PageFrameIndex = PFN_FROM_PTE(PointerPte);
+                    if (!MiGetPfnEntry(PageFrameIndex))
+                    {
+                        /* FIXME: For PAT, we need to track I/O cache attributes for coherency */
+                        DPRINT1("HAL I/O Mapping at %p is unsafe\n", BaseAddress);
+                    }
+                }
+
+                /* Move to the next page */
+                BaseAddress = (PVOID)((ULONG_PTR)BaseAddress + PAGE_SIZE);
+                PointerPte++;
+            }
+        }
+        else
+        {
+            /* Move to the next address */
+            BaseAddress = (PVOID)((ULONG_PTR)BaseAddress + PDE_MAPPED_VA);
+        }
+
+        /* Move to the next PDE */
+        PointerPde++;
+    }
+}
+
VOID
NTAPI
MmDumpArmPfnDatabase(VOID)
//
for (i = 0; i <= MmHighestPhysicalPage; i++)
{
- Pfn1 = MI_PFN_TO_PFNENTRY(i);
+ Pfn1 = MiGetPfnEntry(i);
if (!Pfn1) continue;
//
// Get the page frame number for the system page directory
//
PointerPte = MiAddressToPte(PDE_BASE);
- MmSystemPageDirectory = PFN_FROM_PTE(PointerPte);
+ ASSERT(PD_COUNT == 1);
+ MmSystemPageDirectory[0] = PFN_FROM_PTE(PointerPte);
//
// Allocate a system PTE which will hold a copy of the page directory
// way).
//
TempPte = ValidKernelPte;
- TempPte.u.Hard.PageFrameNumber = MmSystemPageDirectory;
+ ASSERT(PD_COUNT == 1);
+ TempPte.u.Hard.PageFrameNumber = MmSystemPageDirectory[0];
ASSERT(PointerPte->u.Hard.Valid == 0);
ASSERT(TempPte.u.Hard.Valid == 1);
*PointerPte = TempPte;
//
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
- //
- // Allocate a page and map the first paged pool PDE
- //
- PageFrameIndex = MmAllocPage(MC_NPPOOL);
+ /* Allocate a page and map the first paged pool PDE */
+ PageFrameIndex = MiRemoveZeroPage(0);
TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
ASSERT(PointerPde->u.Hard.Valid == 0);
ASSERT(TempPte.u.Hard.Valid == 1);
*PointerPde = TempPte;
+ /* Initialize the PFN entry for it */
+ MiInitializePfnForOtherProcess(PageFrameIndex,
+ PointerPde,
+ MmSystemPageDirectory[(PointerPde - (PMMPTE)PDE_BASE) / PDE_COUNT]);
+
//
// Release the PFN database lock
//
//
InitializePool(PagedPool, 0);
- //
- // Initialize the paged pool mutex
- //
- KeInitializeGuardedMutex(&MmPagedPoolMutex);
+ /* Default low threshold of 30MB or one fifth of paged pool */
+ MiLowPagedPoolThreshold = (30 * _1MB) >> PAGE_SHIFT;
+ MiLowPagedPoolThreshold = min(MiLowPagedPoolThreshold, Size / 5);
+
+ /* Default high threshold of 60MB or 25% */
+ MiHighPagedPoolThreshold = (60 * _1MB) >> PAGE_SHIFT;
+ MiHighPagedPoolThreshold = min(MiHighPagedPoolThreshold, (Size * 2) / 5);
+ ASSERT(MiLowPagedPoolThreshold < MiHighPagedPoolThreshold);
}
NTSTATUS
IncludeType[LoaderBBTMemory] = FALSE;
if (Phase == 0)
{
+ /* Initialize the phase 0 temporary event */
+ KeInitializeEvent(&MiTempEvent, NotificationEvent, FALSE);
+
+ /* Set all the events to use the temporary event for now */
+ MiLowMemoryEvent = &MiTempEvent;
+ MiHighMemoryEvent = &MiTempEvent;
+ MiLowPagedPoolEvent = &MiTempEvent;
+ MiHighPagedPoolEvent = &MiTempEvent;
+ MiLowNonPagedPoolEvent = &MiTempEvent;
+ MiHighNonPagedPoolEvent = &MiTempEvent;
+
//
// Define the basic user vs. kernel address space separation
//
MmUserProbeAddress = (ULONG_PTR)MmSystemRangeStart - 0x10000;
MmHighestUserAddress = (PVOID)(MmUserProbeAddress - 1);
+ /* Highest PTE and PDE based on the addresses above */
+ MiHighestUserPte = MiAddressToPte(MmHighestUserAddress);
+ MiHighestUserPde = MiAddressToPde(MmHighestUserAddress);
+
//
// Get the size of the boot loader's image allocations and then round
// that region up to a PDE size, so that any PDEs we might create for
//
MmBootImageSize = KeLoaderBlock->Extension->LoaderPagesSpanned;
MmBootImageSize *= PAGE_SIZE;
- MmBootImageSize = (MmBootImageSize + (4 * 1024 * 1024) - 1) & ~((4 * 1024 * 1024) - 1);
- ASSERT((MmBootImageSize % (4 * 1024 * 1024)) == 0);
+ MmBootImageSize = (MmBootImageSize + PDE_MAPPED_VA - 1) & ~(PDE_MAPPED_VA - 1);
+ ASSERT((MmBootImageSize % PDE_MAPPED_VA) == 0);
//
// Set the size of session view, pool, and image
//
MiSystemViewStart = (PVOID)((ULONG_PTR)MmSessionBase -
MmSystemViewSize);
+
+ /* Compute the PTE addresses for all the addresses we carved out */
+ MiSessionImagePteStart = MiAddressToPte(MiSessionImageStart);
+ MiSessionImagePteEnd = MiAddressToPte(MiSessionImageEnd);
+ MiSessionBasePte = MiAddressToPte(MmSessionBase);
+ MiSessionLastPte = MiAddressToPte(MiSessionSpaceEnd);
+
+ /* Initialize the user mode image list */
+ InitializeListHead(&MmLoadedUserImageList);
+
+ /* Initialize the paged pool mutex */
+ KeInitializeGuardedMutex(&MmPagedPoolMutex);
+
+ /* Initialize the Loader Lock */
+ KeInitializeMutant(&MmSystemLoadLock, FALSE);
//
// Count physical pages on the system
// Sync us up with ReactOS Mm
//
MiSyncARM3WithROS(MmNonPagedSystemStart, (PVOID)((ULONG_PTR)MmNonPagedPoolEnd - 1));
- MiSyncARM3WithROS(MmPfnDatabase[0], (PVOID)((ULONG_PTR)MmNonPagedPoolStart + MmSizeOfNonPagedPoolInBytes - 1));
+ MiSyncARM3WithROS(MmPfnDatabase, (PVOID)((ULONG_PTR)MmNonPagedPoolStart + MmSizeOfNonPagedPoolInBytes - 1));
MiSyncARM3WithROS((PVOID)HYPER_SPACE, (PVOID)(HYPER_SPACE + PAGE_SIZE - 1));
//
}
}
- //
- // Size up paged pool and build the shadow system page directory
- //
+ /* Look for large page cache entries that need caching */
+ MiSyncCachedRanges();
+
+ /* Loop for HAL Heap I/O device mappings that need coherency tracking */
+ MiAddHalIoMappings();
+
+ /* Set the initial resident page count */
+ MmResidentAvailablePages = MmAvailablePages - 32;
+
+ /* Initialize large page structures on PAE/x64, and MmProcessList on x86 */
+ MiInitializeLargePageSupport();
+
+ /* Check if the registry says any drivers should be loaded with large pages */
+ MiInitializeDriverLargePageList();
+
+ /* Relocate the boot drivers into system PTE space and fixup their PFNs */
+ MiReloadBootLoadedDrivers(LoaderBlock);
+
+ /* FIXME: Call out into Driver Verifier for initialization */
+
+ /* Check how many pages the system has */
+ if (MmNumberOfPhysicalPages <= (13 * _1MB))
+ {
+ /* Set small system */
+ MmSystemSize = MmSmallSystem;
+ }
+ else if (MmNumberOfPhysicalPages <= (19 * _1MB))
+ {
+ /* Set small system and add 100 pages for the cache */
+ MmSystemSize = MmSmallSystem;
+ MmSystemCacheWsMinimum += 100;
+ }
+ else
+ {
+ /* Set medium system and add 400 pages for the cache */
+ MmSystemSize = MmMediumSystem;
+ MmSystemCacheWsMinimum += 400;
+ }
+
+ /* Check for less than 24MB */
+ if (MmNumberOfPhysicalPages < ((24 * _1MB) / PAGE_SIZE))
+ {
+ /* No more than 32 pages */
+ MmSystemCacheWsMinimum = 32;
+ }
+
+ /* Check for more than 32MB */
+ if (MmNumberOfPhysicalPages >= ((32 * _1MB) / PAGE_SIZE))
+ {
+ /* Check for product type being "Wi" for WinNT */
+ if (MmProductType == '\0i\0W')
+ {
+ /* Then this is a large system */
+ MmSystemSize = MmLargeSystem;
+ }
+ else
+ {
+ /* For servers, we need 64MB to consider this as being large */
+ if (MmNumberOfPhysicalPages >= ((64 * _1MB) / PAGE_SIZE))
+ {
+ /* Set it as large */
+ MmSystemSize = MmLargeSystem;
+ }
+ }
+ }
+
+ /* Check for more than 33 MB */
+ if (MmNumberOfPhysicalPages > ((33 * _1MB) / PAGE_SIZE))
+ {
+ /* Add another 500 pages to the cache */
+ MmSystemCacheWsMinimum += 500;
+ }
+
+ /* Now setup the shared user data fields */
+ ASSERT(SharedUserData->NumberOfPhysicalPages == 0);
+ SharedUserData->NumberOfPhysicalPages = MmNumberOfPhysicalPages;
+ SharedUserData->LargePageMinimum = 0;
+
+ /* Check for workstation (Wi for WinNT) */
+ if (MmProductType == '\0i\0W')
+ {
+ /* Set Windows NT Workstation product type */
+ SharedUserData->NtProductType = NtProductWinNt;
+ MmProductType = 0;
+ }
+ else
+ {
+ /* Check for LanMan server */
+ if (MmProductType == '\0a\0L')
+ {
+ /* This is a domain controller */
+ SharedUserData->NtProductType = NtProductLanManNt;
+ }
+ else
+ {
+ /* Otherwise it must be a normal server */
+ SharedUserData->NtProductType = NtProductServer;
+ }
+
+ /* Set the product type, and make the system more aggressive with low memory */
+ MmProductType = 1;
+ MmMinimumFreePages = 81;
+ }
+
+ /* Update working set tuning parameters */
+ MiAdjustWorkingSetManagerParameters(!MmProductType);
+
+ /* Finetune the page count by removing working set and NP expansion */
+ MmResidentAvailablePages -= MiExpansionPoolPagesInitialCharge;
+ MmResidentAvailablePages -= MmSystemCacheWsMinimum;
+ MmResidentAvailableAtInit = MmResidentAvailablePages;
+ if (MmResidentAvailablePages <= 0)
+ {
+ /* This should not happen */
+ DPRINT1("System cache working set too big\n");
+ return FALSE;
+ }
+
+ /* Size up paged pool and build the shadow system page directory */
MiBuildPagedPool();
+
+ /* Debugger physical memory support is now ready to be used */
+ MiDbgReadyForPhysical = TRUE;
+
+ /* Initialize the loaded module list */
+ MiInitializeLoadedModuleList(LoaderBlock);
}
//