ULONG MmSizeOfNonPagedPoolInBytes;
ULONG MmMaximumNonPagedPoolInBytes;
+/* Some of the same values, in pages */
+PFN_NUMBER MmMaximumNonPagedPoolInPages;
+
//
// These numbers describe the discrete equation components of the nonpaged
// pool sizing algorithm.
ULONG MmSessionPoolSize;
ULONG MmSessionImageSize;
+/*
+ * These are the PTE addresses of the boundaries carved out above
+ */
+PMMPTE MiSessionImagePteStart;
+PMMPTE MiSessionImagePteEnd;
+PMMPTE MiSessionBasePte;
+PMMPTE MiSessionLastPte;
+
//
// The system view space, on the other hand, is where sections that are memory
// mapped into "system space" end up.
// map paged pool PDEs into external processes when they fault on a paged pool
// address.
//
-PFN_NUMBER MmSystemPageDirectory;
+PFN_NUMBER MmSystemPageDirectory[PD_COUNT];
PMMPTE MmSystemPagePtes;
//
PVOID MmHighestUserAddress;
PVOID MmSystemRangeStart;
+/* And these store the respective highest PTE/PDE address */
+PMMPTE MiHighestUserPte;
+PMMPDE MiHighestUserPde;
+
+/* These variables define the system cache address space */
PVOID MmSystemCacheStart;
PVOID MmSystemCacheEnd;
MMSUPPORT MmSystemCacheWs;
//
// Actual (registry-configurable) size of a GUI thread's stack
//
-ULONG MmLargeStackSize;
+ULONG MmLargeStackSize = KERNEL_LARGE_STACK_SIZE;
+
+//
+// Before we have a PFN database, memory comes straight from our physical memory
+// blocks, which is nice because it's guaranteed contiguous and also because once
+// we take a page from here, the system doesn't see it anymore.
+// However, once the fun is over, those pages must be re-integrated back into
+// PFN society life, and that requires us keeping a copy of the original layout
+// so that we can parse it later.
+//
+PMEMORY_ALLOCATION_DESCRIPTOR MxFreeDescriptor;
+MEMORY_ALLOCATION_DESCRIPTOR MxOldFreeDescriptor;
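+
+/*
+ * Note: MxFreeDescriptor points at the loader descriptor we allocate from
+ * (typically the largest block of free low memory), and MxOldFreeDescriptor
+ * keeps a verbatim copy of it, taken before any pages are consumed, so that
+ * MiIsRegularMemory and the PFN database setup below can still see the
+ * original extent of that block.
+ */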
+
+/*
+ * For each page-sized chunk of L2 cache in a given set/way line, the zeroed and
+ * free page lists are organized into what is called a "color".
+ *
+ * This array points to the two lists, so it can be thought of as a
+ * two-dimensional array of MmFreePagesByColor[2][MmSecondaryColors]. Since the
+ * number of colors is only known at runtime, we describe the array in pointer form instead.
+ *
+ * On a final note, the color tables themselves are right after the PFN database.
+ */
+C_ASSERT(FreePageList == 1);
+PMMCOLOR_TABLES MmFreePagesByColor[FreePageList + 1];
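+
+/*
+ * Indexing convention implied by the C_ASSERT above: the first of the two lists
+ * holds the zeroed-page colors and the second the free-page ones, so lookups
+ * take the form MmFreePagesByColor[ZeroedPageList or FreePageList][Color].
+ */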
+
+/* An event used in Phase 0 before the rest of the system is ready to go */
+KEVENT MiTempEvent;
+
+/* All the events used for memory threshold notifications */
+PKEVENT MiLowMemoryEvent;
+PKEVENT MiHighMemoryEvent;
+PKEVENT MiLowPagedPoolEvent;
+PKEVENT MiHighPagedPoolEvent;
+PKEVENT MiLowNonPagedPoolEvent;
+PKEVENT MiHighNonPagedPoolEvent;
+
+/* The actual thresholds themselves, in page numbers */
+PFN_NUMBER MmLowMemoryThreshold;
+PFN_NUMBER MmHighMemoryThreshold;
+PFN_NUMBER MiLowPagedPoolThreshold;
+PFN_NUMBER MiHighPagedPoolThreshold;
+PFN_NUMBER MiLowNonPagedPoolThreshold;
+PFN_NUMBER MiHighNonPagedPoolThreshold;
+
+/*
+ * This number determines how many free pages must exist, at minimum, before we
+ * start trimming working sets and flushing modified pages to obtain more free
+ * pages.
+ *
+ * This number changes if the system detects that this is a server product
+ */
+PFN_NUMBER MmMinimumFreePages = 26;
+
+/*
+ * This number indicates how many pages we consider to be a low limit of having
+ * "plenty" of free memory.
+ *
+ * It is doubled on systems that have more than 63MB of memory
+ */
+PFN_NUMBER MmPlentyFreePages = 400;
+
+/* These values store whether this is a server product, and the system size (small, medium, large) */
+ULONG MmProductType;
+MM_SYSTEMSIZE MmSystemSize;
+
+/*
+ * These values store the cache working set minimums and maximums, in pages
+ *
+ * The minimum value is boosted on systems with more than 24MB of RAM, and cut
+ * down to only 32 pages on embedded (<24MB RAM) systems.
+ *
+ * An extra boost of 2MB is given on systems with more than 33MB of RAM.
+ */
+PFN_NUMBER MmSystemCacheWsMinimum = 288;
+PFN_NUMBER MmSystemCacheWsMaximum = 350;
+
+/* FIXME: Move to cache/working set code later */
+BOOLEAN MmLargeSystemCache;
/* PRIVATE FUNCTIONS **********************************************************/
}
}
+PFN_NUMBER
+NTAPI
+MxGetNextPage(IN PFN_NUMBER PageCount)
+{
+ PFN_NUMBER Pfn;
+
+ /* Make sure we have enough pages */
+ if (PageCount > MxFreeDescriptor->PageCount)
+ {
+ /* Crash the system */
+ KeBugCheckEx(INSTALL_MORE_MEMORY,
+ MmNumberOfPhysicalPages,
+ MxFreeDescriptor->PageCount,
+ MxOldFreeDescriptor.PageCount,
+ PageCount);
+ }
+
+ /* Use our lowest usable free pages */
+ Pfn = MxFreeDescriptor->BasePage;
+ MxFreeDescriptor->BasePage += PageCount;
+ MxFreeDescriptor->PageCount -= PageCount;
+ return Pfn;
+}
+
+VOID
+NTAPI
+MiComputeColorInformation(VOID)
+{
+ ULONG L2Associativity;
+
+ /* Check if no setting was provided already */
+ if (!MmSecondaryColors)
+ {
+ /* Get L2 cache information */
+ L2Associativity = KeGetPcr()->SecondLevelCacheAssociativity;
+
+ /* The number of colors is the number of cache bytes per set/way line */
+ MmSecondaryColors = KeGetPcr()->SecondLevelCacheSize;
+ if (L2Associativity) MmSecondaryColors /= L2Associativity;
+ }
+
+ /* Now convert cache bytes into pages */
+ MmSecondaryColors >>= PAGE_SHIFT;
+ if (!MmSecondaryColors)
+ {
+ /* If there was no cache data from the KPCR, use the default colors */
+ MmSecondaryColors = MI_SECONDARY_COLORS;
+ }
+ else
+ {
+ /* Otherwise, make sure there aren't too many colors */
+ if (MmSecondaryColors > MI_MAX_SECONDARY_COLORS)
+ {
+ /* Set the maximum */
+ MmSecondaryColors = MI_MAX_SECONDARY_COLORS;
+ }
+
+ /* Make sure there aren't too few colors */
+ if (MmSecondaryColors < MI_MIN_SECONDARY_COLORS)
+ {
+ /* Set the default */
+ MmSecondaryColors = MI_SECONDARY_COLORS;
+ }
+
+ /* Finally make sure the colors are a power of two */
+ if (MmSecondaryColors & (MmSecondaryColors - 1))
+ {
+ /* Set the default */
+ MmSecondaryColors = MI_SECONDARY_COLORS;
+ }
+ }
+
+ /* Compute the mask and store it */
+ MmSecondaryColorMask = MmSecondaryColors - 1;
+ KeGetCurrentPrcb()->SecondaryColorMask = MmSecondaryColorMask;
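+
+ /*
+ * Illustrative example (the real values depend on the CPU): a 512KB, 8-way
+ * L2 cache gives 512KB / 8 = 64KB per set/way line, which is 16 colors with
+ * 4KB pages, so the color mask ends up as 0xF.
+ */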
+}
+
+VOID
+NTAPI
+MiInitializeColorTables(VOID)
+{
+ ULONG i;
+ PMMPTE PointerPte, LastPte;
+ MMPTE TempPte = ValidKernelPte;
+
+ /* The color table starts after the ARM3 PFN database */
+ MmFreePagesByColor[0] = (PMMCOLOR_TABLES)&MmPfnDatabase[MmHighestPhysicalPage + 1];
+
+ /* Loop the PTEs. We have two color tables for each secondary color */
+ PointerPte = MiAddressToPte(&MmFreePagesByColor[0][0]);
+ LastPte = MiAddressToPte((ULONG_PTR)MmFreePagesByColor[0] +
+ (2 * MmSecondaryColors * sizeof(MMCOLOR_TABLES))
+ - 1);
+ while (PointerPte <= LastPte)
+ {
+ /* Check for valid PTE */
+ if (PointerPte->u.Hard.Valid == 0)
+ {
+ /* Get a page and map it */
+ TempPte.u.Hard.PageFrameNumber = MxGetNextPage(1);
+ ASSERT(TempPte.u.Hard.Valid == 1);
+ *PointerPte = TempPte;
+
+ /* Zero out the page */
+ RtlZeroMemory(MiPteToAddress(PointerPte), PAGE_SIZE);
+ }
+
+ /* Next */
+ PointerPte++;
+ }
+
+ /* Now set the address of the next list, right after this one */
+ MmFreePagesByColor[1] = &MmFreePagesByColor[0][MmSecondaryColors];
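+
+ /*
+ * At this point MmFreePagesByColor[0] and MmFreePagesByColor[1] are two
+ * back-to-back arrays of MmSecondaryColors entries each, which is exactly
+ * the 2 * MmSecondaryColors * sizeof(MMCOLOR_TABLES) bytes mapped by the
+ * PTE loop above.
+ */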
+
+ /* Now loop the lists to set them up */
+ for (i = 0; i < MmSecondaryColors; i++)
+ {
+ /* Set both free and zero lists for each color */
+ MmFreePagesByColor[ZeroedPageList][i].Flink = 0xFFFFFFFF;
+ MmFreePagesByColor[ZeroedPageList][i].Blink = (PVOID)0xFFFFFFFF;
+ MmFreePagesByColor[ZeroedPageList][i].Count = 0;
+ MmFreePagesByColor[FreePageList][i].Flink = 0xFFFFFFFF;
+ MmFreePagesByColor[FreePageList][i].Blink = (PVOID)0xFFFFFFFF;
+ MmFreePagesByColor[FreePageList][i].Count = 0;
+ }
+}
+
+BOOLEAN
+NTAPI
+MiIsRegularMemory(IN PLOADER_PARAMETER_BLOCK LoaderBlock,
+ IN PFN_NUMBER Pfn)
+{
+ PLIST_ENTRY NextEntry;
+ PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
+
+ /* Loop the memory descriptors */
+ NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
+ while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
+ {
+ /* Get the memory descriptor */
+ MdBlock = CONTAINING_RECORD(NextEntry,
+ MEMORY_ALLOCATION_DESCRIPTOR,
+ ListEntry);
+
+ /* Check if this PFN could be part of the block */
+ if (Pfn >= (MdBlock->BasePage))
+ {
+ /* Check if it really is part of the block */
+ if (Pfn < (MdBlock->BasePage + MdBlock->PageCount))
+ {
+ /* Check if the block is actually memory we don't map */
+ if ((MdBlock->MemoryType == LoaderFirmwarePermanent) ||
+ (MdBlock->MemoryType == LoaderBBTMemory) ||
+ (MdBlock->MemoryType == LoaderSpecialMemory))
+ {
+ /* We don't need PFN database entries for this memory */
+ break;
+ }
+
+ /* This is memory we want to map */
+ return TRUE;
+ }
+ }
+ else
+ {
+ /* Blocks are ordered, so if it's not here, it doesn't exist */
+ break;
+ }
+
+ /* Get to the next descriptor */
+ NextEntry = MdBlock->ListEntry.Flink;
+ }
+
+ /* Check if this PFN is actually from our free memory descriptor */
+ if ((Pfn >= MxOldFreeDescriptor.BasePage) &&
+ (Pfn < MxOldFreeDescriptor.BasePage + MxOldFreeDescriptor.PageCount))
+ {
+ /* We use these pages for initial mappings, so we do want to count them */
+ return TRUE;
+ }
+
+ /* Otherwise this isn't memory that we describe or care about */
+ return FALSE;
+}
+
+VOID
+NTAPI
+MiMapPfnDatabase(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
+{
+ ULONG FreePage, FreePageCount, PagesLeft, BasePage, PageCount;
+ PLIST_ENTRY NextEntry;
+ PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
+ PMMPTE PointerPte, LastPte;
+ MMPTE TempPte = ValidKernelPte;
+
+ /* Get current page data, since we won't be using MxGetNextPage as it would corrupt our state */
+ FreePage = MxFreeDescriptor->BasePage;
+ FreePageCount = MxFreeDescriptor->PageCount;
+ PagesLeft = 0;
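+
+ /*
+ * Rough sizing note: one MMPFN (about 24 bytes on x86) is needed for every
+ * physical page the loader describes, so each PTE we map below backs roughly
+ * PAGE_SIZE / sizeof(MMPFN) (around 170) PFN entries.
+ */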
+
+ /* Loop the memory descriptors */
+ NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
+ while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
+ {
+ /* Get the descriptor */
+ MdBlock = CONTAINING_RECORD(NextEntry,
+ MEMORY_ALLOCATION_DESCRIPTOR,
+ ListEntry);
+ if ((MdBlock->MemoryType == LoaderFirmwarePermanent) ||
+ (MdBlock->MemoryType == LoaderBBTMemory) ||
+ (MdBlock->MemoryType == LoaderSpecialMemory))
+ {
+ /* These pages are not part of the PFN database */
+ NextEntry = MdBlock->ListEntry.Flink;
+ continue;
+ }
+
+ /* Next, check if this is our special free descriptor we've found */
+ if (MdBlock == MxFreeDescriptor)
+ {
+ /* Use the real numbers instead */
+ BasePage = MxOldFreeDescriptor.BasePage;
+ PageCount = MxOldFreeDescriptor.PageCount;
+ }
+ else
+ {
+ /* Use the descriptor's numbers */
+ BasePage = MdBlock->BasePage;
+ PageCount = MdBlock->PageCount;
+ }
+
+ /* Get the PTEs for this range */
+ PointerPte = MiAddressToPte(&MmPfnDatabase[BasePage]);
+ LastPte = MiAddressToPte(((ULONG_PTR)&MmPfnDatabase[BasePage + PageCount]) - 1);
+ DPRINT("MD Type: %lx Base: %lx Count: %lx\n", MdBlock->MemoryType, BasePage, PageCount);
+
+ /* Loop them */
+ while (PointerPte <= LastPte)
+ {
+ /* We'll only touch PTEs that aren't already valid */
+ if (PointerPte->u.Hard.Valid == 0)
+ {
+ /* Use the next free page */
+ TempPte.u.Hard.PageFrameNumber = FreePage;
+ ASSERT(FreePageCount != 0);
+
+ /* Consume free pages */
+ FreePage++;
+ FreePageCount--;
+ if (!FreePageCount)
+ {
+ /* Out of memory */
+ KeBugCheckEx(INSTALL_MORE_MEMORY,
+ MmNumberOfPhysicalPages,
+ FreePageCount,
+ MxOldFreeDescriptor.PageCount,
+ 1);
+ }
+
+ /* Write out this PTE */
+ PagesLeft++;
+ ASSERT(PointerPte->u.Hard.Valid == 0);
+ ASSERT(TempPte.u.Hard.Valid == 1);
+ *PointerPte = TempPte;
+
+ /* Zero this page */
+ RtlZeroMemory(MiPteToAddress(PointerPte), PAGE_SIZE);
+ }
+
+ /* Next! */
+ PointerPte++;
+ }
+
+ /* Do the next address range */
+ NextEntry = MdBlock->ListEntry.Flink;
+ }
+
+ /* Now update the free descriptor to consume the pages we used up during the PFN allocation loop */
+ MxFreeDescriptor->BasePage = FreePage;
+ MxFreeDescriptor->PageCount = FreePageCount;
+}
+
+VOID
+NTAPI
+MiBuildPfnDatabaseFromPages(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
+{
+ PMMPDE PointerPde;
+ PMMPTE PointerPte;
+ ULONG i, Count, j;
+ PFN_NUMBER PageFrameIndex, StartupPdIndex, PtePageIndex;
+ PMMPFN Pfn1, Pfn2;
+ ULONG_PTR BaseAddress = 0;
+
+ /* PFN of the startup page directory */
+ StartupPdIndex = PFN_FROM_PTE(MiAddressToPde(PDE_BASE));
+
+ /* Start with the first PDE and scan them all */
+ PointerPde = MiAddressToPde(NULL);
+ Count = PD_COUNT * PDE_COUNT;
+ for (i = 0; i < Count; i++)
+ {
+ /* Check for valid PDE */
+ if (PointerPde->u.Hard.Valid == 1)
+ {
+ /* Get the PFN from it */
+ PageFrameIndex = PFN_FROM_PTE(PointerPde);
+
+ /* Do we want a PFN entry for this page? */
+ if (MiIsRegularMemory(LoaderBlock, PageFrameIndex))
+ {
+ /* Yes we do, set it up */
+ Pfn1 = MiGetPfnEntry(PageFrameIndex);
+ Pfn1->u4.PteFrame = StartupPdIndex;
+ Pfn1->PteAddress = PointerPde;
+ Pfn1->u2.ShareCount++;
+ Pfn1->u3.e2.ReferenceCount = 1;
+ Pfn1->u3.e1.PageLocation = ActiveAndValid;
+ Pfn1->u3.e1.CacheAttribute = MiNonCached;
+ }
+ else
+ {
+ /* No PFN entry */
+ Pfn1 = NULL;
+ }
+
+ /* Now get the PTE and scan the pages */
+ PointerPte = MiAddressToPte(BaseAddress);
+ for (j = 0; j < PTE_COUNT; j++)
+ {
+ /* Check for a valid PTE */
+ if (PointerPte->u.Hard.Valid == 1)
+ {
+ /* Increase the shared count of the PFN entry for the PDE */
+ ASSERT(Pfn1 != NULL);
+ Pfn1->u2.ShareCount++;
+
+ /* Now check if the PTE is valid memory too */
+ PtePageIndex = PFN_FROM_PTE(PointerPte);
+ if (MiIsRegularMemory(LoaderBlock, PtePageIndex))
+ {
+ /*
+ * Only add pages above the end of system code or pages
+ * that are part of nonpaged pool
+ */
+ if ((BaseAddress >= 0xA0000000) ||
+ ((BaseAddress >= (ULONG_PTR)MmNonPagedPoolStart) &&
+ (BaseAddress < (ULONG_PTR)MmNonPagedPoolStart +
+ MmSizeOfNonPagedPoolInBytes)))
+ {
+ /* Get the PFN entry and make sure it too is valid */
+ Pfn2 = MiGetPfnEntry(PtePageIndex);
+ if ((MmIsAddressValid(Pfn2)) &&
+ (MmIsAddressValid(Pfn2 + 1)))
+ {
+ /* Setup the PFN entry */
+ Pfn2->u4.PteFrame = PageFrameIndex;
+ Pfn2->PteAddress = PointerPte;
+ Pfn2->u2.ShareCount++;
+ Pfn2->u3.e2.ReferenceCount = 1;
+ Pfn2->u3.e1.PageLocation = ActiveAndValid;
+ Pfn2->u3.e1.CacheAttribute = MiNonCached;
+ }
+ }
+ }
+ }
+
+ /* Next PTE */
+ PointerPte++;
+ BaseAddress += PAGE_SIZE;
+ }
+ }
+ else
+ {
+ /* Next PDE mapped address */
+ BaseAddress += PDE_MAPPED_VA;
+ }
+
+ /* Next PDE */
+ PointerPde++;
+ }
+}
+
+VOID
+NTAPI
+MiBuildPfnDatabaseZeroPage(VOID)
+{
+ PMMPFN Pfn1;
+ PMMPDE PointerPde;
+
+ /* Grab the lowest page and check if it has no real references */
+ Pfn1 = MiGetPfnEntry(MmLowestPhysicalPage);
+ if (!(MmLowestPhysicalPage) && !(Pfn1->u3.e2.ReferenceCount))
+ {
+ /* Make it a bogus page to catch errors */
+ PointerPde = MiAddressToPde(0xFFFFFFFF);
+ Pfn1->u4.PteFrame = PFN_FROM_PTE(PointerPde);
+ Pfn1->PteAddress = PointerPde;
+ Pfn1->u2.ShareCount++;
+ Pfn1->u3.e2.ReferenceCount = 0xFFF0;
+ Pfn1->u3.e1.PageLocation = ActiveAndValid;
+ Pfn1->u3.e1.CacheAttribute = MiNonCached;
+ }
+}
+
+VOID
+NTAPI
+MiBuildPfnDatabaseFromLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
+{
+ PLIST_ENTRY NextEntry;
+ PFN_NUMBER PageCount = 0;
+ PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
+ PFN_NUMBER PageFrameIndex;
+ PMMPFN Pfn1;
+ PMMPTE PointerPte;
+ PMMPDE PointerPde;
+ KIRQL OldIrql;
+
+ /* Now loop through the descriptors */
+ NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
+ while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
+ {
+ /* Get the current descriptor */
+ MdBlock = CONTAINING_RECORD(NextEntry,
+ MEMORY_ALLOCATION_DESCRIPTOR,
+ ListEntry);
+
+ /* Read its data */
+ PageCount = MdBlock->PageCount;
+ PageFrameIndex = MdBlock->BasePage;
+
+ /* Don't allow memory above what the PFN database is mapping */
+ if (PageFrameIndex > MmHighestPhysicalPage)
+ {
+ /* Since they are ordered, everything past here will be larger */
+ break;
+ }
+
+ /* On the other hand, the end page might be higher up... */
+ if ((PageFrameIndex + PageCount) > (MmHighestPhysicalPage + 1))
+ {
+ /* In which case we'll trim the descriptor to go as high as we can */
+ PageCount = MmHighestPhysicalPage + 1 - PageFrameIndex;
+ MdBlock->PageCount = PageCount;
+
+ /* But if there's nothing left to trim, we got too high, so quit */
+ if (!PageCount) break;
+ }
+
+ /* Now check the descriptor type */
+ switch (MdBlock->MemoryType)
+ {
+ /* Check for bad RAM */
+ case LoaderBad:
+
+ DPRINT1("You have damaged RAM modules. Stopping boot\n");
+ while (TRUE);
+ break;
+
+ /* Check for free RAM */
+ case LoaderFree:
+ case LoaderLoadedProgram:
+ case LoaderFirmwareTemporary:
+ case LoaderOsloaderStack:
+
+ /* Get the last page of this descriptor. Note we loop backwards */
+ PageFrameIndex += PageCount - 1;
+ Pfn1 = MiGetPfnEntry(PageFrameIndex);
+
+ /* Lock the PFN Database */
+ OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
+ while (PageCount--)
+ {
+ /* If the page really has no references, mark it as free */
+ if (!Pfn1->u3.e2.ReferenceCount)
+ {
+ /* Add it to the free list */
+ Pfn1->u3.e1.CacheAttribute = MiNonCached;
+ MiInsertPageInFreeList(PageFrameIndex);
+ }
+
+ /* Go to the next page */
+ Pfn1--;
+ PageFrameIndex--;
+ }
+
+ /* Release PFN database */
+ KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
+
+ /* Done with this block */
+ break;
+
+ /* Check for pages that are invisible to us */
+ case LoaderFirmwarePermanent:
+ case LoaderSpecialMemory:
+ case LoaderBBTMemory:
+
+ /* And skip them */
+ break;
+
+ default:
+
+ /* Map these pages with the KSEG0 mapping that adds 0x80000000 */
+ PointerPte = MiAddressToPte(KSEG0_BASE + (PageFrameIndex << PAGE_SHIFT));
+ Pfn1 = MiGetPfnEntry(PageFrameIndex);
+ while (PageCount--)
+ {
+ /* Check if the page is really unused */
+ PointerPde = MiAddressToPde(KSEG0_BASE + (PageFrameIndex << PAGE_SHIFT));
+ if (!Pfn1->u3.e2.ReferenceCount)
+ {
+ /* Mark it as being in-use */
+ Pfn1->u4.PteFrame = PFN_FROM_PTE(PointerPde);
+ Pfn1->PteAddress = PointerPte;
+ Pfn1->u2.ShareCount++;
+ Pfn1->u3.e2.ReferenceCount = 1;
+ Pfn1->u3.e1.PageLocation = ActiveAndValid;
+ Pfn1->u3.e1.CacheAttribute = MiNonCached;
+
+ /* Check for XIP ROM pages */
+ if (MdBlock->MemoryType == LoaderXIPRom)
+ {
+ /* Make it a pseudo-I/O ROM mapping */
+ Pfn1->u1.Flink = 0;
+ Pfn1->u2.ShareCount = 0;
+ Pfn1->u3.e2.ReferenceCount = 0;
+ Pfn1->u3.e1.PageLocation = 0;
+ Pfn1->u3.e1.Rom = 1;
+ Pfn1->u4.InPageError = 0;
+ Pfn1->u3.e1.PrototypePte = 1;
+ }
+ }
+
+ /* Advance page structures */
+ Pfn1++;
+ PageFrameIndex++;
+ PointerPte++;
+ }
+ break;
+ }
+
+ /* Next descriptor entry */
+ NextEntry = MdBlock->ListEntry.Flink;
+ }
+}
+
+VOID
+NTAPI
+MiBuildPfnDatabaseSelf(VOID)
+{
+ PMMPTE PointerPte, LastPte;
+ PMMPFN Pfn1;
+
+ /* Loop the PTEs that map the PFN database pages */
+ PointerPte = MiAddressToPte(MiGetPfnEntry(MmLowestPhysicalPage));
+ LastPte = MiAddressToPte(MiGetPfnEntry(MmHighestPhysicalPage));
+ while (PointerPte <= LastPte)
+ {
+ /* Make sure the page is valid */
+ if (PointerPte->u.Hard.Valid == 1)
+ {
+ /* Get the PFN entry and just mark it referenced */
+ Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
+ Pfn1->u2.ShareCount = 1;
+ Pfn1->u3.e2.ReferenceCount = 1;
+ }
+
+ /* Next */
+ PointerPte++;
+ }
+}
+
+VOID
+NTAPI
+MiInitializePfnDatabase(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
+{
+ /* Scan memory and start setting up PFN entries */
+ MiBuildPfnDatabaseFromPages(LoaderBlock);
+
+ /* Add the zero page */
+ MiBuildPfnDatabaseZeroPage();
+
+ /* Scan the loader block and build the rest of the PFN database */
+ MiBuildPfnDatabaseFromLoaderBlock(LoaderBlock);
+
+ /* Finally add the pages for the PFN database itself */
+ MiBuildPfnDatabaseSelf();
+}
+
+VOID
+NTAPI
+MiAdjustWorkingSetManagerParameters(IN BOOLEAN Client)
+{
+ /* This function needs to do more work; for now, we only tune the plenty-free page count */
+
+ /* Check for a system with around 64MB RAM or more */
+ if (MmNumberOfPhysicalPages >= (63 * _1MB) / PAGE_SIZE)
+ {
+ /* Double the minimum amount of pages we consider for a "plenty free" scenario */
+ MmPlentyFreePages *= 2;
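+
+ /*
+ * Illustrative example: a 128MB machine has 32768 physical pages, which is
+ * above the 63MB (16128 page) cutoff, so MmPlentyFreePages goes from 400 to
+ * 800 pages (about 3MB worth of 4KB pages).
+ */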
+ }
+}
+
+VOID
+NTAPI
+MiNotifyMemoryEvents(VOID)
+{
+ /* Are we in a low-memory situation? */
+ if (MmAvailablePages < MmLowMemoryThreshold)
+ {
+ /* Clear high, set low */
+ if (KeReadStateEvent(MiHighMemoryEvent)) KeClearEvent(MiHighMemoryEvent);
+ if (!KeReadStateEvent(MiLowMemoryEvent)) KeSetEvent(MiLowMemoryEvent, 0, FALSE);
+ }
+ else if (MmAvailablePages < MmHighMemoryThreshold)
+ {
+ /* We are in between, clear both */
+ if (KeReadStateEvent(MiHighMemoryEvent)) KeClearEvent(MiHighMemoryEvent);
+ if (KeReadStateEvent(MiLowMemoryEvent)) KeClearEvent(MiLowMemoryEvent);
+ }
+ else
+ {
+ /* Clear low, set high */
+ if (KeReadStateEvent(MiLowMemoryEvent)) KeClearEvent(MiLowMemoryEvent);
+ if (!KeReadStateEvent(MiHighMemoryEvent)) KeSetEvent(MiHighMemoryEvent, 0, FALSE);
+ }
+}
+
+NTSTATUS
+NTAPI
+MiCreateMemoryEvent(IN PUNICODE_STRING Name,
+ OUT PKEVENT *Event)
+{
+ PACL Dacl;
+ HANDLE EventHandle;
+ ULONG DaclLength;
+ NTSTATUS Status;
+ OBJECT_ATTRIBUTES ObjectAttributes;
+ SECURITY_DESCRIPTOR SecurityDescriptor;
+
+ /* Create the SD */
+ Status = RtlCreateSecurityDescriptor(&SecurityDescriptor,
+ SECURITY_DESCRIPTOR_REVISION);
+ if (!NT_SUCCESS(Status)) return Status;
+
+ /* One ACL with 3 ACEs, each containing one SID */
+ DaclLength = sizeof(ACL) +
+ 3 * sizeof(ACCESS_ALLOWED_ACE) +
+ RtlLengthSid(SeLocalSystemSid) +
+ RtlLengthSid(SeAliasAdminsSid) +
+ RtlLengthSid(SeWorldSid);
+
+ /* Allocate space for the DACL */
+ Dacl = ExAllocatePoolWithTag(PagedPool, DaclLength, 'lcaD');
+ if (!Dacl) return STATUS_INSUFFICIENT_RESOURCES;
+
+ /* Setup the ACL inside it */
+ Status = RtlCreateAcl(Dacl, DaclLength, ACL_REVISION);
+ if (!NT_SUCCESS(Status)) goto CleanUp;
+
+ /* Add query rights for everyone */
+ Status = RtlAddAccessAllowedAce(Dacl,
+ ACL_REVISION,
+ SYNCHRONIZE | EVENT_QUERY_STATE | READ_CONTROL,
+ SeWorldSid);
+ if (!NT_SUCCESS(Status)) goto CleanUp;
+
+ /* Full rights for the admin */
+ Status = RtlAddAccessAllowedAce(Dacl,
+ ACL_REVISION,
+ EVENT_ALL_ACCESS,
+ SeAliasAdminsSid);
+ if (!NT_SUCCESS(Status)) goto CleanUp;
+
+ /* As well as full rights for the system */
+ Status = RtlAddAccessAllowedAce(Dacl,
+ ACL_REVISION,
+ EVENT_ALL_ACCESS,
+ SeLocalSystemSid);
+ if (!NT_SUCCESS(Status)) goto CleanUp;
+
+ /* Set this DACL inside the SD */
+ Status = RtlSetDaclSecurityDescriptor(&SecurityDescriptor,
+ TRUE,
+ Dacl,
+ FALSE);
+ if (!NT_SUCCESS(Status)) goto CleanUp;
+
+ /* Setup the event attributes, making sure it's a permanent one */
+ InitializeObjectAttributes(&ObjectAttributes,
+ Name,
+ OBJ_KERNEL_HANDLE | OBJ_PERMANENT,
+ NULL,
+ &SecurityDescriptor);
+
+ /* Create the event */
+ Status = ZwCreateEvent(&EventHandle,
+ EVENT_ALL_ACCESS,
+ &ObjectAttributes,
+ NotificationEvent,
+ FALSE);
+CleanUp:
+ /* Free the DACL */
+ ExFreePool(Dacl);
+
+ /* Check if this is the success path */
+ if (NT_SUCCESS(Status))
+ {
+ /* Add a reference to the object, then close the handle we had */
+ Status = ObReferenceObjectByHandle(EventHandle,
+ EVENT_MODIFY_STATE,
+ ExEventObjectType,
+ KernelMode,
+ (PVOID*)Event,
+ NULL);
+ ZwClose(EventHandle);
+ }
+
+ /* Return status */
+ return Status;
+}
+
+BOOLEAN
+NTAPI
+MiInitializeMemoryEvents(VOID)
+{
+ UNICODE_STRING LowString = RTL_CONSTANT_STRING(L"\\KernelObjects\\LowMemoryCondition");
+ UNICODE_STRING HighString = RTL_CONSTANT_STRING(L"\\KernelObjects\\HighMemoryCondition");
+ UNICODE_STRING LowPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\LowPagedPoolCondition");
+ UNICODE_STRING HighPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\HighPagedPoolCondition");
+ UNICODE_STRING LowNonPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\LowNonPagedPoolCondition");
+ UNICODE_STRING HighNonPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\HighNonPagedPoolCondition");
+ NTSTATUS Status;
+
+ /* Check if we have a registry setting */
+ if (MmLowMemoryThreshold)
+ {
+ /* Convert it to pages */
+ MmLowMemoryThreshold *= (_1MB / PAGE_SIZE);
+ }
+ else
+ {
+ /* The low memory threshold is hit when we don't consider that we have "plenty" of free pages anymore */
+ MmLowMemoryThreshold = MmPlentyFreePages;
+
+ /* More than one GB of memory? */
+ if (MmNumberOfPhysicalPages > 0x40000)
+ {
+ /* Start at 32MB, and add another 8MB for each additional GB */
+ MmLowMemoryThreshold = (32 * _1MB) / PAGE_SIZE;
+ MmLowMemoryThreshold += ((MmNumberOfPhysicalPages - 0x40000) >> 7);
+ }
+ else if (MmNumberOfPhysicalPages > 0x8000)
+ {
+ /* For systems with > 128MB RAM, add another 4MB for each 128MB */
+ MmLowMemoryThreshold += ((MmNumberOfPhysicalPages - 0x8000) >> 5);
+ }
+
+ /* Don't let the minimum threshold go past 64MB */
+ MmLowMemoryThreshold = min(MmLowMemoryThreshold, (64 * _1MB) / PAGE_SIZE);
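+
+ /*
+ * Worked example: a 2GB machine has 0x80000 pages, so the threshold becomes
+ * 8192 + ((0x80000 - 0x40000) >> 7) = 10240 pages, i.e. 40MB, which stays
+ * under the 64MB (16384 page) cap applied above.
+ */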
+ }
+
+ /* Check if we have a registry setting */
+ if (MmHighMemoryThreshold)
+ {
+ /* Convert it into pages */
+ MmHighMemoryThreshold *= (_1MB / PAGE_SIZE);
+ }
+ else
+ {
+ /* Otherwise, the default is three times the low memory threshold */
+ MmHighMemoryThreshold = 3 * MmLowMemoryThreshold;
+ ASSERT(MmHighMemoryThreshold > MmLowMemoryThreshold);
+ }
+
+ /* Make sure high threshold is actually higher than the low */
+ MmHighMemoryThreshold = max(MmHighMemoryThreshold, MmLowMemoryThreshold);
+
+ /* Create the memory events for all the thresholds */
+ Status = MiCreateMemoryEvent(&LowString, &MiLowMemoryEvent);
+ if (!NT_SUCCESS(Status)) return FALSE;
+ Status = MiCreateMemoryEvent(&HighString, &MiHighMemoryEvent);
+ if (!NT_SUCCESS(Status)) return FALSE;
+ Status = MiCreateMemoryEvent(&LowPagedPoolString, &MiLowPagedPoolEvent);
+ if (!NT_SUCCESS(Status)) return FALSE;
+ Status = MiCreateMemoryEvent(&HighPagedPoolString, &MiHighPagedPoolEvent);
+ if (!NT_SUCCESS(Status)) return FALSE;
+ Status = MiCreateMemoryEvent(&LowNonPagedPoolString, &MiLowNonPagedPoolEvent);
+ if (!NT_SUCCESS(Status)) return FALSE;
+ Status = MiCreateMemoryEvent(&HighNonPagedPoolString, &MiHighNonPagedPoolEvent);
+ if (!NT_SUCCESS(Status)) return FALSE;
+
+ /* Now setup the pool events */
+ MiInitializePoolEvents();
+
+ /* Set the initial event state */
+ MiNotifyMemoryEvents();
+ return TRUE;
+}
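+
+/*
+ * Illustrative usage sketch (not part of this file): a driver could watch the
+ * low-memory condition along these lines, assuming the usual ZwOpenEvent and
+ * ZwWaitForSingleObject path:
+ *
+ *   UNICODE_STRING Name = RTL_CONSTANT_STRING(L"\\KernelObjects\\LowMemoryCondition");
+ *   OBJECT_ATTRIBUTES Attributes;
+ *   HANDLE Handle;
+ *
+ *   InitializeObjectAttributes(&Attributes, &Name, OBJ_KERNEL_HANDLE, NULL, NULL);
+ *   ZwOpenEvent(&Handle, SYNCHRONIZE, &Attributes);
+ *   ZwWaitForSingleObject(Handle, FALSE, NULL);
+ *
+ * User mode reaches the same named events through CreateMemoryResourceNotification.
+ */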
+
+VOID
+NTAPI
+MiAddHalIoMappings(VOID)
+{
+ PVOID BaseAddress;
+ PMMPTE PointerPde;
+ PMMPTE PointerPte;
+ ULONG i, j, PdeCount;
+ PFN_NUMBER PageFrameIndex;
+
+ /* HAL Heap address -- should be on a PDE boundary */
+ BaseAddress = (PVOID)0xFFC00000;
+ ASSERT(MiAddressToPteOffset(BaseAddress) == 0);
+
+ /* Check how many PDEs the heap has */
+ PointerPde = MiAddressToPde(BaseAddress);
+ PdeCount = PDE_COUNT - ADDR_TO_PDE_OFFSET(BaseAddress);
+ for (i = 0; i < PdeCount; i++)
+ {
+ /* Does the HAL own this mapping? */
+ if ((PointerPde->u.Hard.Valid == 1) &&
+ (PointerPde->u.Hard.LargePage == 0))
+ {
+ /* Get the PTE for it and scan each page */
+ PointerPte = MiAddressToPte(BaseAddress);
+ for (j = 0; j < PTE_COUNT; j++)
+ {
+ /* Does the HAL own this page? */
+ if (PointerPte->u.Hard.Valid == 1)
+ {
+ /* Is the HAL using it for device or I/O mapped memory? */
+ PageFrameIndex = PFN_FROM_PTE(PointerPte);
+ if (!MiGetPfnEntry(PageFrameIndex))
+ {
+ /* FIXME: For PAT, we need to track I/O cache attributes for coherency */
+ DPRINT1("HAL I/O Mapping at %p is unsafe\n", BaseAddress);
+ }
+ }
+
+ /* Move to the next page */
+ BaseAddress = (PVOID)((ULONG_PTR)BaseAddress + PAGE_SIZE);
+ PointerPte++;
+ }
+ }
+ else
+ {
+ /* Move to the next address */
+ BaseAddress = (PVOID)((ULONG_PTR)BaseAddress + PDE_MAPPED_VA);
+ }
+
+ /* Move to the next PDE */
+ PointerPde++;
+ }
+}
+
+VOID
+NTAPI
+MmDumpArmPfnDatabase(VOID)
+{
+ ULONG i;
+ PMMPFN Pfn1;
+ PCHAR Consumer = "Unknown";
+ KIRQL OldIrql;
+ ULONG ActivePages = 0, FreePages = 0, OtherPages = 0;
+
+ KeRaiseIrql(HIGH_LEVEL, &OldIrql);
+
+ //
+ // Loop the PFN database
+ //
+ for (i = 0; i <= MmHighestPhysicalPage; i++)
+ {
+ Pfn1 = MiGetPfnEntry(i);
+ if (!Pfn1) continue;
+
+ //
+ // Get the page location
+ //
+ switch (Pfn1->u3.e1.PageLocation)
+ {
+ case ActiveAndValid:
+
+ Consumer = "Active and Valid";
+ ActivePages++;
+ break;
+
+ case FreePageList:
+
+ Consumer = "Free Page List";
+ FreePages++;
+ break;
+
+ default:
+
+ Consumer = "Other (ASSERT!)";
+ OtherPages++;
+ break;
+ }
+
+ //
+ // Pretty-print the page
+ //
+ DbgPrint("0x%08p:\t%20s\t(%02d.%02d) [%08p-%08p])\n",
+ i << PAGE_SHIFT,
+ Consumer,
+ Pfn1->u3.e2.ReferenceCount,
+ Pfn1->u2.ShareCount,
+ Pfn1->PteAddress,
+ Pfn1->u4.PteFrame);
+ }
+
+ DbgPrint("Active: %d pages\t[%d KB]\n", ActivePages, (ActivePages << PAGE_SHIFT) / 1024);
+ DbgPrint("Free: %d pages\t[%d KB]\n", FreePages, (FreePages << PAGE_SHIFT) / 1024);
+ DbgPrint("Other: %d pages\t[%d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
+
+ KeLowerIrql(OldIrql);
+}
+
PFN_NUMBER
NTAPI
MiPagesInLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock,
MiBuildPagedPool(VOID)
{
PMMPTE PointerPte, PointerPde;
- MMPTE TempPte = HyperTemplatePte;
+ MMPTE TempPte = ValidKernelPte;
PFN_NUMBER PageFrameIndex;
KIRQL OldIrql;
ULONG Size, BitMapSize;
// Get the page frame number for the system page directory
//
PointerPte = MiAddressToPte(PDE_BASE);
- MmSystemPageDirectory = PFN_FROM_PTE(PointerPte);
+ ASSERT(PD_COUNT == 1);
+ MmSystemPageDirectory[0] = PFN_FROM_PTE(PointerPte);
//
// Allocate a system PTE which will hold a copy of the page directory
// page directory array in the old ReactOS Mm is used (but in a less hacky
// way).
//
- TempPte = HyperTemplatePte;
- TempPte.u.Hard.PageFrameNumber = MmSystemPageDirectory;
+ TempPte = ValidKernelPte;
+ ASSERT(PD_COUNT == 1);
+ TempPte.u.Hard.PageFrameNumber = MmSystemPageDirectory[0];
ASSERT(PointerPte->u.Hard.Valid == 0);
ASSERT(TempPte.u.Hard.Valid == 1);
*PointerPte = TempPte;
//
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
- //
- // Allocate a page and map the first paged pool PDE
- //
- PageFrameIndex = MmAllocPage(MC_NPPOOL, 0);
+ /* Allocate a page and map the first paged pool PDE */
+ PageFrameIndex = MiRemoveZeroPage(0);
TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
ASSERT(PointerPde->u.Hard.Valid == 0);
ASSERT(TempPte.u.Hard.Valid == 1);
*PointerPde = TempPte;
+ /* Initialize the PFN entry for it */
+ MiInitializePfnForOtherProcess(PageFrameIndex,
+ PointerPde,
+ MmSystemPageDirectory[(PointerPde - (PMMPTE)PDE_BASE) / PDE_COUNT]);
+
//
// Release the PFN database lock
//
//
InitializePool(PagedPool, 0);
- //
- // Initialize the paged pool mutex
- //
- KeInitializeGuardedMutex(&MmPagedPoolMutex);
+ /* Default low threshold of 30MB or one fifth of paged pool */
+ MiLowPagedPoolThreshold = (30 * _1MB) >> PAGE_SHIFT;
+ MiLowPagedPoolThreshold = min(MiLowPagedPoolThreshold, Size / 5);
+
+ /* Default high threshold of 60MB or two fifths (40%) of paged pool */
+ MiHighPagedPoolThreshold = (60 * _1MB) >> PAGE_SHIFT;
+ MiHighPagedPoolThreshold = min(MiHighPagedPoolThreshold, (Size * 2) / 5);
+ ASSERT(MiLowPagedPoolThreshold < MiHighPagedPoolThreshold);
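+
+ /*
+ * Worked example, assuming Size holds the paged pool size in pages as the
+ * comments above imply: with a 128MB paged pool (32768 pages), the low
+ * threshold becomes min(7680, 6553) = 6553 pages (~25MB) and the high one
+ * min(15360, 13107) = 13107 pages (~51MB), so the assert holds.
+ */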
}
NTSTATUS
IncludeType[LoaderBBTMemory] = FALSE;
if (Phase == 0)
{
+ /* Initialize the phase 0 temporary event */
+ KeInitializeEvent(&MiTempEvent, NotificationEvent, FALSE);
+
+ /* Set all the events to use the temporary event for now */
+ MiLowMemoryEvent = &MiTempEvent;
+ MiHighMemoryEvent = &MiTempEvent;
+ MiLowPagedPoolEvent = &MiTempEvent;
+ MiHighPagedPoolEvent = &MiTempEvent;
+ MiLowNonPagedPoolEvent = &MiTempEvent;
+ MiHighNonPagedPoolEvent = &MiTempEvent;
+
//
// Define the basic user vs. kernel address space separation
//
MmUserProbeAddress = (ULONG_PTR)MmSystemRangeStart - 0x10000;
MmHighestUserAddress = (PVOID)(MmUserProbeAddress - 1);
+ /* Highest PTE and PDE based on the addresses above */
+ MiHighestUserPte = MiAddressToPte(MmHighestUserAddress);
+ MiHighestUserPde = MiAddressToPde(MmHighestUserAddress);
+
//
// Get the size of the boot loader's image allocations and then round
// that region up to a PDE size, so that any PDEs we might create for
//
MmBootImageSize = KeLoaderBlock->Extension->LoaderPagesSpanned;
MmBootImageSize *= PAGE_SIZE;
- MmBootImageSize = (MmBootImageSize + (4 * 1024 * 1024) - 1) & ~((4 * 1024 * 1024) - 1);
- ASSERT((MmBootImageSize % (4 * 1024 * 1024)) == 0);
+ MmBootImageSize = (MmBootImageSize + PDE_MAPPED_VA - 1) & ~(PDE_MAPPED_VA - 1);
+ ASSERT((MmBootImageSize % PDE_MAPPED_VA) == 0);
//
// Set the size of session view, pool, and image
//
MiSystemViewStart = (PVOID)((ULONG_PTR)MmSessionBase -
MmSystemViewSize);
+
+ /* Compute the PTE addresses for all the addresses we carved out */
+ MiSessionImagePteStart = MiAddressToPte(MiSessionImageStart);
+ MiSessionImagePteEnd = MiAddressToPte(MiSessionImageEnd);
+ MiSessionBasePte = MiAddressToPte(MmSessionBase);
+ MiSessionLastPte = MiAddressToPte(MiSessionSpaceEnd);
+
+ /* Initialize the user mode image list */
+ InitializeListHead(&MmLoadedUserImageList);
+
+ /* Initialize the paged pool mutex */
+ KeInitializeGuardedMutex(&MmPagedPoolMutex);
+
+ /* Initialize the Loader Lock */
+ KeInitializeMutant(&MmSystemLoadLock, FALSE);
//
// Count physical pages on the system
}
}
- //
- // Size up paged pool and build the shadow system page directory
- //
+ /* Look for large page cache entries that need caching */
+ MiSyncCachedRanges();
+
+ /* Scan the HAL heap for I/O device mappings that need coherency tracking */
+ MiAddHalIoMappings();
+
+ /* Set the initial resident page count */
+ MmResidentAvailablePages = MmAvailablePages - 32;
+
+ /* Initialize large page structures on PAE/x64, and MmProcessList on x86 */
+ MiInitializeLargePageSupport();
+
+ /* Check if the registry says any drivers should be loaded with large pages */
+ MiInitializeDriverLargePageList();
+
+ /* Relocate the boot drivers into system PTE space and fixup their PFNs */
+ MiReloadBootLoadedDrivers(LoaderBlock);
+
+ /* FIXME: Call out into Driver Verifier for initialization */
+
+ /* Check how many pages the system has */
+ if (MmNumberOfPhysicalPages <= ((13 * _1MB) / PAGE_SIZE))
+ {
+ /* Set small system */
+ MmSystemSize = MmSmallSystem;
+ }
+ else if (MmNumberOfPhysicalPages <= ((19 * _1MB) / PAGE_SIZE))
+ {
+ /* Set small system and add 100 pages for the cache */
+ MmSystemSize = MmSmallSystem;
+ MmSystemCacheWsMinimum += 100;
+ }
+ else
+ {
+ /* Set medium system and add 400 pages for the cache */
+ MmSystemSize = MmMediumSystem;
+ MmSystemCacheWsMinimum += 400;
+ }
+
+ /* Check for less than 24MB */
+ if (MmNumberOfPhysicalPages < ((24 * _1MB) / PAGE_SIZE))
+ {
+ /* No more than 32 pages */
+ MmSystemCacheWsMinimum = 32;
+ }
+
+ /* Check for more than 32MB */
+ if (MmNumberOfPhysicalPages >= ((32 * _1MB) / PAGE_SIZE))
+ {
+ /* Check for product type being "Wi" for WinNT */
+ if (MmProductType == '\0i\0W')
+ {
+ /* Then this is a large system */
+ MmSystemSize = MmLargeSystem;
+ }
+ else
+ {
+ /* For servers, we need 64MB to consider this as being large */
+ if (MmNumberOfPhysicalPages >= ((64 * _1MB) / PAGE_SIZE))
+ {
+ /* Set it as large */
+ MmSystemSize = MmLargeSystem;
+ }
+ }
+ }
+
+ /* Check for more than 33 MB */
+ if (MmNumberOfPhysicalPages > ((33 * _1MB) / PAGE_SIZE))
+ {
+ /* Add another 500 pages to the cache */
+ MmSystemCacheWsMinimum += 500;
+ }
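+
+ /*
+ * To summarize the ladder above: <=13MB is a small system, <=19MB a small
+ * system with a +100 page cache boost, anything larger starts as a medium
+ * system with +400 pages; <24MB clamps the cache minimum back to 32 pages,
+ * >=32MB workstations (or >=64MB servers) become large systems, and >33MB
+ * adds a final +500 pages to the cache minimum.
+ */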
+
+ /* Now setup the shared user data fields */
+ ASSERT(SharedUserData->NumberOfPhysicalPages == 0);
+ SharedUserData->NumberOfPhysicalPages = MmNumberOfPhysicalPages;
+ SharedUserData->LargePageMinimum = 0;
+
+ /* Check for workstation (Wi for WinNT) */
+ if (MmProductType == '\0i\0W')
+ {
+ /* Set Windows NT Workstation product type */
+ SharedUserData->NtProductType = NtProductWinNt;
+ MmProductType = 0;
+ }
+ else
+ {
+ /* Check for LanMan server */
+ if (MmProductType == '\0a\0L')
+ {
+ /* This is a domain controller */
+ SharedUserData->NtProductType = NtProductLanManNt;
+ }
+ else
+ {
+ /* Otherwise it must be a normal server */
+ SharedUserData->NtProductType = NtProductServer;
+ }
+
+ /* Set the product type, and make the system more aggressive with low memory */
+ MmProductType = 1;
+ MmMinimumFreePages = 81;
+ }
+
+ /* Update working set tuning parameters */
+ MiAdjustWorkingSetManagerParameters(!MmProductType);
+
+ /* Fine-tune the page count by removing the working set and NP expansion charges */
+ MmResidentAvailablePages -= MiExpansionPoolPagesInitialCharge;
+ MmResidentAvailablePages -= MmSystemCacheWsMinimum;
+ MmResidentAvailableAtInit = MmResidentAvailablePages;
+ if (MmResidentAvailablePages <= 0)
+ {
+ /* This should not happen */
+ DPRINT1("System cache working set too big\n");
+ return FALSE;
+ }
+
+ /* Size up paged pool and build the shadow system page directory */
MiBuildPagedPool();
+
+ /* Debugger physical memory support is now ready to be used */
+ MiDbgReadyForPhysical = TRUE;
+
+ /* Initialize the loaded module list */
+ MiInitializeLoadedModuleList(LoaderBlock);
}
//