// figure out the most appropriate values.
//
ULONG MmMaximumNonPagedPoolPercent;
-ULONG MmSizeOfNonPagedPoolInBytes;
-ULONG MmMaximumNonPagedPoolInBytes;
+SIZE_T MmSizeOfNonPagedPoolInBytes;
+SIZE_T MmMaximumNonPagedPoolInBytes;
/* Some of the same values, in pages */
PFN_NUMBER MmMaximumNonPagedPoolInPages;
// They are described on http://support.microsoft.com/default.aspx/kb/126402/ja
// along with the algorithm that uses them, which is implemented later below.
//
-ULONG MmMinimumNonPagedPoolSize = 256 * 1024;
+SIZE_T MmMinimumNonPagedPoolSize = 256 * 1024;
ULONG MmMinAdditionNonPagedPoolPerMb = 32 * 1024;
-ULONG MmDefaultMaximumNonPagedPool = 1024 * 1024;
+SIZE_T MmDefaultMaximumNonPagedPool = 1024 * 1024;
ULONG MmMaxAdditionNonPagedPoolPerMb = 400 * 1024;
//
//
// And this is its default size
//
-ULONG MmSizeOfPagedPoolInBytes = MI_MIN_INIT_PAGED_POOLSIZE;
+SIZE_T MmSizeOfPagedPoolInBytes = MI_MIN_INIT_PAGED_POOLSIZE;
PFN_NUMBER MmSizeOfPagedPoolInPages = MI_MIN_INIT_PAGED_POOLSIZE / PAGE_SIZE;
//
PVOID MiSessionPoolEnd; // 0xBE000000
PVOID MiSessionPoolStart; // 0xBD000000
PVOID MmSessionBase; // 0xBD000000
-ULONG MmSessionSize;
-ULONG MmSessionViewSize;
-ULONG MmSessionPoolSize;
-ULONG MmSessionImageSize;
+SIZE_T MmSessionSize;
+SIZE_T MmSessionViewSize;
+SIZE_T MmSessionPoolSize;
+SIZE_T MmSessionImageSize;
+
+/*
+ * These are the PTE addresses of the boundaries carved out above
+ */
+PMMPTE MiSessionImagePteStart;
+PMMPTE MiSessionImagePteEnd;
+PMMPTE MiSessionBasePte;
+PMMPTE MiSessionLastPte;
//
// The system view space, on the other hand, is where sections that are memory
// By default, it is a 16MB region.
//
PVOID MiSystemViewStart;
-ULONG MmSystemViewSize;
+SIZE_T MmSystemViewSize;
//
// A copy of the system page directory (the page directory associated with the
// map paged pool PDEs into external processes when they fault on a paged pool
// address.
//
-PFN_NUMBER MmSystemPageDirectory;
+PFN_NUMBER MmSystemPageDirectory[PD_COUNT];
PMMPTE MmSystemPagePtes;
//
// On systems with more than 32MB, this number is then doubled, and further
// aligned up to a PDE boundary (4MB).
//
-ULONG MmNumberOfSystemPtes;
+ULONG_PTR MmNumberOfSystemPtes;
//
// This is how many pages the PFN database will take up
// In Windows, this includes the Quark Color Table, but not in ARM³
//
-ULONG MxPfnAllocation;
+PFN_NUMBER MxPfnAllocation;
//
// Unlike the old ReactOS Memory Manager, ARM³ (and Windows) does not keep track
//
// This is where we keep track of the most basic physical layout markers
//
-ULONG MmNumberOfPhysicalPages, MmHighestPhysicalPage, MmLowestPhysicalPage = -1;
+PFN_NUMBER MmNumberOfPhysicalPages, MmHighestPhysicalPage, MmLowestPhysicalPage = -1;
//
// The total number of pages mapped by the boot loader, which include the kernel
//
// This number is later aligned up to a PDE boundary.
//
-ULONG MmBootImageSize;
+SIZE_T MmBootImageSize;
//
// These three variables keep track of the core separation of address space that
// exists between kernel mode and user mode.
//
-ULONG MmUserProbeAddress;
+ULONG_PTR MmUserProbeAddress;
PVOID MmHighestUserAddress;
PVOID MmSystemRangeStart;
+/* And these store the respective highest PTE/PDE address */
+PMMPTE MiHighestUserPte;
+PMMPDE MiHighestUserPde;
+
+/* These variables define the system cache address space */
PVOID MmSystemCacheStart;
PVOID MmSystemCacheEnd;
MMSUPPORT MmSystemCacheWs;
ULONG MmProductType;
MM_SYSTEMSIZE MmSystemSize;
+/*
+ * These values store the cache working set minimums and maximums, in pages
+ *
+ * The minimum value is boosted on systems with more than 24MB of RAM, and cut
+ * down to only 32 pages on embedded (<24MB RAM) systems.
+ *
+ * An extra boost of 2MB is given on systems with more than 33MB of RAM.
+ */
+PFN_NUMBER MmSystemCacheWsMinimum = 288;
+PFN_NUMBER MmSystemCacheWsMaximum = 350;
+
+/* FIXME: Move to cache/working set code later */
+BOOLEAN MmLargeSystemCache;
+
/* PRIVATE FUNCTIONS **********************************************************/
//
MMPTE TempPte = ValidKernelPte;
/* The color table starts after the ARM3 PFN database */
- MmFreePagesByColor[0] = (PMMCOLOR_TABLES)&MmPfnDatabase[1][MmHighestPhysicalPage + 1];
+ MmFreePagesByColor[0] = (PMMCOLOR_TABLES)&MmPfnDatabase[MmHighestPhysicalPage + 1];
/* Loop the PTEs. We have two color tables for each secondary color */
PointerPte = MiAddressToPte(&MmFreePagesByColor[0][0]);
{
/* Get a page and map it */
TempPte.u.Hard.PageFrameNumber = MxGetNextPage(1);
- ASSERT(TempPte.u.Hard.Valid == 1);
- *PointerPte = TempPte;
-
+ MI_WRITE_VALID_PTE(PointerPte, TempPte);
+
/* Zero out the page */
RtlZeroMemory(MiPteToAddress(PointerPte), PAGE_SIZE);
}
}
/* Get the PTEs for this range */
- PointerPte = MiAddressToPte(&MmPfnDatabase[0][BasePage]);
- LastPte = MiAddressToPte(((ULONG_PTR)&MmPfnDatabase[0][BasePage + PageCount]) - 1);
- DPRINT("MD Type: %lx Base: %lx Count: %lx\n", MdBlock->MemoryType, BasePage, PageCount);
-
- /* Loop them */
- while (PointerPte <= LastPte)
- {
- /* We'll only touch PTEs that aren't already valid */
- if (PointerPte->u.Hard.Valid == 0)
- {
- /* Use the next free page */
- TempPte.u.Hard.PageFrameNumber = FreePage;
- ASSERT(FreePageCount != 0);
-
- /* Consume free pages */
- FreePage++;
- FreePageCount--;
- if (!FreePageCount)
- {
- /* Out of memory */
- KeBugCheckEx(INSTALL_MORE_MEMORY,
- MmNumberOfPhysicalPages,
- FreePageCount,
- MxOldFreeDescriptor.PageCount,
- 1);
- }
-
- /* Write out this PTE */
- PagesLeft++;
- ASSERT(PointerPte->u.Hard.Valid == 0);
- ASSERT(TempPte.u.Hard.Valid == 1);
- *PointerPte = TempPte;
-
- /* Zero this page */
- RtlZeroMemory(MiPteToAddress(PointerPte), PAGE_SIZE);
- }
-
- /* Next! */
- PointerPte++;
- }
-
- /* Get the PTEs for this range */
- PointerPte = MiAddressToPte(&MmPfnDatabase[1][BasePage]);
- LastPte = MiAddressToPte(((ULONG_PTR)&MmPfnDatabase[1][BasePage + PageCount]) - 1);
+ PointerPte = MiAddressToPte(&MmPfnDatabase[BasePage]);
+ LastPte = MiAddressToPte(((ULONG_PTR)&MmPfnDatabase[BasePage + PageCount]) - 1);
DPRINT("MD Type: %lx Base: %lx Count: %lx\n", MdBlock->MemoryType, BasePage, PageCount);
/* Loop them */
/* Write out this PTE */
PagesLeft++;
- ASSERT(PointerPte->u.Hard.Valid == 0);
- ASSERT(TempPte.u.Hard.Valid == 1);
- *PointerPte = TempPte;
+ MI_WRITE_VALID_PTE(PointerPte, TempPte);
/* Zero this page */
RtlZeroMemory(MiPteToAddress(PointerPte), PAGE_SIZE);
/* Next! */
PointerPte++;
}
-
+
/* Do the next address range */
NextEntry = MdBlock->ListEntry.Flink;
}
if (MiIsRegularMemory(LoaderBlock, PageFrameIndex))
{
/* Yes we do, set it up */
- Pfn1 = MI_PFN_TO_PFNENTRY(PageFrameIndex);
+ Pfn1 = MiGetPfnEntry(PageFrameIndex);
Pfn1->u4.PteFrame = StartupPdIndex;
Pfn1->PteAddress = PointerPde;
Pfn1->u2.ShareCount++;
MmSizeOfNonPagedPoolInBytes)))
{
/* Get the PFN entry and make sure it too is valid */
- Pfn2 = MI_PFN_TO_PFNENTRY(PtePageIndex);
+ Pfn2 = MiGetPfnEntry(PtePageIndex);
if ((MmIsAddressValid(Pfn2)) &&
(MmIsAddressValid(Pfn2 + 1)))
{
else
{
/* Next PDE mapped address */
- BaseAddress += PTE_COUNT * PAGE_SIZE;
+ BaseAddress += PDE_MAPPED_VA;
}
/* Next PTE */
PMMPDE PointerPde;
/* Grab the lowest page and check if it has no real references */
- Pfn1 = MI_PFN_TO_PFNENTRY(MmLowestPhysicalPage);
+ Pfn1 = MiGetPfnEntry(MmLowestPhysicalPage);
if (!(MmLowestPhysicalPage) && !(Pfn1->u3.e2.ReferenceCount))
{
/* Make it a bogus page to catch errors */
PMMPFN Pfn1;
PMMPTE PointerPte;
PMMPDE PointerPde;
+ KIRQL OldIrql;
/* Now loop through the descriptors */
NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
/* Get the last page of this descriptor. Note we loop backwards */
PageFrameIndex += PageCount - 1;
- Pfn1 = MI_PFN_TO_PFNENTRY(PageFrameIndex);
+ Pfn1 = MiGetPfnEntry(PageFrameIndex);
+
+ /* Lock the PFN Database */
+ OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
while (PageCount--)
{
/* If the page really has no references, mark it as free */
if (!Pfn1->u3.e2.ReferenceCount)
{
+ /* Add it to the free list */
Pfn1->u3.e1.CacheAttribute = MiNonCached;
- //MiInsertPageInFreeList(PageFrameIndex);
+ MiInsertPageInFreeList(PageFrameIndex);
}
/* Go to the next page */
PageFrameIndex--;
}
+ /* Release PFN database */
+ KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
+
/* Done with this block */
break;
/* Map these pages with the KSEG0 mapping that adds 0x80000000 */
PointerPte = MiAddressToPte(KSEG0_BASE + (PageFrameIndex << PAGE_SHIFT));
- Pfn1 = MI_PFN_TO_PFNENTRY(PageFrameIndex);
+ Pfn1 = MiGetPfnEntry(PageFrameIndex);
while (PageCount--)
{
/* Check if the page is really unused */
PMMPFN Pfn1;
/* Loop the PFN database page */
- PointerPte = MiAddressToPte(MI_PFN_TO_PFNENTRY(MmLowestPhysicalPage));
- LastPte = MiAddressToPte(MI_PFN_TO_PFNENTRY(MmHighestPhysicalPage));
+ PointerPte = MiAddressToPte(MiGetPfnEntry(MmLowestPhysicalPage));
+ LastPte = MiAddressToPte(MiGetPfnEntry(MmHighestPhysicalPage));
while (PointerPte <= LastPte)
{
/* Make sure the page is valid */
if (PointerPte->u.Hard.Valid == 1)
{
/* Get the PFN entry and just mark it referenced */
- Pfn1 = MI_PFN_TO_PFNENTRY(PointerPte->u.Hard.PageFrameNumber);
+ Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
Pfn1->u2.ShareCount = 1;
Pfn1->u3.e2.ReferenceCount = 1;
}
return TRUE;
}
+VOID
+NTAPI
+MiAddHalIoMappings(VOID)
+{
+ PVOID BaseAddress;
+ PMMPTE PointerPde;
+ PMMPTE PointerPte;
+ ULONG i, j, PdeCount;
+ PFN_NUMBER PageFrameIndex;
+
+ /* HAL Heap address -- should be on a PDE boundary */
+ BaseAddress = (PVOID)0xFFC00000;
+ ASSERT(MiAddressToPteOffset(BaseAddress) == 0);
+
+ /* Check how many PDEs the heap has */
+ PointerPde = MiAddressToPde(BaseAddress);
+ PdeCount = PDE_COUNT - ADDR_TO_PDE_OFFSET(BaseAddress);
+ for (i = 0; i < PdeCount; i++)
+ {
+ /* Does the HAL own this mapping? */
+ if ((PointerPde->u.Hard.Valid == 1) &&
+ (PointerPde->u.Hard.LargePage == 0))
+ {
+ /* Get the PTE for it and scan each page */
+ PointerPte = MiAddressToPte(BaseAddress);
+ for (j = 0 ; j < PTE_COUNT; j++)
+ {
+ /* Does the HAL own this page? */
+ if (PointerPte->u.Hard.Valid == 1)
+ {
+ /* Is the HAL using it for device or I/O mapped memory? */
+ PageFrameIndex = PFN_FROM_PTE(PointerPte);
+ if (!MiGetPfnEntry(PageFrameIndex))
+ {
+ /* FIXME: For PAT, we need to track I/O cache attributes for coherency */
+ DPRINT1("HAL I/O Mapping at %p is unsafe\n", BaseAddress);
+ }
+ }
+
+ /* Move to the next page */
+ BaseAddress = (PVOID)((ULONG_PTR)BaseAddress + PAGE_SIZE);
+ PointerPte++;
+ }
+ }
+ else
+ {
+ /* Move to the next address */
+ BaseAddress = (PVOID)((ULONG_PTR)BaseAddress + PDE_MAPPED_VA);
+ }
+
+ /* Move to the next PDE */
+ PointerPde++;
+ }
+}
+
VOID
NTAPI
MmDumpArmPfnDatabase(VOID)
//
for (i = 0; i <= MmHighestPhysicalPage; i++)
{
- Pfn1 = MI_PFN_TO_PFNENTRY(i);
+ Pfn1 = MiGetPfnEntry(i);
if (!Pfn1) continue;
//
// Get the page frame number for the system page directory
//
PointerPte = MiAddressToPte(PDE_BASE);
- MmSystemPageDirectory = PFN_FROM_PTE(PointerPte);
+ ASSERT(PD_COUNT == 1);
+ MmSystemPageDirectory[0] = PFN_FROM_PTE(PointerPte);
//
// Allocate a system PTE which will hold a copy of the page directory
// way).
//
TempPte = ValidKernelPte;
- TempPte.u.Hard.PageFrameNumber = MmSystemPageDirectory;
- ASSERT(PointerPte->u.Hard.Valid == 0);
- ASSERT(TempPte.u.Hard.Valid == 1);
- *PointerPte = TempPte;
+ ASSERT(PD_COUNT == 1);
+ TempPte.u.Hard.PageFrameNumber = MmSystemPageDirectory[0];
+ MI_WRITE_VALID_PTE(PointerPte, TempPte);
//
// Let's get back to paged pool work: size it up.
//
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
- //
- // Allocate a page and map the first paged pool PDE
- //
- PageFrameIndex = MmAllocPage(MC_NPPOOL);
+ /* Allocate a page and map the first paged pool PDE */
+ PageFrameIndex = MiRemoveZeroPage(0);
TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
- ASSERT(PointerPde->u.Hard.Valid == 0);
- ASSERT(TempPte.u.Hard.Valid == 1);
- *PointerPde = TempPte;
+ MI_WRITE_VALID_PTE(PointerPde, TempPte);
+
+ /* Initialize the PFN entry for it */
+ MiInitializePfnForOtherProcess(PageFrameIndex,
+ PointerPde,
+ MmSystemPageDirectory[(PointerPde - (PMMPTE)PDE_BASE) / PDE_COUNT]);
//
// Release the PFN database lock
MmUserProbeAddress = (ULONG_PTR)MmSystemRangeStart - 0x10000;
MmHighestUserAddress = (PVOID)(MmUserProbeAddress - 1);
+ /* Highest PTE and PDE based on the addresses above */
+ MiHighestUserPte = MiAddressToPte(MmHighestUserAddress);
+ MiHighestUserPde = MiAddressToPde(MmHighestUserAddress);
+
//
// Get the size of the boot loader's image allocations and then round
// that region up to a PDE size, so that any PDEs we might create for
//
MmBootImageSize = KeLoaderBlock->Extension->LoaderPagesSpanned;
MmBootImageSize *= PAGE_SIZE;
- MmBootImageSize = (MmBootImageSize + (4 * 1024 * 1024) - 1) & ~((4 * 1024 * 1024) - 1);
- ASSERT((MmBootImageSize % (4 * 1024 * 1024)) == 0);
+ MmBootImageSize = (MmBootImageSize + PDE_MAPPED_VA - 1) & ~(PDE_MAPPED_VA - 1);
+ ASSERT((MmBootImageSize % PDE_MAPPED_VA) == 0);
//
// Set the size of session view, pool, and image
//
MiSystemViewStart = (PVOID)((ULONG_PTR)MmSessionBase -
MmSystemViewSize);
-
+
+ /* Compute the PTE addresses for all the addresses we carved out */
+ MiSessionImagePteStart = MiAddressToPte(MiSessionImageStart);
+ MiSessionImagePteEnd = MiAddressToPte(MiSessionImageEnd);
+ MiSessionBasePte = MiAddressToPte(MmSessionBase);
+ MiSessionLastPte = MiAddressToPte(MiSessionSpaceEnd);
/* Initialize the user mode image list */
InitializeListHead(&MmLoadedUserImageList);
// Sync us up with ReactOS Mm
//
MiSyncARM3WithROS(MmNonPagedSystemStart, (PVOID)((ULONG_PTR)MmNonPagedPoolEnd - 1));
- MiSyncARM3WithROS(MmPfnDatabase[0], (PVOID)((ULONG_PTR)MmNonPagedPoolStart + MmSizeOfNonPagedPoolInBytes - 1));
+ MiSyncARM3WithROS(MmPfnDatabase, (PVOID)((ULONG_PTR)MmNonPagedPoolStart + MmSizeOfNonPagedPoolInBytes - 1));
MiSyncARM3WithROS((PVOID)HYPER_SPACE, (PVOID)(HYPER_SPACE + PAGE_SIZE - 1));
//
}
}
- //
- // Size up paged pool and build the shadow system page directory
- //
- MiBuildPagedPool();
-
+ /* Look for large page cache entries that need caching */
+ MiSyncCachedRanges();
+
+ /* Loop for HAL Heap I/O device mappings that need coherency tracking */
+ MiAddHalIoMappings();
+
+ /* Set the initial resident page count */
+ MmResidentAvailablePages = MmAvailablePages - 32;
+
+ /* Initialize large page structures on PAE/x64, and MmProcessList on x86 */
+ MiInitializeLargePageSupport();
+
+ /* Check if the registry says any drivers should be loaded with large pages */
+ MiInitializeDriverLargePageList();
+
+ /* Relocate the boot drivers into system PTE space and fixup their PFNs */
+ MiReloadBootLoadedDrivers(LoaderBlock);
+
+ /* FIXME: Call out into Driver Verifier for initialization */
+
/* Check how many pages the system has */
if (MmNumberOfPhysicalPages <= (13 * _1MB))
{
}
else if (MmNumberOfPhysicalPages <= (19 * _1MB))
{
- /* Set small system */
+ /* Set small system and add 100 pages for the cache */
MmSystemSize = MmSmallSystem;
+ MmSystemCacheWsMinimum += 100;
}
else
{
- /* Set medium system */
+ /* Set medium system and add 400 pages for the cache */
MmSystemSize = MmMediumSystem;
+ MmSystemCacheWsMinimum += 400;
+ }
+
+ /* Check for less than 24MB */
+ if (MmNumberOfPhysicalPages < ((24 * _1MB) / PAGE_SIZE))
+ {
+ /* No more than 32 pages */
+ MmSystemCacheWsMinimum = 32;
}
/* Check for more than 32MB */
}
}
}
-
+
+ /* Check for more than 33 MB */
+ if (MmNumberOfPhysicalPages > ((33 * _1MB) / PAGE_SIZE))
+ {
+ /* Add another 500 pages to the cache */
+ MmSystemCacheWsMinimum += 500;
+ }
+
/* Now setup the shared user data fields */
ASSERT(SharedUserData->NumberOfPhysicalPages == 0);
SharedUserData->NumberOfPhysicalPages = MmNumberOfPhysicalPages;
/* Update working set tuning parameters */
MiAdjustWorkingSetManagerParameters(!MmProductType);
+
+ /* Finetune the page count by removing working set and NP expansion */
+ MmResidentAvailablePages -= MiExpansionPoolPagesInitialCharge;
+ MmResidentAvailablePages -= MmSystemCacheWsMinimum;
+ MmResidentAvailableAtInit = MmResidentAvailablePages;
+ if (MmResidentAvailablePages <= 0)
+ {
+ /* This should not happen */
+ DPRINT1("System cache working set too big\n");
+ return FALSE;
+ }
+
+ /* Size up paged pool and build the shadow system page directory */
+ MiBuildPagedPool();
+
+ /* Debugger physical memory support is now ready to be used */
+ MiDbgReadyForPhysical = TRUE;
+
+ /* Initialize the loaded module list */
+ MiInitializeLoadedModuleList(LoaderBlock);
}
//