X-Git-Url: https://git.reactos.org/?p=reactos.git;a=blobdiff_plain;f=ntoskrnl%2Fmm%2FARM3%2Fmminit.c;h=28358f1b53e88f08c9e6b0b0bb44d951f02ec3fb;hp=89d082e4f95a37cc1f3972fb442e4f681ea7ad90;hb=82822656c3b240860ac8d5db139c56285634e8a5;hpb=62473c84e9d9ea0fe9b61f17b594a6d85c3b7b7b diff --git a/ntoskrnl/mm/ARM3/mminit.c b/ntoskrnl/mm/ARM3/mminit.c index 89d082e4f95..28358f1b53e 100644 --- a/ntoskrnl/mm/ARM3/mminit.c +++ b/ntoskrnl/mm/ARM3/mminit.c @@ -23,8 +23,8 @@ // figure out the most appropriate values. // ULONG MmMaximumNonPagedPoolPercent; -ULONG MmSizeOfNonPagedPoolInBytes; -ULONG MmMaximumNonPagedPoolInBytes; +SIZE_T MmSizeOfNonPagedPoolInBytes; +SIZE_T MmMaximumNonPagedPoolInBytes; /* Some of the same values, in pages */ PFN_NUMBER MmMaximumNonPagedPoolInPages; @@ -36,9 +36,9 @@ PFN_NUMBER MmMaximumNonPagedPoolInPages; // They are described on http://support.microsoft.com/default.aspx/kb/126402/ja // along with the algorithm that uses them, which is implemented later below. // -ULONG MmMinimumNonPagedPoolSize = 256 * 1024; +SIZE_T MmMinimumNonPagedPoolSize = 256 * 1024; ULONG MmMinAdditionNonPagedPoolPerMb = 32 * 1024; -ULONG MmDefaultMaximumNonPagedPool = 1024 * 1024; +SIZE_T MmDefaultMaximumNonPagedPool = 1024 * 1024; ULONG MmMaxAdditionNonPagedPoolPerMb = 400 * 1024; // @@ -107,7 +107,7 @@ PVOID MmPagedPoolEnd; // // And this is its default size // -ULONG MmSizeOfPagedPoolInBytes = MI_MIN_INIT_PAGED_POOLSIZE; +SIZE_T MmSizeOfPagedPoolInBytes = MI_MIN_INIT_PAGED_POOLSIZE; PFN_NUMBER MmSizeOfPagedPoolInPages = MI_MIN_INIT_PAGED_POOLSIZE / PAGE_SIZE; // @@ -131,10 +131,18 @@ PVOID MiSessionViewStart; // 0xBE000000 PVOID MiSessionPoolEnd; // 0xBE000000 PVOID MiSessionPoolStart; // 0xBD000000 PVOID MmSessionBase; // 0xBD000000 -ULONG MmSessionSize; -ULONG MmSessionViewSize; -ULONG MmSessionPoolSize; -ULONG MmSessionImageSize; +SIZE_T MmSessionSize; +SIZE_T MmSessionViewSize; +SIZE_T MmSessionPoolSize; +SIZE_T MmSessionImageSize; + +/* + * These are the PTE addresses of the boundaries carved out above + */ +PMMPTE MiSessionImagePteStart; +PMMPTE MiSessionImagePteEnd; +PMMPTE MiSessionBasePte; +PMMPTE MiSessionLastPte; // // The system view space, on the other hand, is where sections that are memory @@ -143,7 +151,7 @@ ULONG MmSessionImageSize; // By default, it is a 16MB region. // PVOID MiSystemViewStart; -ULONG MmSystemViewSize; +SIZE_T MmSystemViewSize; // // A copy of the system page directory (the page directory associated with the @@ -151,7 +159,7 @@ ULONG MmSystemViewSize; // map paged pool PDEs into external processes when they fault on a paged pool // address. // -PFN_NUMBER MmSystemPageDirectory; +PFN_NUMBER MmSystemPageDirectory[PD_COUNT]; PMMPTE MmSystemPagePtes; // @@ -167,13 +175,13 @@ PMMWSL MmSystemCacheWorkingSetList = MI_SYSTEM_CACHE_WS_START; // On systems with more than 32MB, this number is then doubled, and further // aligned up to a PDE boundary (4MB). 
// -ULONG MmNumberOfSystemPtes; +ULONG_PTR MmNumberOfSystemPtes; // // This is how many pages the PFN database will take up // In Windows, this includes the Quark Color Table, but not in ARM³ // -ULONG MxPfnAllocation; +PFN_NUMBER MxPfnAllocation; // // Unlike the old ReactOS Memory Manager, ARM³ (and Windows) does not keep track @@ -197,7 +205,7 @@ PPHYSICAL_MEMORY_DESCRIPTOR MmPhysicalMemoryBlock; // // This is where we keep track of the most basic physical layout markers // -ULONG MmNumberOfPhysicalPages, MmHighestPhysicalPage, MmLowestPhysicalPage = -1; +PFN_NUMBER MmNumberOfPhysicalPages, MmHighestPhysicalPage, MmLowestPhysicalPage = -1; // // The total number of pages mapped by the boot loader, which include the kernel @@ -207,16 +215,21 @@ ULONG MmNumberOfPhysicalPages, MmHighestPhysicalPage, MmLowestPhysicalPage = -1; // // This number is later aligned up to a PDE boundary. // -ULONG MmBootImageSize; +SIZE_T MmBootImageSize; // // These three variables keep track of the core separation of address space that // exists between kernel mode and user mode. // -ULONG MmUserProbeAddress; +ULONG_PTR MmUserProbeAddress; PVOID MmHighestUserAddress; PVOID MmSystemRangeStart; +/* And these store the respective highest PTE/PDE address */ +PMMPTE MiHighestUserPte; +PMMPDE MiHighestUserPde; + +/* These variables define the system cache address space */ PVOID MmSystemCacheStart; PVOID MmSystemCacheEnd; MMSUPPORT MmSystemCacheWs; @@ -301,8 +314,43 @@ PFN_NUMBER MmPlentyFreePages = 400; ULONG MmProductType; MM_SYSTEMSIZE MmSystemSize; +/* + * These values store the cache working set minimums and maximums, in pages + * + * The minimum value is boosted on systems with more than 24MB of RAM, and cut + * down to only 32 pages on embedded (<24MB RAM) systems. + * + * An extra boost of 2MB is given on systems with more than 33MB of RAM. + */ +PFN_NUMBER MmSystemCacheWsMinimum = 288; +PFN_NUMBER MmSystemCacheWsMaximum = 350; + +/* FIXME: Move to cache/working set code later */ +BOOLEAN MmLargeSystemCache; + +/* + * This value determines in how many fragments/chunks the subsection prototype + * PTEs should be allocated when mapping a section object. It is configurable in + * the registry through the MapAllocationFragment parameter. + * + * The default is 64KB on systems with more than 1GB of RAM, 32KB on systems with + * more than 256MB of RAM, and 16KB on systems with less than 256MB of RAM. + * + * The maximum it can be set to is 2MB, and the minimum is 4KB. + */ +SIZE_T MmAllocationFragment; + +/* + * These two values track how much virtual memory can be committed, and when + * expansion should happen. + */ + // FIXME: They should be moved elsewhere since it's not an "init" setting? +SIZE_T MmTotalCommitLimit; +SIZE_T MmTotalCommitLimitMaximum; + /* PRIVATE FUNCTIONS **********************************************************/ +#ifndef _M_AMD64 // // In Bavaria, this is probably a hate crime // @@ -325,6 +373,7 @@ MiSyncARM3WithROS(IN PVOID AddressStart, Pde++; } } +#endif PFN_NUMBER NTAPI @@ -412,7 +461,7 @@ MiInitializeColorTables(VOID) MMPTE TempPte = ValidKernelPte; /* The color table starts after the ARM3 PFN database */ - MmFreePagesByColor[0] = (PMMCOLOR_TABLES)&MmPfnDatabase[1][MmHighestPhysicalPage + 1]; + MmFreePagesByColor[0] = (PMMCOLOR_TABLES)&MmPfnDatabase[MmHighestPhysicalPage + 1]; /* Loop the PTEs. 
We have two color tables for each secondary color */ PointerPte = MiAddressToPte(&MmFreePagesByColor[0][0]); @@ -426,9 +475,8 @@ MiInitializeColorTables(VOID) { /* Get a page and map it */ TempPte.u.Hard.PageFrameNumber = MxGetNextPage(1); - ASSERT(TempPte.u.Hard.Valid == 1); - *PointerPte = TempPte; - + MI_WRITE_VALID_PTE(PointerPte, TempPte); + /* Zero out the page */ RtlZeroMemory(MiPteToAddress(PointerPte), PAGE_SIZE); } @@ -558,8 +606,8 @@ MiMapPfnDatabase(IN PLOADER_PARAMETER_BLOCK LoaderBlock) } /* Get the PTEs for this range */ - PointerPte = MiAddressToPte(&MmPfnDatabase[0][BasePage]); - LastPte = MiAddressToPte(((ULONG_PTR)&MmPfnDatabase[0][BasePage + PageCount]) - 1); + PointerPte = MiAddressToPte(&MmPfnDatabase[BasePage]); + LastPte = MiAddressToPte(((ULONG_PTR)&MmPfnDatabase[BasePage + PageCount]) - 1); DPRINT("MD Type: %lx Base: %lx Count: %lx\n", MdBlock->MemoryType, BasePage, PageCount); /* Loop them */ @@ -587,9 +635,7 @@ MiMapPfnDatabase(IN PLOADER_PARAMETER_BLOCK LoaderBlock) /* Write out this PTE */ PagesLeft++; - ASSERT(PointerPte->u.Hard.Valid == 0); - ASSERT(TempPte.u.Hard.Valid == 1); - *PointerPte = TempPte; + MI_WRITE_VALID_PTE(PointerPte, TempPte); /* Zero this page */ RtlZeroMemory(MiPteToAddress(PointerPte), PAGE_SIZE); @@ -598,49 +644,7 @@ MiMapPfnDatabase(IN PLOADER_PARAMETER_BLOCK LoaderBlock) /* Next! */ PointerPte++; } - - /* Get the PTEs for this range */ - PointerPte = MiAddressToPte(&MmPfnDatabase[1][BasePage]); - LastPte = MiAddressToPte(((ULONG_PTR)&MmPfnDatabase[1][BasePage + PageCount]) - 1); - DPRINT("MD Type: %lx Base: %lx Count: %lx\n", MdBlock->MemoryType, BasePage, PageCount); - - /* Loop them */ - while (PointerPte <= LastPte) - { - /* We'll only touch PTEs that aren't already valid */ - if (PointerPte->u.Hard.Valid == 0) - { - /* Use the next free page */ - TempPte.u.Hard.PageFrameNumber = FreePage; - ASSERT(FreePageCount != 0); - - /* Consume free pages */ - FreePage++; - FreePageCount--; - if (!FreePageCount) - { - /* Out of memory */ - KeBugCheckEx(INSTALL_MORE_MEMORY, - MmNumberOfPhysicalPages, - FreePageCount, - MxOldFreeDescriptor.PageCount, - 1); - } - - /* Write out this PTE */ - PagesLeft++; - ASSERT(PointerPte->u.Hard.Valid == 0); - ASSERT(TempPte.u.Hard.Valid == 1); - *PointerPte = TempPte; - - /* Zero this page */ - RtlZeroMemory(MiPteToAddress(PointerPte), PAGE_SIZE); - } - - /* Next! 
*/ - PointerPte++; - } - + /* Do the next address range */ NextEntry = MdBlock->ListEntry.Flink; } @@ -679,7 +683,7 @@ MiBuildPfnDatabaseFromPages(IN PLOADER_PARAMETER_BLOCK LoaderBlock) if (MiIsRegularMemory(LoaderBlock, PageFrameIndex)) { /* Yes we do, set it up */ - Pfn1 = MI_PFN_TO_PFNENTRY(PageFrameIndex); + Pfn1 = MiGetPfnEntry(PageFrameIndex); Pfn1->u4.PteFrame = StartupPdIndex; Pfn1->PteAddress = PointerPde; Pfn1->u2.ShareCount++; @@ -718,7 +722,7 @@ MiBuildPfnDatabaseFromPages(IN PLOADER_PARAMETER_BLOCK LoaderBlock) MmSizeOfNonPagedPoolInBytes))) { /* Get the PFN entry and make sure it too is valid */ - Pfn2 = MI_PFN_TO_PFNENTRY(PtePageIndex); + Pfn2 = MiGetPfnEntry(PtePageIndex); if ((MmIsAddressValid(Pfn2)) && (MmIsAddressValid(Pfn2 + 1))) { @@ -742,7 +746,7 @@ MiBuildPfnDatabaseFromPages(IN PLOADER_PARAMETER_BLOCK LoaderBlock) else { /* Next PDE mapped address */ - BaseAddress += PTE_COUNT * PAGE_SIZE; + BaseAddress += PDE_MAPPED_VA; } /* Next PTE */ @@ -758,7 +762,7 @@ MiBuildPfnDatabaseZeroPage(VOID) PMMPDE PointerPde; /* Grab the lowest page and check if it has no real references */ - Pfn1 = MI_PFN_TO_PFNENTRY(MmLowestPhysicalPage); + Pfn1 = MiGetPfnEntry(MmLowestPhysicalPage); if (!(MmLowestPhysicalPage) && !(Pfn1->u3.e2.ReferenceCount)) { /* Make it a bogus page to catch errors */ @@ -783,6 +787,7 @@ MiBuildPfnDatabaseFromLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock) PMMPFN Pfn1; PMMPTE PointerPte; PMMPDE PointerPde; + KIRQL OldIrql; /* Now loop through the descriptors */ NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink; @@ -833,14 +838,18 @@ MiBuildPfnDatabaseFromLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock) /* Get the last page of this descriptor. Note we loop backwards */ PageFrameIndex += PageCount - 1; - Pfn1 = MI_PFN_TO_PFNENTRY(PageFrameIndex); + Pfn1 = MiGetPfnEntry(PageFrameIndex); + + /* Lock the PFN Database */ + OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock); while (PageCount--) { /* If the page really has no references, mark it as free */ if (!Pfn1->u3.e2.ReferenceCount) { + /* Add it to the free list */ Pfn1->u3.e1.CacheAttribute = MiNonCached; - //MiInsertPageInFreeList(PageFrameIndex); + MiInsertPageInFreeList(PageFrameIndex); } /* Go to the next page */ @@ -848,6 +857,9 @@ MiBuildPfnDatabaseFromLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock) PageFrameIndex--; } + /* Release PFN database */ + KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql); + /* Done with this block */ break; @@ -863,7 +875,7 @@ MiBuildPfnDatabaseFromLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock) /* Map these pages with the KSEG0 mapping that adds 0x80000000 */ PointerPte = MiAddressToPte(KSEG0_BASE + (PageFrameIndex << PAGE_SHIFT)); - Pfn1 = MI_PFN_TO_PFNENTRY(PageFrameIndex); + Pfn1 = MiGetPfnEntry(PageFrameIndex); while (PageCount--) { /* Check if the page is really unused */ @@ -913,15 +925,15 @@ MiBuildPfnDatabaseSelf(VOID) PMMPFN Pfn1; /* Loop the PFN database page */ - PointerPte = MiAddressToPte(MI_PFN_TO_PFNENTRY(MmLowestPhysicalPage)); - LastPte = MiAddressToPte(MI_PFN_TO_PFNENTRY(MmHighestPhysicalPage)); + PointerPte = MiAddressToPte(MiGetPfnEntry(MmLowestPhysicalPage)); + LastPte = MiAddressToPte(MiGetPfnEntry(MmHighestPhysicalPage)); while (PointerPte <= LastPte) { /* Make sure the page is valid */ if (PointerPte->u.Hard.Valid == 1) { /* Get the PFN entry and just mark it referenced */ - Pfn1 = MI_PFN_TO_PFNENTRY(PointerPte->u.Hard.PageFrameNumber); + Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber); Pfn1->u2.ShareCount = 1; 
Pfn1->u3.e2.ReferenceCount = 1; } @@ -1159,6 +1171,61 @@ MiInitializeMemoryEvents(VOID) return TRUE; } +VOID +NTAPI +MiAddHalIoMappings(VOID) +{ + PVOID BaseAddress; + PMMPTE PointerPde; + PMMPTE PointerPte; + ULONG i, j, PdeCount; + PFN_NUMBER PageFrameIndex; + + /* HAL Heap address -- should be on a PDE boundary */ + BaseAddress = (PVOID)0xFFC00000; + ASSERT(MiAddressToPteOffset(BaseAddress) == 0); + + /* Check how many PDEs the heap has */ + PointerPde = MiAddressToPde(BaseAddress); + PdeCount = PDE_COUNT - ADDR_TO_PDE_OFFSET(BaseAddress); + for (i = 0; i < PdeCount; i++) + { + /* Does the HAL own this mapping? */ + if ((PointerPde->u.Hard.Valid == 1) && + (PointerPde->u.Hard.LargePage == 0)) + { + /* Get the PTE for it and scan each page */ + PointerPte = MiAddressToPte(BaseAddress); + for (j = 0 ; j < PTE_COUNT; j++) + { + /* Does the HAL own this page? */ + if (PointerPte->u.Hard.Valid == 1) + { + /* Is the HAL using it for device or I/O mapped memory? */ + PageFrameIndex = PFN_FROM_PTE(PointerPte); + if (!MiGetPfnEntry(PageFrameIndex)) + { + /* FIXME: For PAT, we need to track I/O cache attributes for coherency */ + DPRINT1("HAL I/O Mapping at %p is unsafe\n", BaseAddress); + } + } + + /* Move to the next page */ + BaseAddress = (PVOID)((ULONG_PTR)BaseAddress + PAGE_SIZE); + PointerPte++; + } + } + else + { + /* Move to the next address */ + BaseAddress = (PVOID)((ULONG_PTR)BaseAddress + PDE_MAPPED_VA); + } + + /* Move to the next PDE */ + PointerPde++; + } +} + VOID NTAPI MmDumpArmPfnDatabase(VOID) @@ -1176,7 +1243,7 @@ MmDumpArmPfnDatabase(VOID) // for (i = 0; i <= MmHighestPhysicalPage; i++) { - Pfn1 = MI_PFN_TO_PFNENTRY(i); + Pfn1 = MiGetPfnEntry(i); if (!Pfn1) continue; // @@ -1414,7 +1481,8 @@ MiBuildPagedPool(VOID) // Get the page frame number for the system page directory // PointerPte = MiAddressToPte(PDE_BASE); - MmSystemPageDirectory = PFN_FROM_PTE(PointerPte); + ASSERT(PD_COUNT == 1); + MmSystemPageDirectory[0] = PFN_FROM_PTE(PointerPte); // // Allocate a system PTE which will hold a copy of the page directory @@ -1431,10 +1499,9 @@ MiBuildPagedPool(VOID) // way). // TempPte = ValidKernelPte; - TempPte.u.Hard.PageFrameNumber = MmSystemPageDirectory; - ASSERT(PointerPte->u.Hard.Valid == 0); - ASSERT(TempPte.u.Hard.Valid == 1); - *PointerPte = TempPte; + ASSERT(PD_COUNT == 1); + TempPte.u.Hard.PageFrameNumber = MmSystemPageDirectory[0]; + MI_WRITE_VALID_PTE(PointerPte, TempPte); // // Let's get back to paged pool work: size it up. 
@@ -1502,14 +1569,15 @@ MiBuildPagedPool(VOID) // OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock); - // - // Allocate a page and map the first paged pool PDE - // - PageFrameIndex = MmAllocPage(MC_NPPOOL); + /* Allocate a page and map the first paged pool PDE */ + PageFrameIndex = MiRemoveZeroPage(0); TempPte.u.Hard.PageFrameNumber = PageFrameIndex; - ASSERT(PointerPde->u.Hard.Valid == 0); - ASSERT(TempPte.u.Hard.Valid == 1); - *PointerPde = TempPte; + MI_WRITE_VALID_PTE(PointerPde, TempPte); + + /* Initialize the PFN entry for it */ + MiInitializePfnForOtherProcess(PageFrameIndex, + PointerPde, + MmSystemPageDirectory[(PointerPde - (PMMPTE)PDE_BASE) / PDE_COUNT]); // // Release the PFN database lock @@ -1629,6 +1697,10 @@ MmArmInitSystem(IN ULONG Phase, MmUserProbeAddress = (ULONG_PTR)MmSystemRangeStart - 0x10000; MmHighestUserAddress = (PVOID)(MmUserProbeAddress - 1); + /* Highest PTE and PDE based on the addresses above */ + MiHighestUserPte = MiAddressToPte(MmHighestUserAddress); + MiHighestUserPde = MiAddressToPde(MmHighestUserAddress); + // // Get the size of the boot loader's image allocations and then round // that region up to a PDE size, so that any PDEs we might create for @@ -1637,8 +1709,8 @@ MmArmInitSystem(IN ULONG Phase, // MmBootImageSize = KeLoaderBlock->Extension->LoaderPagesSpanned; MmBootImageSize *= PAGE_SIZE; - MmBootImageSize = (MmBootImageSize + (4 * 1024 * 1024) - 1) & ~((4 * 1024 * 1024) - 1); - ASSERT((MmBootImageSize % (4 * 1024 * 1024)) == 0); + MmBootImageSize = (MmBootImageSize + PDE_MAPPED_VA - 1) & ~(PDE_MAPPED_VA - 1); + ASSERT((MmBootImageSize % PDE_MAPPED_VA) == 0); // // Set the size of session view, pool, and image @@ -1701,7 +1773,12 @@ MmArmInitSystem(IN ULONG Phase, // MiSystemViewStart = (PVOID)((ULONG_PTR)MmSessionBase - MmSystemViewSize); - + + /* Compute the PTE addresses for all the addresses we carved out */ + MiSessionImagePteStart = MiAddressToPte(MiSessionImageStart); + MiSessionImagePteEnd = MiAddressToPte(MiSessionImageEnd); + MiSessionBasePte = MiAddressToPte(MmSessionBase); + MiSessionLastPte = MiAddressToPte(MiSessionSpaceEnd); /* Initialize the user mode image list */ InitializeListHead(&MmLoadedUserImageList); @@ -1744,6 +1821,44 @@ MmArmInitSystem(IN ULONG Phase, DPRINT("System PTE count has been tuned to %d (%d bytes)\n", MmNumberOfSystemPtes, MmNumberOfSystemPtes * PAGE_SIZE); + + /* Initialize the working set lock */ + ExInitializePushLock((PULONG_PTR)&MmSystemCacheWs.WorkingSetMutex); + + /* Set commit limit */ + MmTotalCommitLimit = 2 * _1GB; + MmTotalCommitLimitMaximum = MmTotalCommitLimit; + + /* Has the allocation fragment been setup? 
*/ + if (!MmAllocationFragment) + { + /* Use the default value */ + MmAllocationFragment = MI_ALLOCATION_FRAGMENT; + if (PageCount < ((256 * _1MB) / PAGE_SIZE)) + { + /* On memory systems with less than 256MB, divide by 4 */ + MmAllocationFragment = MI_ALLOCATION_FRAGMENT / 4; + } + else if (PageCount < (_1GB / PAGE_SIZE)) + { + /* On systems with less than 1GB, divide by 2 */ + MmAllocationFragment = MI_ALLOCATION_FRAGMENT / 2; + } + } + else + { + /* Convert from 1KB fragments to pages */ + MmAllocationFragment *= _1KB; + MmAllocationFragment = ROUND_TO_PAGES(MmAllocationFragment); + + /* Don't let it past the maximum */ + MmAllocationFragment = min(MmAllocationFragment, + MI_MAX_ALLOCATION_FRAGMENT); + + /* Don't let it too small either */ + MmAllocationFragment = max(MmAllocationFragment, + MI_MIN_ALLOCATION_FRAGMENT); + } /* Initialize the platform-specific parts */ MiInitMachineDependent(LoaderBlock); @@ -1752,7 +1867,7 @@ MmArmInitSystem(IN ULONG Phase, // Sync us up with ReactOS Mm // MiSyncARM3WithROS(MmNonPagedSystemStart, (PVOID)((ULONG_PTR)MmNonPagedPoolEnd - 1)); - MiSyncARM3WithROS(MmPfnDatabase[0], (PVOID)((ULONG_PTR)MmNonPagedPoolStart + MmSizeOfNonPagedPoolInBytes - 1)); + MiSyncARM3WithROS(MmPfnDatabase, (PVOID)((ULONG_PTR)MmNonPagedPoolStart + MmSizeOfNonPagedPoolInBytes - 1)); MiSyncARM3WithROS((PVOID)HYPER_SPACE, (PVOID)(HYPER_SPACE + PAGE_SIZE - 1)); // @@ -1813,26 +1928,50 @@ MmArmInitSystem(IN ULONG Phase, } } - // - // Size up paged pool and build the shadow system page directory - // - MiBuildPagedPool(); - + /* Look for large page cache entries that need caching */ + MiSyncCachedRanges(); + + /* Loop for HAL Heap I/O device mappings that need coherency tracking */ + MiAddHalIoMappings(); + + /* Set the initial resident page count */ + MmResidentAvailablePages = MmAvailablePages - 32; + + /* Initialize large page structures on PAE/x64, and MmProcessList on x86 */ + MiInitializeLargePageSupport(); + + /* Check if the registry says any drivers should be loaded with large pages */ + MiInitializeDriverLargePageList(); + + /* Relocate the boot drivers into system PTE space and fixup their PFNs */ + MiReloadBootLoadedDrivers(LoaderBlock); + + /* FIXME: Call out into Driver Verifier for initialization */ + /* Check how many pages the system has */ - if (MmNumberOfPhysicalPages <= (13 * _1MB)) + if (MmNumberOfPhysicalPages <= ((13 * _1MB) / PAGE_SIZE)) { /* Set small system */ MmSystemSize = MmSmallSystem; } - else if (MmNumberOfPhysicalPages <= (19 * _1MB)) + else if (MmNumberOfPhysicalPages <= ((19 * _1MB) / PAGE_SIZE)) { - /* Set small system */ + /* Set small system and add 100 pages for the cache */ MmSystemSize = MmSmallSystem; + MmSystemCacheWsMinimum += 100; } else { - /* Set medium system */ + /* Set medium system and add 400 pages for the cache */ MmSystemSize = MmMediumSystem; + MmSystemCacheWsMinimum += 400; + } + + /* Check for less than 24MB */ + if (MmNumberOfPhysicalPages < ((24 * _1MB) / PAGE_SIZE)) + { + /* No more than 32 pages */ + MmSystemCacheWsMinimum = 32; } /* Check for more than 32MB */ @@ -1854,7 +1993,14 @@ MmArmInitSystem(IN ULONG Phase, } } } - + + /* Check for more than 33 MB */ + if (MmNumberOfPhysicalPages > ((33 * _1MB) / PAGE_SIZE)) + { + /* Add another 500 pages to the cache */ + MmSystemCacheWsMinimum += 500; + } + /* Now setup the shared user data fields */ ASSERT(SharedUserData->NumberOfPhysicalPages == 0); SharedUserData->NumberOfPhysicalPages = MmNumberOfPhysicalPages; @@ -1888,6 +2034,34 @@ MmArmInitSystem(IN ULONG Phase, /* 
Update working set tuning parameters */ MiAdjustWorkingSetManagerParameters(!MmProductType); + + /* Finetune the page count by removing working set and NP expansion */ + MmResidentAvailablePages -= MiExpansionPoolPagesInitialCharge; + MmResidentAvailablePages -= MmSystemCacheWsMinimum; + MmResidentAvailableAtInit = MmResidentAvailablePages; + if (MmResidentAvailablePages <= 0) + { + /* This should not happen */ + DPRINT1("System cache working set too big\n"); + return FALSE; + } + + /* Initialize the system cache */ + //MiInitializeSystemCache(MmSystemCacheWsMinimum, MmAvailablePages); + + /* Update the commit limit */ + MmTotalCommitLimit = MmAvailablePages; + if (MmTotalCommitLimit > 1024) MmTotalCommitLimit -= 1024; + MmTotalCommitLimitMaximum = MmTotalCommitLimit; + + /* Size up paged pool and build the shadow system page directory */ + MiBuildPagedPool(); + + /* Debugger physical memory support is now ready to be used */ + MmDebugPte = MiAddressToPte(MiDebugMapping); + + /* Initialize the loaded module list */ + MiInitializeLoadedModuleList(LoaderBlock); } //
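
Several hunks in this diff collapse the recurring ASSERT(PointerPte->u.Hard.Valid == 0); ASSERT(TempPte.u.Hard.Valid == 1); *PointerPte = TempPte; sequence into a single MI_WRITE_VALID_PTE(PointerPte, TempPte) call. Judging only from the code being replaced, the macro's contract is: the target PTE must not already be valid, the value being written must be valid, and the mapping is published with one store. Centralizing the pattern keeps those sanity checks in one place instead of copy-pasted at every call site. The function below is a hypothetical, simplified rendering of that contract for illustration only; the real macro is defined in ReactOS's ARM3 private headers and operates on MMPTE structures, not on the cut-down type used here.

#include <assert.h>

/* Simplified stand-in for the hardware PTE; only the Valid bit matters for
   this illustration, the real MMPTE layout is richer. */
typedef struct
{
    unsigned Valid : 1;
    unsigned Reserved : 11;
    unsigned PageFrameNumber : 20;
} HW_PTE_SKETCH;

/* Hypothetical function-style rendering of MI_WRITE_VALID_PTE, reconstructed
   from the assert/store sequences it replaces in the hunks above. */
static void
WriteValidPteSketch(HW_PTE_SKETCH *PointerPte, HW_PTE_SKETCH TempPte)
{
    assert(PointerPte->Valid == 0);   /* target must not be mapped yet         */
    assert(TempPte.Valid == 1);       /* value being written must be valid     */
    *PointerPte = TempPte;            /* publish the mapping with one store    */
}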
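
The MmAllocationFragment hunk turns the MapAllocationFragment registry override (given in kilobytes) into a page-rounded byte count clamped to the 4 KB..2 MB range described in the new comment block, and when no override is present it scales the 64 KB default down to 32 KB below 1 GB of RAM and 16 KB below 256 MB. The standalone sketch below mirrors that normalization; the FRAG_* constants, the 4 KB page size, and the helper name are stand-ins chosen so the sketch compiles on its own, not the kernel's MI_* macros.

#include <stddef.h>

#define FRAG_PAGE_SIZE   4096u                  /* assumed x86 page size        */
#define FRAG_DEFAULT     (64u * 1024u)          /* 64 KB default per the comment */
#define FRAG_MINIMUM     (4u * 1024u)           /* documented lower bound        */
#define FRAG_MAXIMUM     (2u * 1024u * 1024u)   /* documented upper bound        */

#define FRAG_ROUND_TO_PAGES(x) \
    (((x) + FRAG_PAGE_SIZE - 1) & ~(size_t)(FRAG_PAGE_SIZE - 1))

/* RegistryKilobytes is the raw MapAllocationFragment value (0 if absent);
   PhysicalPages is the machine's physical page count. Returns bytes. */
static size_t
ComputeAllocationFragment(size_t RegistryKilobytes, size_t PhysicalPages)
{
    size_t Fragment;

    if (RegistryKilobytes == 0)
    {
        /* No override: 64 KB, halved under 1 GB, quartered under 256 MB */
        Fragment = FRAG_DEFAULT;
        if (PhysicalPages < ((256u * 1024u * 1024u) / FRAG_PAGE_SIZE))
        {
            Fragment = FRAG_DEFAULT / 4;
        }
        else if (PhysicalPages < ((1024u * 1024u * 1024u) / FRAG_PAGE_SIZE))
        {
            Fragment = FRAG_DEFAULT / 2;
        }
    }
    else
    {
        /* Registry value is in KB: convert, page-align, clamp to [4 KB, 2 MB] */
        Fragment = FRAG_ROUND_TO_PAGES(RegistryKilobytes * 1024);
        if (Fragment > FRAG_MAXIMUM) Fragment = FRAG_MAXIMUM;
        if (Fragment < FRAG_MINIMUM) Fragment = FRAG_MINIMUM;
    }

    return Fragment;
}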
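
The system-size hunks also repair a unit mismatch: MmNumberOfPhysicalPages counts pages (the diff retypes it to PFN_NUMBER), while the old thresholds such as 13 * _1MB were byte totals, so virtually every machine of the era fell into the first branch. Dividing each threshold by PAGE_SIZE restores the intended comparison, and the new code boosts or clamps MmSystemCacheWsMinimum along the way. Below is a compact sketch of the corrected logic, assuming a 4 KB page size and plain stand-in types; in the kernel this is written inline in MmArmInitSystem rather than as a helper.

#include <stddef.h>

#define SKETCH_PAGE_SIZE 4096u
#define SKETCH_1MB       (1024u * 1024u)

typedef enum { SmallSystem, MediumSystem } SYSTEM_SIZE_SKETCH;

/* PhysicalPages is a page count, so every threshold is a byte total divided
   by the page size -- that division is what the hunk adds. CacheWsMinimum
   comes in at its 288-page default from the diff and is adjusted here. */
static SYSTEM_SIZE_SKETCH
ClassifySystemSize(size_t PhysicalPages, size_t *CacheWsMinimum)
{
    SYSTEM_SIZE_SKETCH Size;

    if (PhysicalPages <= ((13 * SKETCH_1MB) / SKETCH_PAGE_SIZE))
    {
        Size = SmallSystem;              /* up to ~13 MB: small, no boost   */
    }
    else if (PhysicalPages <= ((19 * SKETCH_1MB) / SKETCH_PAGE_SIZE))
    {
        Size = SmallSystem;              /* up to ~19 MB: small, +100 pages */
        *CacheWsMinimum += 100;
    }
    else
    {
        Size = MediumSystem;             /* larger: medium, +400 pages      */
        *CacheWsMinimum += 400;
    }

    /* Embedded-class machines (< 24 MB) get the minimum clamped to 32 pages,
       and machines above 33 MB get a further 500-page boost, as in the hunk. */
    if (PhysicalPages < ((24 * SKETCH_1MB) / SKETCH_PAGE_SIZE)) *CacheWsMinimum = 32;
    if (PhysicalPages > ((33 * SKETCH_1MB) / SKETCH_PAGE_SIZE)) *CacheWsMinimum += 500;

    return Size;
}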