#define NDEBUG
#include <debug.h>
-#line 15 "ARM³::INIT"
#define MODULE_INVOLVED_IN_ARM3
#include "miarm.h"
+#undef MmSystemRangeStart
/* GLOBALS ********************************************************************/
//
SIZE_T MmMinimumNonPagedPoolSize = 256 * 1024;
ULONG MmMinAdditionNonPagedPoolPerMb = 32 * 1024;
-SIZE_T MmDefaultMaximumNonPagedPool = 1024 * 1024;
+SIZE_T MmDefaultMaximumNonPagedPool = 1024 * 1024;
ULONG MmMaxAdditionNonPagedPoolPerMb = 400 * 1024;
//
// Right now we call this the "ARM³ Nonpaged Pool" and it begins somewhere after
// the PFN database (which starts at 0xB0000000).
//
-// The expansion nonpaged pool, on the other hand, can grow much bigger (400MB
+// The expansion nonpaged pool, on the other hand, can grow much bigger (400MB
// for a 1GB system). On ARM³ however, it is currently capped at 128MB.
//
// The address where the initial nonpaged pool starts is aptly named
// a System PTE, it is always valid, until the System PTE is torn down.
//
// System PTEs are actually composed of two "spaces", the system space proper,
-// and the nonpaged pool expansion space. The latter, as we've already seen,
+// and the nonpaged pool expansion space. The latter, as we've already seen,
// begins at MmNonPagedPoolExpansionStart. Based on the number of System PTEs
// that the system will support, the remaining address space below this address
// is used to hold the system space PTEs. This address, in turn, is held in the
// http://www.ditii.com/2007/09/28/windows-memory-management-x86-virtual-address-space/
//
PVOID MmNonPagedSystemStart;
+SIZE_T MiNonPagedSystemSize;
PVOID MmNonPagedPoolStart;
PVOID MmNonPagedPoolExpansionStart;
PVOID MmNonPagedPoolEnd = MI_NONPAGED_POOL_END;
// drivers, followed by a 4MB area containing the session's working set. This is
// then followed by a 20MB mapped view area and finally by the session's paged
// pool, by default 16MB.
-//
+//
// On a normal system, this results in session space occupying the region from
// 0xBD000000 to 0xC0000000
//
PVOID MiSessionSpaceEnd; // 0xC0000000
PVOID MiSessionImageEnd; // 0xC0000000
PVOID MiSessionImageStart; // 0xBF800000
+PVOID MiSessionSpaceWs;
PVOID MiSessionViewStart; // 0xBE000000
PVOID MiSessionPoolEnd; // 0xBE000000
PVOID MiSessionPoolStart; // 0xBD000000
// The system view space, on the other hand, is where sections that are memory
// mapped into "system space" end up.
//
-// By default, it is a 16MB region.
+// By default, it is a 16MB region, but we hack it to be 32MB for ReactOS
//
PVOID MiSystemViewStart;
SIZE_T MmSystemViewSize;
// address.
//
PFN_NUMBER MmSystemPageDirectory[PD_COUNT];
-PMMPTE MmSystemPagePtes;
+PMMPDE MmSystemPagePtes;
#endif
//
//
// This should be 0xC0C00000 -- the cache itself starts at 0xC1000000
//
-PMMWSL MmSystemCacheWorkingSetList = MI_SYSTEM_CACHE_WS_START;
+PMMWSL MmSystemCacheWorkingSetList = (PVOID)MI_SYSTEM_CACHE_WS_START;
//
// Windows NT seems to choose between 7000, 11000 and 50000
// On systems with more than 32MB, this number is then doubled, and further
// aligned up to a PDE boundary (4MB).
//
-ULONG_PTR MmNumberOfSystemPtes;
+PFN_COUNT MmNumberOfSystemPtes;
//
// This is how many pages the PFN database will take up
//
// This is where we keep track of the most basic physical layout markers
//
-PFN_NUMBER MmNumberOfPhysicalPages, MmHighestPhysicalPage, MmLowestPhysicalPage = -1;
+PFN_NUMBER MmHighestPhysicalPage, MmLowestPhysicalPage = -1;
+PFN_COUNT MmNumberOfPhysicalPages;
//
// The total number of pages mapped by the boot loader, which include the kernel
PMMPTE MiHighestUserPte;
PMMPDE MiHighestUserPde;
#if (_MI_PAGING_LEVELS >= 3)
-/* We need the highest PPE and PXE addresses */
+PMMPTE MiHighestUserPpe;
+#if (_MI_PAGING_LEVELS >= 4)
+PMMPTE MiHighestUserPxe;
+#endif
#endif
/* These variables define the system cache address space */
*/
PFN_NUMBER MmMinimumFreePages = 26;
-/*
+/*
* This number indicates how many pages we consider to be a low limit of having
* "plenty" of free memory.
*
SIZE_T MmTotalCommitLimit;
SIZE_T MmTotalCommitLimitMaximum;
+/* Internal setting used for debugging memory descriptors */
+BOOLEAN MiDbgEnableMdDump =
+#ifdef _ARM_
+TRUE;
+#else
+FALSE;
+#endif
+
+/* Number of memory descriptors in the loader block */
+ULONG MiNumberDescriptors = 0;
+
+/* Number of free pages in the loader block */
+PFN_NUMBER MiNumberOfFreePages = 0;
+
+
/* PRIVATE FUNCTIONS **********************************************************/
-#ifndef _M_AMD64
-//
-// In Bavaria, this is probably a hate crime
-//
+/* Walks the loader block's memory descriptor list once at boot: counts the
+ * descriptors (MiNumberDescriptors), tallies total physical and free pages
+ * (MmNumberOfPhysicalPages, MiNumberOfFreePages), tracks the lowest/highest
+ * physical page numbers, and remembers the largest free descriptor in
+ * MxFreeDescriptor so early-boot allocations (MxGetNextPage) can carve
+ * pages out of it. */
VOID
-FASTCALL
-MiSyncARM3WithROS(IN PVOID AddressStart,
-                  IN PVOID AddressEnd)
+NTAPI
+MiScanMemoryDescriptors(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
-    //
-    // Puerile piece of junk-grade carbonized horseshit puss sold to the lowest bidder
-    //
-    ULONG Pde = ADDR_TO_PDE_OFFSET(AddressStart);
-    while (Pde <= ADDR_TO_PDE_OFFSET(AddressEnd))
+    PLIST_ENTRY ListEntry;
+    PMEMORY_ALLOCATION_DESCRIPTOR Descriptor;
+    PFN_NUMBER PageFrameIndex, FreePages = 0;
+
+    /* Loop the memory descriptors */
+    for (ListEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
+         ListEntry != &LoaderBlock->MemoryDescriptorListHead;
+         ListEntry = ListEntry->Flink)
    {
-        //
-        // This both odious and heinous
-        //
-        extern ULONG MmGlobalKernelPageDirectory[1024];
-        MmGlobalKernelPageDirectory[Pde] = ((PULONG)PDE_BASE)[Pde];
-        Pde++;
+        /* Get the descriptor */
+        Descriptor = CONTAINING_RECORD(ListEntry,
+                                       MEMORY_ALLOCATION_DESCRIPTOR,
+                                       ListEntry);
+        DPRINT("MD Type: %lx Base: %lx Count: %lx\n",
+            Descriptor->MemoryType, Descriptor->BasePage, Descriptor->PageCount);
+
+        /* Count this descriptor */
+        MiNumberDescriptors++;
+
+        /* Check if this is invisible memory (firmware/HAL-owned ranges the
+         * OS must never touch) */
+        if ((Descriptor->MemoryType == LoaderFirmwarePermanent) ||
+            (Descriptor->MemoryType == LoaderSpecialMemory) ||
+            (Descriptor->MemoryType == LoaderHALCachedMemory) ||
+            (Descriptor->MemoryType == LoaderBBTMemory))
+        {
+            /* Skip this descriptor */
+            continue;
+        }
+
+        /* Check if this is bad memory */
+        if (Descriptor->MemoryType != LoaderBad)
+        {
+            /* Count this in the total of pages */
+            MmNumberOfPhysicalPages += (PFN_COUNT)Descriptor->PageCount;
+        }
+
+        /* Check if this is the new lowest page */
+        if (Descriptor->BasePage < MmLowestPhysicalPage)
+        {
+            /* Update the lowest page */
+            MmLowestPhysicalPage = Descriptor->BasePage;
+        }
+
+        /* Check if this is the new highest page */
+        PageFrameIndex = Descriptor->BasePage + Descriptor->PageCount;
+        if (PageFrameIndex > MmHighestPhysicalPage)
+        {
+            /* Update the highest page */
+            MmHighestPhysicalPage = PageFrameIndex - 1;
+        }
+
+        /* Check if this is free memory */
+        if ((Descriptor->MemoryType == LoaderFree) ||
+            (Descriptor->MemoryType == LoaderLoadedProgram) ||
+            (Descriptor->MemoryType == LoaderFirmwareTemporary) ||
+            (Descriptor->MemoryType == LoaderOsloaderStack))
+        {
+            /* Count these pages towards the free total */
+            MiNumberOfFreePages += Descriptor->PageCount;
+
+            /* Check if this is the largest memory descriptor */
+            if (Descriptor->PageCount > FreePages)
+            {
+                /* Remember it */
+                MxFreeDescriptor = Descriptor;
+                FreePages = Descriptor->PageCount;
+            }
+        }
    }
+
+    /* Save original values of the free descriptor, since it'll be
+     * altered by early allocations.
+     * NOTE(review): if the list contained no free descriptor at all,
+     * MxFreeDescriptor is dereferenced here at whatever value it had
+     * before the scan -- presumably never NULL on a bootable system,
+     * but worth confirming. */
+    MxOldFreeDescriptor = *MxFreeDescriptor;
}
-#endif
PFN_NUMBER
NTAPI
+INIT_FUNCTION
MxGetNextPage(IN PFN_NUMBER PageCount)
{
PFN_NUMBER Pfn;
-
+
/* Make sure we have enough pages */
if (PageCount > MxFreeDescriptor->PageCount)
{
MxOldFreeDescriptor.PageCount,
PageCount);
}
-
+
/* Use our lowest usable free pages */
Pfn = MxFreeDescriptor->BasePage;
MxFreeDescriptor->BasePage += PageCount;
VOID
NTAPI
+INIT_FUNCTION
MiComputeColorInformation(VOID)
{
ULONG L2Associativity;
-
+
/* Check if no setting was provided already */
if (!MmSecondaryColors)
{
/* Get L2 cache information */
L2Associativity = KeGetPcr()->SecondLevelCacheAssociativity;
-
+
/* The number of colors is the number of cache bytes by set/way */
MmSecondaryColors = KeGetPcr()->SecondLevelCacheSize;
if (L2Associativity) MmSecondaryColors /= L2Associativity;
}
-
+
/* Now convert cache bytes into pages */
MmSecondaryColors >>= PAGE_SHIFT;
if (!MmSecondaryColors)
/* Set the maximum */
MmSecondaryColors = MI_MAX_SECONDARY_COLORS;
}
-
+
/* Make sure there aren't too little colors */
if (MmSecondaryColors < MI_MIN_SECONDARY_COLORS)
{
/* Set the default */
MmSecondaryColors = MI_SECONDARY_COLORS;
}
-
+
/* Finally make sure the colors are a power of two */
if (MmSecondaryColors & (MmSecondaryColors - 1))
{
MmSecondaryColors = MI_SECONDARY_COLORS;
}
}
-
+
/* Compute the mask and store it */
MmSecondaryColorMask = MmSecondaryColors - 1;
- KeGetCurrentPrcb()->SecondaryColorMask = MmSecondaryColorMask;
+ KeGetCurrentPrcb()->SecondaryColorMask = MmSecondaryColorMask;
}
VOID
NTAPI
+INIT_FUNCTION
MiInitializeColorTables(VOID)
{
ULONG i;
PMMPTE PointerPte, LastPte;
MMPTE TempPte = ValidKernelPte;
-
+
/* The color table starts after the ARM3 PFN database */
MmFreePagesByColor[0] = (PMMCOLOR_TABLES)&MmPfnDatabase[MmHighestPhysicalPage + 1];
-
+
/* Loop the PTEs. We have two color tables for each secondary color */
PointerPte = MiAddressToPte(&MmFreePagesByColor[0][0]);
LastPte = MiAddressToPte((ULONG_PTR)MmFreePagesByColor[0] +
/* Zero out the page */
RtlZeroMemory(MiPteToAddress(PointerPte), PAGE_SIZE);
}
-
+
/* Next */
PointerPte++;
}
-
+
/* Now set the address of the next list, right after this one */
MmFreePagesByColor[1] = &MmFreePagesByColor[0][MmSecondaryColors];
-
+
/* Now loop the lists to set them up */
for (i = 0; i < MmSecondaryColors; i++)
{
/* Set both free and zero lists for each color */
- MmFreePagesByColor[ZeroedPageList][i].Flink = 0xFFFFFFFF;
- MmFreePagesByColor[ZeroedPageList][i].Blink = (PVOID)0xFFFFFFFF;
+ MmFreePagesByColor[ZeroedPageList][i].Flink = LIST_HEAD;
+ MmFreePagesByColor[ZeroedPageList][i].Blink = (PVOID)LIST_HEAD;
MmFreePagesByColor[ZeroedPageList][i].Count = 0;
- MmFreePagesByColor[FreePageList][i].Flink = 0xFFFFFFFF;
- MmFreePagesByColor[FreePageList][i].Blink = (PVOID)0xFFFFFFFF;
+ MmFreePagesByColor[FreePageList][i].Flink = LIST_HEAD;
+ MmFreePagesByColor[FreePageList][i].Blink = (PVOID)LIST_HEAD;
MmFreePagesByColor[FreePageList][i].Count = 0;
}
}
+#ifndef _M_AMD64
BOOLEAN
NTAPI
+INIT_FUNCTION
MiIsRegularMemory(IN PLOADER_PARAMETER_BLOCK LoaderBlock,
IN PFN_NUMBER Pfn)
{
VOID
NTAPI
+INIT_FUNCTION
MiMapPfnDatabase(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
- ULONG FreePage, FreePageCount, PagesLeft, BasePage, PageCount;
+ PFN_NUMBER FreePage, FreePageCount, PagesLeft, BasePage, PageCount;
PLIST_ENTRY NextEntry;
PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
PMMPTE PointerPte, LastPte;
MMPTE TempPte = ValidKernelPte;
-
+
/* Get current page data, since we won't be using MxGetNextPage as it would corrupt our state */
FreePage = MxFreeDescriptor->BasePage;
FreePageCount = MxFreeDescriptor->PageCount;
PagesLeft = 0;
-
+
/* Loop the memory descriptors */
NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
NextEntry = MdBlock->ListEntry.Flink;
continue;
}
-
+
/* Next, check if this is our special free descriptor we've found */
if (MdBlock == MxFreeDescriptor)
{
BasePage = MdBlock->BasePage;
PageCount = MdBlock->PageCount;
}
-
+
/* Get the PTEs for this range */
PointerPte = MiAddressToPte(&MmPfnDatabase[BasePage]);
LastPte = MiAddressToPte(((ULONG_PTR)&MmPfnDatabase[BasePage + PageCount]) - 1);
DPRINT("MD Type: %lx Base: %lx Count: %lx\n", MdBlock->MemoryType, BasePage, PageCount);
-
+
/* Loop them */
while (PointerPte <= LastPte)
{
/* Use the next free page */
TempPte.u.Hard.PageFrameNumber = FreePage;
ASSERT(FreePageCount != 0);
-
+
/* Consume free pages */
FreePage++;
FreePageCount--;
MxOldFreeDescriptor.PageCount,
1);
}
-
+
/* Write out this PTE */
PagesLeft++;
MI_WRITE_VALID_PTE(PointerPte, TempPte);
-
+
/* Zero this page */
RtlZeroMemory(MiPteToAddress(PointerPte), PAGE_SIZE);
}
-
+
/* Next! */
PointerPte++;
}
/* Do the next address range */
NextEntry = MdBlock->ListEntry.Flink;
}
-
+
/* Now update the free descriptors to consume the pages we used up during the PFN allocation loop */
MxFreeDescriptor->BasePage = FreePage;
MxFreeDescriptor->PageCount = FreePageCount;
VOID
NTAPI
+INIT_FUNCTION
MiBuildPfnDatabaseFromPages(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
PMMPDE PointerPde;
PFN_NUMBER PageFrameIndex, StartupPdIndex, PtePageIndex;
PMMPFN Pfn1, Pfn2;
ULONG_PTR BaseAddress = 0;
-
+
/* PFN of the startup page directory */
StartupPdIndex = PFN_FROM_PTE(MiAddressToPde(PDE_BASE));
-
+
/* Start with the first PDE and scan them all */
PointerPde = MiAddressToPde(NULL);
Count = PD_COUNT * PDE_COUNT;
{
/* Get the PFN from it */
PageFrameIndex = PFN_FROM_PTE(PointerPde);
-
+
/* Do we want a PFN entry for this page? */
if (MiIsRegularMemory(LoaderBlock, PageFrameIndex))
{
/* Yes we do, set it up */
Pfn1 = MiGetPfnEntry(PageFrameIndex);
Pfn1->u4.PteFrame = StartupPdIndex;
- Pfn1->PteAddress = PointerPde;
+ Pfn1->PteAddress = (PMMPTE)PointerPde;
Pfn1->u2.ShareCount++;
Pfn1->u3.e2.ReferenceCount = 1;
Pfn1->u3.e1.PageLocation = ActiveAndValid;
Pfn1->u3.e1.CacheAttribute = MiNonCached;
+#if MI_TRACE_PFNS
+ Pfn1->PfnUsage = MI_USAGE_INIT_MEMORY;
+ memcpy(Pfn1->ProcessName, "Initial PDE", 16);
+#endif
}
else
{
/* No PFN entry */
Pfn1 = NULL;
}
-
+
/* Now get the PTE and scan the pages */
PointerPte = MiAddressToPte(BaseAddress);
for (j = 0; j < PTE_COUNT; j++)
/* Increase the shared count of the PFN entry for the PDE */
ASSERT(Pfn1 != NULL);
Pfn1->u2.ShareCount++;
-
+
/* Now check if the PTE is valid memory too */
PtePageIndex = PFN_FROM_PTE(PointerPte);
if (MiIsRegularMemory(LoaderBlock, PtePageIndex))
Pfn2->u3.e2.ReferenceCount = 1;
Pfn2->u3.e1.PageLocation = ActiveAndValid;
Pfn2->u3.e1.CacheAttribute = MiNonCached;
+#if MI_TRACE_PFNS
+                    /* Tag the PTE page's own PFN entry for the tracker.
+                     * This is Pfn2 (set up just above), NOT the enclosing
+                     * PDE's Pfn1 -- writing Pfn1 here was a copy-paste
+                     * slip from the "Initial PDE" branch. */
+                    Pfn2->PfnUsage = MI_USAGE_INIT_MEMORY;
+                    memcpy(Pfn2->ProcessName, "Initial PTE", 16);
+#endif
}
}
}
}
-
+
/* Next PTE */
PointerPte++;
BaseAddress += PAGE_SIZE;
/* Next PDE mapped address */
BaseAddress += PDE_MAPPED_VA;
}
-
+
/* Next PTE */
PointerPde++;
}
VOID
NTAPI
+INIT_FUNCTION
MiBuildPfnDatabaseZeroPage(VOID)
{
PMMPFN Pfn1;
PMMPDE PointerPde;
-
+
/* Grab the lowest page and check if it has no real references */
Pfn1 = MiGetPfnEntry(MmLowestPhysicalPage);
if (!(MmLowestPhysicalPage) && !(Pfn1->u3.e2.ReferenceCount))
/* Make it a bogus page to catch errors */
PointerPde = MiAddressToPde(0xFFFFFFFF);
Pfn1->u4.PteFrame = PFN_FROM_PTE(PointerPde);
- Pfn1->PteAddress = PointerPde;
+ Pfn1->PteAddress = (PMMPTE)PointerPde;
Pfn1->u2.ShareCount++;
Pfn1->u3.e2.ReferenceCount = 0xFFF0;
Pfn1->u3.e1.PageLocation = ActiveAndValid;
Pfn1->u3.e1.CacheAttribute = MiNonCached;
- }
+ }
}
VOID
NTAPI
+INIT_FUNCTION
MiBuildPfnDatabaseFromLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
PLIST_ENTRY NextEntry;
PMMPTE PointerPte;
PMMPDE PointerPde;
KIRQL OldIrql;
-
+
/* Now loop through the descriptors */
NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
/* In which case we'll trim the descriptor to go as high as we can */
PageCount = MmHighestPhysicalPage + 1 - PageFrameIndex;
MdBlock->PageCount = PageCount;
-
+
/* But if there's nothing left to trim, we got too high, so quit */
if (!PageCount) break;
}
{
/* Check for bad RAM */
case LoaderBad:
-
- DPRINT1("You have damaged RAM modules. Stopping boot\n");
- while (TRUE);
+
+ DPRINT1("You either have specified /BURNMEMORY or damaged RAM modules.\n");
break;
/* Check for free RAM */
/* Get the last page of this descriptor. Note we loop backwards */
PageFrameIndex += PageCount - 1;
Pfn1 = MiGetPfnEntry(PageFrameIndex);
-
+
/* Lock the PFN Database */
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
while (PageCount--)
Pfn1--;
PageFrameIndex--;
}
-
+
/* Release PFN database */
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
-
+
/* Done with this block */
break;
Pfn1->u3.e2.ReferenceCount = 1;
Pfn1->u3.e1.PageLocation = ActiveAndValid;
Pfn1->u3.e1.CacheAttribute = MiNonCached;
-
+#if MI_TRACE_PFNS
+ Pfn1->PfnUsage = MI_USAGE_BOOT_DRIVER;
+#endif
+
/* Check for RAM disk page */
if (MdBlock->MemoryType == LoaderXIPRom)
{
Pfn1->u3.e1.PrototypePte = 1;
}
}
-
+
/* Advance page structures */
Pfn1++;
PageFrameIndex++;
VOID
NTAPI
+INIT_FUNCTION
MiBuildPfnDatabaseSelf(VOID)
{
PMMPTE PointerPte, LastPte;
PMMPFN Pfn1;
-
+
/* Loop the PFN database page */
PointerPte = MiAddressToPte(MiGetPfnEntry(MmLowestPhysicalPage));
LastPte = MiAddressToPte(MiGetPfnEntry(MmHighestPhysicalPage));
Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
Pfn1->u2.ShareCount = 1;
Pfn1->u3.e2.ReferenceCount = 1;
+#if MI_TRACE_PFNS
+ Pfn1->PfnUsage = MI_USAGE_PFN_DATABASE;
+#endif
}
-
+
/* Next */
PointerPte++;
}
VOID
NTAPI
+INIT_FUNCTION
+/* Boot-time driver routine: constructs the entire ARM3 PFN database in
+ * four sequential passes over (1) the live page tables, (2) the zero page,
+ * (3) the loader block's memory descriptors, and (4) the PFN database's
+ * own pages. */
MiInitializePfnDatabase(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
    /* Scan memory and start setting up PFN entries */
    MiBuildPfnDatabaseFromPages(LoaderBlock);
-
+
    /* Add the zero page */
    MiBuildPfnDatabaseZeroPage();
-
+
    /* Scan the loader block and build the rest of the PFN database */
    MiBuildPfnDatabaseFromLoaderBlock(LoaderBlock);
-
-    /* Finally add the pages for the PFN database itself */
+
+    /* Finally add the pages for the PFN database itself */
    MiBuildPfnDatabaseSelf();
}
+#endif /* !_M_AMD64 */
+/* MmFreeLoaderBlock
+ *
+ * Releases the boot loader's temporary memory (registry data, OS loader
+ * heap, and NLS data descriptors) once boot has progressed far enough that
+ * the loader block is no longer needed.  Pages with no remaining references
+ * go straight onto the free list; referenced pages have their PTE cleared
+ * and are dereferenced so the PFN machinery deletes them. */
VOID
NTAPI
+INIT_FUNCTION
+MmFreeLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
+{
+    PLIST_ENTRY NextMd;
+    PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
+    ULONG_PTR i;
+    PFN_NUMBER BasePage, LoaderPages;
+    PMMPFN Pfn1;
+    KIRQL OldIrql;
+    PPHYSICAL_MEMORY_RUN Buffer, Entry;
+
+    /* Loop the descriptors in order to count them */
+    i = 0;
+    NextMd = LoaderBlock->MemoryDescriptorListHead.Flink;
+    while (NextMd != &LoaderBlock->MemoryDescriptorListHead)
+    {
+        MdBlock = CONTAINING_RECORD(NextMd,
+                                    MEMORY_ALLOCATION_DESCRIPTOR,
+                                    ListEntry);
+        i++;
+        NextMd = MdBlock->ListEntry.Flink;
+    }
+
+    /* Allocate a structure to hold the physical runs; sized for the worst
+     * case of every descriptor being a candidate */
+    Buffer = ExAllocatePoolWithTag(NonPagedPool,
+                                   i * sizeof(PHYSICAL_MEMORY_RUN),
+                                   'lMmM');
+    ASSERT(Buffer != NULL);
+    Entry = Buffer;
+
+    /* Loop the descriptors again */
+    NextMd = LoaderBlock->MemoryDescriptorListHead.Flink;
+    while (NextMd != &LoaderBlock->MemoryDescriptorListHead)
+    {
+        /* Check what kind this was */
+        MdBlock = CONTAINING_RECORD(NextMd,
+                                    MEMORY_ALLOCATION_DESCRIPTOR,
+                                    ListEntry);
+        switch (MdBlock->MemoryType)
+        {
+            /* Registry, NLS, and heap data */
+            case LoaderRegistryData:
+            case LoaderOsloaderHeap:
+            case LoaderNlsData:
+                /* Are all a candidate for deletion; deliberate fallthrough
+                 * into the common break below after recording the run */
+                Entry->BasePage = MdBlock->BasePage;
+                Entry->PageCount = MdBlock->PageCount;
+                Entry++;
+
+            /* We keep the rest */
+            default:
+                break;
+        }
+
+        /* Move to the next descriptor */
+        NextMd = MdBlock->ListEntry.Flink;
+    }
+
+    /* Acquire the PFN lock */
+    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
+
+    /* Loop the recorded runs, last-to-first.
+     * NOTE(review): if no candidate run was recorded, --Entry forms a
+     * pointer one element before Buffer prior to the comparison --
+     * harmless on the targeted platforms, but technically out-of-bounds
+     * pointer arithmetic. */
+    LoaderPages = 0;
+    while (--Entry >= Buffer)
+    {
+        /* See how many pages are in this run */
+        i = Entry->PageCount;
+        BasePage = Entry->BasePage;
+
+        /* Loop each page */
+        Pfn1 = MiGetPfnEntry(BasePage);
+        while (i--)
+        {
+            /* Check if it has references or is in any kind of list */
+            if (!(Pfn1->u3.e2.ReferenceCount) && (!Pfn1->u1.Flink))
+            {
+                /* Set the new PTE address and put this page into the free list */
+                Pfn1->PteAddress = (PMMPTE)(BasePage << PAGE_SHIFT);
+                MiInsertPageInFreeList(BasePage);
+                LoaderPages++;
+            }
+            else if (BasePage)
+            {
+                /* It has a reference, so simply drop it */
+                ASSERT(MI_IS_PHYSICAL_ADDRESS(MiPteToAddress(Pfn1->PteAddress)) == FALSE);
+
+                /* Drop a dereference on this page, which should delete it */
+                Pfn1->PteAddress->u.Long = 0;
+                MI_SET_PFN_DELETED(Pfn1);
+                MiDecrementShareCount(Pfn1, BasePage);
+                LoaderPages++;
+            }
+
+            /* Move to the next page */
+            Pfn1++;
+            BasePage++;
+        }
+    }
+
+    /* Release the PFN lock and flush the TLB */
+    DPRINT1("Loader pages freed: %lx\n", LoaderPages);
+    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
+    KeFlushCurrentTb();
+
+    /* Free our run structure */
+    ExFreePoolWithTag(Buffer, 'lMmM');
+}
+
+VOID
+NTAPI
+INIT_FUNCTION
MiAdjustWorkingSetManagerParameters(IN BOOLEAN Client)
{
/* This function needs to do more work, for now, we tune page minimums */
-
+
/* Check for a system with around 64MB RAM or more */
if (MmNumberOfPhysicalPages >= (63 * _1MB) / PAGE_SIZE)
{
VOID
NTAPI
+INIT_FUNCTION
MiNotifyMemoryEvents(VOID)
{
/* Are we in a low-memory situation? */
NTSTATUS
NTAPI
+INIT_FUNCTION
MiCreateMemoryEvent(IN PUNICODE_STRING Name,
OUT PKEVENT *Event)
{
/* Setup the ACL inside it */
Status = RtlCreateAcl(Dacl, DaclLength, ACL_REVISION);
if (!NT_SUCCESS(Status)) goto CleanUp;
-
+
/* Add query rights for everyone */
Status = RtlAddAccessAllowedAce(Dacl,
ACL_REVISION,
SYNCHRONIZE | EVENT_QUERY_STATE | READ_CONTROL,
SeWorldSid);
if (!NT_SUCCESS(Status)) goto CleanUp;
-
+
/* Full rights for the admin */
Status = RtlAddAccessAllowedAce(Dacl,
ACL_REVISION,
EVENT_ALL_ACCESS,
SeAliasAdminsSid);
if (!NT_SUCCESS(Status)) goto CleanUp;
-
+
/* As well as full rights for the system */
Status = RtlAddAccessAllowedAce(Dacl,
ACL_REVISION,
EVENT_ALL_ACCESS,
SeLocalSystemSid);
if (!NT_SUCCESS(Status)) goto CleanUp;
-
+
/* Set this DACL inside the SD */
Status = RtlSetDaclSecurityDescriptor(&SecurityDescriptor,
TRUE,
Dacl,
FALSE);
if (!NT_SUCCESS(Status)) goto CleanUp;
-
+
/* Setup the event attributes, making sure it's a permanent one */
InitializeObjectAttributes(&ObjectAttributes,
Name,
FALSE);
CleanUp:
/* Free the DACL */
- ExFreePool(Dacl);
+ ExFreePoolWithTag(Dacl, 'lcaD');
/* Check if this is the success path */
if (NT_SUCCESS(Status))
KernelMode,
(PVOID*)Event,
NULL);
- ZwClose (EventHandle);
+ ZwClose (EventHandle);
}
/* Return status */
BOOLEAN
NTAPI
+INIT_FUNCTION
MiInitializeMemoryEvents(VOID)
{
UNICODE_STRING LowString = RTL_CONSTANT_STRING(L"\\KernelObjects\\LowMemoryCondition");
VOID
NTAPI
+INIT_FUNCTION
MiAddHalIoMappings(VOID)
{
PVOID BaseAddress;
- PMMPTE PointerPde;
+ PMMPDE PointerPde, LastPde;
PMMPTE PointerPte;
- ULONG i, j, PdeCount;
+ ULONG j;
PFN_NUMBER PageFrameIndex;
/* HAL Heap address -- should be on a PDE boundary */
- BaseAddress = (PVOID)0xFFC00000;
+ BaseAddress = (PVOID)MM_HAL_VA_START;
ASSERT(MiAddressToPteOffset(BaseAddress) == 0);
/* Check how many PDEs the heap has */
PointerPde = MiAddressToPde(BaseAddress);
- PdeCount = PDE_COUNT - ADDR_TO_PDE_OFFSET(BaseAddress);
- for (i = 0; i < PdeCount; i++)
+ LastPde = MiAddressToPde((PVOID)MM_HAL_VA_END);
+
+ while (PointerPde <= LastPde)
{
/* Does the HAL own this mapping? */
if ((PointerPde->u.Hard.Valid == 1) &&
- (PointerPde->u.Hard.LargePage == 0))
+ (MI_IS_PAGE_LARGE(PointerPde) == FALSE))
{
/* Get the PTE for it and scan each page */
PointerPte = MiAddressToPte(BaseAddress);
VOID
NTAPI
-MmDumpArmPfnDatabase(VOID)
+MmDumpArmPfnDatabase(IN BOOLEAN StatusOnly)
{
ULONG i;
PMMPFN Pfn1;
PCHAR Consumer = "Unknown";
KIRQL OldIrql;
ULONG ActivePages = 0, FreePages = 0, OtherPages = 0;
-
- KeRaiseIrql(HIGH_LEVEL, &OldIrql);
-
+#if MI_TRACE_PFNS
+ ULONG UsageBucket[MI_USAGE_FREE_PAGE + 1] = {0};
+ PCHAR MI_USAGE_TEXT[MI_USAGE_FREE_PAGE + 1] =
+ {
+ "Not set",
+ "Paged Pool",
+ "Nonpaged Pool",
+ "Nonpaged Pool Ex",
+ "Kernel Stack",
+ "Kernel Stack Ex",
+ "System PTE",
+ "VAD",
+ "PEB/TEB",
+ "Section",
+ "Page Table",
+ "Page Directory",
+ "Old Page Table",
+ "Driver Page",
+ "Contiguous Alloc",
+ "MDL",
+ "Demand Zero",
+ "Zero Loop",
+ "Cache",
+ "PFN Database",
+ "Boot Driver",
+ "Initial Memory",
+ "Free Page"
+ };
+#endif
//
// Loop the PFN database
//
+ KeRaiseIrql(HIGH_LEVEL, &OldIrql);
for (i = 0; i <= MmHighestPhysicalPage; i++)
{
Pfn1 = MiGetPfnEntry(i);
if (!Pfn1) continue;
-
+#if MI_TRACE_PFNS
+ ASSERT(Pfn1->PfnUsage <= MI_USAGE_FREE_PAGE);
+#endif
//
// Get the page location
//
switch (Pfn1->u3.e1.PageLocation)
{
case ActiveAndValid:
-
+
Consumer = "Active and Valid";
ActivePages++;
break;
-
+
+ case ZeroedPageList:
+
+ Consumer = "Zero Page List";
+ FreePages++;
+ break;//continue;
+
case FreePageList:
-
+
Consumer = "Free Page List";
FreePages++;
- break;
-
+ break;//continue;
+
default:
-
+
Consumer = "Other (ASSERT!)";
OtherPages++;
break;
}
-
+
+#if MI_TRACE_PFNS
+ /* Add into bucket */
+ UsageBucket[Pfn1->PfnUsage]++;
+#endif
+
//
// Pretty-print the page
//
- DbgPrint("0x%08p:\t%20s\t(%02d.%02d) [%08p-%08p])\n",
+ if (!StatusOnly)
+ DbgPrint("0x%08p:\t%20s\t(%04d.%04d)\t[%16s - %16s])\n",
i << PAGE_SHIFT,
Consumer,
Pfn1->u3.e2.ReferenceCount,
- Pfn1->u2.ShareCount,
- Pfn1->PteAddress,
- Pfn1->u4.PteFrame);
+ Pfn1->u2.ShareCount == LIST_HEAD ? 0xFFFF : Pfn1->u2.ShareCount,
+#if MI_TRACE_PFNS
+ MI_USAGE_TEXT[Pfn1->PfnUsage],
+ Pfn1->ProcessName);
+#else
+ "Page tracking",
+ "is disabled");
+#endif
}
-
- DbgPrint("Active: %d pages\t[%d KB]\n", ActivePages, (ActivePages << PAGE_SHIFT) / 1024);
- DbgPrint("Free: %d pages\t[%d KB]\n", FreePages, (FreePages << PAGE_SHIFT) / 1024);
- DbgPrint("Other: %d pages\t[%d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
-
- KeLowerIrql(OldIrql);
-}
-PFN_NUMBER
-NTAPI
-MiPagesInLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock,
- IN PBOOLEAN IncludeType)
-{
- PLIST_ENTRY NextEntry;
- PFN_NUMBER PageCount = 0;
- PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
-
- //
- // Now loop through the descriptors
- //
- NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
- while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
- {
- //
- // Grab each one, and check if it's one we should include
- //
- MdBlock = CONTAINING_RECORD(NextEntry,
- MEMORY_ALLOCATION_DESCRIPTOR,
- ListEntry);
- if ((MdBlock->MemoryType < LoaderMaximum) &&
- (IncludeType[MdBlock->MemoryType]))
- {
- //
- // Add this to our running total
- //
- PageCount += MdBlock->PageCount;
- }
-
- //
- // Try the next descriptor
- //
- NextEntry = MdBlock->ListEntry.Flink;
- }
-
- //
- // Return the total
- //
- return PageCount;
+ DbgPrint("Active: %5d pages\t[%6d KB]\n", ActivePages, (ActivePages << PAGE_SHIFT) / 1024);
+ DbgPrint("Free: %5d pages\t[%6d KB]\n", FreePages, (FreePages << PAGE_SHIFT) / 1024);
+ DbgPrint("-----------------------------------------\n");
+#if MI_TRACE_PFNS
+ OtherPages = UsageBucket[MI_USAGE_BOOT_DRIVER];
+ DbgPrint("Boot Images: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
+ OtherPages = UsageBucket[MI_USAGE_DRIVER_PAGE];
+ DbgPrint("System Drivers: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
+ OtherPages = UsageBucket[MI_USAGE_PFN_DATABASE];
+ DbgPrint("PFN Database: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
+ OtherPages = UsageBucket[MI_USAGE_PAGE_TABLE] + UsageBucket[MI_USAGE_LEGACY_PAGE_DIRECTORY];
+ DbgPrint("Page Tables: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
+ OtherPages = UsageBucket[MI_USAGE_NONPAGED_POOL] + UsageBucket[MI_USAGE_NONPAGED_POOL_EXPANSION];
+ DbgPrint("NonPaged Pool: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
+ OtherPages = UsageBucket[MI_USAGE_PAGED_POOL];
+ DbgPrint("Paged Pool: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
+ OtherPages = UsageBucket[MI_USAGE_KERNEL_STACK] + UsageBucket[MI_USAGE_KERNEL_STACK_EXPANSION];
+ DbgPrint("Kernel Stack: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
+ OtherPages = UsageBucket[MI_USAGE_INIT_MEMORY];
+ DbgPrint("Init Memory: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
+ OtherPages = UsageBucket[MI_USAGE_SECTION];
+ DbgPrint("Sections: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
+ OtherPages = UsageBucket[MI_USAGE_CACHE];
+ DbgPrint("Cache: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
+#endif
+ KeLowerIrql(OldIrql);
}
PPHYSICAL_MEMORY_DESCRIPTOR
NTAPI
+INIT_FUNCTION
MmInitializeMemoryLimits(IN PLOADER_PARAMETER_BLOCK LoaderBlock,
IN PBOOLEAN IncludeType)
{
PLIST_ENTRY NextEntry;
- ULONG Run = 0, InitialRuns = 0;
+ ULONG Run = 0, InitialRuns;
PFN_NUMBER NextPage = -1, PageCount = 0;
PPHYSICAL_MEMORY_DESCRIPTOR Buffer, NewBuffer;
PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
-
+
//
- // Scan the memory descriptors
+ // Start with the maximum we might need
//
- NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
- while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
- {
- //
- // For each one, increase the memory allocation estimate
- //
- InitialRuns++;
- NextEntry = NextEntry->Flink;
- }
-
+ InitialRuns = MiNumberDescriptors;
+
//
// Allocate the maximum we'll ever need
//
// For now that's how many runs we have
//
Buffer->NumberOfRuns = InitialRuns;
-
+
//
// Now loop through the descriptors again
//
// Add this to our running total
//
PageCount += MdBlock->PageCount;
-
+
//
// Check if the next page is described by the next descriptor
- //
+ //
if (MdBlock->BasePage == NextPage)
{
//
Buffer->Run[Run].BasePage = MdBlock->BasePage;
Buffer->Run[Run].PageCount = MdBlock->PageCount;
NextPage = Buffer->Run[Run].BasePage + Buffer->Run[Run].PageCount;
-
+
//
// And in this case, increase the number of runs
//
Run++;
}
}
-
+
//
// Try the next descriptor
//
NextEntry = MdBlock->ListEntry.Flink;
}
-
+
//
// We should not have been able to go past our initial estimate
//
RtlCopyMemory(NewBuffer->Run,
Buffer->Run,
sizeof(PHYSICAL_MEMORY_RUN) * Run);
- ExFreePool(Buffer);
-
+ ExFreePoolWithTag(Buffer, 'lMmM');
+
//
// Now use the new buffer
//
Buffer = NewBuffer;
}
}
-
+
//
// Write the final numbers, and return it
//
VOID
NTAPI
+INIT_FUNCTION
MiBuildPagedPool(VOID)
{
- PMMPTE PointerPte, PointerPde;
- MMPTE TempPte = ValidKernelPte;
+ PMMPTE PointerPte;
+ PMMPDE PointerPde;
+ MMPDE TempPde = ValidKernelPde;
PFN_NUMBER PageFrameIndex;
KIRQL OldIrql;
- ULONG Size, BitMapSize;
-#if (_MI_PAGING_LEVELS == 2)
+ SIZE_T Size;
+ ULONG BitMapSize;
+#if (_MI_PAGING_LEVELS >= 3)
+ MMPPE TempPpe = ValidKernelPpe;
+ PMMPPE PointerPpe;
+#elif (_MI_PAGING_LEVELS == 2)
+ MMPTE TempPte = ValidKernelPte;
+
//
// Get the page frame number for the system page directory
//
PointerPte = MiAddressToPte(PDE_BASE);
ASSERT(PD_COUNT == 1);
MmSystemPageDirectory[0] = PFN_FROM_PTE(PointerPte);
-
+
//
// Allocate a system PTE which will hold a copy of the page directory
//
//
// Let's be really sure this doesn't overflow into nonpaged system VA
//
- ASSERT((MmSizeOfPagedPoolInBytes + (ULONG_PTR)MmPagedPoolStart) <=
+ ASSERT((MmSizeOfPagedPoolInBytes + (ULONG_PTR)MmPagedPoolStart) <=
(ULONG_PTR)MmNonPagedSystemStart);
//
MmSizeOfPagedPoolInBytes) - 1);
//
- // So now get the PDE for paged pool and zero it out
+ // Lock the PFN database
//
- PointerPde = MiAddressToPde(MmPagedPoolStart);
-
+ OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
+
#if (_MI_PAGING_LEVELS >= 3)
- /* On these systems, there's no double-mapping, so instead, the PPE and PXEs
+ /* On these systems, there's no double-mapping, so instead, the PPEs
* are setup to span the entire paged pool area, so there's no need for the
* system PD */
- ASSERT(FALSE);
+ for (PointerPpe = MiAddressToPpe(MmPagedPoolStart);
+ PointerPpe <= MiAddressToPpe(MmPagedPoolEnd);
+ PointerPpe++)
+ {
+ /* Check if the PPE is already valid */
+ if (!PointerPpe->u.Hard.Valid)
+ {
+ /* It is not, so map a fresh zeroed page */
+ TempPpe.u.Hard.PageFrameNumber = MiRemoveZeroPage(0);
+ MI_WRITE_VALID_PPE(PointerPpe, TempPpe);
+ }
+ }
#endif
+ //
+ // So now get the PDE for paged pool and zero it out
+ //
+ PointerPde = MiAddressToPde(MmPagedPoolStart);
RtlZeroMemory(PointerPde,
- (1 + MiAddressToPde(MmPagedPoolEnd) - PointerPde) * sizeof(MMPTE));
+ (1 + MiAddressToPde(MmPagedPoolEnd) - PointerPde) * sizeof(MMPDE));
//
// Next, get the first and last PTE
MmPagedPoolInfo.FirstPteForPagedPool = PointerPte;
MmPagedPoolInfo.LastPteForPagedPool = MiAddressToPte(MmPagedPoolEnd);
- //
- // Lock the PFN database
- //
- OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
-
/* Allocate a page and map the first paged pool PDE */
+ MI_SET_USAGE(MI_USAGE_PAGED_POOL);
+ MI_SET_PROCESS2("Kernel");
PageFrameIndex = MiRemoveZeroPage(0);
- TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
- MI_WRITE_VALID_PTE(PointerPde, TempPte);
+ TempPde.u.Hard.PageFrameNumber = PageFrameIndex;
+ MI_WRITE_VALID_PDE(PointerPde, TempPde);
#if (_MI_PAGING_LEVELS >= 3)
/* Use the PPE of MmPagedPoolStart that was setup above */
// Bla = PFN_FROM_PTE(PpeAddress(MmPagedPool...));
- ASSERT(FALSE);
+
+ /* Initialize the PFN entry for it */
+ MiInitializePfnForOtherProcess(PageFrameIndex,
+ (PMMPTE)PointerPde,
+ PFN_FROM_PTE(MiAddressToPpe(MmPagedPoolStart)));
#else
/* Do it this way */
// Bla = MmSystemPageDirectory[(PointerPde - (PMMPTE)PDE_BASE) / PDE_COUNT]
-#endif
+
/* Initialize the PFN entry for it */
MiInitializePfnForOtherProcess(PageFrameIndex,
- PointerPde,
- MmSystemPageDirectory[(PointerPde - (PMMPTE)PDE_BASE) / PDE_COUNT]);
+ (PMMPTE)PointerPde,
+ MmSystemPageDirectory[(PointerPde - (PMMPDE)PDE_BASE) / PDE_COUNT]);
+#endif
//
// Release the PFN database lock
//
Size = Size * 1024;
ASSERT(Size == MmSizeOfPagedPoolInPages);
- BitMapSize = Size;
+ BitMapSize = (ULONG)Size;
Size = sizeof(RTL_BITMAP) + (((Size + 31) / 32) * sizeof(ULONG));
//
//
InitializePool(PagedPool, 0);
+ /* Initialize special pool */
+ MiInitializeSpecialPool();
+
/* Default low threshold of 30MB or one fifth of paged pool */
MiLowPagedPoolThreshold = (30 * _1MB) >> PAGE_SHIFT;
MiLowPagedPoolThreshold = min(MiLowPagedPoolThreshold, Size / 5);
MiHighPagedPoolThreshold = (60 * _1MB) >> PAGE_SHIFT;
MiHighPagedPoolThreshold = min(MiHighPagedPoolThreshold, (Size * 2) / 5);
ASSERT(MiLowPagedPoolThreshold < MiHighPagedPoolThreshold);
+
+ /* Setup the global session space */
+ MiInitializeSystemSpaceMap(NULL);
}
-NTSTATUS
+VOID
NTAPI
+INIT_FUNCTION
+MiDbgDumpMemoryDescriptors(VOID)
+{
+ PLIST_ENTRY NextEntry;
+ PMEMORY_ALLOCATION_DESCRIPTOR Md;
+ PFN_NUMBER TotalPages = 0;
+ PCHAR
+ MemType[] =
+ {
+ "ExceptionBlock ",
+ "SystemBlock ",
+ "Free ",
+ "Bad ",
+ "LoadedProgram ",
+ "FirmwareTemporary ",
+ "FirmwarePermanent ",
+ "OsloaderHeap ",
+ "OsloaderStack ",
+ "SystemCode ",
+ "HalCode ",
+ "BootDriver ",
+ "ConsoleInDriver ",
+ "ConsoleOutDriver ",
+ "StartupDpcStack ",
+ "StartupKernelStack",
+ "StartupPanicStack ",
+ "StartupPcrPage ",
+ "StartupPdrPage ",
+ "RegistryData ",
+ "MemoryData ",
+ "NlsData ",
+ "SpecialMemory ",
+ "BBTMemory ",
+ "LoaderReserve ",
+ "LoaderXIPRom "
+ };
+
+ DPRINT1("Base\t\tLength\t\tType\n");
+ for (NextEntry = KeLoaderBlock->MemoryDescriptorListHead.Flink;
+ NextEntry != &KeLoaderBlock->MemoryDescriptorListHead;
+ NextEntry = NextEntry->Flink)
+ {
+ Md = CONTAINING_RECORD(NextEntry, MEMORY_ALLOCATION_DESCRIPTOR, ListEntry);
+ DPRINT1("%08lX\t%08lX\t%s\n", Md->BasePage, Md->PageCount, MemType[Md->MemoryType]);
+ TotalPages += Md->PageCount;
+ }
+
+ DPRINT1("Total: %08lX (%d MB)\n", (ULONG)TotalPages, (ULONG)(TotalPages * PAGE_SIZE) / 1024 / 1024);
+}
+
+BOOLEAN
+NTAPI
+INIT_FUNCTION
MmArmInitSystem(IN ULONG Phase,
IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
PVOID Bitmap;
PPHYSICAL_MEMORY_RUN Run;
PFN_NUMBER PageCount;
-
+#if DBG
+ ULONG j;
+ PMMPTE PointerPte, TestPte;
+ MMPTE TempPte;
+#endif
+
+ /* Dump memory descriptors */
+ if (MiDbgEnableMdDump) MiDbgDumpMemoryDescriptors();
+
//
// Instantiate memory that we don't consider RAM/usable
// We use the same exclusions that Windows does, in order to try to be
IncludeType[LoaderBBTMemory] = FALSE;
if (Phase == 0)
{
+ /* Count physical pages on the system */
+ MiScanMemoryDescriptors(LoaderBlock);
+
/* Initialize the phase 0 temporary event */
KeInitializeEvent(&MiTempEvent, NotificationEvent, FALSE);
MiHighPagedPoolEvent = &MiTempEvent;
MiLowNonPagedPoolEvent = &MiTempEvent;
MiHighNonPagedPoolEvent = &MiTempEvent;
-
+
//
// Define the basic user vs. kernel address space separation
//
- MmSystemRangeStart = (PVOID)KSEG0_BASE;
- MmUserProbeAddress = (ULONG_PTR)MmSystemRangeStart - 0x10000;
- MmHighestUserAddress = (PVOID)(MmUserProbeAddress - 1);
-
+ MmSystemRangeStart = (PVOID)MI_DEFAULT_SYSTEM_RANGE_START;
+ MmUserProbeAddress = (ULONG_PTR)MI_HIGHEST_USER_ADDRESS;
+ MmHighestUserAddress = (PVOID)MI_HIGHEST_USER_ADDRESS;
+
/* Highest PTE and PDE based on the addresses above */
MiHighestUserPte = MiAddressToPte(MmHighestUserAddress);
MiHighestUserPde = MiAddressToPde(MmHighestUserAddress);
#if (_MI_PAGING_LEVELS >= 3)
- /* We need the highest PPE and PXE addresses */
- ASSERT(FALSE);
+ MiHighestUserPpe = MiAddressToPpe(MmHighestUserAddress);
+#if (_MI_PAGING_LEVELS >= 4)
+ MiHighestUserPxe = MiAddressToPxe(MmHighestUserAddress);
+#endif
#endif
//
// Get the size of the boot loader's image allocations and then round
MmBootImageSize *= PAGE_SIZE;
MmBootImageSize = (MmBootImageSize + PDE_MAPPED_VA - 1) & ~(PDE_MAPPED_VA - 1);
ASSERT((MmBootImageSize % PDE_MAPPED_VA) == 0);
-
- //
- // Set the size of session view, pool, and image
- //
- MmSessionSize = MI_SESSION_SIZE;
- MmSessionViewSize = MI_SESSION_VIEW_SIZE;
- MmSessionPoolSize = MI_SESSION_POOL_SIZE;
- MmSessionImageSize = MI_SESSION_IMAGE_SIZE;
-
- //
- // Set the size of system view
- //
- MmSystemViewSize = MI_SYSTEM_VIEW_SIZE;
-
- //
- // This is where it all ends
- //
- MiSessionImageEnd = (PVOID)PTE_BASE;
-
- //
- // This is where we will load Win32k.sys and the video driver
- //
- MiSessionImageStart = (PVOID)((ULONG_PTR)MiSessionImageEnd -
- MmSessionImageSize);
-
- //
- // So the view starts right below the session working set (itself below
- // the image area)
- //
- MiSessionViewStart = (PVOID)((ULONG_PTR)MiSessionImageEnd -
- MmSessionImageSize -
- MI_SESSION_WORKING_SET_SIZE -
- MmSessionViewSize);
-
- //
- // Session pool follows
- //
- MiSessionPoolEnd = MiSessionViewStart;
- MiSessionPoolStart = (PVOID)((ULONG_PTR)MiSessionPoolEnd -
- MmSessionPoolSize);
-
- //
- // And it all begins here
- //
- MmSessionBase = MiSessionPoolStart;
-
- //
- // Sanity check that our math is correct
- //
- ASSERT((ULONG_PTR)MmSessionBase + MmSessionSize == PTE_BASE);
-
- //
- // Session space ends wherever image session space ends
- //
- MiSessionSpaceEnd = MiSessionImageEnd;
-
- //
- // System view space ends at session space, so now that we know where
- // this is, we can compute the base address of system view space itself.
- //
- MiSystemViewStart = (PVOID)((ULONG_PTR)MmSessionBase -
- MmSystemViewSize);
-
- /* Compute the PTE addresses for all the addresses we carved out */
- MiSessionImagePteStart = MiAddressToPte(MiSessionImageStart);
- MiSessionImagePteEnd = MiAddressToPte(MiSessionImageEnd);
- MiSessionBasePte = MiAddressToPte(MmSessionBase);
- MiSessionLastPte = MiAddressToPte(MiSessionSpaceEnd);
-
+
+ /* Initialize session space address layout */
+ MiInitializeSessionSpaceLayout();
+
+ /* Set the based section highest address */
+ MmHighSectionBase = (PVOID)((ULONG_PTR)MmHighestUserAddress - 0x800000);
+
+#if DBG
+ /* The subsection PTE format depends on things being 8-byte aligned */
+ ASSERT((sizeof(CONTROL_AREA) % 8) == 0);
+ ASSERT((sizeof(SUBSECTION) % 8) == 0);
+
+ /* Prototype PTEs are assumed to be in paged pool, so check if the math works */
+ PointerPte = (PMMPTE)MmPagedPoolStart;
+ MI_MAKE_PROTOTYPE_PTE(&TempPte, PointerPte);
+ TestPte = MiProtoPteToPte(&TempPte);
+ ASSERT(PointerPte == TestPte);
+
+ /* Try the last nonpaged pool address */
+ PointerPte = (PMMPTE)MI_NONPAGED_POOL_END;
+ MI_MAKE_PROTOTYPE_PTE(&TempPte, PointerPte);
+ TestPte = MiProtoPteToPte(&TempPte);
+ ASSERT(PointerPte == TestPte);
+
+ /* Try a bunch of random addresses near the end of the address space */
+ PointerPte = (PMMPTE)0xFFFC8000;
+ for (j = 0; j < 20; j += 1)
+ {
+ MI_MAKE_PROTOTYPE_PTE(&TempPte, PointerPte);
+ TestPte = MiProtoPteToPte(&TempPte);
+ ASSERT(PointerPte == TestPte);
+ PointerPte++;
+ }
+
+ /* Subsection PTEs are always in nonpaged pool, pick a random address to try */
+ PointerPte = (PMMPTE)0xFFAACBB8;
+ MI_MAKE_SUBSECTION_PTE(&TempPte, PointerPte);
+ TestPte = MiSubsectionPteToSubsection(&TempPte);
+ ASSERT(PointerPte == TestPte);
+#endif
+
+ /* Loop all 8 standby lists */
+ for (i = 0; i < 8; i++)
+ {
+ /* Initialize them */
+ MmStandbyPageListByPriority[i].Total = 0;
+ MmStandbyPageListByPriority[i].ListName = StandbyPageList;
+ MmStandbyPageListByPriority[i].Flink = MM_EMPTY_LIST;
+ MmStandbyPageListByPriority[i].Blink = MM_EMPTY_LIST;
+ }
+
/* Initialize the user mode image list */
InitializeListHead(&MmLoadedUserImageList);
-
- /* Initialize the paged pool mutex */
+
+ /* Initialize the paged pool mutex and the section commit mutex */
KeInitializeGuardedMutex(&MmPagedPoolMutex);
-
+ KeInitializeGuardedMutex(&MmSectionCommitMutex);
+ KeInitializeGuardedMutex(&MmSectionBasedMutex);
+
/* Initialize the Loader Lock */
- KeInitializeMutant(&MmSystemLoadLock, FALSE);
-
- //
- // Count physical pages on the system
- //
- PageCount = MiPagesInLoaderBlock(LoaderBlock, IncludeType);
-
+ KeInitializeMutant(&MmSystemLoadLock, FALSE);
+
+ /* Set the zero page event */
+ KeInitializeEvent(&MmZeroingPageEvent, SynchronizationEvent, FALSE);
+ MmZeroingPageThreadActive = FALSE;
+
+ /* Initialize the dead stack S-LIST */
+ InitializeSListHead(&MmDeadStackSListHead);
+
//
// Check if this is a machine with less than 19MB of RAM
//
+ PageCount = MmNumberOfPhysicalPages;
if (PageCount < MI_MIN_PAGES_FOR_SYSPTE_TUNING)
{
//
else
{
//
- // Use the default, but check if we have more than 32MB of RAM
+ // Use the default
//
MmNumberOfSystemPtes = 11000;
if (PageCount > MI_MIN_PAGES_FOR_SYSPTE_BOOST)
//
MmNumberOfSystemPtes <<= 1;
}
+ if (PageCount > MI_MIN_PAGES_FOR_SYSPTE_BOOST_BOOST)
+ {
+ //
+ // Double the amount of system PTEs
+ //
+ MmNumberOfSystemPtes <<= 1;
+ }
}
-
+
DPRINT("System PTE count has been tuned to %d (%d bytes)\n",
MmNumberOfSystemPtes, MmNumberOfSystemPtes * PAGE_SIZE);
-
+
/* Initialize the working set lock */
- ExInitializePushLock((PULONG_PTR)&MmSystemCacheWs.WorkingSetMutex);
-
+ ExInitializePushLock(&MmSystemCacheWs.WorkingSetMutex);
+
/* Set commit limit */
MmTotalCommitLimit = 2 * _1GB;
MmTotalCommitLimitMaximum = MmTotalCommitLimit;
-
+
/* Has the allocation fragment been setup? */
if (!MmAllocationFragment)
{
/* Convert from 1KB fragments to pages */
MmAllocationFragment *= _1KB;
MmAllocationFragment = ROUND_TO_PAGES(MmAllocationFragment);
-
+
/* Don't let it past the maximum */
MmAllocationFragment = min(MmAllocationFragment,
MI_MAX_ALLOCATION_FRAGMENT);
-
+
/* Don't let it too small either */
MmAllocationFragment = max(MmAllocationFragment,
MI_MIN_ALLOCATION_FRAGMENT);
}
-
- /* Initialize the platform-specific parts */
+
+ /* Check for kernel stack size that's too big */
+ if (MmLargeStackSize > (KERNEL_LARGE_STACK_SIZE / _1KB))
+ {
+ /* Sanitize to default value */
+ MmLargeStackSize = KERNEL_LARGE_STACK_SIZE;
+ }
+ else
+ {
+ /* Take the registry setting, and convert it into bytes */
+ MmLargeStackSize *= _1KB;
+
+ /* Now align it to a page boundary */
+ MmLargeStackSize = PAGE_ROUND_UP(MmLargeStackSize);
+
+ /* Sanity checks */
+ ASSERT(MmLargeStackSize <= KERNEL_LARGE_STACK_SIZE);
+ ASSERT((MmLargeStackSize & (PAGE_SIZE - 1)) == 0);
+
+ /* Make sure it's not too low */
+ if (MmLargeStackSize < KERNEL_STACK_SIZE) MmLargeStackSize = KERNEL_STACK_SIZE;
+ }
+
+ /* Compute color information (L2 cache-separated paging lists) */
+ MiComputeColorInformation();
+
+ // Calculate the number of bytes for the PFN database
+ // then add the color tables and convert to pages
+ MxPfnAllocation = (MmHighestPhysicalPage + 1) * sizeof(MMPFN);
+ MxPfnAllocation += (MmSecondaryColors * sizeof(MMCOLOR_TABLES) * 2);
+ MxPfnAllocation >>= PAGE_SHIFT;
+
+ // We have to add one to the count here, because in the process of
+ // shifting down to the page size, we actually ended up getting the
+ // lower aligned size (so say, 0x5FFFF bytes is now 0x5F pages).
+ // Later on, we'll shift this number back into bytes, which would cause
+ // us to end up with only 0x5F000 bytes -- when we actually want to have
+ // 0x60000 bytes.
+ MxPfnAllocation++;
+
+ /* Initialize the platform-specific parts */
MiInitMachineDependent(LoaderBlock);
-
- //
- // Sync us up with ReactOS Mm
- //
- MiSyncARM3WithROS(MmNonPagedSystemStart, (PVOID)((ULONG_PTR)MmNonPagedPoolEnd - 1));
- MiSyncARM3WithROS(MmPfnDatabase, (PVOID)((ULONG_PTR)MmNonPagedPoolStart + MmSizeOfNonPagedPoolInBytes - 1));
- MiSyncARM3WithROS((PVOID)HYPER_SPACE, (PVOID)(HYPER_SPACE + PAGE_SIZE - 1));
-
+
//
// Build the physical memory block
//
MmPhysicalMemoryBlock = MmInitializeMemoryLimits(LoaderBlock,
IncludeType);
-
+
//
// Allocate enough buffer for the PFN bitmap
// Align it up to a 32-bit boundary
MmHighestPhysicalPage,
0x101);
}
-
+
//
// Initialize it and clear all the bits to begin with
//
RtlInitializeBitMap(&MiPfnBitMap,
Bitmap,
- MmHighestPhysicalPage + 1);
+ (ULONG)MmHighestPhysicalPage + 1);
RtlClearAllBits(&MiPfnBitMap);
-
+
//
// Loop physical memory runs
//
//
// Set the bits in the PFN bitmap
//
- RtlSetBits(&MiPfnBitMap, Run->BasePage, Run->PageCount);
+ RtlSetBits(&MiPfnBitMap, (ULONG)Run->BasePage, (ULONG)Run->PageCount);
}
}
-
+
/* Look for large page cache entries that need caching */
MiSyncCachedRanges();
{
/* Set small system */
MmSystemSize = MmSmallSystem;
+ MmMaximumDeadKernelStacks = 0;
}
else if (MmNumberOfPhysicalPages <= ((19 * _1MB) / PAGE_SIZE))
{
/* Set small system and add 100 pages for the cache */
MmSystemSize = MmSmallSystem;
MmSystemCacheWsMinimum += 100;
+ MmMaximumDeadKernelStacks = 2;
}
else
{
/* Set medium system and add 400 pages for the cache */
MmSystemSize = MmMediumSystem;
MmSystemCacheWsMinimum += 400;
+ MmMaximumDeadKernelStacks = 5;
}
-
+
/* Check for less than 24MB */
if (MmNumberOfPhysicalPages < ((24 * _1MB) / PAGE_SIZE))
{
}
}
}
-
+
/* Check for more than 33 MB */
if (MmNumberOfPhysicalPages > ((33 * _1MB) / PAGE_SIZE))
{
/* Add another 500 pages to the cache */
MmSystemCacheWsMinimum += 500;
}
-
+
/* Now setup the shared user data fields */
ASSERT(SharedUserData->NumberOfPhysicalPages == 0);
SharedUserData->NumberOfPhysicalPages = MmNumberOfPhysicalPages;
}
else
{
- /* Check for LanMan server */
+ /* Check for LanMan server (La for LanmanNT) */
if (MmProductType == '\0a\0L')
{
/* This is a domain controller */
}
else
{
- /* Otherwise it must be a normal server */
+ /* Otherwise it must be a normal server (Se for ServerNT) */
SharedUserData->NtProductType = NtProductServer;
}
DPRINT1("System cache working set too big\n");
return FALSE;
}
-
+
/* Initialize the system cache */
//MiInitializeSystemCache(MmSystemCacheWsMinimum, MmAvailablePages);
-
+
/* Update the commit limit */
MmTotalCommitLimit = MmAvailablePages;
if (MmTotalCommitLimit > 1024) MmTotalCommitLimit -= 1024;
MmTotalCommitLimitMaximum = MmTotalCommitLimit;
-
+
/* Size up paged pool and build the shadow system page directory */
MiBuildPagedPool();
-
+
/* Debugger physical memory support is now ready to be used */
MmDebugPte = MiAddressToPte(MiDebugMapping);
/* Initialize the loaded module list */
MiInitializeLoadedModuleList(LoaderBlock);
}
-
+
//
// Always return success for now
//
- return STATUS_SUCCESS;
+ return TRUE;
}
/* EOF */