#define MODULE_INVOLVED_IN_ARM3
#include "miarm.h"
+#undef MmSystemRangeStart
/* GLOBALS ********************************************************************/
PVOID MiSessionSpaceEnd; // 0xC0000000
PVOID MiSessionImageEnd; // 0xC0000000
PVOID MiSessionImageStart; // 0xBF800000
+PVOID MiSessionSpaceWs;
PVOID MiSessionViewStart; // 0xBE000000
PVOID MiSessionPoolEnd; // 0xBE000000
PVOID MiSessionPoolStart; // 0xBD000000
// The system view space, on the other hand, is where sections that are memory
// mapped into "system space" end up.
//
-// By default, it is a 16MB region.
+// By default, it is a 16MB region, but we hack it to be 32MB for ReactOS
//
PVOID MiSystemViewStart;
SIZE_T MmSystemViewSize;
//
// This should be 0xC0C00000 -- the cache itself starts at 0xC1000000
//
-PMMWSL MmSystemCacheWorkingSetList = MI_SYSTEM_CACHE_WS_START;
+PMMWSL MmSystemCacheWorkingSetList = (PVOID)MI_SYSTEM_CACHE_WS_START;
//
// Windows NT seems to choose between 7000, 11000 and 50000
SIZE_T MmTotalCommitLimit;
SIZE_T MmTotalCommitLimitMaximum;
+/*
+ * These values tune certain user parameters. They have default values set here,
+ * as well as in the code, and can be overwritten by registry settings.
+ */
+SIZE_T MmHeapSegmentReserve = 1 * _1MB;
+SIZE_T MmHeapSegmentCommit = 2 * PAGE_SIZE;
+SIZE_T MmHeapDeCommitTotalFreeThreshold = 64 * _1KB;
+SIZE_T MmHeapDeCommitFreeBlockThreshold = PAGE_SIZE;
+SIZE_T MmMinimumStackCommitInBytes = 0;
+
/* Internal setting used for debugging memory descriptors */
BOOLEAN MiDbgEnableMdDump =
#ifdef _ARM_
/* Number of free pages in the loader block */
PFN_NUMBER MiNumberOfFreePages = 0;
+/* Timeout value for critical sections (2.5 minutes) */
+ULONG MmCritsectTimeoutSeconds = 150; // NT value: 720 * 60 * 60; (30 days)
+LARGE_INTEGER MmCriticalSectionTimeout;
/* PRIVATE FUNCTIONS **********************************************************/
Pfn1 = MiGetPfnEntry(PageFrameIndex);
/* Lock the PFN Database */
- OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
+ OldIrql = MiAcquirePfnLock();
while (PageCount--)
{
/* If the page really has no references, mark it as free */
}
/* Release PFN database */
- KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
+ MiReleasePfnLock(OldIrql);
/* Done with this block */
break;
}
#endif /* !_M_AMD64 */
+/*
+ * MmFreeLoaderBlock
+ *
+ * Reclaims the physical pages that the boot loader used for its own data
+ * (registry data, NLS tables and the OS-loader heap) once the kernel is
+ * done with the loader parameter block. The descriptor list is walked twice:
+ * first to count entries (to size the run buffer), then to collect the
+ * reclaimable runs. Each page in a collected run is either inserted into
+ * the free list (if it has no references and sits on no list) or has its
+ * share count dropped so it gets deleted.
+ *
+ * LoaderBlock - LOADER_PARAMETER_BLOCK handed over by the boot loader;
+ *               only its MemoryDescriptorListHead is read.
+ *
+ * NOTE(review): all PFN manipulation happens under the PFN lock; assumes
+ * no other component still references loader-owned pages at this point of
+ * boot -- confirm against the caller's position in the init sequence.
+ */
+VOID
+NTAPI
+INIT_FUNCTION
+MmFreeLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
+{
+    PLIST_ENTRY NextMd;
+    PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
+    ULONG_PTR i;
+    PFN_NUMBER BasePage, LoaderPages;
+    PMMPFN Pfn1;
+    KIRQL OldIrql;
+    PPHYSICAL_MEMORY_RUN Buffer, Entry;
+
+    /* Loop the descriptors in order to count them */
+    i = 0;
+    NextMd = LoaderBlock->MemoryDescriptorListHead.Flink;
+    while (NextMd != &LoaderBlock->MemoryDescriptorListHead)
+    {
+        MdBlock = CONTAINING_RECORD(NextMd,
+                                    MEMORY_ALLOCATION_DESCRIPTOR,
+                                    ListEntry);
+        i++;
+        NextMd = MdBlock->ListEntry.Flink;
+    }
+
+    /* Allocate a structure to hold the physical runs */
+    Buffer = ExAllocatePoolWithTag(NonPagedPool,
+                                   i * sizeof(PHYSICAL_MEMORY_RUN),
+                                   'lMmM');
+    ASSERT(Buffer != NULL);
+    Entry = Buffer;
+
+    /* Loop the descriptors again */
+    NextMd = LoaderBlock->MemoryDescriptorListHead.Flink;
+    while (NextMd != &LoaderBlock->MemoryDescriptorListHead)
+    {
+        /* Check what kind this was */
+        MdBlock = CONTAINING_RECORD(NextMd,
+                                    MEMORY_ALLOCATION_DESCRIPTOR,
+                                    ListEntry);
+        switch (MdBlock->MemoryType)
+        {
+            /* Registry, NLS, and heap data */
+            case LoaderRegistryData:
+            case LoaderOsloaderHeap:
+            case LoaderNlsData:
+                /* Are all a candidate for deletion */
+                Entry->BasePage = MdBlock->BasePage;
+                Entry->PageCount = MdBlock->PageCount;
+                Entry++;
+
+                /* fallthrough -- the collected case also just breaks */
+            /* We keep the rest */
+            default:
+                break;
+        }
+
+        /* Move to the next descriptor */
+        NextMd = MdBlock->ListEntry.Flink;
+    }
+
+    /* Acquire the PFN lock */
+    OldIrql = MiAcquirePfnLock();
+
+    /* Loop the runs (Entry points one past the last collected run) */
+    LoaderPages = 0;
+    while (--Entry >= Buffer)
+    {
+        /* See how many pages are in this run */
+        i = Entry->PageCount;
+        BasePage = Entry->BasePage;
+
+        /* Loop each page */
+        Pfn1 = MiGetPfnEntry(BasePage);
+        while (i--)
+        {
+            /* Check if it has references or is in any kind of list */
+            if (!(Pfn1->u3.e2.ReferenceCount) && (!Pfn1->u1.Flink))
+            {
+                /* Set the new PTE address and put this page into the free list */
+                Pfn1->PteAddress = (PMMPTE)(BasePage << PAGE_SHIFT);
+                MiInsertPageInFreeList(BasePage);
+                LoaderPages++;
+            }
+            else if (BasePage) /* page frame 0 is deliberately never reclaimed here */
+            {
+                /* It has a reference, so simply drop it */
+                ASSERT(MI_IS_PHYSICAL_ADDRESS(MiPteToAddress(Pfn1->PteAddress)) == FALSE);
+
+                /* Drop a dereference on this page, which should delete it */
+                Pfn1->PteAddress->u.Long = 0;
+                MI_SET_PFN_DELETED(Pfn1);
+                MiDecrementShareCount(Pfn1, BasePage);
+                LoaderPages++;
+            }
+
+            /* Move to the next page */
+            Pfn1++;
+            BasePage++;
+        }
+    }
+
+    /* Release the PFN lock and flush the TLB */
+    DPRINT("Loader pages freed: %lx\n", LoaderPages);
+    MiReleasePfnLock(OldIrql);
+    KeFlushCurrentTb();
+
+    /* Free our run structure */
+    ExFreePoolWithTag(Buffer, 'lMmM');
+}
+
VOID
NTAPI
INIT_FUNCTION
MiAddHalIoMappings(VOID)
{
PVOID BaseAddress;
- PMMPDE PointerPde;
+ PMMPDE PointerPde, LastPde;
PMMPTE PointerPte;
- ULONG i, j, PdeCount;
+ ULONG j;
PFN_NUMBER PageFrameIndex;
/* HAL Heap address -- should be on a PDE boundary */
/* Check how many PDEs the heap has */
PointerPde = MiAddressToPde(BaseAddress);
- PdeCount = PDE_COUNT - MiGetPdeOffset(BaseAddress);
- for (i = 0; i < PdeCount; i++)
+ LastPde = MiAddressToPde((PVOID)MM_HAL_VA_END);
+
+ while (PointerPde <= LastPde)
{
/* Does the HAL own this mapping? */
if ((PointerPde->u.Hard.Valid == 1) &&
DbgPrint("Active: %5d pages\t[%6d KB]\n", ActivePages, (ActivePages << PAGE_SHIFT) / 1024);
DbgPrint("Free: %5d pages\t[%6d KB]\n", FreePages, (FreePages << PAGE_SHIFT) / 1024);
+ DbgPrint("Other: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
DbgPrint("-----------------------------------------\n");
#if MI_TRACE_PFNS
OtherPages = UsageBucket[MI_USAGE_BOOT_DRIVER];
DbgPrint("System Drivers: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
OtherPages = UsageBucket[MI_USAGE_PFN_DATABASE];
DbgPrint("PFN Database: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
- OtherPages = UsageBucket[MI_USAGE_PAGE_TABLE] + UsageBucket[MI_USAGE_LEGACY_PAGE_DIRECTORY];
+ OtherPages = UsageBucket[MI_USAGE_PAGE_TABLE] + UsageBucket[MI_USAGE_PAGE_DIRECTORY] + UsageBucket[MI_USAGE_LEGACY_PAGE_DIRECTORY];
DbgPrint("Page Tables: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
+ OtherPages = UsageBucket[MI_USAGE_SYSTEM_PTE];
+ DbgPrint("System PTEs: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
+ OtherPages = UsageBucket[MI_USAGE_VAD];
+ DbgPrint("VADs: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
+ OtherPages = UsageBucket[MI_USAGE_CONTINOUS_ALLOCATION];
+ DbgPrint("Continuous Allocs: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
+ OtherPages = UsageBucket[MI_USAGE_MDL];
+ DbgPrint("MDLs: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
OtherPages = UsageBucket[MI_USAGE_NONPAGED_POOL] + UsageBucket[MI_USAGE_NONPAGED_POOL_EXPANSION];
DbgPrint("NonPaged Pool: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
OtherPages = UsageBucket[MI_USAGE_PAGED_POOL];
DbgPrint("Paged Pool: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
+ OtherPages = UsageBucket[MI_USAGE_DEMAND_ZERO];
+ DbgPrint("Demand Zero: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
+ OtherPages = UsageBucket[MI_USAGE_ZERO_LOOP];
+ DbgPrint("Zero Loop: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
+ OtherPages = UsageBucket[MI_USAGE_PEB_TEB];
+ DbgPrint("PEB/TEB: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
OtherPages = UsageBucket[MI_USAGE_KERNEL_STACK] + UsageBucket[MI_USAGE_KERNEL_STACK_EXPANSION];
DbgPrint("Kernel Stack: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
OtherPages = UsageBucket[MI_USAGE_INIT_MEMORY];
DbgPrint("Sections: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
OtherPages = UsageBucket[MI_USAGE_CACHE];
DbgPrint("Cache: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
+ OtherPages = UsageBucket[MI_USAGE_FREE_PAGE];
+ DbgPrint("Free: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
#endif
KeLowerIrql(OldIrql);
}
KIRQL OldIrql;
SIZE_T Size;
ULONG BitMapSize;
-#if (_MI_PAGING_LEVELS == 2)
+#if (_MI_PAGING_LEVELS >= 3)
+ MMPPE TempPpe = ValidKernelPpe;
+ PMMPPE PointerPpe;
+#elif (_MI_PAGING_LEVELS == 2)
MMPTE TempPte = ValidKernelPte;
//
MmSizeOfPagedPoolInBytes) - 1);
//
- // So now get the PDE for paged pool and zero it out
+ // Lock the PFN database
//
- PointerPde = MiAddressToPde(MmPagedPoolStart);
+ OldIrql = MiAcquirePfnLock();
#if (_MI_PAGING_LEVELS >= 3)
- /* On these systems, there's no double-mapping, so instead, the PPE and PXEs
+ /* On these systems, there's no double-mapping, so instead, the PPEs
* are setup to span the entire paged pool area, so there's no need for the
* system PD */
- ASSERT(FALSE);
+ for (PointerPpe = MiAddressToPpe(MmPagedPoolStart);
+ PointerPpe <= MiAddressToPpe(MmPagedPoolEnd);
+ PointerPpe++)
+ {
+ /* Check if the PPE is already valid */
+ if (!PointerPpe->u.Hard.Valid)
+ {
+ /* It is not, so map a fresh zeroed page */
+ TempPpe.u.Hard.PageFrameNumber = MiRemoveZeroPage(0);
+ MI_WRITE_VALID_PPE(PointerPpe, TempPpe);
+ }
+ }
#endif
+ //
+ // So now get the PDE for paged pool and zero it out
+ //
+ PointerPde = MiAddressToPde(MmPagedPoolStart);
RtlZeroMemory(PointerPde,
(1 + MiAddressToPde(MmPagedPoolEnd) - PointerPde) * sizeof(MMPDE));
MmPagedPoolInfo.FirstPteForPagedPool = PointerPte;
MmPagedPoolInfo.LastPteForPagedPool = MiAddressToPte(MmPagedPoolEnd);
- //
- // Lock the PFN database
- //
- OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
-
/* Allocate a page and map the first paged pool PDE */
MI_SET_USAGE(MI_USAGE_PAGED_POOL);
MI_SET_PROCESS2("Kernel");
#if (_MI_PAGING_LEVELS >= 3)
/* Use the PPE of MmPagedPoolStart that was setup above */
// Bla = PFN_FROM_PTE(PpeAddress(MmPagedPool...));
- ASSERT(FALSE);
+
+ /* Initialize the PFN entry for it */
+ MiInitializePfnForOtherProcess(PageFrameIndex,
+ (PMMPTE)PointerPde,
+ PFN_FROM_PTE(MiAddressToPpe(MmPagedPoolStart)));
#else
/* Do it this way */
// Bla = MmSystemPageDirectory[(PointerPde - (PMMPTE)PDE_BASE) / PDE_COUNT]
//
// Release the PFN database lock
//
- KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
+ MiReleasePfnLock(OldIrql);
//
// We only have one PDE mapped for now... at fault time, additional PDEs
//
MmPagedPoolInfo.PagedPoolAllocationMap = ExAllocatePoolWithTag(NonPagedPool,
Size,
- ' mM');
+ TAG_MM);
ASSERT(MmPagedPoolInfo.PagedPoolAllocationMap);
//
//
MmPagedPoolInfo.EndOfPagedPoolBitmap = ExAllocatePoolWithTag(NonPagedPool,
Size,
- ' mM');
+ TAG_MM);
ASSERT(MmPagedPoolInfo.EndOfPagedPoolBitmap);
RtlInitializeBitMap(MmPagedPoolInfo.EndOfPagedPoolBitmap,
(PULONG)(MmPagedPoolInfo.EndOfPagedPoolBitmap + 1),
TotalPages += Md->PageCount;
}
- DPRINT1("Total: %08lX (%d MB)\n", (ULONG)TotalPages, (ULONG)(TotalPages * PAGE_SIZE) / 1024 / 1024);
+ DPRINT1("Total: %08lX (%lu MB)\n", (ULONG)TotalPages, (ULONG)(TotalPages * PAGE_SIZE) / 1024 / 1024);
}
BOOLEAN
PVOID Bitmap;
PPHYSICAL_MEMORY_RUN Run;
PFN_NUMBER PageCount;
+#if DBG
+ ULONG j;
+ PMMPTE PointerPte, TestPte;
+ MMPTE TempPte;
+#endif
/* Dump memory descriptors */
if (MiDbgEnableMdDump) MiDbgDumpMemoryDescriptors();
// Define the basic user vs. kernel address space separation
//
MmSystemRangeStart = (PVOID)MI_DEFAULT_SYSTEM_RANGE_START;
- MmUserProbeAddress = (ULONG_PTR)MI_HIGHEST_USER_ADDRESS;
+ MmUserProbeAddress = (ULONG_PTR)MI_USER_PROBE_ADDRESS;
MmHighestUserAddress = (PVOID)MI_HIGHEST_USER_ADDRESS;
/* Highest PTE and PDE based on the addresses above */
/* Initialize session space address layout */
MiInitializeSessionSpaceLayout();
+ /* Set the based section highest address */
+ MmHighSectionBase = (PVOID)((ULONG_PTR)MmHighestUserAddress - 0x800000);
+
+#if DBG
+    /* The subsection PTE format depends on things being 8-byte aligned */
+ ASSERT((sizeof(CONTROL_AREA) % 8) == 0);
+ ASSERT((sizeof(SUBSECTION) % 8) == 0);
+
+ /* Prototype PTEs are assumed to be in paged pool, so check if the math works */
+ PointerPte = (PMMPTE)MmPagedPoolStart;
+ MI_MAKE_PROTOTYPE_PTE(&TempPte, PointerPte);
+ TestPte = MiProtoPteToPte(&TempPte);
+ ASSERT(PointerPte == TestPte);
+
+ /* Try the last nonpaged pool address */
+ PointerPte = (PMMPTE)MI_NONPAGED_POOL_END;
+ MI_MAKE_PROTOTYPE_PTE(&TempPte, PointerPte);
+ TestPte = MiProtoPteToPte(&TempPte);
+ ASSERT(PointerPte == TestPte);
+
+ /* Try a bunch of random addresses near the end of the address space */
+ PointerPte = (PMMPTE)((ULONG_PTR)MI_HIGHEST_SYSTEM_ADDRESS - 0x37FFF);
+ for (j = 0; j < 20; j += 1)
+ {
+ MI_MAKE_PROTOTYPE_PTE(&TempPte, PointerPte);
+ TestPte = MiProtoPteToPte(&TempPte);
+ ASSERT(PointerPte == TestPte);
+ PointerPte++;
+ }
+
+ /* Subsection PTEs are always in nonpaged pool, pick a random address to try */
+ PointerPte = (PMMPTE)((ULONG_PTR)MmNonPagedPoolStart + (MmSizeOfNonPagedPoolInBytes / 2));
+ MI_MAKE_SUBSECTION_PTE(&TempPte, PointerPte);
+ TestPte = MiSubsectionPteToSubsection(&TempPte);
+ ASSERT(PointerPte == TestPte);
+#endif
+
+ /* Loop all 8 standby lists */
+ for (i = 0; i < 8; i++)
+ {
+ /* Initialize them */
+ MmStandbyPageListByPriority[i].Total = 0;
+ MmStandbyPageListByPriority[i].ListName = StandbyPageList;
+ MmStandbyPageListByPriority[i].Flink = MM_EMPTY_LIST;
+ MmStandbyPageListByPriority[i].Blink = MM_EMPTY_LIST;
+ }
+
/* Initialize the user mode image list */
InitializeListHead(&MmLoadedUserImageList);
- /* Initialize the paged pool mutex */
+ /* Initialize critical section timeout value (relative time is negative) */
+ MmCriticalSectionTimeout.QuadPart = MmCritsectTimeoutSeconds * (-10000000LL);
+
+ /* Initialize the paged pool mutex and the section commit mutex */
KeInitializeGuardedMutex(&MmPagedPoolMutex);
+ KeInitializeGuardedMutex(&MmSectionCommitMutex);
+ KeInitializeGuardedMutex(&MmSectionBasedMutex);
/* Initialize the Loader Lock */
KeInitializeMutant(&MmSystemLoadLock, FALSE);
KeInitializeEvent(&MmZeroingPageEvent, SynchronizationEvent, FALSE);
MmZeroingPageThreadActive = FALSE;
+ /* Initialize the dead stack S-LIST */
+ InitializeSListHead(&MmDeadStackSListHead);
+
//
// Check if this is a machine with less than 19MB of RAM
//
else
{
//
- // Use the default, but check if we have more than 32MB of RAM
+ // Use the default
//
MmNumberOfSystemPtes = 11000;
if (PageCount > MI_MIN_PAGES_FOR_SYSPTE_BOOST)
//
MmNumberOfSystemPtes <<= 1;
}
+ if (PageCount > MI_MIN_PAGES_FOR_SYSPTE_BOOST_BOOST)
+ {
+ //
+ // Double the amount of system PTEs
+ //
+ MmNumberOfSystemPtes <<= 1;
+ }
+ if (MmSpecialPoolTag != 0 && MmSpecialPoolTag != -1)
+ {
+ //
+ // Add some extra PTEs for special pool
+ //
+ MmNumberOfSystemPtes += 0x6000;
+ }
}
- DPRINT("System PTE count has been tuned to %d (%d bytes)\n",
+ DPRINT("System PTE count has been tuned to %lu (%lu bytes)\n",
MmNumberOfSystemPtes, MmNumberOfSystemPtes * PAGE_SIZE);
+ /* Check if no values are set for the heap limits */
+ if (MmHeapSegmentReserve == 0)
+ {
+ MmHeapSegmentReserve = 2 * _1MB;
+ }
+
+ if (MmHeapSegmentCommit == 0)
+ {
+ MmHeapSegmentCommit = 2 * PAGE_SIZE;
+ }
+
+ if (MmHeapDeCommitTotalFreeThreshold == 0)
+ {
+ MmHeapDeCommitTotalFreeThreshold = 64 * _1KB;
+ }
+
+ if (MmHeapDeCommitFreeBlockThreshold == 0)
+ {
+ MmHeapDeCommitFreeBlockThreshold = PAGE_SIZE;
+ }
+
/* Initialize the working set lock */
ExInitializePushLock(&MmSystemCacheWs.WorkingSetMutex);
//
Bitmap = ExAllocatePoolWithTag(NonPagedPool,
(((MmHighestPhysicalPage + 1) + 31) / 32) * 4,
- ' mM');
+ TAG_MM);
if (!Bitmap)
{
//
{
/* Set small system */
MmSystemSize = MmSmallSystem;
+ MmMaximumDeadKernelStacks = 0;
}
else if (MmNumberOfPhysicalPages <= ((19 * _1MB) / PAGE_SIZE))
{
/* Set small system and add 100 pages for the cache */
MmSystemSize = MmSmallSystem;
MmSystemCacheWsMinimum += 100;
+ MmMaximumDeadKernelStacks = 2;
}
else
{
/* Set medium system and add 400 pages for the cache */
MmSystemSize = MmMediumSystem;
MmSystemCacheWsMinimum += 400;
+ MmMaximumDeadKernelStacks = 5;
}
/* Check for less than 24MB */
}
else
{
- /* Check for LanMan server */
+ /* Check for LanMan server (La for LanmanNT) */
if (MmProductType == '\0a\0L')
{
/* This is a domain controller */
}
else
{
- /* Otherwise it must be a normal server */
+ /* Otherwise it must be a normal server (Se for ServerNT) */
SharedUserData->NtProductType = NtProductServer;
}