// http://www.ditii.com/2007/09/28/windows-memory-management-x86-virtual-address-space/
//
PVOID MmNonPagedSystemStart;
-SIZE_T MiNonPagedSystemSize;
PVOID MmNonPagedPoolStart;
PVOID MmNonPagedPoolExpansionStart;
PVOID MmNonPagedPoolEnd = MI_NONPAGED_POOL_END;
#endif
/* These variables define the system cache address space */
-PVOID MmSystemCacheStart;
+PVOID MmSystemCacheStart = (PVOID)MI_SYSTEM_CACHE_START;
PVOID MmSystemCacheEnd;
+ULONG MmSizeOfSystemCacheInPages;
MMSUPPORT MmSystemCacheWs;
//
SIZE_T MmTotalCommitLimit;
SIZE_T MmTotalCommitLimitMaximum;
+/*
+ * These values tune certain user parameters. They have default values set here,
+ * as well as in the code, and can be overwritten by registry settings.
+ */
+SIZE_T MmHeapSegmentReserve = 1 * _1MB;
+SIZE_T MmHeapSegmentCommit = 2 * PAGE_SIZE;
+SIZE_T MmHeapDeCommitTotalFreeThreshold = 64 * _1KB;
+SIZE_T MmHeapDeCommitFreeBlockThreshold = PAGE_SIZE;
+SIZE_T MmMinimumStackCommitInBytes = 0;
+
/* Internal setting used for debugging memory descriptors */
BOOLEAN MiDbgEnableMdDump =
#ifdef _ARM_
ULONG MmCritsectTimeoutSeconds = 150; // NT value: 720 * 60 * 60; (30 days)
LARGE_INTEGER MmCriticalSectionTimeout;
+//
+// Throttling limits for Cc (in pages)
+// Above top, we don't throttle
+// Above bottom, we throttle depending on the number of modified pages
+// Otherwise, we throttle!
+//
+ULONG MmThrottleTop;
+ULONG MmThrottleBottom;
+
/* PRIVATE FUNCTIONS **********************************************************/
VOID
Pfn1 = MiGetPfnEntry(PageFrameIndex);
/* Lock the PFN Database */
- OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
+ OldIrql = MiAcquirePfnLock();
while (PageCount--)
{
/* If the page really has no references, mark it as free */
}
/* Release PFN database */
- KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
+ MiReleasePfnLock(OldIrql);
/* Done with this block */
break;
}
/* Acquire the PFN lock */
- OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
+ OldIrql = MiAcquirePfnLock();
/* Loop the runs */
LoaderPages = 0;
}
/* Release the PFN lock and flush the TLB */
- DPRINT1("Loader pages freed: %lx\n", LoaderPages);
- KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
+ DPRINT("Loader pages freed: %lx\n", LoaderPages);
+ MiReleasePfnLock(OldIrql);
KeFlushCurrentTb();
/* Free our run structure */
DbgPrint("Active: %5d pages\t[%6d KB]\n", ActivePages, (ActivePages << PAGE_SHIFT) / 1024);
DbgPrint("Free: %5d pages\t[%6d KB]\n", FreePages, (FreePages << PAGE_SHIFT) / 1024);
+ DbgPrint("Other: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
DbgPrint("-----------------------------------------\n");
#if MI_TRACE_PFNS
OtherPages = UsageBucket[MI_USAGE_BOOT_DRIVER];
DbgPrint("System Drivers: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
OtherPages = UsageBucket[MI_USAGE_PFN_DATABASE];
DbgPrint("PFN Database: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
- OtherPages = UsageBucket[MI_USAGE_PAGE_TABLE] + UsageBucket[MI_USAGE_LEGACY_PAGE_DIRECTORY];
+ OtherPages = UsageBucket[MI_USAGE_PAGE_TABLE] + UsageBucket[MI_USAGE_PAGE_DIRECTORY] + UsageBucket[MI_USAGE_LEGACY_PAGE_DIRECTORY];
DbgPrint("Page Tables: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
+ OtherPages = UsageBucket[MI_USAGE_SYSTEM_PTE];
+ DbgPrint("System PTEs: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
+ OtherPages = UsageBucket[MI_USAGE_VAD];
+ DbgPrint("VADs: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
+ OtherPages = UsageBucket[MI_USAGE_CONTINOUS_ALLOCATION];
+ DbgPrint("Continuous Allocs: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
+ OtherPages = UsageBucket[MI_USAGE_MDL];
+ DbgPrint("MDLs: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
OtherPages = UsageBucket[MI_USAGE_NONPAGED_POOL] + UsageBucket[MI_USAGE_NONPAGED_POOL_EXPANSION];
DbgPrint("NonPaged Pool: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
OtherPages = UsageBucket[MI_USAGE_PAGED_POOL];
DbgPrint("Paged Pool: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
+ OtherPages = UsageBucket[MI_USAGE_DEMAND_ZERO];
+ DbgPrint("Demand Zero: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
+ OtherPages = UsageBucket[MI_USAGE_ZERO_LOOP];
+ DbgPrint("Zero Loop: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
+ OtherPages = UsageBucket[MI_USAGE_PEB_TEB];
+ DbgPrint("PEB/TEB: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
OtherPages = UsageBucket[MI_USAGE_KERNEL_STACK] + UsageBucket[MI_USAGE_KERNEL_STACK_EXPANSION];
DbgPrint("Kernel Stack: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
OtherPages = UsageBucket[MI_USAGE_INIT_MEMORY];
DbgPrint("Sections: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
OtherPages = UsageBucket[MI_USAGE_CACHE];
DbgPrint("Cache: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
+ OtherPages = UsageBucket[MI_USAGE_FREE_PAGE];
+ DbgPrint("Free: %5d pages\t[%6d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
#endif
KeLowerIrql(OldIrql);
}
TempPte.u.Hard.PageFrameNumber = MmSystemPageDirectory[0];
MI_WRITE_VALID_PTE(PointerPte, TempPte);
#endif
+
+#ifdef _M_IX86
//
// Let's get back to paged pool work: size it up.
// By default, it should be twice as big as nonpaged pool.
MmSizeOfPagedPoolInBytes = (ULONG_PTR)MmNonPagedSystemStart -
(ULONG_PTR)MmPagedPoolStart;
}
+#endif // _M_IX86
//
// Get the size in pages and make sure paged pool is at least 32MB.
MmSizeOfPagedPoolInBytes = Size * PAGE_SIZE * 1024;
MmSizeOfPagedPoolInPages = MmSizeOfPagedPoolInBytes >> PAGE_SHIFT;
+#ifdef _M_IX86
//
// Let's be really sure this doesn't overflow into nonpaged system VA
//
ASSERT((MmSizeOfPagedPoolInBytes + (ULONG_PTR)MmPagedPoolStart) <=
(ULONG_PTR)MmNonPagedSystemStart);
+#endif // _M_IX86
//
// This is where paged pool ends
//
// Lock the PFN database
//
- OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
+ OldIrql = MiAcquirePfnLock();
#if (_MI_PAGING_LEVELS >= 3)
/* On these systems, there's no double-mapping, so instead, the PPEs
//
// Release the PFN database lock
//
- KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
+ MiReleasePfnLock(OldIrql);
//
// We only have one PDE mapped for now... at fault time, additional PDEs
//
MmPagedPoolInfo.PagedPoolAllocationMap = ExAllocatePoolWithTag(NonPagedPool,
Size,
- ' mM');
+ TAG_MM);
ASSERT(MmPagedPoolInfo.PagedPoolAllocationMap);
//
//
MmPagedPoolInfo.EndOfPagedPoolBitmap = ExAllocatePoolWithTag(NonPagedPool,
Size,
- ' mM');
+ TAG_MM);
ASSERT(MmPagedPoolInfo.EndOfPagedPoolBitmap);
RtlInitializeBitMap(MmPagedPoolInfo.EndOfPagedPoolBitmap,
(PULONG)(MmPagedPoolInfo.EndOfPagedPoolBitmap + 1),
TotalPages += Md->PageCount;
}
- DPRINT1("Total: %08lX (%d MB)\n", (ULONG)TotalPages, (ULONG)(TotalPages * PAGE_SIZE) / 1024 / 1024);
+ DPRINT1("Total: %08lX (%lu MB)\n", (ULONG)TotalPages, (ULONG)(TotalPages * PAGE_SIZE) / 1024 / 1024);
}
BOOLEAN
MiLowNonPagedPoolEvent = &MiTempEvent;
MiHighNonPagedPoolEvent = &MiTempEvent;
+ //
+ // Default throttling limits for Cc
+ // May be adjusted later on depending on system type
+ //
+ MmThrottleTop = 450;
+ MmThrottleBottom = 127;
+
//
// Define the basic user vs. kernel address space separation
//
ASSERT(PointerPte == TestPte);
/* Try a bunch of random addresses near the end of the address space */
- PointerPte = (PMMPTE)0xFFFC8000;
+ PointerPte = (PMMPTE)((ULONG_PTR)MI_HIGHEST_SYSTEM_ADDRESS - 0x37FFF);
for (j = 0; j < 20; j += 1)
{
MI_MAKE_PROTOTYPE_PTE(&TempPte, PointerPte);
}
/* Subsection PTEs are always in nonpaged pool, pick a random address to try */
- PointerPte = (PMMPTE)0xFFAACBB8;
+ PointerPte = (PMMPTE)((ULONG_PTR)MmNonPagedPoolStart + (MmSizeOfNonPagedPoolInBytes / 2));
MI_MAKE_SUBSECTION_PTE(&TempPte, PointerPte);
TestPte = MiSubsectionPteToSubsection(&TempPte);
ASSERT(PointerPte == TestPte);
//
MmNumberOfSystemPtes <<= 1;
}
+ if (MmSpecialPoolTag != 0 && MmSpecialPoolTag != -1)
+ {
+ //
+ // Add some extra PTEs for special pool
+ //
+ MmNumberOfSystemPtes += 0x6000;
+ }
}
- DPRINT("System PTE count has been tuned to %d (%d bytes)\n",
+ DPRINT("System PTE count has been tuned to %lu (%lu bytes)\n",
MmNumberOfSystemPtes, MmNumberOfSystemPtes * PAGE_SIZE);
+ /* Check if no values are set for the heap limits */
+ if (MmHeapSegmentReserve == 0)
+ {
+ MmHeapSegmentReserve = 2 * _1MB;
+ }
+
+ if (MmHeapSegmentCommit == 0)
+ {
+ MmHeapSegmentCommit = 2 * PAGE_SIZE;
+ }
+
+ if (MmHeapDeCommitTotalFreeThreshold == 0)
+ {
+ MmHeapDeCommitTotalFreeThreshold = 64 * _1KB;
+ }
+
+ if (MmHeapDeCommitFreeBlockThreshold == 0)
+ {
+ MmHeapDeCommitFreeBlockThreshold = PAGE_SIZE;
+ }
+
/* Initialize the working set lock */
ExInitializePushLock(&MmSystemCacheWs.WorkingSetMutex);
//
Bitmap = ExAllocatePoolWithTag(NonPagedPool,
(((MmHighestPhysicalPage + 1) + 31) / 32) * 4,
- ' mM');
+ TAG_MM);
if (!Bitmap)
{
//
/* Set Windows NT Workstation product type */
SharedUserData->NtProductType = NtProductWinNt;
MmProductType = 0;
+
+ /* For this product, we wait till the last moment to throttle */
+ MmThrottleTop = 250;
+ MmThrottleBottom = 30;
}
else
{
/* Set the product type, and make the system more aggressive with low memory */
MmProductType = 1;
MmMinimumFreePages = 81;
+
+ /* We will throttle earlier to preserve memory */
+ MmThrottleTop = 450;
+ MmThrottleBottom = 80;
}
/* Update working set tuning parameters */
return FALSE;
}
+ /* Define limits for system cache */
+#ifdef _M_AMD64
+ MmSizeOfSystemCacheInPages = (MI_SYSTEM_CACHE_END - MI_SYSTEM_CACHE_START) / PAGE_SIZE;
+#else
+ MmSizeOfSystemCacheInPages = ((ULONG_PTR)MI_PAGED_POOL_START - (ULONG_PTR)MI_SYSTEM_CACHE_START) / PAGE_SIZE;
+#endif
+ MmSystemCacheEnd = (PVOID)((ULONG_PTR)MmSystemCacheStart + (MmSizeOfSystemCacheInPages * PAGE_SIZE) - 1);
+
/* Initialize the system cache */
//MiInitializeSystemCache(MmSystemCacheWsMinimum, MmAvailablePages);