#define MODULE_INVOLVED_IN_ARM3
#include "miarm.h"
+#undef MmSystemRangeStart
/* GLOBALS ********************************************************************/
// http://www.ditii.com/2007/09/28/windows-memory-management-x86-virtual-address-space/
//
PVOID MmNonPagedSystemStart;
+SIZE_T MiNonPagedSystemSize;
PVOID MmNonPagedPoolStart;
PVOID MmNonPagedPoolExpansionStart;
PVOID MmNonPagedPoolEnd = MI_NONPAGED_POOL_END;
PVOID MiSessionSpaceEnd; // 0xC0000000
PVOID MiSessionImageEnd; // 0xC0000000
PVOID MiSessionImageStart; // 0xBF800000
+PVOID MiSessionSpaceWs;
PVOID MiSessionViewStart; // 0xBE000000
PVOID MiSessionPoolEnd; // 0xBE000000
PVOID MiSessionPoolStart; // 0xBD000000
// The system view space, on the other hand, is where sections that are memory
// mapped into "system space" end up.
//
-// By default, it is a 16MB region.
+// By default, it is a 16MB region, but we hack it to be 32MB for ReactOS
//
PVOID MiSystemViewStart;
SIZE_T MmSystemViewSize;
//
// This should be 0xC0C00000 -- the cache itself starts at 0xC1000000
//
-PMMWSL MmSystemCacheWorkingSetList = MI_SYSTEM_CACHE_WS_START;
+PMMWSL MmSystemCacheWorkingSetList = (PVOID)MI_SYSTEM_CACHE_WS_START;
//
// Windows NT seems to choose between 7000, 11000 and 50000
MiNumberDescriptors++;
/* Check if this is invisible memory */
- if ((Descriptor->MemoryType == LoaderFirmwarePermanent) &&
- (Descriptor->MemoryType == LoaderSpecialMemory) &&
- (Descriptor->MemoryType == LoaderHALCachedMemory) &&
+ if ((Descriptor->MemoryType == LoaderFirmwarePermanent) ||
+ (Descriptor->MemoryType == LoaderSpecialMemory) ||
+ (Descriptor->MemoryType == LoaderHALCachedMemory) ||
(Descriptor->MemoryType == LoaderBBTMemory))
{
/* Skip this descriptor */
for (i = 0; i < MmSecondaryColors; i++)
{
/* Set both free and zero lists for each color */
- MmFreePagesByColor[ZeroedPageList][i].Flink = 0xFFFFFFFF;
- MmFreePagesByColor[ZeroedPageList][i].Blink = (PVOID)0xFFFFFFFF;
+ MmFreePagesByColor[ZeroedPageList][i].Flink = LIST_HEAD;
+ MmFreePagesByColor[ZeroedPageList][i].Blink = (PVOID)LIST_HEAD;
MmFreePagesByColor[ZeroedPageList][i].Count = 0;
- MmFreePagesByColor[FreePageList][i].Flink = 0xFFFFFFFF;
- MmFreePagesByColor[FreePageList][i].Blink = (PVOID)0xFFFFFFFF;
+ MmFreePagesByColor[FreePageList][i].Flink = LIST_HEAD;
+ MmFreePagesByColor[FreePageList][i].Blink = (PVOID)LIST_HEAD;
MmFreePagesByColor[FreePageList][i].Count = 0;
}
}
+#ifndef _M_AMD64
BOOLEAN
NTAPI
INIT_FUNCTION
/* Finally add the pages for the PFN database itself */
MiBuildPfnDatabaseSelf();
}
+#endif /* !_M_AMD64 */
+
+/*
+ * Frees boot-time-only memory described by the loader parameter block.
+ * Registry data, OS loader heap and NLS data descriptors are collected
+ * into a temporary run array; every page in those runs is then either
+ * inserted into the free list (if it has no references and is not on
+ * any list) or has its share count dropped so it gets deleted.
+ *
+ * LoaderBlock - loader parameter block whose MemoryDescriptorListHead
+ *               is scanned; only the described pages are released here.
+ */
+VOID
+NTAPI
+INIT_FUNCTION
+MmFreeLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
+{
+ PLIST_ENTRY NextMd;
+ PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
+ ULONG_PTR i;
+ PFN_NUMBER BasePage, LoaderPages;
+ PMMPFN Pfn1;
+ KIRQL OldIrql;
+ PPHYSICAL_MEMORY_RUN Buffer, Entry;
+
+ /* Loop the descriptors in order to count them */
+ i = 0;
+ NextMd = LoaderBlock->MemoryDescriptorListHead.Flink;
+ while (NextMd != &LoaderBlock->MemoryDescriptorListHead)
+ {
+ MdBlock = CONTAINING_RECORD(NextMd,
+ MEMORY_ALLOCATION_DESCRIPTOR,
+ ListEntry);
+ i++;
+ NextMd = MdBlock->ListEntry.Flink;
+ }
+
+ /* Allocate a structure to hold the physical runs. Sized for ALL
+    descriptors even though only a subset is recorded below -- this is
+    a deliberate upper bound, not a leak of unused slots. */
+ Buffer = ExAllocatePoolWithTag(NonPagedPool,
+ i * sizeof(PHYSICAL_MEMORY_RUN),
+ 'lMmM');
+ ASSERT(Buffer != NULL);
+ Entry = Buffer;
+
+ /* Loop the descriptors again */
+ NextMd = LoaderBlock->MemoryDescriptorListHead.Flink;
+ while (NextMd != &LoaderBlock->MemoryDescriptorListHead)
+ {
+ /* Check what kind this was */
+ MdBlock = CONTAINING_RECORD(NextMd,
+ MEMORY_ALLOCATION_DESCRIPTOR,
+ ListEntry);
+ switch (MdBlock->MemoryType)
+ {
+ /* Registry, NLS, and heap data */
+ case LoaderRegistryData:
+ case LoaderOsloaderHeap:
+ case LoaderNlsData:
+ /* Are all a candidate for deletion */
+ Entry->BasePage = MdBlock->BasePage;
+ Entry->PageCount = MdBlock->PageCount;
+ Entry++;
+
+ /* Deliberate fall-through to the break below.
+    We keep the rest */
+ default:
+ break;
+ }
+
+ /* Move to the next descriptor */
+ NextMd = MdBlock->ListEntry.Flink;
+ }
+
+ /* Acquire the PFN lock */
+ OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
+
+ /* Loop the recorded runs in reverse (Entry is one past the last one) */
+ LoaderPages = 0;
+ while (--Entry >= Buffer)
+ {
+ /* See how many pages are in this run */
+ i = Entry->PageCount;
+ BasePage = Entry->BasePage;
+
+ /* Loop each page */
+ Pfn1 = MiGetPfnEntry(BasePage);
+ while (i--)
+ {
+ /* Check if it has references or is in any kind of list */
+ if (!(Pfn1->u3.e2.ReferenceCount) && (!Pfn1->u1.Flink))
+ {
+ /* Set the new PTE address and put this page into the free list.
+    NOTE(review): PteAddress is loaded with the page's physical
+    address here -- confirm this matches the free-list convention */
+ Pfn1->PteAddress = (PMMPTE)(BasePage << PAGE_SHIFT);
+ MiInsertPageInFreeList(BasePage);
+ LoaderPages++;
+ }
+ else if (BasePage)
+ {
+ /* It has a reference, so simply drop it (PFN 0 is never freed) */
+ ASSERT(MI_IS_PHYSICAL_ADDRESS(MiPteToAddress(Pfn1->PteAddress)) == FALSE);
+
+ /* Drop a dereference on this page, which should delete it */
+ Pfn1->PteAddress->u.Long = 0;
+ MI_SET_PFN_DELETED(Pfn1);
+ MiDecrementShareCount(Pfn1, BasePage);
+ LoaderPages++;
+ }
+
+ /* Move to the next page */
+ Pfn1++;
+ BasePage++;
+ }
+ }
+
+ /* Release the PFN lock and flush the TLB */
+ DPRINT1("Loader pages freed: %lx\n", LoaderPages);
+ KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
+ KeFlushCurrentTb();
+
+ /* Free our run structure */
+ ExFreePoolWithTag(Buffer, 'lMmM');
+}
VOID
NTAPI
MiAddHalIoMappings(VOID)
{
PVOID BaseAddress;
- PMMPDE PointerPde;
+ PMMPDE PointerPde, LastPde;
PMMPTE PointerPte;
- ULONG i, j, PdeCount;
+ ULONG j;
PFN_NUMBER PageFrameIndex;
/* HAL Heap address -- should be on a PDE boundary */
- BaseAddress = (PVOID)0xFFC00000;
+ BaseAddress = (PVOID)MM_HAL_VA_START;
ASSERT(MiAddressToPteOffset(BaseAddress) == 0);
/* Check how many PDEs the heap has */
PointerPde = MiAddressToPde(BaseAddress);
- PdeCount = PDE_COUNT - MiGetPdeOffset(BaseAddress);
- for (i = 0; i < PdeCount; i++)
+ LastPde = MiAddressToPde((PVOID)MM_HAL_VA_END);
+
+ while (PointerPde <= LastPde)
{
/* Does the HAL own this mapping? */
if ((PointerPde->u.Hard.Valid == 1) &&
KIRQL OldIrql;
SIZE_T Size;
ULONG BitMapSize;
-#if (_MI_PAGING_LEVELS == 2)
+#if (_MI_PAGING_LEVELS >= 3)
+ MMPPE TempPpe = ValidKernelPpe;
+ PMMPPE PointerPpe;
+#elif (_MI_PAGING_LEVELS == 2)
MMPTE TempPte = ValidKernelPte;
//
MmSizeOfPagedPoolInBytes) - 1);
//
- // So now get the PDE for paged pool and zero it out
+ // Lock the PFN database
//
- PointerPde = MiAddressToPde(MmPagedPoolStart);
+ OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
#if (_MI_PAGING_LEVELS >= 3)
- /* On these systems, there's no double-mapping, so instead, the PPE and PXEs
+ /* On these systems, there's no double-mapping, so instead, the PPEs
* are setup to span the entire paged pool area, so there's no need for the
* system PD */
- ASSERT(FALSE);
+ for (PointerPpe = MiAddressToPpe(MmPagedPoolStart);
+ PointerPpe <= MiAddressToPpe(MmPagedPoolEnd);
+ PointerPpe++)
+ {
+ /* Check if the PPE is already valid */
+ if (!PointerPpe->u.Hard.Valid)
+ {
+ /* It is not, so map a fresh zeroed page */
+ TempPpe.u.Hard.PageFrameNumber = MiRemoveZeroPage(0);
+ MI_WRITE_VALID_PPE(PointerPpe, TempPpe);
+ }
+ }
#endif
+ //
+ // So now get the PDE for paged pool and zero it out
+ //
+ PointerPde = MiAddressToPde(MmPagedPoolStart);
RtlZeroMemory(PointerPde,
(1 + MiAddressToPde(MmPagedPoolEnd) - PointerPde) * sizeof(MMPDE));
MmPagedPoolInfo.FirstPteForPagedPool = PointerPte;
MmPagedPoolInfo.LastPteForPagedPool = MiAddressToPte(MmPagedPoolEnd);
- //
- // Lock the PFN database
- //
- OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
-
/* Allocate a page and map the first paged pool PDE */
MI_SET_USAGE(MI_USAGE_PAGED_POOL);
MI_SET_PROCESS2("Kernel");
#if (_MI_PAGING_LEVELS >= 3)
/* Use the PPE of MmPagedPoolStart that was setup above */
// Bla = PFN_FROM_PTE(PpeAddress(MmPagedPool...));
- ASSERT(FALSE);
+
+ /* Initialize the PFN entry for it */
+ MiInitializePfnForOtherProcess(PageFrameIndex,
+ (PMMPTE)PointerPde,
+ PFN_FROM_PTE(MiAddressToPpe(MmPagedPoolStart)));
#else
/* Do it this way */
// Bla = MmSystemPageDirectory[(PointerPde - (PMMPTE)PDE_BASE) / PDE_COUNT]
//
InitializePool(PagedPool, 0);
+ /* Initialize special pool */
+ MiInitializeSpecialPool();
+
/* Default low threshold of 30MB or one fifth of paged pool */
MiLowPagedPoolThreshold = (30 * _1MB) >> PAGE_SHIFT;
MiLowPagedPoolThreshold = min(MiLowPagedPoolThreshold, Size / 5);
PVOID Bitmap;
PPHYSICAL_MEMORY_RUN Run;
PFN_NUMBER PageCount;
+#if DBG
+ ULONG j;
+ PMMPTE PointerPte, TestPte;
+ MMPTE TempPte;
+#endif
/* Dump memory descriptors */
if (MiDbgEnableMdDump) MiDbgDumpMemoryDescriptors();
/* Initialize session space address layout */
MiInitializeSessionSpaceLayout();
+ /* Set the based section highest address */
+ MmHighSectionBase = (PVOID)((ULONG_PTR)MmHighestUserAddress - 0x800000);
+
+#if DBG
+ /* The subsection PTE format depends on things being 8-byte aligned */
+ ASSERT((sizeof(CONTROL_AREA) % 8) == 0);
+ ASSERT((sizeof(SUBSECTION) % 8) == 0);
+
+ /* Prototype PTEs are assumed to be in paged pool, so check if the math works */
+ PointerPte = (PMMPTE)MmPagedPoolStart;
+ MI_MAKE_PROTOTYPE_PTE(&TempPte, PointerPte);
+ TestPte = MiProtoPteToPte(&TempPte);
+ ASSERT(PointerPte == TestPte);
+
+ /* Try the last nonpaged pool address */
+ PointerPte = (PMMPTE)MI_NONPAGED_POOL_END;
+ MI_MAKE_PROTOTYPE_PTE(&TempPte, PointerPte);
+ TestPte = MiProtoPteToPte(&TempPte);
+ ASSERT(PointerPte == TestPte);
+
+ /* Try a bunch of random addresses near the end of the address space */
+ PointerPte = (PMMPTE)0xFFFC8000;
+ for (j = 0; j < 20; j += 1)
+ {
+ MI_MAKE_PROTOTYPE_PTE(&TempPte, PointerPte);
+ TestPte = MiProtoPteToPte(&TempPte);
+ ASSERT(PointerPte == TestPte);
+ PointerPte++;
+ }
+
+ /* Subsection PTEs are always in nonpaged pool, pick a random address to try */
+ PointerPte = (PMMPTE)0xFFAACBB8;
+ MI_MAKE_SUBSECTION_PTE(&TempPte, PointerPte);
+ TestPte = MiSubsectionPteToSubsection(&TempPte);
+ ASSERT(PointerPte == TestPte);
+#endif
+
+ /* Loop all 8 standby lists */
+ for (i = 0; i < 8; i++)
+ {
+ /* Initialize them */
+ MmStandbyPageListByPriority[i].Total = 0;
+ MmStandbyPageListByPriority[i].ListName = StandbyPageList;
+ MmStandbyPageListByPriority[i].Flink = MM_EMPTY_LIST;
+ MmStandbyPageListByPriority[i].Blink = MM_EMPTY_LIST;
+ }
+
/* Initialize the user mode image list */
InitializeListHead(&MmLoadedUserImageList);
- /* Initialize the paged pool mutex */
+ /* Initialize the paged pool mutex and the section commit mutex */
KeInitializeGuardedMutex(&MmPagedPoolMutex);
+ KeInitializeGuardedMutex(&MmSectionCommitMutex);
+ KeInitializeGuardedMutex(&MmSectionBasedMutex);
/* Initialize the Loader Lock */
KeInitializeMutant(&MmSystemLoadLock, FALSE);
KeInitializeEvent(&MmZeroingPageEvent, SynchronizationEvent, FALSE);
MmZeroingPageThreadActive = FALSE;
+ /* Initialize the dead stack S-LIST */
+ InitializeSListHead(&MmDeadStackSListHead);
+
//
// Check if this is a machine with less than 19MB of RAM
//
else
{
//
- // Use the default, but check if we have more than 32MB of RAM
+ // Use the default
//
MmNumberOfSystemPtes = 11000;
if (PageCount > MI_MIN_PAGES_FOR_SYSPTE_BOOST)
//
MmNumberOfSystemPtes <<= 1;
}
+ if (PageCount > MI_MIN_PAGES_FOR_SYSPTE_BOOST_BOOST)
+ {
+ //
+ // Double the amount of system PTEs once more
+ //
+ MmNumberOfSystemPtes <<= 1;
+ }
}
DPRINT("System PTE count has been tuned to %d (%d bytes)\n",
{
/* Set small system */
MmSystemSize = MmSmallSystem;
+ MmMaximumDeadKernelStacks = 0;
}
else if (MmNumberOfPhysicalPages <= ((19 * _1MB) / PAGE_SIZE))
{
/* Set small system and add 100 pages for the cache */
MmSystemSize = MmSmallSystem;
MmSystemCacheWsMinimum += 100;
+ MmMaximumDeadKernelStacks = 2;
}
else
{
/* Set medium system and add 400 pages for the cache */
MmSystemSize = MmMediumSystem;
MmSystemCacheWsMinimum += 400;
+ MmMaximumDeadKernelStacks = 5;
}
/* Check for less than 24MB */
}
else
{
- /* Check for LanMan server */
+ /* Check for LanMan server (La for LanmanNT) */
if (MmProductType == '\0a\0L')
{
/* This is a domain controller */
}
else
{
- /* Otherwise it must be a normal server */
+ /* Otherwise it must be a normal server (Se for ServerNT) */
SharedUserData->NtProductType = NtProductServer;
}