/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/mminit.c
 * PURPOSE:         ARM Memory Manager Initialization
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */
9 /* INCLUDES *******************************************************************/
15 #define MODULE_INVOLVED_IN_ARM3
18 /* GLOBALS ********************************************************************/
21 // These are all registry-configurable, but by default, the memory manager will
22 // figure out the most appropriate values.
24 ULONG MmMaximumNonPagedPoolPercent
;
25 SIZE_T MmSizeOfNonPagedPoolInBytes
;
26 SIZE_T MmMaximumNonPagedPoolInBytes
;
28 /* Some of the same values, in pages */
29 PFN_NUMBER MmMaximumNonPagedPoolInPages
;
32 // These numbers describe the discrete equation components of the nonpaged
33 // pool sizing algorithm.
35 // They are described on http://support.microsoft.com/default.aspx/kb/126402/ja
36 // along with the algorithm that uses them, which is implemented later below.
38 SIZE_T MmMinimumNonPagedPoolSize
= 256 * 1024;
39 ULONG MmMinAdditionNonPagedPoolPerMb
= 32 * 1024;
40 SIZE_T MmDefaultMaximumNonPagedPool
= 1024 * 1024;
41 ULONG MmMaxAdditionNonPagedPoolPerMb
= 400 * 1024;
44 // The memory layout (and especially variable names) of the NT kernel mode
45 // components can be a bit hard to twig, especially when it comes to the non
48 // There are really two components to the non-paged pool:
50 // - The initial nonpaged pool, sized dynamically up to a maximum.
51 // - The expansion nonpaged pool, sized dynamically up to a maximum.
53 // The initial nonpaged pool is physically continuous for performance, and
54 // immediately follows the PFN database, typically sharing the same PDE. It is
55 // a very small resource (32MB on a 1GB system), and capped at 128MB.
57 // Right now we call this the "ARM³ Nonpaged Pool" and it begins somewhere after
58 // the PFN database (which starts at 0xB0000000).
60 // The expansion nonpaged pool, on the other hand, can grow much bigger (400MB
61 // for a 1GB system). On ARM³ however, it is currently capped at 128MB.
63 // The address where the initial nonpaged pool starts is aptly named
64 // MmNonPagedPoolStart, and it describes a range of MmSizeOfNonPagedPoolInBytes
67 // Expansion nonpaged pool starts at an address described by the variable called
68 // MmNonPagedPoolExpansionStart, and it goes on for MmMaximumNonPagedPoolInBytes
69 // minus MmSizeOfNonPagedPoolInBytes bytes, always reaching MmNonPagedPoolEnd
70 // (because of the way it's calculated) at 0xFFBE0000.
72 // Initial nonpaged pool is allocated and mapped early-on during boot, but what
73 // about the expansion nonpaged pool? It is instead composed of special pages
74 // which belong to what are called System PTEs. These PTEs are the matter of a
75 // later discussion, but they are also considered part of the "nonpaged" OS, due
76 // to the fact that they are never paged out -- once an address is described by
77 // a System PTE, it is always valid, until the System PTE is torn down.
79 // System PTEs are actually composed of two "spaces", the system space proper,
80 // and the nonpaged pool expansion space. The latter, as we've already seen,
81 // begins at MmNonPagedPoolExpansionStart. Based on the number of System PTEs
82 // that the system will support, the remaining address space below this address
83 // is used to hold the system space PTEs. This address, in turn, is held in the
84 // variable named MmNonPagedSystemStart, which itself is never allowed to go
85 // below 0xEB000000 (thus creating an upper bound on the number of System PTEs).
87 // This means that 330MB are reserved for total nonpaged system VA, on top of
88 // whatever the initial nonpaged pool allocation is.
90 // The following URLs, valid as of April 23rd, 2008, support this evidence:
92 // http://www.cs.miami.edu/~burt/journal/NT/memory.html
93 // http://www.ditii.com/2007/09/28/windows-memory-management-x86-virtual-address-space/
95 PVOID MmNonPagedSystemStart
;
96 PVOID MmNonPagedPoolStart
;
97 PVOID MmNonPagedPoolExpansionStart
;
98 PVOID MmNonPagedPoolEnd
= MI_NONPAGED_POOL_END
;
101 // This is where paged pool starts by default
103 PVOID MmPagedPoolStart
= MI_PAGED_POOL_START
;
104 PVOID MmPagedPoolEnd
;
107 // And this is its default size
109 SIZE_T MmSizeOfPagedPoolInBytes
= MI_MIN_INIT_PAGED_POOLSIZE
;
110 PFN_NUMBER MmSizeOfPagedPoolInPages
= MI_MIN_INIT_PAGED_POOLSIZE
/ PAGE_SIZE
;
113 // Session space starts at 0xBFFFFFFF and grows downwards
114 // By default, it includes an 8MB image area where we map win32k and video card
115 // drivers, followed by a 4MB area containing the session's working set. This is
116 // then followed by a 20MB mapped view area and finally by the session's paged
117 // pool, by default 16MB.
119 // On a normal system, this results in session space occupying the region from
120 // 0xBD000000 to 0xC0000000
122 // See miarm.h for the defines that determine the sizing of this region. On an
123 // NT system, some of these can be configured through the registry, but we don't
126 PVOID MiSessionSpaceEnd
; // 0xC0000000
127 PVOID MiSessionImageEnd
; // 0xC0000000
128 PVOID MiSessionImageStart
; // 0xBF800000
129 PVOID MiSessionViewStart
; // 0xBE000000
130 PVOID MiSessionPoolEnd
; // 0xBE000000
131 PVOID MiSessionPoolStart
; // 0xBD000000
132 PVOID MmSessionBase
; // 0xBD000000
133 SIZE_T MmSessionSize
;
134 SIZE_T MmSessionViewSize
;
135 SIZE_T MmSessionPoolSize
;
136 SIZE_T MmSessionImageSize
;
139 * These are the PTE addresses of the boundaries carved out above
141 PMMPTE MiSessionImagePteStart
;
142 PMMPTE MiSessionImagePteEnd
;
143 PMMPTE MiSessionBasePte
;
144 PMMPTE MiSessionLastPte
;
147 // The system view space, on the other hand, is where sections that are memory
148 // mapped into "system space" end up.
150 // By default, it is a 16MB region.
152 PVOID MiSystemViewStart
;
153 SIZE_T MmSystemViewSize
;
155 #if (_MI_PAGING_LEVELS == 2)
157 // A copy of the system page directory (the page directory associated with the
158 // System process) is kept (double-mapped) by the manager in order to lazily
159 // map paged pool PDEs into external processes when they fault on a paged pool
162 PFN_NUMBER MmSystemPageDirectory
[PD_COUNT
];
163 PMMPDE MmSystemPagePtes
;
167 // The system cache starts right after hyperspace. The first few pages are for
168 // keeping track of the system working set list.
170 // This should be 0xC0C00000 -- the cache itself starts at 0xC1000000
172 PMMWSL MmSystemCacheWorkingSetList
= MI_SYSTEM_CACHE_WS_START
;
175 // Windows NT seems to choose between 7000, 11000 and 50000
176 // On systems with more than 32MB, this number is then doubled, and further
177 // aligned up to a PDE boundary (4MB).
179 ULONG_PTR MmNumberOfSystemPtes
;
182 // This is how many pages the PFN database will take up
183 // In Windows, this includes the Quark Color Table, but not in ARM³
185 PFN_NUMBER MxPfnAllocation
;
188 // Unlike the old ReactOS Memory Manager, ARM³ (and Windows) does not keep track
189 // of pages that are not actually valid physical memory, such as ACPI reserved
190 // regions, BIOS address ranges, or holes in physical memory address space which
191 // could indicate device-mapped I/O memory.
193 // In fact, the lack of a PFN entry for a page usually indicates that this is
194 // I/O space instead.
196 // A bitmap, called the PFN bitmap, keeps track of all page frames by assigning
197 // a bit to each. If the bit is set, then the page is valid physical RAM.
199 RTL_BITMAP MiPfnBitMap
;
202 // This structure describes the different pieces of RAM-backed address space
204 PPHYSICAL_MEMORY_DESCRIPTOR MmPhysicalMemoryBlock
;
207 // This is where we keep track of the most basic physical layout markers
209 PFN_NUMBER MmNumberOfPhysicalPages
, MmHighestPhysicalPage
, MmLowestPhysicalPage
= -1;
212 // The total number of pages mapped by the boot loader, which include the kernel
213 // HAL, boot drivers, registry, NLS files and other loader data structures is
214 // kept track of here. This depends on "LoaderPagesSpanned" being correct when
215 // coming from the loader.
217 // This number is later aligned up to a PDE boundary.
219 SIZE_T MmBootImageSize
;
222 // These three variables keep track of the core separation of address space that
223 // exists between kernel mode and user mode.
225 ULONG_PTR MmUserProbeAddress
;
226 PVOID MmHighestUserAddress
;
227 PVOID MmSystemRangeStart
;
229 /* And these store the respective highest PTE/PDE address */
230 PMMPTE MiHighestUserPte
;
231 PMMPDE MiHighestUserPde
;
232 #if (_MI_PAGING_LEVELS >= 3)
233 /* We need the highest PPE and PXE addresses */
236 /* These variables define the system cache address space */
237 PVOID MmSystemCacheStart
;
238 PVOID MmSystemCacheEnd
;
239 MMSUPPORT MmSystemCacheWs
;
242 // This is where hyperspace ends (followed by the system cache working set)
244 PVOID MmHyperSpaceEnd
;
247 // Page coloring algorithm data
249 ULONG MmSecondaryColors
;
250 ULONG MmSecondaryColorMask
;
253 // Actual (registry-configurable) size of a GUI thread's stack
255 ULONG MmLargeStackSize
= KERNEL_LARGE_STACK_SIZE
;
258 // Before we have a PFN database, memory comes straight from our physical memory
259 // blocks, which is nice because it's guaranteed contiguous and also because once
260 // we take a page from here, the system doesn't see it anymore.
261 // However, once the fun is over, those pages must be re-integrated back into
262 // PFN society life, and that requires us keeping a copy of the original layout
263 // so that we can parse it later.
265 PMEMORY_ALLOCATION_DESCRIPTOR MxFreeDescriptor
;
266 MEMORY_ALLOCATION_DESCRIPTOR MxOldFreeDescriptor
;
269 * For each page's worth bytes of L2 cache in a given set/way line, the zero and
270 * free lists are organized in what is called a "color".
272 * This array points to the two lists, so it can be thought of as a multi-dimensional
273 * array of MmFreePagesByColor[2][MmSecondaryColors]. Since the number is dynamic,
274 * we describe the array in pointer form instead.
276 * On a final note, the color tables themselves are right after the PFN database.
278 C_ASSERT(FreePageList
== 1);
279 PMMCOLOR_TABLES MmFreePagesByColor
[FreePageList
+ 1];
281 /* An event used in Phase 0 before the rest of the system is ready to go */
284 /* All the events used for memory threshold notifications */
285 PKEVENT MiLowMemoryEvent
;
286 PKEVENT MiHighMemoryEvent
;
287 PKEVENT MiLowPagedPoolEvent
;
288 PKEVENT MiHighPagedPoolEvent
;
289 PKEVENT MiLowNonPagedPoolEvent
;
290 PKEVENT MiHighNonPagedPoolEvent
;
292 /* The actual thresholds themselves, in page numbers */
293 PFN_NUMBER MmLowMemoryThreshold
;
294 PFN_NUMBER MmHighMemoryThreshold
;
295 PFN_NUMBER MiLowPagedPoolThreshold
;
296 PFN_NUMBER MiHighPagedPoolThreshold
;
297 PFN_NUMBER MiLowNonPagedPoolThreshold
;
298 PFN_NUMBER MiHighNonPagedPoolThreshold
;
301 * This number determines how many free pages must exist, at minimum, until we
302 * start trimming working sets and flushing modified pages to obtain more free
305 * This number changes if the system detects that this is a server product
307 PFN_NUMBER MmMinimumFreePages
= 26;
310 * This number indicates how many pages we consider to be a low limit of having
311 * "plenty" of free memory.
313 * It is doubled on systems that have more than 63MB of memory
315 PFN_NUMBER MmPlentyFreePages
= 400;
317 /* These values store the type of system this is (small, med, large) and if server */
319 MM_SYSTEMSIZE MmSystemSize
;
322 * These values store the cache working set minimums and maximums, in pages
324 * The minimum value is boosted on systems with more than 24MB of RAM, and cut
325 * down to only 32 pages on embedded (<24MB RAM) systems.
327 * An extra boost of 2MB is given on systems with more than 33MB of RAM.
329 PFN_NUMBER MmSystemCacheWsMinimum
= 288;
330 PFN_NUMBER MmSystemCacheWsMaximum
= 350;
332 /* FIXME: Move to cache/working set code later */
333 BOOLEAN MmLargeSystemCache
;
336 * This value determines in how many fragments/chunks the subsection prototype
337 * PTEs should be allocated when mapping a section object. It is configurable in
338 * the registry through the MapAllocationFragment parameter.
340 * The default is 64KB on systems with more than 1GB of RAM, 32KB on systems with
341 * more than 256MB of RAM, and 16KB on systems with less than 256MB of RAM.
343 * The maximum it can be set to is 2MB, and the minimum is 4KB.
345 SIZE_T MmAllocationFragment
;
348 * These two values track how much virtual memory can be committed, and when
349 * expansion should happen.
351 // FIXME: They should be moved elsewhere since it's not an "init" setting?
352 SIZE_T MmTotalCommitLimit
;
353 SIZE_T MmTotalCommitLimitMaximum
;
355 /* Internal setting used for debugging memory descriptors */
356 BOOLEAN MiDbgEnableMdDump
=
363 /* PRIVATE FUNCTIONS **********************************************************/
368 MxGetNextPage(IN PFN_NUMBER PageCount
)
372 /* Make sure we have enough pages */
373 if (PageCount
> MxFreeDescriptor
->PageCount
)
375 /* Crash the system */
376 KeBugCheckEx(INSTALL_MORE_MEMORY
,
377 MmNumberOfPhysicalPages
,
378 MxFreeDescriptor
->PageCount
,
379 MxOldFreeDescriptor
.PageCount
,
383 /* Use our lowest usable free pages */
384 Pfn
= MxFreeDescriptor
->BasePage
;
385 MxFreeDescriptor
->BasePage
+= PageCount
;
386 MxFreeDescriptor
->PageCount
-= PageCount
;
393 MiComputeColorInformation(VOID
)
395 ULONG L2Associativity
;
397 /* Check if no setting was provided already */
398 if (!MmSecondaryColors
)
400 /* Get L2 cache information */
401 L2Associativity
= KeGetPcr()->SecondLevelCacheAssociativity
;
403 /* The number of colors is the number of cache bytes by set/way */
404 MmSecondaryColors
= KeGetPcr()->SecondLevelCacheSize
;
405 if (L2Associativity
) MmSecondaryColors
/= L2Associativity
;
408 /* Now convert cache bytes into pages */
409 MmSecondaryColors
>>= PAGE_SHIFT
;
410 if (!MmSecondaryColors
)
412 /* If there was no cache data from the KPCR, use the default colors */
413 MmSecondaryColors
= MI_SECONDARY_COLORS
;
417 /* Otherwise, make sure there aren't too many colors */
418 if (MmSecondaryColors
> MI_MAX_SECONDARY_COLORS
)
420 /* Set the maximum */
421 MmSecondaryColors
= MI_MAX_SECONDARY_COLORS
;
424 /* Make sure there aren't too little colors */
425 if (MmSecondaryColors
< MI_MIN_SECONDARY_COLORS
)
427 /* Set the default */
428 MmSecondaryColors
= MI_SECONDARY_COLORS
;
431 /* Finally make sure the colors are a power of two */
432 if (MmSecondaryColors
& (MmSecondaryColors
- 1))
434 /* Set the default */
435 MmSecondaryColors
= MI_SECONDARY_COLORS
;
439 /* Compute the mask and store it */
440 MmSecondaryColorMask
= MmSecondaryColors
- 1;
441 KeGetCurrentPrcb()->SecondaryColorMask
= MmSecondaryColorMask
;
447 MiInitializeColorTables(VOID
)
450 PMMPTE PointerPte
, LastPte
;
451 MMPTE TempPte
= ValidKernelPte
;
453 /* The color table starts after the ARM3 PFN database */
454 MmFreePagesByColor
[0] = (PMMCOLOR_TABLES
)&MmPfnDatabase
[MmHighestPhysicalPage
+ 1];
456 /* Loop the PTEs. We have two color tables for each secondary color */
457 PointerPte
= MiAddressToPte(&MmFreePagesByColor
[0][0]);
458 LastPte
= MiAddressToPte((ULONG_PTR
)MmFreePagesByColor
[0] +
459 (2 * MmSecondaryColors
* sizeof(MMCOLOR_TABLES
))
461 while (PointerPte
<= LastPte
)
463 /* Check for valid PTE */
464 if (PointerPte
->u
.Hard
.Valid
== 0)
466 /* Get a page and map it */
467 TempPte
.u
.Hard
.PageFrameNumber
= MxGetNextPage(1);
468 MI_WRITE_VALID_PTE(PointerPte
, TempPte
);
470 /* Zero out the page */
471 RtlZeroMemory(MiPteToAddress(PointerPte
), PAGE_SIZE
);
478 /* Now set the address of the next list, right after this one */
479 MmFreePagesByColor
[1] = &MmFreePagesByColor
[0][MmSecondaryColors
];
481 /* Now loop the lists to set them up */
482 for (i
= 0; i
< MmSecondaryColors
; i
++)
484 /* Set both free and zero lists for each color */
485 MmFreePagesByColor
[ZeroedPageList
][i
].Flink
= 0xFFFFFFFF;
486 MmFreePagesByColor
[ZeroedPageList
][i
].Blink
= (PVOID
)0xFFFFFFFF;
487 MmFreePagesByColor
[ZeroedPageList
][i
].Count
= 0;
488 MmFreePagesByColor
[FreePageList
][i
].Flink
= 0xFFFFFFFF;
489 MmFreePagesByColor
[FreePageList
][i
].Blink
= (PVOID
)0xFFFFFFFF;
490 MmFreePagesByColor
[FreePageList
][i
].Count
= 0;
497 MiIsRegularMemory(IN PLOADER_PARAMETER_BLOCK LoaderBlock
,
500 PLIST_ENTRY NextEntry
;
501 PMEMORY_ALLOCATION_DESCRIPTOR MdBlock
;
503 /* Loop the memory descriptors */
504 NextEntry
= LoaderBlock
->MemoryDescriptorListHead
.Flink
;
505 while (NextEntry
!= &LoaderBlock
->MemoryDescriptorListHead
)
507 /* Get the memory descriptor */
508 MdBlock
= CONTAINING_RECORD(NextEntry
,
509 MEMORY_ALLOCATION_DESCRIPTOR
,
512 /* Check if this PFN could be part of the block */
513 if (Pfn
>= (MdBlock
->BasePage
))
515 /* Check if it really is part of the block */
516 if (Pfn
< (MdBlock
->BasePage
+ MdBlock
->PageCount
))
518 /* Check if the block is actually memory we don't map */
519 if ((MdBlock
->MemoryType
== LoaderFirmwarePermanent
) ||
520 (MdBlock
->MemoryType
== LoaderBBTMemory
) ||
521 (MdBlock
->MemoryType
== LoaderSpecialMemory
))
523 /* We don't need PFN database entries for this memory */
527 /* This is memory we want to map */
533 /* Blocks are ordered, so if it's not here, it doesn't exist */
537 /* Get to the next descriptor */
538 NextEntry
= MdBlock
->ListEntry
.Flink
;
541 /* Check if this PFN is actually from our free memory descriptor */
542 if ((Pfn
>= MxOldFreeDescriptor
.BasePage
) &&
543 (Pfn
< MxOldFreeDescriptor
.BasePage
+ MxOldFreeDescriptor
.PageCount
))
545 /* We use these pages for initial mappings, so we do want to count them */
549 /* Otherwise this isn't memory that we describe or care about */
556 MiMapPfnDatabase(IN PLOADER_PARAMETER_BLOCK LoaderBlock
)
558 ULONG FreePage
, FreePageCount
, PagesLeft
, BasePage
, PageCount
;
559 PLIST_ENTRY NextEntry
;
560 PMEMORY_ALLOCATION_DESCRIPTOR MdBlock
;
561 PMMPTE PointerPte
, LastPte
;
562 MMPTE TempPte
= ValidKernelPte
;
564 /* Get current page data, since we won't be using MxGetNextPage as it would corrupt our state */
565 FreePage
= MxFreeDescriptor
->BasePage
;
566 FreePageCount
= MxFreeDescriptor
->PageCount
;
569 /* Loop the memory descriptors */
570 NextEntry
= LoaderBlock
->MemoryDescriptorListHead
.Flink
;
571 while (NextEntry
!= &LoaderBlock
->MemoryDescriptorListHead
)
573 /* Get the descriptor */
574 MdBlock
= CONTAINING_RECORD(NextEntry
,
575 MEMORY_ALLOCATION_DESCRIPTOR
,
577 if ((MdBlock
->MemoryType
== LoaderFirmwarePermanent
) ||
578 (MdBlock
->MemoryType
== LoaderBBTMemory
) ||
579 (MdBlock
->MemoryType
== LoaderSpecialMemory
))
581 /* These pages are not part of the PFN database */
582 NextEntry
= MdBlock
->ListEntry
.Flink
;
586 /* Next, check if this is our special free descriptor we've found */
587 if (MdBlock
== MxFreeDescriptor
)
589 /* Use the real numbers instead */
590 BasePage
= MxOldFreeDescriptor
.BasePage
;
591 PageCount
= MxOldFreeDescriptor
.PageCount
;
595 /* Use the descriptor's numbers */
596 BasePage
= MdBlock
->BasePage
;
597 PageCount
= MdBlock
->PageCount
;
600 /* Get the PTEs for this range */
601 PointerPte
= MiAddressToPte(&MmPfnDatabase
[BasePage
]);
602 LastPte
= MiAddressToPte(((ULONG_PTR
)&MmPfnDatabase
[BasePage
+ PageCount
]) - 1);
603 DPRINT("MD Type: %lx Base: %lx Count: %lx\n", MdBlock
->MemoryType
, BasePage
, PageCount
);
606 while (PointerPte
<= LastPte
)
608 /* We'll only touch PTEs that aren't already valid */
609 if (PointerPte
->u
.Hard
.Valid
== 0)
611 /* Use the next free page */
612 TempPte
.u
.Hard
.PageFrameNumber
= FreePage
;
613 ASSERT(FreePageCount
!= 0);
615 /* Consume free pages */
621 KeBugCheckEx(INSTALL_MORE_MEMORY
,
622 MmNumberOfPhysicalPages
,
624 MxOldFreeDescriptor
.PageCount
,
628 /* Write out this PTE */
630 MI_WRITE_VALID_PTE(PointerPte
, TempPte
);
633 RtlZeroMemory(MiPteToAddress(PointerPte
), PAGE_SIZE
);
640 /* Do the next address range */
641 NextEntry
= MdBlock
->ListEntry
.Flink
;
644 /* Now update the free descriptors to consume the pages we used up during the PFN allocation loop */
645 MxFreeDescriptor
->BasePage
= FreePage
;
646 MxFreeDescriptor
->PageCount
= FreePageCount
;
652 MiBuildPfnDatabaseFromPages(IN PLOADER_PARAMETER_BLOCK LoaderBlock
)
657 PFN_NUMBER PageFrameIndex
, StartupPdIndex
, PtePageIndex
;
659 ULONG_PTR BaseAddress
= 0;
661 /* PFN of the startup page directory */
662 StartupPdIndex
= PFN_FROM_PTE(MiAddressToPde(PDE_BASE
));
664 /* Start with the first PDE and scan them all */
665 PointerPde
= MiAddressToPde(NULL
);
666 Count
= PD_COUNT
* PDE_COUNT
;
667 for (i
= 0; i
< Count
; i
++)
669 /* Check for valid PDE */
670 if (PointerPde
->u
.Hard
.Valid
== 1)
672 /* Get the PFN from it */
673 PageFrameIndex
= PFN_FROM_PTE(PointerPde
);
675 /* Do we want a PFN entry for this page? */
676 if (MiIsRegularMemory(LoaderBlock
, PageFrameIndex
))
678 /* Yes we do, set it up */
679 Pfn1
= MiGetPfnEntry(PageFrameIndex
);
680 Pfn1
->u4
.PteFrame
= StartupPdIndex
;
681 Pfn1
->PteAddress
= (PMMPTE
)PointerPde
;
682 Pfn1
->u2
.ShareCount
++;
683 Pfn1
->u3
.e2
.ReferenceCount
= 1;
684 Pfn1
->u3
.e1
.PageLocation
= ActiveAndValid
;
685 Pfn1
->u3
.e1
.CacheAttribute
= MiNonCached
;
687 Pfn1
->PfnUsage
= MI_USAGE_INIT_MEMORY
;
688 memcpy(Pfn1
->ProcessName
, "Initial PDE", 16);
697 /* Now get the PTE and scan the pages */
698 PointerPte
= MiAddressToPte(BaseAddress
);
699 for (j
= 0; j
< PTE_COUNT
; j
++)
701 /* Check for a valid PTE */
702 if (PointerPte
->u
.Hard
.Valid
== 1)
704 /* Increase the shared count of the PFN entry for the PDE */
705 ASSERT(Pfn1
!= NULL
);
706 Pfn1
->u2
.ShareCount
++;
708 /* Now check if the PTE is valid memory too */
709 PtePageIndex
= PFN_FROM_PTE(PointerPte
);
710 if (MiIsRegularMemory(LoaderBlock
, PtePageIndex
))
713 * Only add pages above the end of system code or pages
714 * that are part of nonpaged pool
716 if ((BaseAddress
>= 0xA0000000) ||
717 ((BaseAddress
>= (ULONG_PTR
)MmNonPagedPoolStart
) &&
718 (BaseAddress
< (ULONG_PTR
)MmNonPagedPoolStart
+
719 MmSizeOfNonPagedPoolInBytes
)))
721 /* Get the PFN entry and make sure it too is valid */
722 Pfn2
= MiGetPfnEntry(PtePageIndex
);
723 if ((MmIsAddressValid(Pfn2
)) &&
724 (MmIsAddressValid(Pfn2
+ 1)))
726 /* Setup the PFN entry */
727 Pfn2
->u4
.PteFrame
= PageFrameIndex
;
728 Pfn2
->PteAddress
= PointerPte
;
729 Pfn2
->u2
.ShareCount
++;
730 Pfn2
->u3
.e2
.ReferenceCount
= 1;
731 Pfn2
->u3
.e1
.PageLocation
= ActiveAndValid
;
732 Pfn2
->u3
.e1
.CacheAttribute
= MiNonCached
;
734 Pfn2
->PfnUsage
= MI_USAGE_INIT_MEMORY
;
735 memcpy(Pfn1
->ProcessName
, "Initial PTE", 16);
744 BaseAddress
+= PAGE_SIZE
;
749 /* Next PDE mapped address */
750 BaseAddress
+= PDE_MAPPED_VA
;
761 MiBuildPfnDatabaseZeroPage(VOID
)
766 /* Grab the lowest page and check if it has no real references */
767 Pfn1
= MiGetPfnEntry(MmLowestPhysicalPage
);
768 if (!(MmLowestPhysicalPage
) && !(Pfn1
->u3
.e2
.ReferenceCount
))
770 /* Make it a bogus page to catch errors */
771 PointerPde
= MiAddressToPde(0xFFFFFFFF);
772 Pfn1
->u4
.PteFrame
= PFN_FROM_PTE(PointerPde
);
773 Pfn1
->PteAddress
= (PMMPTE
)PointerPde
;
774 Pfn1
->u2
.ShareCount
++;
775 Pfn1
->u3
.e2
.ReferenceCount
= 0xFFF0;
776 Pfn1
->u3
.e1
.PageLocation
= ActiveAndValid
;
777 Pfn1
->u3
.e1
.CacheAttribute
= MiNonCached
;
784 MiBuildPfnDatabaseFromLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock
)
786 PLIST_ENTRY NextEntry
;
787 PFN_NUMBER PageCount
= 0;
788 PMEMORY_ALLOCATION_DESCRIPTOR MdBlock
;
789 PFN_NUMBER PageFrameIndex
;
795 /* Now loop through the descriptors */
796 NextEntry
= LoaderBlock
->MemoryDescriptorListHead
.Flink
;
797 while (NextEntry
!= &LoaderBlock
->MemoryDescriptorListHead
)
799 /* Get the current descriptor */
800 MdBlock
= CONTAINING_RECORD(NextEntry
,
801 MEMORY_ALLOCATION_DESCRIPTOR
,
805 PageCount
= MdBlock
->PageCount
;
806 PageFrameIndex
= MdBlock
->BasePage
;
808 /* Don't allow memory above what the PFN database is mapping */
809 if (PageFrameIndex
> MmHighestPhysicalPage
)
811 /* Since they are ordered, everything past here will be larger */
815 /* On the other hand, the end page might be higher up... */
816 if ((PageFrameIndex
+ PageCount
) > (MmHighestPhysicalPage
+ 1))
818 /* In which case we'll trim the descriptor to go as high as we can */
819 PageCount
= MmHighestPhysicalPage
+ 1 - PageFrameIndex
;
820 MdBlock
->PageCount
= PageCount
;
822 /* But if there's nothing left to trim, we got too high, so quit */
823 if (!PageCount
) break;
826 /* Now check the descriptor type */
827 switch (MdBlock
->MemoryType
)
829 /* Check for bad RAM */
832 DPRINT1("You either have specified /BURNMEMORY or damaged RAM modules.\n");
835 /* Check for free RAM */
837 case LoaderLoadedProgram
:
838 case LoaderFirmwareTemporary
:
839 case LoaderOsloaderStack
:
841 /* Get the last page of this descriptor. Note we loop backwards */
842 PageFrameIndex
+= PageCount
- 1;
843 Pfn1
= MiGetPfnEntry(PageFrameIndex
);
845 /* Lock the PFN Database */
846 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
849 /* If the page really has no references, mark it as free */
850 if (!Pfn1
->u3
.e2
.ReferenceCount
)
852 /* Add it to the free list */
853 Pfn1
->u3
.e1
.CacheAttribute
= MiNonCached
;
854 MiInsertPageInFreeList(PageFrameIndex
);
857 /* Go to the next page */
862 /* Release PFN database */
863 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
865 /* Done with this block */
868 /* Check for pages that are invisible to us */
869 case LoaderFirmwarePermanent
:
870 case LoaderSpecialMemory
:
871 case LoaderBBTMemory
:
878 /* Map these pages with the KSEG0 mapping that adds 0x80000000 */
879 PointerPte
= MiAddressToPte(KSEG0_BASE
+ (PageFrameIndex
<< PAGE_SHIFT
));
880 Pfn1
= MiGetPfnEntry(PageFrameIndex
);
883 /* Check if the page is really unused */
884 PointerPde
= MiAddressToPde(KSEG0_BASE
+ (PageFrameIndex
<< PAGE_SHIFT
));
885 if (!Pfn1
->u3
.e2
.ReferenceCount
)
887 /* Mark it as being in-use */
888 Pfn1
->u4
.PteFrame
= PFN_FROM_PTE(PointerPde
);
889 Pfn1
->PteAddress
= PointerPte
;
890 Pfn1
->u2
.ShareCount
++;
891 Pfn1
->u3
.e2
.ReferenceCount
= 1;
892 Pfn1
->u3
.e1
.PageLocation
= ActiveAndValid
;
893 Pfn1
->u3
.e1
.CacheAttribute
= MiNonCached
;
895 Pfn1
->PfnUsage
= MI_USAGE_BOOT_DRIVER
;
898 /* Check for RAM disk page */
899 if (MdBlock
->MemoryType
== LoaderXIPRom
)
901 /* Make it a pseudo-I/O ROM mapping */
903 Pfn1
->u2
.ShareCount
= 0;
904 Pfn1
->u3
.e2
.ReferenceCount
= 0;
905 Pfn1
->u3
.e1
.PageLocation
= 0;
907 Pfn1
->u4
.InPageError
= 0;
908 Pfn1
->u3
.e1
.PrototypePte
= 1;
912 /* Advance page structures */
920 /* Next descriptor entry */
921 NextEntry
= MdBlock
->ListEntry
.Flink
;
928 MiBuildPfnDatabaseSelf(VOID
)
930 PMMPTE PointerPte
, LastPte
;
933 /* Loop the PFN database page */
934 PointerPte
= MiAddressToPte(MiGetPfnEntry(MmLowestPhysicalPage
));
935 LastPte
= MiAddressToPte(MiGetPfnEntry(MmHighestPhysicalPage
));
936 while (PointerPte
<= LastPte
)
938 /* Make sure the page is valid */
939 if (PointerPte
->u
.Hard
.Valid
== 1)
941 /* Get the PFN entry and just mark it referenced */
942 Pfn1
= MiGetPfnEntry(PointerPte
->u
.Hard
.PageFrameNumber
);
943 Pfn1
->u2
.ShareCount
= 1;
944 Pfn1
->u3
.e2
.ReferenceCount
= 1;
946 Pfn1
->PfnUsage
= MI_USAGE_PFN_DATABASE
;
958 MiInitializePfnDatabase(IN PLOADER_PARAMETER_BLOCK LoaderBlock
)
960 /* Scan memory and start setting up PFN entries */
961 MiBuildPfnDatabaseFromPages(LoaderBlock
);
963 /* Add the zero page */
964 MiBuildPfnDatabaseZeroPage();
966 /* Scan the loader block and build the rest of the PFN database */
967 MiBuildPfnDatabaseFromLoaderBlock(LoaderBlock
);
969 /* Finally add the pages for the PFN database itself */
970 MiBuildPfnDatabaseSelf();
976 MiAdjustWorkingSetManagerParameters(IN BOOLEAN Client
)
978 /* This function needs to do more work, for now, we tune page minimums */
980 /* Check for a system with around 64MB RAM or more */
981 if (MmNumberOfPhysicalPages
>= (63 * _1MB
) / PAGE_SIZE
)
983 /* Double the minimum amount of pages we consider for a "plenty free" scenario */
984 MmPlentyFreePages
*= 2;
991 MiNotifyMemoryEvents(VOID
)
993 /* Are we in a low-memory situation? */
994 if (MmAvailablePages
< MmLowMemoryThreshold
)
996 /* Clear high, set low */
997 if (KeReadStateEvent(MiHighMemoryEvent
)) KeClearEvent(MiHighMemoryEvent
);
998 if (!KeReadStateEvent(MiLowMemoryEvent
)) KeSetEvent(MiLowMemoryEvent
, 0, FALSE
);
1000 else if (MmAvailablePages
< MmHighMemoryThreshold
)
1002 /* We are in between, clear both */
1003 if (KeReadStateEvent(MiHighMemoryEvent
)) KeClearEvent(MiHighMemoryEvent
);
1004 if (KeReadStateEvent(MiLowMemoryEvent
)) KeClearEvent(MiLowMemoryEvent
);
1008 /* Clear low, set high */
1009 if (KeReadStateEvent(MiLowMemoryEvent
)) KeClearEvent(MiLowMemoryEvent
);
1010 if (!KeReadStateEvent(MiHighMemoryEvent
)) KeSetEvent(MiHighMemoryEvent
, 0, FALSE
);
1017 MiCreateMemoryEvent(IN PUNICODE_STRING Name
,
1024 OBJECT_ATTRIBUTES ObjectAttributes
;
1025 SECURITY_DESCRIPTOR SecurityDescriptor
;
1028 Status
= RtlCreateSecurityDescriptor(&SecurityDescriptor
,
1029 SECURITY_DESCRIPTOR_REVISION
);
1030 if (!NT_SUCCESS(Status
)) return Status
;
1032 /* One ACL with 3 ACEs, containing each one SID */
1033 DaclLength
= sizeof(ACL
) +
1034 3 * sizeof(ACCESS_ALLOWED_ACE
) +
1035 RtlLengthSid(SeLocalSystemSid
) +
1036 RtlLengthSid(SeAliasAdminsSid
) +
1037 RtlLengthSid(SeWorldSid
);
1039 /* Allocate space for the DACL */
1040 Dacl
= ExAllocatePoolWithTag(PagedPool
, DaclLength
, 'lcaD');
1041 if (!Dacl
) return STATUS_INSUFFICIENT_RESOURCES
;
1043 /* Setup the ACL inside it */
1044 Status
= RtlCreateAcl(Dacl
, DaclLength
, ACL_REVISION
);
1045 if (!NT_SUCCESS(Status
)) goto CleanUp
;
1047 /* Add query rights for everyone */
1048 Status
= RtlAddAccessAllowedAce(Dacl
,
1050 SYNCHRONIZE
| EVENT_QUERY_STATE
| READ_CONTROL
,
1052 if (!NT_SUCCESS(Status
)) goto CleanUp
;
1054 /* Full rights for the admin */
1055 Status
= RtlAddAccessAllowedAce(Dacl
,
1059 if (!NT_SUCCESS(Status
)) goto CleanUp
;
1061 /* As well as full rights for the system */
1062 Status
= RtlAddAccessAllowedAce(Dacl
,
1066 if (!NT_SUCCESS(Status
)) goto CleanUp
;
1068 /* Set this DACL inside the SD */
1069 Status
= RtlSetDaclSecurityDescriptor(&SecurityDescriptor
,
1073 if (!NT_SUCCESS(Status
)) goto CleanUp
;
1075 /* Setup the event attributes, making sure it's a permanent one */
1076 InitializeObjectAttributes(&ObjectAttributes
,
1078 OBJ_KERNEL_HANDLE
| OBJ_PERMANENT
,
1080 &SecurityDescriptor
);
1082 /* Create the event */
1083 Status
= ZwCreateEvent(&EventHandle
,
1092 /* Check if this is the success path */
1093 if (NT_SUCCESS(Status
))
1095 /* Add a reference to the object, then close the handle we had */
1096 Status
= ObReferenceObjectByHandle(EventHandle
,
1102 ZwClose (EventHandle
);
1112 MiInitializeMemoryEvents(VOID
)
1114 UNICODE_STRING LowString
= RTL_CONSTANT_STRING(L
"\\KernelObjects\\LowMemoryCondition");
1115 UNICODE_STRING HighString
= RTL_CONSTANT_STRING(L
"\\KernelObjects\\HighMemoryCondition");
1116 UNICODE_STRING LowPagedPoolString
= RTL_CONSTANT_STRING(L
"\\KernelObjects\\LowPagedPoolCondition");
1117 UNICODE_STRING HighPagedPoolString
= RTL_CONSTANT_STRING(L
"\\KernelObjects\\HighPagedPoolCondition");
1118 UNICODE_STRING LowNonPagedPoolString
= RTL_CONSTANT_STRING(L
"\\KernelObjects\\LowNonPagedPoolCondition");
1119 UNICODE_STRING HighNonPagedPoolString
= RTL_CONSTANT_STRING(L
"\\KernelObjects\\HighNonPagedPoolCondition");
1122 /* Check if we have a registry setting */
1123 if (MmLowMemoryThreshold
)
1125 /* Convert it to pages */
1126 MmLowMemoryThreshold
*= (_1MB
/ PAGE_SIZE
);
1130 /* The low memory threshold is hit when we don't consider that we have "plenty" of free pages anymore */
1131 MmLowMemoryThreshold
= MmPlentyFreePages
;
1133 /* More than one GB of memory? */
1134 if (MmNumberOfPhysicalPages
> 0x40000)
1136 /* Start at 32MB, and add another 16MB for each GB */
1137 MmLowMemoryThreshold
= (32 * _1MB
) / PAGE_SIZE
;
1138 MmLowMemoryThreshold
+= ((MmNumberOfPhysicalPages
- 0x40000) >> 7);
1140 else if (MmNumberOfPhysicalPages
> 0x8000)
1142 /* For systems with > 128MB RAM, add another 4MB for each 128MB */
1143 MmLowMemoryThreshold
+= ((MmNumberOfPhysicalPages
- 0x8000) >> 5);
1146 /* Don't let the minimum threshold go past 64MB */
1147 MmLowMemoryThreshold
= min(MmLowMemoryThreshold
, (64 * _1MB
) / PAGE_SIZE
);
1150 /* Check if we have a registry setting */
1151 if (MmHighMemoryThreshold
)
1153 /* Convert it into pages */
1154 MmHighMemoryThreshold
*= (_1MB
/ PAGE_SIZE
);
1158 /* Otherwise, the default is three times the low memory threshold */
1159 MmHighMemoryThreshold
= 3 * MmLowMemoryThreshold
;
1160 ASSERT(MmHighMemoryThreshold
> MmLowMemoryThreshold
);
1163 /* Make sure high threshold is actually higher than the low */
1164 MmHighMemoryThreshold
= max(MmHighMemoryThreshold
, MmLowMemoryThreshold
);
1166 /* Create the memory events for all the thresholds */
1167 Status
= MiCreateMemoryEvent(&LowString
, &MiLowMemoryEvent
);
1168 if (!NT_SUCCESS(Status
)) return FALSE
;
1169 Status
= MiCreateMemoryEvent(&HighString
, &MiHighMemoryEvent
);
1170 if (!NT_SUCCESS(Status
)) return FALSE
;
1171 Status
= MiCreateMemoryEvent(&LowPagedPoolString
, &MiLowPagedPoolEvent
);
1172 if (!NT_SUCCESS(Status
)) return FALSE
;
1173 Status
= MiCreateMemoryEvent(&HighPagedPoolString
, &MiHighPagedPoolEvent
);
1174 if (!NT_SUCCESS(Status
)) return FALSE
;
1175 Status
= MiCreateMemoryEvent(&LowNonPagedPoolString
, &MiLowNonPagedPoolEvent
);
1176 if (!NT_SUCCESS(Status
)) return FALSE
;
1177 Status
= MiCreateMemoryEvent(&HighNonPagedPoolString
, &MiHighNonPagedPoolEvent
);
1178 if (!NT_SUCCESS(Status
)) return FALSE
;
1180 /* Now setup the pool events */
1181 MiInitializePoolEvents();
1183 /* Set the initial event state */
1184 MiNotifyMemoryEvents();
1191 MiAddHalIoMappings(VOID
)
1196 ULONG i
, j
, PdeCount
;
1197 PFN_NUMBER PageFrameIndex
;
1199 /* HAL Heap address -- should be on a PDE boundary */
1200 BaseAddress
= (PVOID
)0xFFC00000;
1201 ASSERT(MiAddressToPteOffset(BaseAddress
) == 0);
1203 /* Check how many PDEs the heap has */
1204 PointerPde
= MiAddressToPde(BaseAddress
);
1205 PdeCount
= PDE_COUNT
- MiGetPdeOffset(BaseAddress
);
1206 for (i
= 0; i
< PdeCount
; i
++)
1208 /* Does the HAL own this mapping? */
1209 if ((PointerPde
->u
.Hard
.Valid
== 1) &&
1210 (MI_IS_PAGE_LARGE(PointerPde
) == FALSE
))
1212 /* Get the PTE for it and scan each page */
1213 PointerPte
= MiAddressToPte(BaseAddress
);
1214 for (j
= 0 ; j
< PTE_COUNT
; j
++)
1216 /* Does the HAL own this page? */
1217 if (PointerPte
->u
.Hard
.Valid
== 1)
1219 /* Is the HAL using it for device or I/O mapped memory? */
1220 PageFrameIndex
= PFN_FROM_PTE(PointerPte
);
1221 if (!MiGetPfnEntry(PageFrameIndex
))
1223 /* FIXME: For PAT, we need to track I/O cache attributes for coherency */
1224 DPRINT1("HAL I/O Mapping at %p is unsafe\n", BaseAddress
);
1228 /* Move to the next page */
1229 BaseAddress
= (PVOID
)((ULONG_PTR
)BaseAddress
+ PAGE_SIZE
);
1235 /* Move to the next address */
1236 BaseAddress
= (PVOID
)((ULONG_PTR
)BaseAddress
+ PDE_MAPPED_VA
);
1239 /* Move to the next PDE */
1246 MmDumpArmPfnDatabase(IN BOOLEAN StatusOnly
)
1250 PCHAR Consumer
= "Unknown";
1252 ULONG ActivePages
= 0, FreePages
= 0, OtherPages
= 0;
1254 ULONG UsageBucket
[MI_USAGE_FREE_PAGE
+ 1] = {0};
1255 PCHAR MI_USAGE_TEXT
[MI_USAGE_FREE_PAGE
+ 1] =
1283 // Loop the PFN database
1285 KeRaiseIrql(HIGH_LEVEL
, &OldIrql
);
1286 for (i
= 0; i
<= MmHighestPhysicalPage
; i
++)
1288 Pfn1
= MiGetPfnEntry(i
);
1289 if (!Pfn1
) continue;
1291 ASSERT(Pfn1
->PfnUsage
<= MI_USAGE_FREE_PAGE
);
1294 // Get the page location
1296 switch (Pfn1
->u3
.e1
.PageLocation
)
1298 case ActiveAndValid
:
1300 Consumer
= "Active and Valid";
1304 case ZeroedPageList
:
1306 Consumer
= "Zero Page List";
1312 Consumer
= "Free Page List";
1318 Consumer
= "Other (ASSERT!)";
1324 /* Add into bucket */
1325 UsageBucket
[Pfn1
->PfnUsage
]++;
1329 // Pretty-print the page
1332 DbgPrint("0x%08p:\t%20s\t(%04d.%04d)\t[%16s - %16s])\n",
1335 Pfn1
->u3
.e2
.ReferenceCount
,
1336 Pfn1
->u2
.ShareCount
== LIST_HEAD
? 0xFFFF : Pfn1
->u2
.ShareCount
,
1338 MI_USAGE_TEXT
[Pfn1
->PfnUsage
],
1346 DbgPrint("Active: %5d pages\t[%6d KB]\n", ActivePages
, (ActivePages
<< PAGE_SHIFT
) / 1024);
1347 DbgPrint("Free: %5d pages\t[%6d KB]\n", FreePages
, (FreePages
<< PAGE_SHIFT
) / 1024);
1348 DbgPrint("-----------------------------------------\n");
1350 OtherPages
= UsageBucket
[MI_USAGE_BOOT_DRIVER
];
1351 DbgPrint("Boot Images: %5d pages\t[%6d KB]\n", OtherPages
, (OtherPages
<< PAGE_SHIFT
) / 1024);
1352 OtherPages
= UsageBucket
[MI_USAGE_DRIVER_PAGE
];
1353 DbgPrint("System Drivers: %5d pages\t[%6d KB]\n", OtherPages
, (OtherPages
<< PAGE_SHIFT
) / 1024);
1354 OtherPages
= UsageBucket
[MI_USAGE_PFN_DATABASE
];
1355 DbgPrint("PFN Database: %5d pages\t[%6d KB]\n", OtherPages
, (OtherPages
<< PAGE_SHIFT
) / 1024);
1356 OtherPages
= UsageBucket
[MI_USAGE_PAGE_TABLE
] + UsageBucket
[MI_USAGE_LEGACY_PAGE_DIRECTORY
];
1357 DbgPrint("Page Tables: %5d pages\t[%6d KB]\n", OtherPages
, (OtherPages
<< PAGE_SHIFT
) / 1024);
1358 OtherPages
= UsageBucket
[MI_USAGE_NONPAGED_POOL
] + UsageBucket
[MI_USAGE_NONPAGED_POOL_EXPANSION
];
1359 DbgPrint("NonPaged Pool: %5d pages\t[%6d KB]\n", OtherPages
, (OtherPages
<< PAGE_SHIFT
) / 1024);
1360 OtherPages
= UsageBucket
[MI_USAGE_PAGED_POOL
];
1361 DbgPrint("Paged Pool: %5d pages\t[%6d KB]\n", OtherPages
, (OtherPages
<< PAGE_SHIFT
) / 1024);
1362 OtherPages
= UsageBucket
[MI_USAGE_KERNEL_STACK
] + UsageBucket
[MI_USAGE_KERNEL_STACK_EXPANSION
];
1363 DbgPrint("Kernel Stack: %5d pages\t[%6d KB]\n", OtherPages
, (OtherPages
<< PAGE_SHIFT
) / 1024);
1364 OtherPages
= UsageBucket
[MI_USAGE_INIT_MEMORY
];
1365 DbgPrint("Init Memory: %5d pages\t[%6d KB]\n", OtherPages
, (OtherPages
<< PAGE_SHIFT
) / 1024);
1366 OtherPages
= UsageBucket
[MI_USAGE_SECTION
];
1367 DbgPrint("Sections: %5d pages\t[%6d KB]\n", OtherPages
, (OtherPages
<< PAGE_SHIFT
) / 1024);
1368 OtherPages
= UsageBucket
[MI_USAGE_CACHE
];
1369 DbgPrint("Cache: %5d pages\t[%6d KB]\n", OtherPages
, (OtherPages
<< PAGE_SHIFT
) / 1024);
1371 KeLowerIrql(OldIrql
);
1376 MiPagesInLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock
,
1377 IN PBOOLEAN IncludeType
)
1379 PLIST_ENTRY NextEntry
;
1380 PFN_NUMBER PageCount
= 0;
1381 PMEMORY_ALLOCATION_DESCRIPTOR MdBlock
;
1384 // Now loop through the descriptors
1386 NextEntry
= LoaderBlock
->MemoryDescriptorListHead
.Flink
;
1387 while (NextEntry
!= &LoaderBlock
->MemoryDescriptorListHead
)
1390 // Grab each one, and check if it's one we should include
1392 MdBlock
= CONTAINING_RECORD(NextEntry
,
1393 MEMORY_ALLOCATION_DESCRIPTOR
,
1395 if ((MdBlock
->MemoryType
< LoaderMaximum
) &&
1396 (IncludeType
[MdBlock
->MemoryType
]))
1399 // Add this to our running total
1401 PageCount
+= MdBlock
->PageCount
;
1405 // Try the next descriptor
1407 NextEntry
= MdBlock
->ListEntry
.Flink
;
1416 PPHYSICAL_MEMORY_DESCRIPTOR
1419 MmInitializeMemoryLimits(IN PLOADER_PARAMETER_BLOCK LoaderBlock
,
1420 IN PBOOLEAN IncludeType
)
1422 PLIST_ENTRY NextEntry
;
1423 ULONG Run
= 0, InitialRuns
= 0;
1424 PFN_NUMBER NextPage
= -1, PageCount
= 0;
1425 PPHYSICAL_MEMORY_DESCRIPTOR Buffer
, NewBuffer
;
1426 PMEMORY_ALLOCATION_DESCRIPTOR MdBlock
;
1429 // Scan the memory descriptors
1431 NextEntry
= LoaderBlock
->MemoryDescriptorListHead
.Flink
;
1432 while (NextEntry
!= &LoaderBlock
->MemoryDescriptorListHead
)
1435 // For each one, increase the memory allocation estimate
1438 NextEntry
= NextEntry
->Flink
;
1442 // Allocate the maximum we'll ever need
1444 Buffer
= ExAllocatePoolWithTag(NonPagedPool
,
1445 sizeof(PHYSICAL_MEMORY_DESCRIPTOR
) +
1446 sizeof(PHYSICAL_MEMORY_RUN
) *
1449 if (!Buffer
) return NULL
;
1452 // For now that's how many runs we have
1454 Buffer
->NumberOfRuns
= InitialRuns
;
1457 // Now loop through the descriptors again
1459 NextEntry
= LoaderBlock
->MemoryDescriptorListHead
.Flink
;
1460 while (NextEntry
!= &LoaderBlock
->MemoryDescriptorListHead
)
1463 // Grab each one, and check if it's one we should include
1465 MdBlock
= CONTAINING_RECORD(NextEntry
,
1466 MEMORY_ALLOCATION_DESCRIPTOR
,
1468 if ((MdBlock
->MemoryType
< LoaderMaximum
) &&
1469 (IncludeType
[MdBlock
->MemoryType
]))
1472 // Add this to our running total
1474 PageCount
+= MdBlock
->PageCount
;
1477 // Check if the next page is described by the next descriptor
1479 if (MdBlock
->BasePage
== NextPage
)
1482 // Combine it into the same physical run
1484 ASSERT(MdBlock
->PageCount
!= 0);
1485 Buffer
->Run
[Run
- 1].PageCount
+= MdBlock
->PageCount
;
1486 NextPage
+= MdBlock
->PageCount
;
1491 // Otherwise just duplicate the descriptor's contents
1493 Buffer
->Run
[Run
].BasePage
= MdBlock
->BasePage
;
1494 Buffer
->Run
[Run
].PageCount
= MdBlock
->PageCount
;
1495 NextPage
= Buffer
->Run
[Run
].BasePage
+ Buffer
->Run
[Run
].PageCount
;
1498 // And in this case, increase the number of runs
1505 // Try the next descriptor
1507 NextEntry
= MdBlock
->ListEntry
.Flink
;
1511 // We should not have been able to go past our initial estimate
1513 ASSERT(Run
<= Buffer
->NumberOfRuns
);
1516 // Our guess was probably exaggerated...
1518 if (InitialRuns
> Run
)
1521 // Allocate a more accurately sized buffer
1523 NewBuffer
= ExAllocatePoolWithTag(NonPagedPool
,
1524 sizeof(PHYSICAL_MEMORY_DESCRIPTOR
) +
1525 sizeof(PHYSICAL_MEMORY_RUN
) *
1531 // Copy the old buffer into the new, then free it
1533 RtlCopyMemory(NewBuffer
->Run
,
1535 sizeof(PHYSICAL_MEMORY_RUN
) * Run
);
1539 // Now use the new buffer
1546 // Write the final numbers, and return it
1548 Buffer
->NumberOfRuns
= Run
;
1549 Buffer
->NumberOfPages
= PageCount
;
1556 MiBuildPagedPool(VOID
)
1560 MMPTE TempPte
= ValidKernelPte
;
1561 MMPDE TempPde
= ValidKernelPde
;
1562 PFN_NUMBER PageFrameIndex
;
1564 ULONG Size
, BitMapSize
;
1565 #if (_MI_PAGING_LEVELS == 2)
1567 // Get the page frame number for the system page directory
1569 PointerPte
= MiAddressToPte(PDE_BASE
);
1570 ASSERT(PD_COUNT
== 1);
1571 MmSystemPageDirectory
[0] = PFN_FROM_PTE(PointerPte
);
1574 // Allocate a system PTE which will hold a copy of the page directory
1576 PointerPte
= MiReserveSystemPtes(1, SystemPteSpace
);
1578 MmSystemPagePtes
= MiPteToAddress(PointerPte
);
1581 // Make this system PTE point to the system page directory.
1582 // It is now essentially double-mapped. This will be used later for lazy
1583 // evaluation of PDEs accross process switches, similarly to how the Global
1584 // page directory array in the old ReactOS Mm is used (but in a less hacky
1587 TempPte
= ValidKernelPte
;
1588 ASSERT(PD_COUNT
== 1);
1589 TempPte
.u
.Hard
.PageFrameNumber
= MmSystemPageDirectory
[0];
1590 MI_WRITE_VALID_PTE(PointerPte
, TempPte
);
1593 // Let's get back to paged pool work: size it up.
1594 // By default, it should be twice as big as nonpaged pool.
1596 MmSizeOfPagedPoolInBytes
= 2 * MmMaximumNonPagedPoolInBytes
;
1597 if (MmSizeOfPagedPoolInBytes
> ((ULONG_PTR
)MmNonPagedSystemStart
-
1598 (ULONG_PTR
)MmPagedPoolStart
))
1601 // On the other hand, we have limited VA space, so make sure that the VA
1602 // for paged pool doesn't overflow into nonpaged pool VA. Otherwise, set
1603 // whatever maximum is possible.
1605 MmSizeOfPagedPoolInBytes
= (ULONG_PTR
)MmNonPagedSystemStart
-
1606 (ULONG_PTR
)MmPagedPoolStart
;
1610 // Get the size in pages and make sure paged pool is at least 32MB.
1612 Size
= MmSizeOfPagedPoolInBytes
;
1613 if (Size
< MI_MIN_INIT_PAGED_POOLSIZE
) Size
= MI_MIN_INIT_PAGED_POOLSIZE
;
1614 Size
= BYTES_TO_PAGES(Size
);
1617 // Now check how many PTEs will be required for these many pages.
1619 Size
= (Size
+ (1024 - 1)) / 1024;
1622 // Recompute the page-aligned size of the paged pool, in bytes and pages.
1624 MmSizeOfPagedPoolInBytes
= Size
* PAGE_SIZE
* 1024;
1625 MmSizeOfPagedPoolInPages
= MmSizeOfPagedPoolInBytes
>> PAGE_SHIFT
;
1628 // Let's be really sure this doesn't overflow into nonpaged system VA
1630 ASSERT((MmSizeOfPagedPoolInBytes
+ (ULONG_PTR
)MmPagedPoolStart
) <=
1631 (ULONG_PTR
)MmNonPagedSystemStart
);
1634 // This is where paged pool ends
1636 MmPagedPoolEnd
= (PVOID
)(((ULONG_PTR
)MmPagedPoolStart
+
1637 MmSizeOfPagedPoolInBytes
) - 1);
1640 // So now get the PDE for paged pool and zero it out
1642 PointerPde
= MiAddressToPde(MmPagedPoolStart
);
1644 #if (_MI_PAGING_LEVELS >= 3)
1645 /* On these systems, there's no double-mapping, so instead, the PPE and PXEs
1646 * are setup to span the entire paged pool area, so there's no need for the
1651 RtlZeroMemory(PointerPde
,
1652 (1 + MiAddressToPde(MmPagedPoolEnd
) - PointerPde
) * sizeof(MMPDE
));
1655 // Next, get the first and last PTE
1657 PointerPte
= MiAddressToPte(MmPagedPoolStart
);
1658 MmPagedPoolInfo
.FirstPteForPagedPool
= PointerPte
;
1659 MmPagedPoolInfo
.LastPteForPagedPool
= MiAddressToPte(MmPagedPoolEnd
);
1662 // Lock the PFN database
1664 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
1666 /* Allocate a page and map the first paged pool PDE */
1667 MI_SET_USAGE(MI_USAGE_PAGED_POOL
);
1668 MI_SET_PROCESS2("Kernel");
1669 PageFrameIndex
= MiRemoveZeroPage(0);
1670 TempPde
.u
.Hard
.PageFrameNumber
= PageFrameIndex
;
1671 MI_WRITE_VALID_PDE(PointerPde
, TempPde
);
1672 #if (_MI_PAGING_LEVELS >= 3)
1673 /* Use the PPE of MmPagedPoolStart that was setup above */
1674 // Bla = PFN_FROM_PTE(PpeAddress(MmPagedPool...));
1677 /* Do it this way */
1678 // Bla = MmSystemPageDirectory[(PointerPde - (PMMPTE)PDE_BASE) / PDE_COUNT]
1680 /* Initialize the PFN entry for it */
1681 MiInitializePfnForOtherProcess(PageFrameIndex
,
1683 MmSystemPageDirectory
[(PointerPde
- (PMMPDE
)PDE_BASE
) / PDE_COUNT
]);
1687 // Release the PFN database lock
1689 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
1692 // We only have one PDE mapped for now... at fault time, additional PDEs
1693 // will be allocated to handle paged pool growth. This is where they'll have
1696 MmPagedPoolInfo
.NextPdeForPagedPoolExpansion
= PointerPde
+ 1;
1699 // We keep track of each page via a bit, so check how big the bitmap will
1700 // have to be (make sure to align our page count such that it fits nicely
1701 // into a 4-byte aligned bitmap.
1703 // We'll also allocate the bitmap header itself part of the same buffer.
1706 ASSERT(Size
== MmSizeOfPagedPoolInPages
);
1708 Size
= sizeof(RTL_BITMAP
) + (((Size
+ 31) / 32) * sizeof(ULONG
));
1711 // Allocate the allocation bitmap, which tells us which regions have not yet
1712 // been mapped into memory
1714 MmPagedPoolInfo
.PagedPoolAllocationMap
= ExAllocatePoolWithTag(NonPagedPool
,
1717 ASSERT(MmPagedPoolInfo
.PagedPoolAllocationMap
);
1720 // Initialize it such that at first, only the first page's worth of PTEs is
1721 // marked as allocated (incidentially, the first PDE we allocated earlier).
1723 RtlInitializeBitMap(MmPagedPoolInfo
.PagedPoolAllocationMap
,
1724 (PULONG
)(MmPagedPoolInfo
.PagedPoolAllocationMap
+ 1),
1726 RtlSetAllBits(MmPagedPoolInfo
.PagedPoolAllocationMap
);
1727 RtlClearBits(MmPagedPoolInfo
.PagedPoolAllocationMap
, 0, 1024);
1730 // We have a second bitmap, which keeps track of where allocations end.
1731 // Given the allocation bitmap and a base address, we can therefore figure
1732 // out which page is the last page of that allocation, and thus how big the
1733 // entire allocation is.
1735 MmPagedPoolInfo
.EndOfPagedPoolBitmap
= ExAllocatePoolWithTag(NonPagedPool
,
1738 ASSERT(MmPagedPoolInfo
.EndOfPagedPoolBitmap
);
1739 RtlInitializeBitMap(MmPagedPoolInfo
.EndOfPagedPoolBitmap
,
1740 (PULONG
)(MmPagedPoolInfo
.EndOfPagedPoolBitmap
+ 1),
1744 // Since no allocations have been made yet, there are no bits set as the end
1746 RtlClearAllBits(MmPagedPoolInfo
.EndOfPagedPoolBitmap
);
1749 // Initialize paged pool.
1751 InitializePool(PagedPool
, 0);
1753 /* Default low threshold of 30MB or one fifth of paged pool */
1754 MiLowPagedPoolThreshold
= (30 * _1MB
) >> PAGE_SHIFT
;
1755 MiLowPagedPoolThreshold
= min(MiLowPagedPoolThreshold
, Size
/ 5);
1757 /* Default high threshold of 60MB or 25% */
1758 MiHighPagedPoolThreshold
= (60 * _1MB
) >> PAGE_SHIFT
;
1759 MiHighPagedPoolThreshold
= min(MiHighPagedPoolThreshold
, (Size
* 2) / 5);
1760 ASSERT(MiLowPagedPoolThreshold
< MiHighPagedPoolThreshold
);
1762 /* Setup the global session space */
1763 MiInitializeSystemSpaceMap(NULL
);
1769 MiDbgDumpMemoryDescriptors(VOID
)
1771 PLIST_ENTRY NextEntry
;
1772 PMEMORY_ALLOCATION_DESCRIPTOR Md
;
1773 ULONG TotalPages
= 0;
1782 "FirmwareTemporary ",
1783 "FirmwarePermanent ",
1790 "ConsoleOutDriver ",
1792 "StartupKernelStack",
1793 "StartupPanicStack ",
1805 DPRINT1("Base\t\tLength\t\tType\n");
1806 for (NextEntry
= KeLoaderBlock
->MemoryDescriptorListHead
.Flink
;
1807 NextEntry
!= &KeLoaderBlock
->MemoryDescriptorListHead
;
1808 NextEntry
= NextEntry
->Flink
)
1810 Md
= CONTAINING_RECORD(NextEntry
, MEMORY_ALLOCATION_DESCRIPTOR
, ListEntry
);
1811 DPRINT1("%08lX\t%08lX\t%s\n", Md
->BasePage
, Md
->PageCount
, MemType
[Md
->MemoryType
]);
1812 TotalPages
+= Md
->PageCount
;
1815 DPRINT1("Total: %08lX (%d MB)\n", TotalPages
, (TotalPages
* PAGE_SIZE
) / 1024 / 1024);
1821 MmArmInitSystem(IN ULONG Phase
,
1822 IN PLOADER_PARAMETER_BLOCK LoaderBlock
)
1825 BOOLEAN IncludeType
[LoaderMaximum
];
1827 PPHYSICAL_MEMORY_RUN Run
;
1828 PFN_NUMBER PageCount
;
1830 /* Dump memory descriptors */
1831 if (MiDbgEnableMdDump
) MiDbgDumpMemoryDescriptors();
1834 // Instantiate memory that we don't consider RAM/usable
1835 // We use the same exclusions that Windows does, in order to try to be
1836 // compatible with WinLDR-style booting
1838 for (i
= 0; i
< LoaderMaximum
; i
++) IncludeType
[i
] = TRUE
;
1839 IncludeType
[LoaderBad
] = FALSE
;
1840 IncludeType
[LoaderFirmwarePermanent
] = FALSE
;
1841 IncludeType
[LoaderSpecialMemory
] = FALSE
;
1842 IncludeType
[LoaderBBTMemory
] = FALSE
;
1845 /* Initialize the phase 0 temporary event */
1846 KeInitializeEvent(&MiTempEvent
, NotificationEvent
, FALSE
);
1848 /* Set all the events to use the temporary event for now */
1849 MiLowMemoryEvent
= &MiTempEvent
;
1850 MiHighMemoryEvent
= &MiTempEvent
;
1851 MiLowPagedPoolEvent
= &MiTempEvent
;
1852 MiHighPagedPoolEvent
= &MiTempEvent
;
1853 MiLowNonPagedPoolEvent
= &MiTempEvent
;
1854 MiHighNonPagedPoolEvent
= &MiTempEvent
;
1857 // Define the basic user vs. kernel address space separation
1859 MmSystemRangeStart
= (PVOID
)KSEG0_BASE
;
1860 MmUserProbeAddress
= (ULONG_PTR
)MmSystemRangeStart
- 0x10000;
1861 MmHighestUserAddress
= (PVOID
)(MmUserProbeAddress
- 1);
1863 /* Highest PTE and PDE based on the addresses above */
1864 MiHighestUserPte
= MiAddressToPte(MmHighestUserAddress
);
1865 MiHighestUserPde
= MiAddressToPde(MmHighestUserAddress
);
1866 #if (_MI_PAGING_LEVELS >= 3)
1867 /* We need the highest PPE and PXE addresses */
1871 // Get the size of the boot loader's image allocations and then round
1872 // that region up to a PDE size, so that any PDEs we might create for
1873 // whatever follows are separate from the PDEs that boot loader might've
1874 // already created (and later, we can blow all that away if we want to).
1876 MmBootImageSize
= KeLoaderBlock
->Extension
->LoaderPagesSpanned
;
1877 MmBootImageSize
*= PAGE_SIZE
;
1878 MmBootImageSize
= (MmBootImageSize
+ PDE_MAPPED_VA
- 1) & ~(PDE_MAPPED_VA
- 1);
1879 ASSERT((MmBootImageSize
% PDE_MAPPED_VA
) == 0);
1882 // Set the size of session view, pool, and image
1884 MmSessionSize
= MI_SESSION_SIZE
;
1885 MmSessionViewSize
= MI_SESSION_VIEW_SIZE
;
1886 MmSessionPoolSize
= MI_SESSION_POOL_SIZE
;
1887 MmSessionImageSize
= MI_SESSION_IMAGE_SIZE
;
1890 // Set the size of system view
1892 MmSystemViewSize
= MI_SYSTEM_VIEW_SIZE
;
1895 // This is where it all ends
1897 MiSessionImageEnd
= (PVOID
)PTE_BASE
;
1900 // This is where we will load Win32k.sys and the video driver
1902 MiSessionImageStart
= (PVOID
)((ULONG_PTR
)MiSessionImageEnd
-
1903 MmSessionImageSize
);
1906 // So the view starts right below the session working set (itself below
1909 MiSessionViewStart
= (PVOID
)((ULONG_PTR
)MiSessionImageEnd
-
1910 MmSessionImageSize
-
1911 MI_SESSION_WORKING_SET_SIZE
-
1915 // Session pool follows
1917 MiSessionPoolEnd
= MiSessionViewStart
;
1918 MiSessionPoolStart
= (PVOID
)((ULONG_PTR
)MiSessionPoolEnd
-
1922 // And it all begins here
1924 MmSessionBase
= MiSessionPoolStart
;
1927 // Sanity check that our math is correct
1929 ASSERT((ULONG_PTR
)MmSessionBase
+ MmSessionSize
== PTE_BASE
);
1932 // Session space ends wherever image session space ends
1934 MiSessionSpaceEnd
= MiSessionImageEnd
;
1937 // System view space ends at session space, so now that we know where
1938 // this is, we can compute the base address of system view space itself.
1940 MiSystemViewStart
= (PVOID
)((ULONG_PTR
)MmSessionBase
-
1943 /* Compute the PTE addresses for all the addresses we carved out */
1944 MiSessionImagePteStart
= MiAddressToPte(MiSessionImageStart
);
1945 MiSessionImagePteEnd
= MiAddressToPte(MiSessionImageEnd
);
1946 MiSessionBasePte
= MiAddressToPte(MmSessionBase
);
1947 MiSessionLastPte
= MiAddressToPte(MiSessionSpaceEnd
);
1949 /* Initialize the user mode image list */
1950 InitializeListHead(&MmLoadedUserImageList
);
1952 /* Initialize the paged pool mutex */
1953 KeInitializeGuardedMutex(&MmPagedPoolMutex
);
1955 /* Initialize the Loader Lock */
1956 KeInitializeMutant(&MmSystemLoadLock
, FALSE
);
1958 /* Set the zero page event */
1959 KeInitializeEvent(&MmZeroingPageEvent
, SynchronizationEvent
, FALSE
);
1960 MmZeroingPageThreadActive
= FALSE
;
1963 // Count physical pages on the system
1965 PageCount
= MiPagesInLoaderBlock(LoaderBlock
, IncludeType
);
1968 // Check if this is a machine with less than 19MB of RAM
1970 if (PageCount
< MI_MIN_PAGES_FOR_SYSPTE_TUNING
)
1973 // Use the very minimum of system PTEs
1975 MmNumberOfSystemPtes
= 7000;
1980 // Use the default, but check if we have more than 32MB of RAM
1982 MmNumberOfSystemPtes
= 11000;
1983 if (PageCount
> MI_MIN_PAGES_FOR_SYSPTE_BOOST
)
1986 // Double the amount of system PTEs
1988 MmNumberOfSystemPtes
<<= 1;
1992 DPRINT("System PTE count has been tuned to %d (%d bytes)\n",
1993 MmNumberOfSystemPtes
, MmNumberOfSystemPtes
* PAGE_SIZE
);
1995 /* Initialize the working set lock */
1996 ExInitializePushLock(&MmSystemCacheWs
.WorkingSetMutex
);
1998 /* Set commit limit */
1999 MmTotalCommitLimit
= 2 * _1GB
;
2000 MmTotalCommitLimitMaximum
= MmTotalCommitLimit
;
2002 /* Has the allocation fragment been setup? */
2003 if (!MmAllocationFragment
)
2005 /* Use the default value */
2006 MmAllocationFragment
= MI_ALLOCATION_FRAGMENT
;
2007 if (PageCount
< ((256 * _1MB
) / PAGE_SIZE
))
2009 /* On memory systems with less than 256MB, divide by 4 */
2010 MmAllocationFragment
= MI_ALLOCATION_FRAGMENT
/ 4;
2012 else if (PageCount
< (_1GB
/ PAGE_SIZE
))
2014 /* On systems with less than 1GB, divide by 2 */
2015 MmAllocationFragment
= MI_ALLOCATION_FRAGMENT
/ 2;
2020 /* Convert from 1KB fragments to pages */
2021 MmAllocationFragment
*= _1KB
;
2022 MmAllocationFragment
= ROUND_TO_PAGES(MmAllocationFragment
);
2024 /* Don't let it past the maximum */
2025 MmAllocationFragment
= min(MmAllocationFragment
,
2026 MI_MAX_ALLOCATION_FRAGMENT
);
2028 /* Don't let it too small either */
2029 MmAllocationFragment
= max(MmAllocationFragment
,
2030 MI_MIN_ALLOCATION_FRAGMENT
);
2033 /* Initialize the platform-specific parts */
2034 MiInitMachineDependent(LoaderBlock
);
2037 // Build the physical memory block
2039 MmPhysicalMemoryBlock
= MmInitializeMemoryLimits(LoaderBlock
,
2043 // Allocate enough buffer for the PFN bitmap
2044 // Align it up to a 32-bit boundary
2046 Bitmap
= ExAllocatePoolWithTag(NonPagedPool
,
2047 (((MmHighestPhysicalPage
+ 1) + 31) / 32) * 4,
2054 KeBugCheckEx(INSTALL_MORE_MEMORY
,
2055 MmNumberOfPhysicalPages
,
2056 MmLowestPhysicalPage
,
2057 MmHighestPhysicalPage
,
2062 // Initialize it and clear all the bits to begin with
2064 RtlInitializeBitMap(&MiPfnBitMap
,
2066 MmHighestPhysicalPage
+ 1);
2067 RtlClearAllBits(&MiPfnBitMap
);
2070 // Loop physical memory runs
2072 for (i
= 0; i
< MmPhysicalMemoryBlock
->NumberOfRuns
; i
++)
2077 Run
= &MmPhysicalMemoryBlock
->Run
[i
];
2078 DPRINT("PHYSICAL RAM [0x%08p to 0x%08p]\n",
2079 Run
->BasePage
<< PAGE_SHIFT
,
2080 (Run
->BasePage
+ Run
->PageCount
) << PAGE_SHIFT
);
2083 // Make sure it has pages inside it
2088 // Set the bits in the PFN bitmap
2090 RtlSetBits(&MiPfnBitMap
, Run
->BasePage
, Run
->PageCount
);
2094 /* Look for large page cache entries that need caching */
2095 MiSyncCachedRanges();
2097 /* Loop for HAL Heap I/O device mappings that need coherency tracking */
2098 MiAddHalIoMappings();
2100 /* Set the initial resident page count */
2101 MmResidentAvailablePages
= MmAvailablePages
- 32;
2103 /* Initialize large page structures on PAE/x64, and MmProcessList on x86 */
2104 MiInitializeLargePageSupport();
2106 /* Check if the registry says any drivers should be loaded with large pages */
2107 MiInitializeDriverLargePageList();
2109 /* Relocate the boot drivers into system PTE space and fixup their PFNs */
2110 MiReloadBootLoadedDrivers(LoaderBlock
);
2112 /* FIXME: Call out into Driver Verifier for initialization */
2114 /* Check how many pages the system has */
2115 if (MmNumberOfPhysicalPages
<= ((13 * _1MB
) / PAGE_SIZE
))
2117 /* Set small system */
2118 MmSystemSize
= MmSmallSystem
;
2120 else if (MmNumberOfPhysicalPages
<= ((19 * _1MB
) / PAGE_SIZE
))
2122 /* Set small system and add 100 pages for the cache */
2123 MmSystemSize
= MmSmallSystem
;
2124 MmSystemCacheWsMinimum
+= 100;
2128 /* Set medium system and add 400 pages for the cache */
2129 MmSystemSize
= MmMediumSystem
;
2130 MmSystemCacheWsMinimum
+= 400;
2133 /* Check for less than 24MB */
2134 if (MmNumberOfPhysicalPages
< ((24 * _1MB
) / PAGE_SIZE
))
2136 /* No more than 32 pages */
2137 MmSystemCacheWsMinimum
= 32;
2140 /* Check for more than 32MB */
2141 if (MmNumberOfPhysicalPages
>= ((32 * _1MB
) / PAGE_SIZE
))
2143 /* Check for product type being "Wi" for WinNT */
2144 if (MmProductType
== '\0i\0W')
2146 /* Then this is a large system */
2147 MmSystemSize
= MmLargeSystem
;
2151 /* For servers, we need 64MB to consider this as being large */
2152 if (MmNumberOfPhysicalPages
>= ((64 * _1MB
) / PAGE_SIZE
))
2154 /* Set it as large */
2155 MmSystemSize
= MmLargeSystem
;
2160 /* Check for more than 33 MB */
2161 if (MmNumberOfPhysicalPages
> ((33 * _1MB
) / PAGE_SIZE
))
2163 /* Add another 500 pages to the cache */
2164 MmSystemCacheWsMinimum
+= 500;
2167 /* Now setup the shared user data fields */
2168 ASSERT(SharedUserData
->NumberOfPhysicalPages
== 0);
2169 SharedUserData
->NumberOfPhysicalPages
= MmNumberOfPhysicalPages
;
2170 SharedUserData
->LargePageMinimum
= 0;
2172 /* Check for workstation (Wi for WinNT) */
2173 if (MmProductType
== '\0i\0W')
2175 /* Set Windows NT Workstation product type */
2176 SharedUserData
->NtProductType
= NtProductWinNt
;
2181 /* Check for LanMan server */
2182 if (MmProductType
== '\0a\0L')
2184 /* This is a domain controller */
2185 SharedUserData
->NtProductType
= NtProductLanManNt
;
2189 /* Otherwise it must be a normal server */
2190 SharedUserData
->NtProductType
= NtProductServer
;
2193 /* Set the product type, and make the system more aggressive with low memory */
2195 MmMinimumFreePages
= 81;
2198 /* Update working set tuning parameters */
2199 MiAdjustWorkingSetManagerParameters(!MmProductType
);
2201 /* Finetune the page count by removing working set and NP expansion */
2202 MmResidentAvailablePages
-= MiExpansionPoolPagesInitialCharge
;
2203 MmResidentAvailablePages
-= MmSystemCacheWsMinimum
;
2204 MmResidentAvailableAtInit
= MmResidentAvailablePages
;
2205 if (MmResidentAvailablePages
<= 0)
2207 /* This should not happen */
2208 DPRINT1("System cache working set too big\n");
2212 /* Initialize the system cache */
2213 //MiInitializeSystemCache(MmSystemCacheWsMinimum, MmAvailablePages);
2215 /* Update the commit limit */
2216 MmTotalCommitLimit
= MmAvailablePages
;
2217 if (MmTotalCommitLimit
> 1024) MmTotalCommitLimit
-= 1024;
2218 MmTotalCommitLimitMaximum
= MmTotalCommitLimit
;
2220 /* Size up paged pool and build the shadow system page directory */
2223 /* Debugger physical memory support is now ready to be used */
2224 MmDebugPte
= MiAddressToPte(MiDebugMapping
);
2226 /* Initialize the loaded module list */
2227 MiInitializeLoadedModuleList(LoaderBlock
);
2231 // Always return success for now