2 * PROJECT: ReactOS Kernel
3 * LICENSE: BSD - See COPYING.ARM in the top level directory
4 * FILE: ntoskrnl/mm/ARM3/mminit.c
5 * PURPOSE: ARM Memory Manager Initialization
6 * PROGRAMMERS: ReactOS Portable Systems Group
9 /* INCLUDES *******************************************************************/
15 #define MODULE_INVOLVED_IN_ARM3
17 #undef MmSystemRangeStart
19 /* GLOBALS ********************************************************************/
22 // These are all registry-configurable, but by default, the memory manager will
23 // figure out the most appropriate values.
// (Zero here means "not configured"; sizing code computes the real values later.)
25 ULONG MmMaximumNonPagedPoolPercent
;
26 SIZE_T MmSizeOfNonPagedPoolInBytes
;
27 SIZE_T MmMaximumNonPagedPoolInBytes
;
29 /* Some of the same values, in pages */
30 PFN_NUMBER MmMaximumNonPagedPoolInPages
;
33 // These numbers describe the discrete equation components of the nonpaged
34 // pool sizing algorithm.
36 // They are described on http://support.microsoft.com/default.aspx/kb/126402/ja
37 // along with the algorithm that uses them, which is implemented later below.
// Minimum initial nonpaged pool: 256 KB.
39 SIZE_T MmMinimumNonPagedPoolSize
= 256 * 1024;
// Growth of the initial pool: 32 KB per MB of RAM.
40 ULONG MmMinAdditionNonPagedPoolPerMb
= 32 * 1024;
// Default maximum nonpaged pool: 1 MB.
41 SIZE_T MmDefaultMaximumNonPagedPool
= 1024 * 1024;
// Growth of the maximum pool: 400 KB per MB of RAM.
42 ULONG MmMaxAdditionNonPagedPoolPerMb
= 400 * 1024;
45 // The memory layout (and especially variable names) of the NT kernel mode
46 // components can be a bit hard to twig, especially when it comes to the non
49 // There are really two components to the non-paged pool:
51 // - The initial nonpaged pool, sized dynamically up to a maximum.
52 // - The expansion nonpaged pool, sized dynamically up to a maximum.
54 // The initial nonpaged pool is physically continuous for performance, and
55 // immediately follows the PFN database, typically sharing the same PDE. It is
56 // a very small resource (32MB on a 1GB system), and capped at 128MB.
58 // Right now we call this the "ARM³ Nonpaged Pool" and it begins somewhere after
59 // the PFN database (which starts at 0xB0000000).
61 // The expansion nonpaged pool, on the other hand, can grow much bigger (400MB
62 // for a 1GB system). On ARM³ however, it is currently capped at 128MB.
64 // The address where the initial nonpaged pool starts is aptly named
65 // MmNonPagedPoolStart, and it describes a range of MmSizeOfNonPagedPoolInBytes
68 // Expansion nonpaged pool starts at an address described by the variable called
69 // MmNonPagedPoolExpansionStart, and it goes on for MmMaximumNonPagedPoolInBytes
70 // minus MmSizeOfNonPagedPoolInBytes bytes, always reaching MmNonPagedPoolEnd
71 // (because of the way it's calculated) at 0xFFBE0000.
73 // Initial nonpaged pool is allocated and mapped early-on during boot, but what
74 // about the expansion nonpaged pool? It is instead composed of special pages
75 // which belong to what are called System PTEs. These PTEs are the matter of a
76 // later discussion, but they are also considered part of the "nonpaged" OS, due
77 // to the fact that they are never paged out -- once an address is described by
78 // a System PTE, it is always valid, until the System PTE is torn down.
80 // System PTEs are actually composed of two "spaces", the system space proper,
81 // and the nonpaged pool expansion space. The latter, as we've already seen,
82 // begins at MmNonPagedPoolExpansionStart. Based on the number of System PTEs
83 // that the system will support, the remaining address space below this address
84 // is used to hold the system space PTEs. This address, in turn, is held in the
85 // variable named MmNonPagedSystemStart, which itself is never allowed to go
86 // below 0xEB000000 (thus creating an upper bound on the number of System PTEs).
88 // This means that 330MB are reserved for total nonpaged system VA, on top of
89 // whatever the initial nonpaged pool allocation is.
91 // The following URLs, valid as of April 23rd, 2008, support this evidence:
93 // http://www.cs.miami.edu/~burt/journal/NT/memory.html
94 // http://www.ditii.com/2007/09/28/windows-memory-management-x86-virtual-address-space/
// Boundaries of the nonpaged system VA regions described above.
96 PVOID MmNonPagedSystemStart
;
97 PVOID MmNonPagedPoolStart
;
98 PVOID MmNonPagedPoolExpansionStart
;
99 PVOID MmNonPagedPoolEnd
= MI_NONPAGED_POOL_END
;
102 // This is where paged pool starts by default
104 PVOID MmPagedPoolStart
= MI_PAGED_POOL_START
;
105 PVOID MmPagedPoolEnd
;
108 // And this is its default size
110 SIZE_T MmSizeOfPagedPoolInBytes
= MI_MIN_INIT_PAGED_POOLSIZE
;
// Same default size, expressed in pages.
111 PFN_NUMBER MmSizeOfPagedPoolInPages
= MI_MIN_INIT_PAGED_POOLSIZE
/ PAGE_SIZE
;
114 // Session space starts at 0xBFFFFFFF and grows downwards
115 // By default, it includes an 8MB image area where we map win32k and video card
116 // drivers, followed by a 4MB area containing the session's working set. This is
117 // then followed by a 20MB mapped view area and finally by the session's paged
118 // pool, by default 16MB.
120 // On a normal system, this results in session space occupying the region from
121 // 0xBD000000 to 0xC0000000
123 // See miarm.h for the defines that determine the sizing of this region. On an
124 // NT system, some of these can be configured through the registry, but we don't
// Session-space region boundaries (expected x86 values shown in the trailing comments).
127 PVOID MiSessionSpaceEnd
; // 0xC0000000
128 PVOID MiSessionImageEnd
; // 0xC0000000
129 PVOID MiSessionImageStart
; // 0xBF800000
130 PVOID MiSessionSpaceWs
;
131 PVOID MiSessionViewStart
; // 0xBE000000
132 PVOID MiSessionPoolEnd
; // 0xBE000000
133 PVOID MiSessionPoolStart
; // 0xBD000000
134 PVOID MmSessionBase
; // 0xBD000000
// Sizes (in bytes) of the whole session space and its sub-regions.
135 SIZE_T MmSessionSize
;
136 SIZE_T MmSessionViewSize
;
137 SIZE_T MmSessionPoolSize
;
138 SIZE_T MmSessionImageSize
;
141 * These are the PTE addresses of the boundaries carved out above
143 PMMPTE MiSessionImagePteStart
;
144 PMMPTE MiSessionImagePteEnd
;
145 PMMPTE MiSessionBasePte
;
146 PMMPTE MiSessionLastPte
;
149 // The system view space, on the other hand, is where sections that are memory
150 // mapped into "system space" end up.
152 // By default, it is a 16MB region, but we hack it to be 32MB for ReactOS
154 PVOID MiSystemViewStart
;
155 SIZE_T MmSystemViewSize
;
157 #if (_MI_PAGING_LEVELS == 2)
159 // A copy of the system page directory (the page directory associated with the
160 // System process) is kept (double-mapped) by the manager in order to lazily
161 // map paged pool PDEs into external processes when they fault on a paged pool
164 PFN_NUMBER MmSystemPageDirectory
[PD_COUNT
];
165 PMMPDE MmSystemPagePtes
;
169 // The system cache starts right after hyperspace. The first few pages are for
170 // keeping track of the system working set list.
172 // This should be 0xC0C00000 -- the cache itself starts at 0xC1000000
174 PMMWSL MmSystemCacheWorkingSetList
= (PVOID
)MI_SYSTEM_CACHE_WS_START
;
177 // Windows NT seems to choose between 7000, 11000 and 50000
178 // On systems with more than 32MB, this number is then doubled, and further
179 // aligned up to a PDE boundary (4MB).
181 PFN_COUNT MmNumberOfSystemPtes
;
184 // This is how many pages the PFN database will take up
185 // In Windows, this includes the Quark Color Table, but not in ARM³
187 PFN_NUMBER MxPfnAllocation
;
190 // Unlike the old ReactOS Memory Manager, ARM³ (and Windows) does not keep track
191 // of pages that are not actually valid physical memory, such as ACPI reserved
192 // regions, BIOS address ranges, or holes in physical memory address space which
193 // could indicate device-mapped I/O memory.
195 // In fact, the lack of a PFN entry for a page usually indicates that this is
196 // I/O space instead.
198 // A bitmap, called the PFN bitmap, keeps track of all page frames by assigning
199 // a bit to each. If the bit is set, then the page is valid physical RAM.
201 RTL_BITMAP MiPfnBitMap
;
204 // This structure describes the different pieces of RAM-backed address space
206 PPHYSICAL_MEMORY_DESCRIPTOR MmPhysicalMemoryBlock
;
209 // This is where we keep track of the most basic physical layout markers
// MmLowestPhysicalPage starts at -1 (max PFN_NUMBER) so the first descriptor
// scanned always lowers it; see MiScanMemoryDescriptors below.
211 PFN_NUMBER MmHighestPhysicalPage
, MmLowestPhysicalPage
= -1;
212 PFN_COUNT MmNumberOfPhysicalPages
;
215 // The total number of pages mapped by the boot loader, which include the kernel
216 // HAL, boot drivers, registry, NLS files and other loader data structures is
217 // kept track of here. This depends on "LoaderPagesSpanned" being correct when
218 // coming from the loader.
220 // This number is later aligned up to a PDE boundary.
222 SIZE_T MmBootImageSize
;
225 // These three variables keep track of the core separation of address space that
226 // exists between kernel mode and user mode.
228 ULONG_PTR MmUserProbeAddress
;
229 PVOID MmHighestUserAddress
;
230 PVOID MmSystemRangeStart
;
232 /* And these store the respective highest PTE/PDE address */
233 PMMPTE MiHighestUserPte
;
234 PMMPDE MiHighestUserPde
;
235 #if (_MI_PAGING_LEVELS >= 3)
236 PMMPTE MiHighestUserPpe
;
237 #if (_MI_PAGING_LEVELS >= 4)
238 PMMPTE MiHighestUserPxe
;
242 /* These variables define the system cache address space */
243 PVOID MmSystemCacheStart
= (PVOID
)MI_SYSTEM_CACHE_START
;
244 PVOID MmSystemCacheEnd
;
245 ULONG_PTR MmSizeOfSystemCacheInPages
;
246 MMSUPPORT MmSystemCacheWs
;
249 // This is where hyperspace ends (followed by the system cache working set)
251 PVOID MmHyperSpaceEnd
;
254 // Page coloring algorithm data
// Computed in MiComputeColorInformation; mask is always colors - 1 (power of two).
256 ULONG MmSecondaryColors
;
257 ULONG MmSecondaryColorMask
;
260 // Actual (registry-configurable) size of a GUI thread's stack
262 ULONG MmLargeStackSize
= KERNEL_LARGE_STACK_SIZE
;
265 // Before we have a PFN database, memory comes straight from our physical memory
266 // blocks, which is nice because it's guaranteed contiguous and also because once
267 // we take a page from here, the system doesn't see it anymore.
268 // However, once the fun is over, those pages must be re-integrated back into
269 // PFN society life, and that requires us keeping a copy of the original layout
270 // so that we can parse it later.
272 PMEMORY_ALLOCATION_DESCRIPTOR MxFreeDescriptor
;
273 MEMORY_ALLOCATION_DESCRIPTOR MxOldFreeDescriptor
;
276 * For each page's worth bytes of L2 cache in a given set/way line, the zero and
277 * free lists are organized in what is called a "color".
279 * This array points to the two lists, so it can be thought of as a multi-dimensional
280 * array of MmFreePagesByColor[2][MmSecondaryColors]. Since the number is dynamic,
281 * we describe the array in pointer form instead.
283 * On a final note, the color tables themselves are right after the PFN database.
285 C_ASSERT(FreePageList
== 1);
286 PMMCOLOR_TABLES MmFreePagesByColor
[FreePageList
+ 1];
288 /* An event used in Phase 0 before the rest of the system is ready to go */
291 /* All the events used for memory threshold notifications */
292 PKEVENT MiLowMemoryEvent
;
293 PKEVENT MiHighMemoryEvent
;
294 PKEVENT MiLowPagedPoolEvent
;
295 PKEVENT MiHighPagedPoolEvent
;
296 PKEVENT MiLowNonPagedPoolEvent
;
297 PKEVENT MiHighNonPagedPoolEvent
;
299 /* The actual thresholds themselves, in page numbers */
300 PFN_NUMBER MmLowMemoryThreshold
;
301 PFN_NUMBER MmHighMemoryThreshold
;
302 PFN_NUMBER MiLowPagedPoolThreshold
;
303 PFN_NUMBER MiHighPagedPoolThreshold
;
304 PFN_NUMBER MiLowNonPagedPoolThreshold
;
305 PFN_NUMBER MiHighNonPagedPoolThreshold
;
308 * This number determines how many free pages must exist, at minimum, until we
309 * start trimming working sets and flushing modified pages to obtain more free
312 * This number changes if the system detects that this is a server product
314 PFN_NUMBER MmMinimumFreePages
= 26;
317 * This number indicates how many pages we consider to be a low limit of having
318 * "plenty" of free memory.
320 * It is doubled on systems that have more than 63MB of memory
322 PFN_NUMBER MmPlentyFreePages
= 400;
324 /* These values store the type of system this is (small, med, large) and if server */
326 MM_SYSTEMSIZE MmSystemSize
;
329 * These values store the cache working set minimums and maximums, in pages
331 * The minimum value is boosted on systems with more than 24MB of RAM, and cut
332 * down to only 32 pages on embedded (<24MB RAM) systems.
334 * An extra boost of 2MB is given on systems with more than 33MB of RAM.
336 PFN_NUMBER MmSystemCacheWsMinimum
= 288;
337 PFN_NUMBER MmSystemCacheWsMaximum
= 350;
339 /* FIXME: Move to cache/working set code later */
340 BOOLEAN MmLargeSystemCache
;
343 * This value determines in how many fragments/chunks the subsection prototype
344 * PTEs should be allocated when mapping a section object. It is configurable in
345 * the registry through the MapAllocationFragment parameter.
347 * The default is 64KB on systems with more than 1GB of RAM, 32KB on systems with
348 * more than 256MB of RAM, and 16KB on systems with less than 256MB of RAM.
350 * The maximum it can be set to is 2MB, and the minimum is 4KB.
352 SIZE_T MmAllocationFragment
;
355 * These two values track how much virtual memory can be committed, and when
356 * expansion should happen.
358 // FIXME: They should be moved elsewhere since it's not an "init" setting?
359 SIZE_T MmTotalCommitLimit
;
360 SIZE_T MmTotalCommitLimitMaximum
;
363 * These values tune certain user parameters. They have default values set here,
364 * as well as in the code, and can be overwritten by registry settings.
366 SIZE_T MmHeapSegmentReserve
= 1 * _1MB
;
367 SIZE_T MmHeapSegmentCommit
= 2 * PAGE_SIZE
;
368 SIZE_T MmHeapDeCommitTotalFreeThreshold
= 64 * _1KB
;
369 SIZE_T MmHeapDeCommitFreeBlockThreshold
= PAGE_SIZE
;
370 SIZE_T MmMinimumStackCommitInBytes
= 0;
372 /* Internal setting used for debugging memory descriptors */
/* NOTE(review): the initializer below is truncated in this extract -- the
 * original right-hand side is not visible here; confirm against the repository. */
373 BOOLEAN MiDbgEnableMdDump
=
380 /* Number of memory descriptors in the loader block */
381 ULONG MiNumberDescriptors
= 0;
383 /* Number of free pages in the loader block */
384 PFN_NUMBER MiNumberOfFreePages
= 0;
386 /* Timeout value for critical sections (2.5 minutes) */
387 ULONG MmCritsectTimeoutSeconds
= 150; // NT value: 720 * 60 * 60; (30 days)
388 LARGE_INTEGER MmCriticalSectionTimeout
;
391 // Throttling limits for Cc (in pages)
392 // Above top, we don't throttle
393 // Above bottom, we throttle depending on the amount of modified pages
394 // Otherwise, we throttle!
397 ULONG MmThrottleBottom
;
399 /* PRIVATE FUNCTIONS **********************************************************/
/*
 * Walks the loader block's memory descriptor list once, computing:
 *  - MiNumberDescriptors (total descriptors seen),
 *  - MmNumberOfPhysicalPages, MmLowestPhysicalPage, MmHighestPhysicalPage,
 *  - MiNumberOfFreePages (pages in free-ish descriptors),
 *  - MxFreeDescriptor (the largest free descriptor, snapshotted into
 *    MxOldFreeDescriptor since early allocations will consume it).
 * Invisible memory (firmware permanent, special, HAL cached, BBT) is skipped;
 * LoaderBad pages are excluded from the physical page totals.
 */
403 MiScanMemoryDescriptors(IN PLOADER_PARAMETER_BLOCK LoaderBlock
)
405 PLIST_ENTRY ListEntry
;
406 PMEMORY_ALLOCATION_DESCRIPTOR Descriptor
;
407 PFN_NUMBER PageFrameIndex
, FreePages
= 0;
409 /* Loop the memory descriptors */
410 for (ListEntry
= LoaderBlock
->MemoryDescriptorListHead
.Flink
;
411 ListEntry
!= &LoaderBlock
->MemoryDescriptorListHead
;
412 ListEntry
= ListEntry
->Flink
)
414 /* Get the descriptor */
415 Descriptor
= CONTAINING_RECORD(ListEntry
,
416 MEMORY_ALLOCATION_DESCRIPTOR
,
418 DPRINT("MD Type: %lx Base: %lx Count: %lx\n",
419 Descriptor
->MemoryType
, Descriptor
->BasePage
, Descriptor
->PageCount
);
421 /* Count this descriptor */
422 MiNumberDescriptors
++;
424 /* Check if this is invisible memory */
425 if ((Descriptor
->MemoryType
== LoaderFirmwarePermanent
) ||
426 (Descriptor
->MemoryType
== LoaderSpecialMemory
) ||
427 (Descriptor
->MemoryType
== LoaderHALCachedMemory
) ||
428 (Descriptor
->MemoryType
== LoaderBBTMemory
))
430 /* Skip this descriptor */
434 /* Check if this is bad memory */
435 if (Descriptor
->MemoryType
!= LoaderBad
)
437 /* Count this in the total of pages */
438 MmNumberOfPhysicalPages
+= (PFN_COUNT
)Descriptor
->PageCount
;
441 /* Check if this is the new lowest page */
442 if (Descriptor
->BasePage
< MmLowestPhysicalPage
)
444 /* Update the lowest page */
445 MmLowestPhysicalPage
= Descriptor
->BasePage
;
448 /* Check if this is the new highest page */
449 PageFrameIndex
= Descriptor
->BasePage
+ Descriptor
->PageCount
;
450 if (PageFrameIndex
> MmHighestPhysicalPage
)
452 /* Update the highest page */
453 MmHighestPhysicalPage
= PageFrameIndex
- 1;
456 /* Check if this is free memory */
457 if ((Descriptor
->MemoryType
== LoaderFree
) ||
458 (Descriptor
->MemoryType
== LoaderLoadedProgram
) ||
459 (Descriptor
->MemoryType
== LoaderFirmwareTemporary
) ||
460 (Descriptor
->MemoryType
== LoaderOsloaderStack
))
462 /* Count it towards the free pages */
463 MiNumberOfFreePages
+= Descriptor
->PageCount
;
465 /* Check if this is the largest memory descriptor */
466 if (Descriptor
->PageCount
> FreePages
)
469 MxFreeDescriptor
= Descriptor
;
470 FreePages
= Descriptor
->PageCount
;
475 /* Save original values of the free descriptor, since it'll be
476 * altered by early allocations */
477 MxOldFreeDescriptor
= *MxFreeDescriptor
;
/*
 * Early (pre-PFN-database) page allocator: carves PageCount contiguous pages
 * off the front of the largest free descriptor (MxFreeDescriptor), advancing
 * its BasePage and shrinking its PageCount. Pfn receives the first PFN of the
 * carved range (the return statement itself is elided in this extract).
 * Bugchecks with INSTALL_MORE_MEMORY if the descriptor cannot satisfy the
 * request -- this allocator has no fallback.
 */
483 MxGetNextPage(IN PFN_NUMBER PageCount
)
487 /* Make sure we have enough pages */
488 if (PageCount
> MxFreeDescriptor
->PageCount
)
490 /* Crash the system */
491 KeBugCheckEx(INSTALL_MORE_MEMORY
,
492 MmNumberOfPhysicalPages
,
493 MxFreeDescriptor
->PageCount
,
494 MxOldFreeDescriptor
.PageCount
,
498 /* Use our lowest usable free pages */
499 Pfn
= MxFreeDescriptor
->BasePage
;
500 MxFreeDescriptor
->BasePage
+= PageCount
;
501 MxFreeDescriptor
->PageCount
-= PageCount
;
/*
 * Computes MmSecondaryColors (number of page colors) from the KPCR's L2 cache
 * size and associativity, unless a value was already configured. The result is
 * clamped to [MI_MIN_SECONDARY_COLORS, MI_MAX_SECONDARY_COLORS], forced to the
 * default if not a power of two, and MmSecondaryColorMask (colors - 1) is
 * stored both globally and in the current PRCB.
 */
508 MiComputeColorInformation(VOID
)
510 ULONG L2Associativity
;
512 /* Check if no setting was provided already */
513 if (!MmSecondaryColors
)
515 /* Get L2 cache information */
516 L2Associativity
= KeGetPcr()->SecondLevelCacheAssociativity
;
518 /* The number of colors is the number of cache bytes by set/way */
519 MmSecondaryColors
= KeGetPcr()->SecondLevelCacheSize
;
520 if (L2Associativity
) MmSecondaryColors
/= L2Associativity
;
523 /* Now convert cache bytes into pages */
524 MmSecondaryColors
>>= PAGE_SHIFT
;
525 if (!MmSecondaryColors
)
527 /* If there was no cache data from the KPCR, use the default colors */
528 MmSecondaryColors
= MI_SECONDARY_COLORS
;
532 /* Otherwise, make sure there aren't too many colors */
533 if (MmSecondaryColors
> MI_MAX_SECONDARY_COLORS
)
535 /* Set the maximum */
536 MmSecondaryColors
= MI_MAX_SECONDARY_COLORS
;
539 /* Make sure there aren't too few colors */
540 if (MmSecondaryColors
< MI_MIN_SECONDARY_COLORS
)
542 /* Set the default */
543 MmSecondaryColors
= MI_SECONDARY_COLORS
;
546 /* Finally make sure the colors are a power of two */
547 if (MmSecondaryColors
& (MmSecondaryColors
- 1))
549 /* Set the default */
550 MmSecondaryColors
= MI_SECONDARY_COLORS
;
554 /* Compute the mask and store it */
555 MmSecondaryColorMask
= MmSecondaryColors
- 1;
556 KeGetCurrentPrcb()->SecondaryColorMask
= MmSecondaryColorMask
;
/*
 * Maps and initializes the page color tables that live immediately after the
 * PFN database. Allocates backing pages via MxGetNextPage for any PTE in the
 * table range that is not yet valid, zeroes each new page, then initializes
 * the zeroed- and free-list heads (Flink/Blink = LIST_HEAD sentinel, Count = 0)
 * for every secondary color.
 */
562 MiInitializeColorTables(VOID
)
565 PMMPTE PointerPte
, LastPte
;
566 MMPTE TempPte
= ValidKernelPte
;
568 /* The color table starts after the ARM3 PFN database */
569 MmFreePagesByColor
[0] = (PMMCOLOR_TABLES
)&MmPfnDatabase
[MmHighestPhysicalPage
+ 1];
571 /* Loop the PTEs. We have two color tables for each secondary color */
572 PointerPte
= MiAddressToPte(&MmFreePagesByColor
[0][0]);
573 LastPte
= MiAddressToPte((ULONG_PTR
)MmFreePagesByColor
[0] +
574 (2 * MmSecondaryColors
* sizeof(MMCOLOR_TABLES
))
576 while (PointerPte
<= LastPte
)
578 /* Check for valid PTE */
579 if (PointerPte
->u
.Hard
.Valid
== 0)
581 /* Get a page and map it */
582 TempPte
.u
.Hard
.PageFrameNumber
= MxGetNextPage(1);
583 MI_WRITE_VALID_PTE(PointerPte
, TempPte
);
585 /* Zero out the page */
586 RtlZeroMemory(MiPteToAddress(PointerPte
), PAGE_SIZE
);
593 /* Now set the address of the next list, right after this one */
594 MmFreePagesByColor
[1] = &MmFreePagesByColor
[0][MmSecondaryColors
];
596 /* Now loop the lists to set them up */
597 for (i
= 0; i
< MmSecondaryColors
; i
++)
599 /* Set both free and zero lists for each color */
600 MmFreePagesByColor
[ZeroedPageList
][i
].Flink
= LIST_HEAD
;
601 MmFreePagesByColor
[ZeroedPageList
][i
].Blink
= (PVOID
)LIST_HEAD
;
602 MmFreePagesByColor
[ZeroedPageList
][i
].Count
= 0;
603 MmFreePagesByColor
[FreePageList
][i
].Flink
= LIST_HEAD
;
604 MmFreePagesByColor
[FreePageList
][i
].Blink
= (PVOID
)LIST_HEAD
;
605 MmFreePagesByColor
[FreePageList
][i
].Count
= 0;
/*
 * Decides whether a given PFN deserves a PFN database entry. Walks the
 * (ordered) loader memory descriptor list: if the PFN falls inside a
 * descriptor of unmapped type (firmware permanent, BBT, special) it is not
 * regular memory; if it falls in any other descriptor it is. PFNs covered by
 * no descriptor are checked against the saved original free descriptor
 * (MxOldFreeDescriptor), since those pages were consumed for initial mappings
 * and must still be counted. The actual TRUE/FALSE return statements sit on
 * lines elided from this extract.
 */
613 MiIsRegularMemory(IN PLOADER_PARAMETER_BLOCK LoaderBlock
,
616 PLIST_ENTRY NextEntry
;
617 PMEMORY_ALLOCATION_DESCRIPTOR MdBlock
;
619 /* Loop the memory descriptors */
620 NextEntry
= LoaderBlock
->MemoryDescriptorListHead
.Flink
;
621 while (NextEntry
!= &LoaderBlock
->MemoryDescriptorListHead
)
623 /* Get the memory descriptor */
624 MdBlock
= CONTAINING_RECORD(NextEntry
,
625 MEMORY_ALLOCATION_DESCRIPTOR
,
628 /* Check if this PFN could be part of the block */
629 if (Pfn
>= (MdBlock
->BasePage
))
631 /* Check if it really is part of the block */
632 if (Pfn
< (MdBlock
->BasePage
+ MdBlock
->PageCount
))
634 /* Check if the block is actually memory we don't map */
635 if ((MdBlock
->MemoryType
== LoaderFirmwarePermanent
) ||
636 (MdBlock
->MemoryType
== LoaderBBTMemory
) ||
637 (MdBlock
->MemoryType
== LoaderSpecialMemory
))
639 /* We don't need PFN database entries for this memory */
643 /* This is memory we want to map */
649 /* Blocks are ordered, so if it's not here, it doesn't exist */
653 /* Get to the next descriptor */
654 NextEntry
= MdBlock
->ListEntry
.Flink
;
657 /* Check if this PFN is actually from our free memory descriptor */
658 if ((Pfn
>= MxOldFreeDescriptor
.BasePage
) &&
659 (Pfn
< MxOldFreeDescriptor
.BasePage
+ MxOldFreeDescriptor
.PageCount
))
661 /* We use these pages for initial mappings, so we do want to count them */
665 /* Otherwise this isn't memory that we describe or care about */
/*
 * Maps the virtual pages of the PFN database itself. For every RAM-backed
 * descriptor range, the PTEs covering MmPfnDatabase[BasePage..BasePage+Count)
 * are made valid; physical pages are consumed manually from the free
 * descriptor (FreePage/FreePageCount locals) rather than via MxGetNextPage,
 * because MxFreeDescriptor must not be mutated mid-scan -- the loop itself
 * compares descriptors against it. New pages are zeroed after mapping, and
 * the free descriptor is updated once at the end.
 */
672 MiMapPfnDatabase(IN PLOADER_PARAMETER_BLOCK LoaderBlock
)
674 PFN_NUMBER FreePage
, FreePageCount
, PagesLeft
, BasePage
, PageCount
;
675 PLIST_ENTRY NextEntry
;
676 PMEMORY_ALLOCATION_DESCRIPTOR MdBlock
;
677 PMMPTE PointerPte
, LastPte
;
678 MMPTE TempPte
= ValidKernelPte
;
680 /* Get current page data, since we won't be using MxGetNextPage as it would corrupt our state */
681 FreePage
= MxFreeDescriptor
->BasePage
;
682 FreePageCount
= MxFreeDescriptor
->PageCount
;
685 /* Loop the memory descriptors */
686 NextEntry
= LoaderBlock
->MemoryDescriptorListHead
.Flink
;
687 while (NextEntry
!= &LoaderBlock
->MemoryDescriptorListHead
)
689 /* Get the descriptor */
690 MdBlock
= CONTAINING_RECORD(NextEntry
,
691 MEMORY_ALLOCATION_DESCRIPTOR
,
693 if ((MdBlock
->MemoryType
== LoaderFirmwarePermanent
) ||
694 (MdBlock
->MemoryType
== LoaderBBTMemory
) ||
695 (MdBlock
->MemoryType
== LoaderSpecialMemory
))
697 /* These pages are not part of the PFN database */
698 NextEntry
= MdBlock
->ListEntry
.Flink
;
702 /* Next, check if this is our special free descriptor we've found */
703 if (MdBlock
== MxFreeDescriptor
)
705 /* Use the real numbers instead */
706 BasePage
= MxOldFreeDescriptor
.BasePage
;
707 PageCount
= MxOldFreeDescriptor
.PageCount
;
711 /* Use the descriptor's numbers */
712 BasePage
= MdBlock
->BasePage
;
713 PageCount
= MdBlock
->PageCount
;
716 /* Get the PTEs for this range */
717 PointerPte
= MiAddressToPte(&MmPfnDatabase
[BasePage
]);
718 LastPte
= MiAddressToPte(((ULONG_PTR
)&MmPfnDatabase
[BasePage
+ PageCount
]) - 1);
719 DPRINT("MD Type: %lx Base: %lx Count: %lx\n", MdBlock
->MemoryType
, BasePage
, PageCount
);
722 while (PointerPte
<= LastPte
)
724 /* We'll only touch PTEs that aren't already valid */
725 if (PointerPte
->u
.Hard
.Valid
== 0)
727 /* Use the next free page */
728 TempPte
.u
.Hard
.PageFrameNumber
= FreePage
;
729 ASSERT(FreePageCount
!= 0);
731 /* Consume free pages */
/* (Out-of-pages path: same INSTALL_MORE_MEMORY bugcheck as MxGetNextPage.) */
737 KeBugCheckEx(INSTALL_MORE_MEMORY
,
738 MmNumberOfPhysicalPages
,
740 MxOldFreeDescriptor
.PageCount
,
744 /* Write out this PTE */
746 MI_WRITE_VALID_PTE(PointerPte
, TempPte
);
749 RtlZeroMemory(MiPteToAddress(PointerPte
), PAGE_SIZE
);
756 /* Do the next address range */
757 NextEntry
= MdBlock
->ListEntry
.Flink
;
760 /* Now update the free descriptors to consume the pages we used up during the PFN allocation loop */
761 MxFreeDescriptor
->BasePage
= FreePage
;
762 MxFreeDescriptor
->PageCount
= FreePageCount
;
/*
 * Builds PFN entries for every page currently mapped by the boot page tables.
 * Scans all PDEs; for each valid PDE backing regular memory, the page-table
 * page gets a PFN entry (PteFrame = startup PD, usage "Initial PDE"), then all
 * valid PTEs underneath it are scanned: the PDE's share count is bumped per
 * valid PTE, and pages above 0xA0000000 or inside the initial nonpaged pool
 * get their own PFN entries (usage "Initial PTE").
 */
768 MiBuildPfnDatabaseFromPages(IN PLOADER_PARAMETER_BLOCK LoaderBlock
)
773 PFN_NUMBER PageFrameIndex
, StartupPdIndex
, PtePageIndex
;
775 ULONG_PTR BaseAddress
= 0;
777 /* PFN of the startup page directory */
778 StartupPdIndex
= PFN_FROM_PTE(MiAddressToPde(PDE_BASE
));
780 /* Start with the first PDE and scan them all */
781 PointerPde
= MiAddressToPde(NULL
);
782 Count
= PD_COUNT
* PDE_COUNT
;
783 for (i
= 0; i
< Count
; i
++)
785 /* Check for valid PDE */
786 if (PointerPde
->u
.Hard
.Valid
== 1)
788 /* Get the PFN from it */
789 PageFrameIndex
= PFN_FROM_PTE(PointerPde
);
791 /* Do we want a PFN entry for this page? */
792 if (MiIsRegularMemory(LoaderBlock
, PageFrameIndex
))
794 /* Yes we do, set it up */
795 Pfn1
= MiGetPfnEntry(PageFrameIndex
);
796 Pfn1
->u4
.PteFrame
= StartupPdIndex
;
797 Pfn1
->PteAddress
= (PMMPTE
)PointerPde
;
798 Pfn1
->u2
.ShareCount
++;
799 Pfn1
->u3
.e2
.ReferenceCount
= 1;
800 Pfn1
->u3
.e1
.PageLocation
= ActiveAndValid
;
801 Pfn1
->u3
.e1
.CacheAttribute
= MiNonCached
;
803 Pfn1
->PfnUsage
= MI_USAGE_INIT_MEMORY
;
/* NOTE(review): "Initial PDE" is 12 bytes including the NUL, but 16 bytes are
 * copied -- this reads past the end of the string literal. Verify upstream. */
804 memcpy(Pfn1
->ProcessName
, "Initial PDE", 16);
813 /* Now get the PTE and scan the pages */
814 PointerPte
= MiAddressToPte(BaseAddress
);
815 for (j
= 0; j
< PTE_COUNT
; j
++)
817 /* Check for a valid PTE */
818 if (PointerPte
->u
.Hard
.Valid
== 1)
820 /* Increase the shared count of the PFN entry for the PDE */
821 ASSERT(Pfn1
!= NULL
);
822 Pfn1
->u2
.ShareCount
++;
824 /* Now check if the PTE is valid memory too */
825 PtePageIndex
= PFN_FROM_PTE(PointerPte
);
826 if (MiIsRegularMemory(LoaderBlock
, PtePageIndex
))
829 * Only add pages above the end of system code or pages
830 * that are part of nonpaged pool
832 if ((BaseAddress
>= 0xA0000000) ||
833 ((BaseAddress
>= (ULONG_PTR
)MmNonPagedPoolStart
) &&
834 (BaseAddress
< (ULONG_PTR
)MmNonPagedPoolStart
+
835 MmSizeOfNonPagedPoolInBytes
)))
837 /* Get the PFN entry and make sure it too is valid */
838 Pfn2
= MiGetPfnEntry(PtePageIndex
);
839 if ((MmIsAddressValid(Pfn2
)) &&
840 (MmIsAddressValid(Pfn2
+ 1)))
842 /* Setup the PFN entry */
843 Pfn2
->u4
.PteFrame
= PageFrameIndex
;
844 Pfn2
->PteAddress
= PointerPte
;
845 Pfn2
->u2
.ShareCount
++;
846 Pfn2
->u3
.e2
.ReferenceCount
= 1;
847 Pfn2
->u3
.e1
.PageLocation
= ActiveAndValid
;
848 Pfn2
->u3
.e1
.CacheAttribute
= MiNonCached
;
850 Pfn2
->PfnUsage
= MI_USAGE_INIT_MEMORY
;
/* NOTE(review): every other line in this branch writes Pfn2, yet this memcpy
 * tags Pfn1->ProcessName -- looks like it should be Pfn2. Also copies 16 bytes
 * from a 12-byte literal, as above. Verify against the upstream repository. */
851 memcpy(Pfn1
->ProcessName
, "Initial PTE", 16);
860 BaseAddress
+= PAGE_SIZE
;
865 /* Next PDE mapped address */
866 BaseAddress
+= PDE_MAPPED_VA
;
/*
 * If physical page 0 exists (MmLowestPhysicalPage == 0) and is unreferenced,
 * poisons its PFN entry so accidental use of PFN 0 is caught: it is marked
 * ActiveAndValid with a deliberately bogus PteAddress (the PDE for
 * 0xFFFFFFFF) and an artificially huge reference count (0xFFF0) so it can
 * never be freed or reused.
 */
877 MiBuildPfnDatabaseZeroPage(VOID
)
882 /* Grab the lowest page and check if it has no real references */
883 Pfn1
= MiGetPfnEntry(MmLowestPhysicalPage
);
884 if (!(MmLowestPhysicalPage
) && !(Pfn1
->u3
.e2
.ReferenceCount
))
886 /* Make it a bogus page to catch errors */
887 PointerPde
= MiAddressToPde(0xFFFFFFFF);
888 Pfn1
->u4
.PteFrame
= PFN_FROM_PTE(PointerPde
);
889 Pfn1
->PteAddress
= (PMMPTE
)PointerPde
;
890 Pfn1
->u2
.ShareCount
++;
891 Pfn1
->u3
.e2
.ReferenceCount
= 0xFFF0;
892 Pfn1
->u3
.e1
.PageLocation
= ActiveAndValid
;
893 Pfn1
->u3
.e1
.CacheAttribute
= MiNonCached
;
/*
 * Completes the PFN database from the loader's memory descriptor list.
 * Descriptors above MmHighestPhysicalPage are trimmed or skipped (the list is
 * ordered). Then, per descriptor type:
 *  - free-ish memory (free, loaded program, firmware temporary, osloader
 *    stack): unreferenced pages are inserted into the free list, walking the
 *    descriptor backwards under the PFN lock;
 *  - invisible memory (firmware permanent, special, BBT): left alone;
 *  - everything else (boot drivers etc.): pages are marked in-use via their
 *    KSEG0 mapping, with LoaderXIPRom pages turned into pseudo-ROM entries.
 */
900 MiBuildPfnDatabaseFromLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock
)
902 PLIST_ENTRY NextEntry
;
903 PFN_NUMBER PageCount
= 0;
904 PMEMORY_ALLOCATION_DESCRIPTOR MdBlock
;
905 PFN_NUMBER PageFrameIndex
;
911 /* Now loop through the descriptors */
912 NextEntry
= LoaderBlock
->MemoryDescriptorListHead
.Flink
;
913 while (NextEntry
!= &LoaderBlock
->MemoryDescriptorListHead
)
915 /* Get the current descriptor */
916 MdBlock
= CONTAINING_RECORD(NextEntry
,
917 MEMORY_ALLOCATION_DESCRIPTOR
,
921 PageCount
= MdBlock
->PageCount
;
922 PageFrameIndex
= MdBlock
->BasePage
;
924 /* Don't allow memory above what the PFN database is mapping */
925 if (PageFrameIndex
> MmHighestPhysicalPage
)
927 /* Since they are ordered, everything past here will be larger */
931 /* On the other hand, the end page might be higher up... */
932 if ((PageFrameIndex
+ PageCount
) > (MmHighestPhysicalPage
+ 1))
934 /* In which case we'll trim the descriptor to go as high as we can */
935 PageCount
= MmHighestPhysicalPage
+ 1 - PageFrameIndex
;
936 MdBlock
->PageCount
= PageCount
;
938 /* But if there's nothing left to trim, we got too high, so quit */
939 if (!PageCount
) break;
942 /* Now check the descriptor type */
943 switch (MdBlock
->MemoryType
)
945 /* Check for bad RAM */
948 DPRINT1("You either have specified /BURNMEMORY or damaged RAM modules.\n");
951 /* Check for free RAM */
953 case LoaderLoadedProgram
:
954 case LoaderFirmwareTemporary
:
955 case LoaderOsloaderStack
:
957 /* Get the last page of this descriptor. Note we loop backwards */
958 PageFrameIndex
+= PageCount
- 1;
959 Pfn1
= MiGetPfnEntry(PageFrameIndex
);
961 /* Lock the PFN Database */
962 OldIrql
= MiAcquirePfnLock();
965 /* If the page really has no references, mark it as free */
966 if (!Pfn1
->u3
.e2
.ReferenceCount
)
968 /* Add it to the free list */
969 Pfn1
->u3
.e1
.CacheAttribute
= MiNonCached
;
970 MiInsertPageInFreeList(PageFrameIndex
);
973 /* Go to the next page */
978 /* Release PFN database */
979 MiReleasePfnLock(OldIrql
);
981 /* Done with this block */
984 /* Check for pages that are invisible to us */
985 case LoaderFirmwarePermanent
:
986 case LoaderSpecialMemory
:
987 case LoaderBBTMemory
:
994 /* Map these pages with the KSEG0 mapping that adds 0x80000000 */
995 PointerPte
= MiAddressToPte(KSEG0_BASE
+ (PageFrameIndex
<< PAGE_SHIFT
));
996 Pfn1
= MiGetPfnEntry(PageFrameIndex
);
999 /* Check if the page is really unused */
1000 PointerPde
= MiAddressToPde(KSEG0_BASE
+ (PageFrameIndex
<< PAGE_SHIFT
));
1001 if (!Pfn1
->u3
.e2
.ReferenceCount
)
1003 /* Mark it as being in-use */
1004 Pfn1
->u4
.PteFrame
= PFN_FROM_PTE(PointerPde
);
1005 Pfn1
->PteAddress
= PointerPte
;
1006 Pfn1
->u2
.ShareCount
++;
1007 Pfn1
->u3
.e2
.ReferenceCount
= 1;
1008 Pfn1
->u3
.e1
.PageLocation
= ActiveAndValid
;
1009 Pfn1
->u3
.e1
.CacheAttribute
= MiNonCached
;
1011 Pfn1
->PfnUsage
= MI_USAGE_BOOT_DRIVER
;
1014 /* Check for RAM disk page */
1015 if (MdBlock
->MemoryType
== LoaderXIPRom
)
1017 /* Make it a pseudo-I/O ROM mapping */
1019 Pfn1
->u2
.ShareCount
= 0;
1020 Pfn1
->u3
.e2
.ReferenceCount
= 0;
1021 Pfn1
->u3
.e1
.PageLocation
= 0;
1022 Pfn1
->u3
.e1
.Rom
= 1;
1023 Pfn1
->u4
.InPageError
= 0;
1024 Pfn1
->u3
.e1
.PrototypePte
= 1;
1028 /* Advance page structures */
1036 /* Next descriptor entry */
1037 NextEntry
= MdBlock
->ListEntry
.Flink
;
/*
 * Accounts for the pages that back the PFN database itself: walks the PTEs
 * covering MmPfnDatabase[MmLowestPhysicalPage..MmHighestPhysicalPage] and, for
 * each valid mapping, marks the backing page's PFN entry with one share and
 * one reference (usage MI_USAGE_PFN_DATABASE).
 */
1044 MiBuildPfnDatabaseSelf(VOID
)
1046 PMMPTE PointerPte
, LastPte
;
1049 /* Loop the PFN database page */
1050 PointerPte
= MiAddressToPte(MiGetPfnEntry(MmLowestPhysicalPage
));
1051 LastPte
= MiAddressToPte(MiGetPfnEntry(MmHighestPhysicalPage
));
1052 while (PointerPte
<= LastPte
)
1054 /* Make sure the page is valid */
1055 if (PointerPte
->u
.Hard
.Valid
== 1)
1057 /* Get the PFN entry and just mark it referenced */
1058 Pfn1
= MiGetPfnEntry(PointerPte
->u
.Hard
.PageFrameNumber
);
1059 Pfn1
->u2
.ShareCount
= 1;
1060 Pfn1
->u3
.e2
.ReferenceCount
= 1;
1062 Pfn1
->PfnUsage
= MI_USAGE_PFN_DATABASE
;
/*
 * Top-level PFN database initialization: runs the four build passes in order
 * (boot page tables, zero-page poisoning, loader descriptor list, and finally
 * the PFN database's own backing pages).
 */
1074 MiInitializePfnDatabase(IN PLOADER_PARAMETER_BLOCK LoaderBlock
)
1076 /* Scan memory and start setting up PFN entries */
1077 MiBuildPfnDatabaseFromPages(LoaderBlock
);
1079 /* Add the zero page */
1080 MiBuildPfnDatabaseZeroPage();
1082 /* Scan the loader block and build the rest of the PFN database */
1083 MiBuildPfnDatabaseFromLoaderBlock(LoaderBlock
);
1085 /* Finally add the pages for the PFN database itself */
1086 MiBuildPfnDatabaseSelf();
1088 #endif /* !_M_AMD64 */
/*
 * Releases loader-owned memory back to the system once boot data is no longer
 * needed. First pass counts the descriptors so a PHYSICAL_MEMORY_RUN array
 * (pool tag 'MmMl', written byte-reversed as 'lMmM') can be sized; second pass
 * records the registry/NLS/osloader-heap runs as deletion candidates. Then,
 * under the PFN lock and iterating the recorded runs in reverse, each page is
 * either inserted straight into the free list (no references, not on a list)
 * or has its PTE cleared and its share count dropped so it gets deleted.
 */
1093 MmFreeLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock
)
1096 PMEMORY_ALLOCATION_DESCRIPTOR MdBlock
;
1098 PFN_NUMBER BasePage
, LoaderPages
;
1101 PPHYSICAL_MEMORY_RUN Buffer
, Entry
;
1103 /* Loop the descriptors in order to count them */
1105 NextMd
= LoaderBlock
->MemoryDescriptorListHead
.Flink
;
1106 while (NextMd
!= &LoaderBlock
->MemoryDescriptorListHead
)
1108 MdBlock
= CONTAINING_RECORD(NextMd
,
1109 MEMORY_ALLOCATION_DESCRIPTOR
,
1112 NextMd
= MdBlock
->ListEntry
.Flink
;
1115 /* Allocate a structure to hold the physical runs */
1116 Buffer
= ExAllocatePoolWithTag(NonPagedPool
,
1117 i
* sizeof(PHYSICAL_MEMORY_RUN
),
1119 ASSERT(Buffer
!= NULL
);
1122 /* Loop the descriptors again */
1123 NextMd
= LoaderBlock
->MemoryDescriptorListHead
.Flink
;
1124 while (NextMd
!= &LoaderBlock
->MemoryDescriptorListHead
)
1126 /* Check what kind this was */
1127 MdBlock
= CONTAINING_RECORD(NextMd
,
1128 MEMORY_ALLOCATION_DESCRIPTOR
,
1130 switch (MdBlock
->MemoryType
)
1132 /* Registry, NLS, and heap data */
1133 case LoaderRegistryData
:
1134 case LoaderOsloaderHeap
:
1136 /* Are all a candidate for deletion */
1137 Entry
->BasePage
= MdBlock
->BasePage
;
1138 Entry
->PageCount
= MdBlock
->PageCount
;
1141 /* We keep the rest */
1146 /* Move to the next descriptor */
1147 NextMd
= MdBlock
->ListEntry
.Flink
;
1150 /* Acquire the PFN lock */
1151 OldIrql
= MiAcquirePfnLock();
1155 while (--Entry
>= Buffer
)
1157 /* See how many pages are in this run */
1158 i
= Entry
->PageCount
;
1159 BasePage
= Entry
->BasePage
;
1161 /* Loop each page */
1162 Pfn1
= MiGetPfnEntry(BasePage
);
1165 /* Check if it has references or is in any kind of list */
1166 if (!(Pfn1
->u3
.e2
.ReferenceCount
) && (!Pfn1
->u1
.Flink
))
1168 /* Set the new PTE address and put this page into the free list */
1169 Pfn1
->PteAddress
= (PMMPTE
)(BasePage
<< PAGE_SHIFT
);
1170 MiInsertPageInFreeList(BasePage
);
1175 /* It has a reference, so simply drop it */
1176 ASSERT(MI_IS_PHYSICAL_ADDRESS(MiPteToAddress(Pfn1
->PteAddress
)) == FALSE
);
1178 /* Drop a dereference on this page, which should delete it */
1179 Pfn1
->PteAddress
->u
.Long
= 0;
1180 MI_SET_PFN_DELETED(Pfn1
);
1181 MiDecrementShareCount(Pfn1
, BasePage
);
1185 /* Move to the next page */
1191 /* Release the PFN lock and flush the TLB */
1192 DPRINT("Loader pages freed: %lx\n", LoaderPages
);
1193 MiReleasePfnLock(OldIrql
);
1196 /* Free our run structure */
1197 ExFreePoolWithTag(Buffer
, 'lMmM');
1203 MiAdjustWorkingSetManagerParameters(IN BOOLEAN Client
)
1205 /* This function needs to do more work, for now, we tune page minimums */
1207 /* Check for a system with around 64MB RAM or more */
1208 if (MmNumberOfPhysicalPages
>= (63 * _1MB
) / PAGE_SIZE
)
1210 /* Double the minimum amount of pages we consider for a "plenty free" scenario */
1211 MmPlentyFreePages
*= 2;
1218 MiNotifyMemoryEvents(VOID
)
1220 /* Are we in a low-memory situation? */
1221 if (MmAvailablePages
< MmLowMemoryThreshold
)
1223 /* Clear high, set low */
1224 if (KeReadStateEvent(MiHighMemoryEvent
)) KeClearEvent(MiHighMemoryEvent
);
1225 if (!KeReadStateEvent(MiLowMemoryEvent
)) KeSetEvent(MiLowMemoryEvent
, 0, FALSE
);
1227 else if (MmAvailablePages
< MmHighMemoryThreshold
)
1229 /* We are in between, clear both */
1230 if (KeReadStateEvent(MiHighMemoryEvent
)) KeClearEvent(MiHighMemoryEvent
);
1231 if (KeReadStateEvent(MiLowMemoryEvent
)) KeClearEvent(MiLowMemoryEvent
);
1235 /* Clear low, set high */
1236 if (KeReadStateEvent(MiLowMemoryEvent
)) KeClearEvent(MiLowMemoryEvent
);
1237 if (!KeReadStateEvent(MiHighMemoryEvent
)) KeSetEvent(MiHighMemoryEvent
, 0, FALSE
);
1244 MiCreateMemoryEvent(IN PUNICODE_STRING Name
,
1251 OBJECT_ATTRIBUTES ObjectAttributes
;
1252 SECURITY_DESCRIPTOR SecurityDescriptor
;
1255 Status
= RtlCreateSecurityDescriptor(&SecurityDescriptor
,
1256 SECURITY_DESCRIPTOR_REVISION
);
1257 if (!NT_SUCCESS(Status
)) return Status
;
1259 /* One ACL with 3 ACEs, containing each one SID */
1260 DaclLength
= sizeof(ACL
) +
1261 3 * sizeof(ACCESS_ALLOWED_ACE
) +
1262 RtlLengthSid(SeLocalSystemSid
) +
1263 RtlLengthSid(SeAliasAdminsSid
) +
1264 RtlLengthSid(SeWorldSid
);
1266 /* Allocate space for the DACL */
1267 Dacl
= ExAllocatePoolWithTag(PagedPool
, DaclLength
, 'lcaD');
1268 if (!Dacl
) return STATUS_INSUFFICIENT_RESOURCES
;
1270 /* Setup the ACL inside it */
1271 Status
= RtlCreateAcl(Dacl
, DaclLength
, ACL_REVISION
);
1272 if (!NT_SUCCESS(Status
)) goto CleanUp
;
1274 /* Add query rights for everyone */
1275 Status
= RtlAddAccessAllowedAce(Dacl
,
1277 SYNCHRONIZE
| EVENT_QUERY_STATE
| READ_CONTROL
,
1279 if (!NT_SUCCESS(Status
)) goto CleanUp
;
1281 /* Full rights for the admin */
1282 Status
= RtlAddAccessAllowedAce(Dacl
,
1286 if (!NT_SUCCESS(Status
)) goto CleanUp
;
1288 /* As well as full rights for the system */
1289 Status
= RtlAddAccessAllowedAce(Dacl
,
1293 if (!NT_SUCCESS(Status
)) goto CleanUp
;
1295 /* Set this DACL inside the SD */
1296 Status
= RtlSetDaclSecurityDescriptor(&SecurityDescriptor
,
1300 if (!NT_SUCCESS(Status
)) goto CleanUp
;
1302 /* Setup the event attributes, making sure it's a permanent one */
1303 InitializeObjectAttributes(&ObjectAttributes
,
1305 OBJ_KERNEL_HANDLE
| OBJ_PERMANENT
,
1307 &SecurityDescriptor
);
1309 /* Create the event */
1310 Status
= ZwCreateEvent(&EventHandle
,
1317 ExFreePoolWithTag(Dacl
, 'lcaD');
1319 /* Check if this is the success path */
1320 if (NT_SUCCESS(Status
))
1322 /* Add a reference to the object, then close the handle we had */
1323 Status
= ObReferenceObjectByHandle(EventHandle
,
1329 ZwClose (EventHandle
);
1339 MiInitializeMemoryEvents(VOID
)
1341 UNICODE_STRING LowString
= RTL_CONSTANT_STRING(L
"\\KernelObjects\\LowMemoryCondition");
1342 UNICODE_STRING HighString
= RTL_CONSTANT_STRING(L
"\\KernelObjects\\HighMemoryCondition");
1343 UNICODE_STRING LowPagedPoolString
= RTL_CONSTANT_STRING(L
"\\KernelObjects\\LowPagedPoolCondition");
1344 UNICODE_STRING HighPagedPoolString
= RTL_CONSTANT_STRING(L
"\\KernelObjects\\HighPagedPoolCondition");
1345 UNICODE_STRING LowNonPagedPoolString
= RTL_CONSTANT_STRING(L
"\\KernelObjects\\LowNonPagedPoolCondition");
1346 UNICODE_STRING HighNonPagedPoolString
= RTL_CONSTANT_STRING(L
"\\KernelObjects\\HighNonPagedPoolCondition");
1349 /* Check if we have a registry setting */
1350 if (MmLowMemoryThreshold
)
1352 /* Convert it to pages */
1353 MmLowMemoryThreshold
*= (_1MB
/ PAGE_SIZE
);
1357 /* The low memory threshold is hit when we don't consider that we have "plenty" of free pages anymore */
1358 MmLowMemoryThreshold
= MmPlentyFreePages
;
1360 /* More than one GB of memory? */
1361 if (MmNumberOfPhysicalPages
> 0x40000)
1363 /* Start at 32MB, and add another 16MB for each GB */
1364 MmLowMemoryThreshold
= (32 * _1MB
) / PAGE_SIZE
;
1365 MmLowMemoryThreshold
+= ((MmNumberOfPhysicalPages
- 0x40000) >> 7);
1367 else if (MmNumberOfPhysicalPages
> 0x8000)
1369 /* For systems with > 128MB RAM, add another 4MB for each 128MB */
1370 MmLowMemoryThreshold
+= ((MmNumberOfPhysicalPages
- 0x8000) >> 5);
1373 /* Don't let the minimum threshold go past 64MB */
1374 MmLowMemoryThreshold
= min(MmLowMemoryThreshold
, (64 * _1MB
) / PAGE_SIZE
);
1377 /* Check if we have a registry setting */
1378 if (MmHighMemoryThreshold
)
1380 /* Convert it into pages */
1381 MmHighMemoryThreshold
*= (_1MB
/ PAGE_SIZE
);
1385 /* Otherwise, the default is three times the low memory threshold */
1386 MmHighMemoryThreshold
= 3 * MmLowMemoryThreshold
;
1387 ASSERT(MmHighMemoryThreshold
> MmLowMemoryThreshold
);
1390 /* Make sure high threshold is actually higher than the low */
1391 MmHighMemoryThreshold
= max(MmHighMemoryThreshold
, MmLowMemoryThreshold
);
1393 /* Create the memory events for all the thresholds */
1394 Status
= MiCreateMemoryEvent(&LowString
, &MiLowMemoryEvent
);
1395 if (!NT_SUCCESS(Status
)) return FALSE
;
1396 Status
= MiCreateMemoryEvent(&HighString
, &MiHighMemoryEvent
);
1397 if (!NT_SUCCESS(Status
)) return FALSE
;
1398 Status
= MiCreateMemoryEvent(&LowPagedPoolString
, &MiLowPagedPoolEvent
);
1399 if (!NT_SUCCESS(Status
)) return FALSE
;
1400 Status
= MiCreateMemoryEvent(&HighPagedPoolString
, &MiHighPagedPoolEvent
);
1401 if (!NT_SUCCESS(Status
)) return FALSE
;
1402 Status
= MiCreateMemoryEvent(&LowNonPagedPoolString
, &MiLowNonPagedPoolEvent
);
1403 if (!NT_SUCCESS(Status
)) return FALSE
;
1404 Status
= MiCreateMemoryEvent(&HighNonPagedPoolString
, &MiHighNonPagedPoolEvent
);
1405 if (!NT_SUCCESS(Status
)) return FALSE
;
1407 /* Now setup the pool events */
1408 MiInitializePoolEvents();
1410 /* Set the initial event state */
1411 MiNotifyMemoryEvents();
1418 MiAddHalIoMappings(VOID
)
1421 PMMPDE PointerPde
, LastPde
;
1424 PFN_NUMBER PageFrameIndex
;
1426 /* HAL Heap address -- should be on a PDE boundary */
1427 BaseAddress
= (PVOID
)MM_HAL_VA_START
;
1428 ASSERT(MiAddressToPteOffset(BaseAddress
) == 0);
1430 /* Check how many PDEs the heap has */
1431 PointerPde
= MiAddressToPde(BaseAddress
);
1432 LastPde
= MiAddressToPde((PVOID
)MM_HAL_VA_END
);
1434 while (PointerPde
<= LastPde
)
1436 /* Does the HAL own this mapping? */
1437 if ((PointerPde
->u
.Hard
.Valid
== 1) &&
1438 (MI_IS_PAGE_LARGE(PointerPde
) == FALSE
))
1440 /* Get the PTE for it and scan each page */
1441 PointerPte
= MiAddressToPte(BaseAddress
);
1442 for (j
= 0 ; j
< PTE_COUNT
; j
++)
1444 /* Does the HAL own this page? */
1445 if (PointerPte
->u
.Hard
.Valid
== 1)
1447 /* Is the HAL using it for device or I/O mapped memory? */
1448 PageFrameIndex
= PFN_FROM_PTE(PointerPte
);
1449 if (!MiGetPfnEntry(PageFrameIndex
))
1451 /* FIXME: For PAT, we need to track I/O cache attributes for coherency */
1452 DPRINT1("HAL I/O Mapping at %p is unsafe\n", BaseAddress
);
1456 /* Move to the next page */
1457 BaseAddress
= (PVOID
)((ULONG_PTR
)BaseAddress
+ PAGE_SIZE
);
1463 /* Move to the next address */
1464 BaseAddress
= (PVOID
)((ULONG_PTR
)BaseAddress
+ PDE_MAPPED_VA
);
1467 /* Move to the next PDE */
1474 MmDumpArmPfnDatabase(IN BOOLEAN StatusOnly
)
1478 PCHAR Consumer
= "Unknown";
1480 ULONG ActivePages
= 0, FreePages
= 0, OtherPages
= 0;
1482 ULONG UsageBucket
[MI_USAGE_FREE_PAGE
+ 1] = {0};
1483 PCHAR MI_USAGE_TEXT
[MI_USAGE_FREE_PAGE
+ 1] =
1511 // Loop the PFN database
1513 KeRaiseIrql(HIGH_LEVEL
, &OldIrql
);
1514 for (i
= 0; i
<= MmHighestPhysicalPage
; i
++)
1516 Pfn1
= MiGetPfnEntry(i
);
1517 if (!Pfn1
) continue;
1519 ASSERT(Pfn1
->PfnUsage
<= MI_USAGE_FREE_PAGE
);
1522 // Get the page location
1524 switch (Pfn1
->u3
.e1
.PageLocation
)
1526 case ActiveAndValid
:
1528 Consumer
= "Active and Valid";
1532 case ZeroedPageList
:
1534 Consumer
= "Zero Page List";
1540 Consumer
= "Free Page List";
1546 Consumer
= "Other (ASSERT!)";
1552 /* Add into bucket */
1553 UsageBucket
[Pfn1
->PfnUsage
]++;
1557 // Pretty-print the page
1560 DbgPrint("0x%08p:\t%20s\t(%04d.%04d)\t[%16s - %16s])\n",
1563 Pfn1
->u3
.e2
.ReferenceCount
,
1564 Pfn1
->u2
.ShareCount
== LIST_HEAD
? 0xFFFF : Pfn1
->u2
.ShareCount
,
1566 MI_USAGE_TEXT
[Pfn1
->PfnUsage
],
1574 DbgPrint("Active: %5d pages\t[%6d KB]\n", ActivePages
, (ActivePages
<< PAGE_SHIFT
) / 1024);
1575 DbgPrint("Free: %5d pages\t[%6d KB]\n", FreePages
, (FreePages
<< PAGE_SHIFT
) / 1024);
1576 DbgPrint("Other: %5d pages\t[%6d KB]\n", OtherPages
, (OtherPages
<< PAGE_SHIFT
) / 1024);
1577 DbgPrint("-----------------------------------------\n");
1579 OtherPages
= UsageBucket
[MI_USAGE_BOOT_DRIVER
];
1580 DbgPrint("Boot Images: %5d pages\t[%6d KB]\n", OtherPages
, (OtherPages
<< PAGE_SHIFT
) / 1024);
1581 OtherPages
= UsageBucket
[MI_USAGE_DRIVER_PAGE
];
1582 DbgPrint("System Drivers: %5d pages\t[%6d KB]\n", OtherPages
, (OtherPages
<< PAGE_SHIFT
) / 1024);
1583 OtherPages
= UsageBucket
[MI_USAGE_PFN_DATABASE
];
1584 DbgPrint("PFN Database: %5d pages\t[%6d KB]\n", OtherPages
, (OtherPages
<< PAGE_SHIFT
) / 1024);
1585 OtherPages
= UsageBucket
[MI_USAGE_PAGE_TABLE
] + UsageBucket
[MI_USAGE_PAGE_DIRECTORY
] + UsageBucket
[MI_USAGE_LEGACY_PAGE_DIRECTORY
];
1586 DbgPrint("Page Tables: %5d pages\t[%6d KB]\n", OtherPages
, (OtherPages
<< PAGE_SHIFT
) / 1024);
1587 OtherPages
= UsageBucket
[MI_USAGE_SYSTEM_PTE
];
1588 DbgPrint("System PTEs: %5d pages\t[%6d KB]\n", OtherPages
, (OtherPages
<< PAGE_SHIFT
) / 1024);
1589 OtherPages
= UsageBucket
[MI_USAGE_VAD
];
1590 DbgPrint("VADs: %5d pages\t[%6d KB]\n", OtherPages
, (OtherPages
<< PAGE_SHIFT
) / 1024);
1591 OtherPages
= UsageBucket
[MI_USAGE_CONTINOUS_ALLOCATION
];
1592 DbgPrint("Continuous Allocs: %5d pages\t[%6d KB]\n", OtherPages
, (OtherPages
<< PAGE_SHIFT
) / 1024);
1593 OtherPages
= UsageBucket
[MI_USAGE_MDL
];
1594 DbgPrint("MDLs: %5d pages\t[%6d KB]\n", OtherPages
, (OtherPages
<< PAGE_SHIFT
) / 1024);
1595 OtherPages
= UsageBucket
[MI_USAGE_NONPAGED_POOL
] + UsageBucket
[MI_USAGE_NONPAGED_POOL_EXPANSION
];
1596 DbgPrint("NonPaged Pool: %5d pages\t[%6d KB]\n", OtherPages
, (OtherPages
<< PAGE_SHIFT
) / 1024);
1597 OtherPages
= UsageBucket
[MI_USAGE_PAGED_POOL
];
1598 DbgPrint("Paged Pool: %5d pages\t[%6d KB]\n", OtherPages
, (OtherPages
<< PAGE_SHIFT
) / 1024);
1599 OtherPages
= UsageBucket
[MI_USAGE_DEMAND_ZERO
];
1600 DbgPrint("Demand Zero: %5d pages\t[%6d KB]\n", OtherPages
, (OtherPages
<< PAGE_SHIFT
) / 1024);
1601 OtherPages
= UsageBucket
[MI_USAGE_ZERO_LOOP
];
1602 DbgPrint("Zero Loop: %5d pages\t[%6d KB]\n", OtherPages
, (OtherPages
<< PAGE_SHIFT
) / 1024);
1603 OtherPages
= UsageBucket
[MI_USAGE_PEB_TEB
];
1604 DbgPrint("PEB/TEB: %5d pages\t[%6d KB]\n", OtherPages
, (OtherPages
<< PAGE_SHIFT
) / 1024);
1605 OtherPages
= UsageBucket
[MI_USAGE_KERNEL_STACK
] + UsageBucket
[MI_USAGE_KERNEL_STACK_EXPANSION
];
1606 DbgPrint("Kernel Stack: %5d pages\t[%6d KB]\n", OtherPages
, (OtherPages
<< PAGE_SHIFT
) / 1024);
1607 OtherPages
= UsageBucket
[MI_USAGE_INIT_MEMORY
];
1608 DbgPrint("Init Memory: %5d pages\t[%6d KB]\n", OtherPages
, (OtherPages
<< PAGE_SHIFT
) / 1024);
1609 OtherPages
= UsageBucket
[MI_USAGE_SECTION
];
1610 DbgPrint("Sections: %5d pages\t[%6d KB]\n", OtherPages
, (OtherPages
<< PAGE_SHIFT
) / 1024);
1611 OtherPages
= UsageBucket
[MI_USAGE_CACHE
];
1612 DbgPrint("Cache: %5d pages\t[%6d KB]\n", OtherPages
, (OtherPages
<< PAGE_SHIFT
) / 1024);
1613 OtherPages
= UsageBucket
[MI_USAGE_FREE_PAGE
];
1614 DbgPrint("Free: %5d pages\t[%6d KB]\n", OtherPages
, (OtherPages
<< PAGE_SHIFT
) / 1024);
1616 KeLowerIrql(OldIrql
);
1620 PPHYSICAL_MEMORY_DESCRIPTOR
1622 MmInitializeMemoryLimits(IN PLOADER_PARAMETER_BLOCK LoaderBlock
,
1623 IN PBOOLEAN IncludeType
)
1625 PLIST_ENTRY NextEntry
;
1626 ULONG Run
= 0, InitialRuns
;
1627 PFN_NUMBER NextPage
= -1, PageCount
= 0;
1628 PPHYSICAL_MEMORY_DESCRIPTOR Buffer
, NewBuffer
;
1629 PMEMORY_ALLOCATION_DESCRIPTOR MdBlock
;
1632 // Start with the maximum we might need
1634 InitialRuns
= MiNumberDescriptors
;
1637 // Allocate the maximum we'll ever need
1639 Buffer
= ExAllocatePoolWithTag(NonPagedPool
,
1640 sizeof(PHYSICAL_MEMORY_DESCRIPTOR
) +
1641 sizeof(PHYSICAL_MEMORY_RUN
) *
1644 if (!Buffer
) return NULL
;
1647 // For now that's how many runs we have
1649 Buffer
->NumberOfRuns
= InitialRuns
;
1652 // Now loop through the descriptors again
1654 NextEntry
= LoaderBlock
->MemoryDescriptorListHead
.Flink
;
1655 while (NextEntry
!= &LoaderBlock
->MemoryDescriptorListHead
)
1658 // Grab each one, and check if it's one we should include
1660 MdBlock
= CONTAINING_RECORD(NextEntry
,
1661 MEMORY_ALLOCATION_DESCRIPTOR
,
1663 if ((MdBlock
->MemoryType
< LoaderMaximum
) &&
1664 (IncludeType
[MdBlock
->MemoryType
]))
1667 // Add this to our running total
1669 PageCount
+= MdBlock
->PageCount
;
1672 // Check if the next page is described by the next descriptor
1674 if (MdBlock
->BasePage
== NextPage
)
1677 // Combine it into the same physical run
1679 ASSERT(MdBlock
->PageCount
!= 0);
1680 Buffer
->Run
[Run
- 1].PageCount
+= MdBlock
->PageCount
;
1681 NextPage
+= MdBlock
->PageCount
;
1686 // Otherwise just duplicate the descriptor's contents
1688 Buffer
->Run
[Run
].BasePage
= MdBlock
->BasePage
;
1689 Buffer
->Run
[Run
].PageCount
= MdBlock
->PageCount
;
1690 NextPage
= Buffer
->Run
[Run
].BasePage
+ Buffer
->Run
[Run
].PageCount
;
1693 // And in this case, increase the number of runs
1700 // Try the next descriptor
1702 NextEntry
= MdBlock
->ListEntry
.Flink
;
1706 // We should not have been able to go past our initial estimate
1708 ASSERT(Run
<= Buffer
->NumberOfRuns
);
1711 // Our guess was probably exaggerated...
1713 if (InitialRuns
> Run
)
1716 // Allocate a more accurately sized buffer
1718 NewBuffer
= ExAllocatePoolWithTag(NonPagedPool
,
1719 sizeof(PHYSICAL_MEMORY_DESCRIPTOR
) +
1720 sizeof(PHYSICAL_MEMORY_RUN
) *
1726 // Copy the old buffer into the new, then free it
1728 RtlCopyMemory(NewBuffer
->Run
,
1730 sizeof(PHYSICAL_MEMORY_RUN
) * Run
);
1731 ExFreePoolWithTag(Buffer
, 'lMmM');
1734 // Now use the new buffer
1741 // Write the final numbers, and return it
1743 Buffer
->NumberOfRuns
= Run
;
1744 Buffer
->NumberOfPages
= PageCount
;
1751 MiBuildPagedPool(VOID
)
1755 MMPDE TempPde
= ValidKernelPde
;
1756 PFN_NUMBER PageFrameIndex
;
1760 #if (_MI_PAGING_LEVELS >= 3)
1761 MMPPE TempPpe
= ValidKernelPpe
;
1763 #elif (_MI_PAGING_LEVELS == 2)
1764 MMPTE TempPte
= ValidKernelPte
;
1767 // Get the page frame number for the system page directory
1769 PointerPte
= MiAddressToPte(PDE_BASE
);
1770 ASSERT(PD_COUNT
== 1);
1771 MmSystemPageDirectory
[0] = PFN_FROM_PTE(PointerPte
);
1774 // Allocate a system PTE which will hold a copy of the page directory
1776 PointerPte
= MiReserveSystemPtes(1, SystemPteSpace
);
1778 MmSystemPagePtes
= MiPteToAddress(PointerPte
);
1781 // Make this system PTE point to the system page directory.
1782 // It is now essentially double-mapped. This will be used later for lazy
1783 // evaluation of PDEs accross process switches, similarly to how the Global
1784 // page directory array in the old ReactOS Mm is used (but in a less hacky
1787 TempPte
= ValidKernelPte
;
1788 ASSERT(PD_COUNT
== 1);
1789 TempPte
.u
.Hard
.PageFrameNumber
= MmSystemPageDirectory
[0];
1790 MI_WRITE_VALID_PTE(PointerPte
, TempPte
);
1795 // Let's get back to paged pool work: size it up.
1796 // By default, it should be twice as big as nonpaged pool.
1798 MmSizeOfPagedPoolInBytes
= 2 * MmMaximumNonPagedPoolInBytes
;
1799 if (MmSizeOfPagedPoolInBytes
> ((ULONG_PTR
)MmNonPagedSystemStart
-
1800 (ULONG_PTR
)MmPagedPoolStart
))
1803 // On the other hand, we have limited VA space, so make sure that the VA
1804 // for paged pool doesn't overflow into nonpaged pool VA. Otherwise, set
1805 // whatever maximum is possible.
1807 MmSizeOfPagedPoolInBytes
= (ULONG_PTR
)MmNonPagedSystemStart
-
1808 (ULONG_PTR
)MmPagedPoolStart
;
1813 // Get the size in pages and make sure paged pool is at least 32MB.
1815 Size
= MmSizeOfPagedPoolInBytes
;
1816 if (Size
< MI_MIN_INIT_PAGED_POOLSIZE
) Size
= MI_MIN_INIT_PAGED_POOLSIZE
;
1817 Size
= BYTES_TO_PAGES(Size
);
1820 // Now check how many PTEs will be required for these many pages.
1822 Size
= (Size
+ (1024 - 1)) / 1024;
1825 // Recompute the page-aligned size of the paged pool, in bytes and pages.
1827 MmSizeOfPagedPoolInBytes
= Size
* PAGE_SIZE
* 1024;
1828 MmSizeOfPagedPoolInPages
= MmSizeOfPagedPoolInBytes
>> PAGE_SHIFT
;
1832 // Let's be really sure this doesn't overflow into nonpaged system VA
1834 ASSERT((MmSizeOfPagedPoolInBytes
+ (ULONG_PTR
)MmPagedPoolStart
) <=
1835 (ULONG_PTR
)MmNonPagedSystemStart
);
1839 // This is where paged pool ends
1841 MmPagedPoolEnd
= (PVOID
)(((ULONG_PTR
)MmPagedPoolStart
+
1842 MmSizeOfPagedPoolInBytes
) - 1);
1845 // Lock the PFN database
1847 OldIrql
= MiAcquirePfnLock();
1849 #if (_MI_PAGING_LEVELS >= 3)
1850 /* On these systems, there's no double-mapping, so instead, the PPEs
1851 * are setup to span the entire paged pool area, so there's no need for the
1853 for (PointerPpe
= MiAddressToPpe(MmPagedPoolStart
);
1854 PointerPpe
<= MiAddressToPpe(MmPagedPoolEnd
);
1857 /* Check if the PPE is already valid */
1858 if (!PointerPpe
->u
.Hard
.Valid
)
1860 /* It is not, so map a fresh zeroed page */
1861 TempPpe
.u
.Hard
.PageFrameNumber
= MiRemoveZeroPage(0);
1862 MI_WRITE_VALID_PPE(PointerPpe
, TempPpe
);
1868 // So now get the PDE for paged pool and zero it out
1870 PointerPde
= MiAddressToPde(MmPagedPoolStart
);
1871 RtlZeroMemory(PointerPde
,
1872 (1 + MiAddressToPde(MmPagedPoolEnd
) - PointerPde
) * sizeof(MMPDE
));
1875 // Next, get the first and last PTE
1877 PointerPte
= MiAddressToPte(MmPagedPoolStart
);
1878 MmPagedPoolInfo
.FirstPteForPagedPool
= PointerPte
;
1879 MmPagedPoolInfo
.LastPteForPagedPool
= MiAddressToPte(MmPagedPoolEnd
);
1881 /* Allocate a page and map the first paged pool PDE */
1882 MI_SET_USAGE(MI_USAGE_PAGED_POOL
);
1883 MI_SET_PROCESS2("Kernel");
1884 PageFrameIndex
= MiRemoveZeroPage(0);
1885 TempPde
.u
.Hard
.PageFrameNumber
= PageFrameIndex
;
1886 MI_WRITE_VALID_PDE(PointerPde
, TempPde
);
1887 #if (_MI_PAGING_LEVELS >= 3)
1888 /* Use the PPE of MmPagedPoolStart that was setup above */
1889 // Bla = PFN_FROM_PTE(PpeAddress(MmPagedPool...));
1891 /* Initialize the PFN entry for it */
1892 MiInitializePfnForOtherProcess(PageFrameIndex
,
1894 PFN_FROM_PTE(MiAddressToPpe(MmPagedPoolStart
)));
1896 /* Do it this way */
1897 // Bla = MmSystemPageDirectory[(PointerPde - (PMMPTE)PDE_BASE) / PDE_COUNT]
1899 /* Initialize the PFN entry for it */
1900 MiInitializePfnForOtherProcess(PageFrameIndex
,
1902 MmSystemPageDirectory
[(PointerPde
- (PMMPDE
)PDE_BASE
) / PDE_COUNT
]);
1906 // Release the PFN database lock
1908 MiReleasePfnLock(OldIrql
);
1911 // We only have one PDE mapped for now... at fault time, additional PDEs
1912 // will be allocated to handle paged pool growth. This is where they'll have
1915 MmPagedPoolInfo
.NextPdeForPagedPoolExpansion
= PointerPde
+ 1;
1918 // We keep track of each page via a bit, so check how big the bitmap will
1919 // have to be (make sure to align our page count such that it fits nicely
1920 // into a 4-byte aligned bitmap.
1922 // We'll also allocate the bitmap header itself part of the same buffer.
1925 ASSERT(Size
== MmSizeOfPagedPoolInPages
);
1926 BitMapSize
= (ULONG
)Size
;
1927 Size
= sizeof(RTL_BITMAP
) + (((Size
+ 31) / 32) * sizeof(ULONG
));
1930 // Allocate the allocation bitmap, which tells us which regions have not yet
1931 // been mapped into memory
1933 MmPagedPoolInfo
.PagedPoolAllocationMap
= ExAllocatePoolWithTag(NonPagedPool
,
1936 ASSERT(MmPagedPoolInfo
.PagedPoolAllocationMap
);
1939 // Initialize it such that at first, only the first page's worth of PTEs is
1940 // marked as allocated (incidentially, the first PDE we allocated earlier).
1942 RtlInitializeBitMap(MmPagedPoolInfo
.PagedPoolAllocationMap
,
1943 (PULONG
)(MmPagedPoolInfo
.PagedPoolAllocationMap
+ 1),
1945 RtlSetAllBits(MmPagedPoolInfo
.PagedPoolAllocationMap
);
1946 RtlClearBits(MmPagedPoolInfo
.PagedPoolAllocationMap
, 0, 1024);
1949 // We have a second bitmap, which keeps track of where allocations end.
1950 // Given the allocation bitmap and a base address, we can therefore figure
1951 // out which page is the last page of that allocation, and thus how big the
1952 // entire allocation is.
1954 MmPagedPoolInfo
.EndOfPagedPoolBitmap
= ExAllocatePoolWithTag(NonPagedPool
,
1957 ASSERT(MmPagedPoolInfo
.EndOfPagedPoolBitmap
);
1958 RtlInitializeBitMap(MmPagedPoolInfo
.EndOfPagedPoolBitmap
,
1959 (PULONG
)(MmPagedPoolInfo
.EndOfPagedPoolBitmap
+ 1),
1963 // Since no allocations have been made yet, there are no bits set as the end
1965 RtlClearAllBits(MmPagedPoolInfo
.EndOfPagedPoolBitmap
);
1968 // Initialize paged pool.
1970 InitializePool(PagedPool
, 0);
1972 /* Initialize special pool */
1973 MiInitializeSpecialPool();
1975 /* Default low threshold of 30MB or one fifth of paged pool */
1976 MiLowPagedPoolThreshold
= (30 * _1MB
) >> PAGE_SHIFT
;
1977 MiLowPagedPoolThreshold
= min(MiLowPagedPoolThreshold
, Size
/ 5);
1979 /* Default high threshold of 60MB or 25% */
1980 MiHighPagedPoolThreshold
= (60 * _1MB
) >> PAGE_SHIFT
;
1981 MiHighPagedPoolThreshold
= min(MiHighPagedPoolThreshold
, (Size
* 2) / 5);
1982 ASSERT(MiLowPagedPoolThreshold
< MiHighPagedPoolThreshold
);
1984 /* Setup the global session space */
1985 MiInitializeSystemSpaceMap(NULL
);
1991 MiDbgDumpMemoryDescriptors(VOID
)
1993 PLIST_ENTRY NextEntry
;
1994 PMEMORY_ALLOCATION_DESCRIPTOR Md
;
1995 PFN_NUMBER TotalPages
= 0;
2004 "FirmwareTemporary ",
2005 "FirmwarePermanent ",
2012 "ConsoleOutDriver ",
2014 "StartupKernelStack",
2015 "StartupPanicStack ",
2027 DPRINT1("Base\t\tLength\t\tType\n");
2028 for (NextEntry
= KeLoaderBlock
->MemoryDescriptorListHead
.Flink
;
2029 NextEntry
!= &KeLoaderBlock
->MemoryDescriptorListHead
;
2030 NextEntry
= NextEntry
->Flink
)
2032 Md
= CONTAINING_RECORD(NextEntry
, MEMORY_ALLOCATION_DESCRIPTOR
, ListEntry
);
2033 DPRINT1("%08lX\t%08lX\t%s\n", Md
->BasePage
, Md
->PageCount
, MemType
[Md
->MemoryType
]);
2034 TotalPages
+= Md
->PageCount
;
2037 DPRINT1("Total: %08lX (%lu MB)\n", (ULONG
)TotalPages
, (ULONG
)(TotalPages
* PAGE_SIZE
) / 1024 / 1024);
2043 MmArmInitSystem(IN ULONG Phase
,
2044 IN PLOADER_PARAMETER_BLOCK LoaderBlock
)
2047 BOOLEAN IncludeType
[LoaderMaximum
];
2049 PPHYSICAL_MEMORY_RUN Run
;
2050 PFN_NUMBER PageCount
;
2053 PMMPTE PointerPte
, TestPte
;
2057 /* Dump memory descriptors */
2058 if (MiDbgEnableMdDump
) MiDbgDumpMemoryDescriptors();
2061 // Instantiate memory that we don't consider RAM/usable
2062 // We use the same exclusions that Windows does, in order to try to be
2063 // compatible with WinLDR-style booting
2065 for (i
= 0; i
< LoaderMaximum
; i
++) IncludeType
[i
] = TRUE
;
2066 IncludeType
[LoaderBad
] = FALSE
;
2067 IncludeType
[LoaderFirmwarePermanent
] = FALSE
;
2068 IncludeType
[LoaderSpecialMemory
] = FALSE
;
2069 IncludeType
[LoaderBBTMemory
] = FALSE
;
2072 /* Count physical pages on the system */
2073 MiScanMemoryDescriptors(LoaderBlock
);
2075 /* Initialize the phase 0 temporary event */
2076 KeInitializeEvent(&MiTempEvent
, NotificationEvent
, FALSE
);
2078 /* Set all the events to use the temporary event for now */
2079 MiLowMemoryEvent
= &MiTempEvent
;
2080 MiHighMemoryEvent
= &MiTempEvent
;
2081 MiLowPagedPoolEvent
= &MiTempEvent
;
2082 MiHighPagedPoolEvent
= &MiTempEvent
;
2083 MiLowNonPagedPoolEvent
= &MiTempEvent
;
2084 MiHighNonPagedPoolEvent
= &MiTempEvent
;
2087 // Default throttling limits for Cc
2088 // May be ajusted later on depending on system type
2090 MmThrottleTop
= 450;
2091 MmThrottleBottom
= 127;
2094 // Define the basic user vs. kernel address space separation
2096 MmSystemRangeStart
= (PVOID
)MI_DEFAULT_SYSTEM_RANGE_START
;
2097 MmUserProbeAddress
= (ULONG_PTR
)MI_USER_PROBE_ADDRESS
;
2098 MmHighestUserAddress
= (PVOID
)MI_HIGHEST_USER_ADDRESS
;
2100 /* Highest PTE and PDE based on the addresses above */
2101 MiHighestUserPte
= MiAddressToPte(MmHighestUserAddress
);
2102 MiHighestUserPde
= MiAddressToPde(MmHighestUserAddress
);
2103 #if (_MI_PAGING_LEVELS >= 3)
2104 MiHighestUserPpe
= MiAddressToPpe(MmHighestUserAddress
);
2105 #if (_MI_PAGING_LEVELS >= 4)
2106 MiHighestUserPxe
= MiAddressToPxe(MmHighestUserAddress
);
2110 // Get the size of the boot loader's image allocations and then round
2111 // that region up to a PDE size, so that any PDEs we might create for
2112 // whatever follows are separate from the PDEs that boot loader might've
2113 // already created (and later, we can blow all that away if we want to).
2115 MmBootImageSize
= KeLoaderBlock
->Extension
->LoaderPagesSpanned
;
2116 MmBootImageSize
*= PAGE_SIZE
;
2117 MmBootImageSize
= (MmBootImageSize
+ PDE_MAPPED_VA
- 1) & ~(PDE_MAPPED_VA
- 1);
2118 ASSERT((MmBootImageSize
% PDE_MAPPED_VA
) == 0);
2120 /* Initialize session space address layout */
2121 MiInitializeSessionSpaceLayout();
2123 /* Set the based section highest address */
2124 MmHighSectionBase
= (PVOID
)((ULONG_PTR
)MmHighestUserAddress
- 0x800000);
2127 /* The subection PTE format depends on things being 8-byte aligned */
2128 ASSERT((sizeof(CONTROL_AREA
) % 8) == 0);
2129 ASSERT((sizeof(SUBSECTION
) % 8) == 0);
2131 /* Prototype PTEs are assumed to be in paged pool, so check if the math works */
2132 PointerPte
= (PMMPTE
)MmPagedPoolStart
;
2133 MI_MAKE_PROTOTYPE_PTE(&TempPte
, PointerPte
);
2134 TestPte
= MiProtoPteToPte(&TempPte
);
2135 ASSERT(PointerPte
== TestPte
);
2137 /* Try the last nonpaged pool address */
2138 PointerPte
= (PMMPTE
)MI_NONPAGED_POOL_END
;
2139 MI_MAKE_PROTOTYPE_PTE(&TempPte
, PointerPte
);
2140 TestPte
= MiProtoPteToPte(&TempPte
);
2141 ASSERT(PointerPte
== TestPte
);
2143 /* Try a bunch of random addresses near the end of the address space */
2144 PointerPte
= (PMMPTE
)((ULONG_PTR
)MI_HIGHEST_SYSTEM_ADDRESS
- 0x37FFF);
2145 for (j
= 0; j
< 20; j
+= 1)
2147 MI_MAKE_PROTOTYPE_PTE(&TempPte
, PointerPte
);
2148 TestPte
= MiProtoPteToPte(&TempPte
);
2149 ASSERT(PointerPte
== TestPte
);
2153 /* Subsection PTEs are always in nonpaged pool, pick a random address to try */
2154 PointerPte
= (PMMPTE
)((ULONG_PTR
)MmNonPagedPoolStart
+ (MmSizeOfNonPagedPoolInBytes
/ 2));
2155 MI_MAKE_SUBSECTION_PTE(&TempPte
, PointerPte
);
2156 TestPte
= MiSubsectionPteToSubsection(&TempPte
);
2157 ASSERT(PointerPte
== TestPte
);
2160 /* Loop all 8 standby lists */
2161 for (i
= 0; i
< 8; i
++)
2163 /* Initialize them */
2164 MmStandbyPageListByPriority
[i
].Total
= 0;
2165 MmStandbyPageListByPriority
[i
].ListName
= StandbyPageList
;
2166 MmStandbyPageListByPriority
[i
].Flink
= MM_EMPTY_LIST
;
2167 MmStandbyPageListByPriority
[i
].Blink
= MM_EMPTY_LIST
;
2170 /* Initialize the user mode image list */
2171 InitializeListHead(&MmLoadedUserImageList
);
2173 /* Initialize critical section timeout value (relative time is negative) */
2174 MmCriticalSectionTimeout
.QuadPart
= MmCritsectTimeoutSeconds
* (-10000000LL);
2176 /* Initialize the paged pool mutex and the section commit mutex */
2177 KeInitializeGuardedMutex(&MmPagedPoolMutex
);
2178 KeInitializeGuardedMutex(&MmSectionCommitMutex
);
2179 KeInitializeGuardedMutex(&MmSectionBasedMutex
);
2181 /* Initialize the Loader Lock */
2182 KeInitializeMutant(&MmSystemLoadLock
, FALSE
);
2184 /* Set the zero page event */
2185 KeInitializeEvent(&MmZeroingPageEvent
, SynchronizationEvent
, FALSE
);
2186 MmZeroingPageThreadActive
= FALSE
;
2188 /* Initialize the dead stack S-LIST */
2189 InitializeSListHead(&MmDeadStackSListHead
);
2192 // Check if this is a machine with less than 19MB of RAM
2194 PageCount
= MmNumberOfPhysicalPages
;
2195 if (PageCount
< MI_MIN_PAGES_FOR_SYSPTE_TUNING
)
2198 // Use the very minimum of system PTEs
2200 MmNumberOfSystemPtes
= 7000;
2207 MmNumberOfSystemPtes
= 11000;
2208 if (PageCount
> MI_MIN_PAGES_FOR_SYSPTE_BOOST
)
2211 // Double the amount of system PTEs
2213 MmNumberOfSystemPtes
<<= 1;
2215 if (PageCount
> MI_MIN_PAGES_FOR_SYSPTE_BOOST_BOOST
)
2218 // Double the amount of system PTEs
2220 MmNumberOfSystemPtes
<<= 1;
2222 if (MmSpecialPoolTag
!= 0 && MmSpecialPoolTag
!= -1)
2225 // Add some extra PTEs for special pool
2227 MmNumberOfSystemPtes
+= 0x6000;
2231 DPRINT("System PTE count has been tuned to %lu (%lu bytes)\n",
2232 MmNumberOfSystemPtes
, MmNumberOfSystemPtes
* PAGE_SIZE
);
2234 /* Check if no values are set for the heap limits */
2235 if (MmHeapSegmentReserve
== 0)
2237 MmHeapSegmentReserve
= 2 * _1MB
;
2240 if (MmHeapSegmentCommit
== 0)
2242 MmHeapSegmentCommit
= 2 * PAGE_SIZE
;
2245 if (MmHeapDeCommitTotalFreeThreshold
== 0)
2247 MmHeapDeCommitTotalFreeThreshold
= 64 * _1KB
;
2250 if (MmHeapDeCommitFreeBlockThreshold
== 0)
2252 MmHeapDeCommitFreeBlockThreshold
= PAGE_SIZE
;
2255 /* Initialize the working set lock */
2256 ExInitializePushLock(&MmSystemCacheWs
.WorkingSetMutex
);
2258 /* Set commit limit */
2259 MmTotalCommitLimit
= (2 * _1GB
) >> PAGE_SHIFT
;
2260 MmTotalCommitLimitMaximum
= MmTotalCommitLimit
;
2262 /* Has the allocation fragment been setup? */
2263 if (!MmAllocationFragment
)
2265 /* Use the default value */
2266 MmAllocationFragment
= MI_ALLOCATION_FRAGMENT
;
2267 if (PageCount
< ((256 * _1MB
) / PAGE_SIZE
))
2269 /* On memory systems with less than 256MB, divide by 4 */
2270 MmAllocationFragment
= MI_ALLOCATION_FRAGMENT
/ 4;
2272 else if (PageCount
< (_1GB
/ PAGE_SIZE
))
2274 /* On systems with less than 1GB, divide by 2 */
2275 MmAllocationFragment
= MI_ALLOCATION_FRAGMENT
/ 2;
2280 /* Convert from 1KB fragments to pages */
2281 MmAllocationFragment
*= _1KB
;
2282 MmAllocationFragment
= ROUND_TO_PAGES(MmAllocationFragment
);
2284 /* Don't let it past the maximum */
2285 MmAllocationFragment
= min(MmAllocationFragment
,
2286 MI_MAX_ALLOCATION_FRAGMENT
);
2288 /* Don't let it too small either */
2289 MmAllocationFragment
= max(MmAllocationFragment
,
2290 MI_MIN_ALLOCATION_FRAGMENT
);
2293 /* Check for kernel stack size that's too big */
2294 if (MmLargeStackSize
> (KERNEL_LARGE_STACK_SIZE
/ _1KB
))
2296 /* Sanitize to default value */
2297 MmLargeStackSize
= KERNEL_LARGE_STACK_SIZE
;
2301 /* Take the registry setting, and convert it into bytes */
2302 MmLargeStackSize
*= _1KB
;
2304 /* Now align it to a page boundary */
2305 MmLargeStackSize
= PAGE_ROUND_UP(MmLargeStackSize
);
2308 ASSERT(MmLargeStackSize
<= KERNEL_LARGE_STACK_SIZE
);
2309 ASSERT((MmLargeStackSize
& (PAGE_SIZE
- 1)) == 0);
2311 /* Make sure it's not too low */
2312 if (MmLargeStackSize
< KERNEL_STACK_SIZE
) MmLargeStackSize
= KERNEL_STACK_SIZE
;
2315 /* Compute color information (L2 cache-separated paging lists) */
2316 MiComputeColorInformation();
2318 // Calculate the number of bytes for the PFN database
2319 // then add the color tables and convert to pages
2320 MxPfnAllocation
= (MmHighestPhysicalPage
+ 1) * sizeof(MMPFN
);
2321 MxPfnAllocation
+= (MmSecondaryColors
* sizeof(MMCOLOR_TABLES
) * 2);
2322 MxPfnAllocation
>>= PAGE_SHIFT
;
2324 // We have to add one to the count here, because in the process of
2325 // shifting down to the page size, we actually ended up getting the
2326 // lower aligned size (so say, 0x5FFFF bytes is now 0x5F pages).
2327 // Later on, we'll shift this number back into bytes, which would cause
2328 // us to end up with only 0x5F000 bytes -- when we actually want to have
2332 /* Initialize the platform-specific parts */
2333 MiInitMachineDependent(LoaderBlock
);
2336 // Build the physical memory block
2338 MmPhysicalMemoryBlock
= MmInitializeMemoryLimits(LoaderBlock
,
2342 // Allocate enough buffer for the PFN bitmap
2343 // Align it up to a 32-bit boundary
2345 Bitmap
= ExAllocatePoolWithTag(NonPagedPool
,
2346 (((MmHighestPhysicalPage
+ 1) + 31) / 32) * 4,
2353 KeBugCheckEx(INSTALL_MORE_MEMORY
,
2354 MmNumberOfPhysicalPages
,
2355 MmLowestPhysicalPage
,
2356 MmHighestPhysicalPage
,
2361 // Initialize it and clear all the bits to begin with
2363 RtlInitializeBitMap(&MiPfnBitMap
,
2365 (ULONG
)MmHighestPhysicalPage
+ 1);
2366 RtlClearAllBits(&MiPfnBitMap
);
2369 // Loop physical memory runs
2371 for (i
= 0; i
< MmPhysicalMemoryBlock
->NumberOfRuns
; i
++)
2376 Run
= &MmPhysicalMemoryBlock
->Run
[i
];
2377 DPRINT("PHYSICAL RAM [0x%08p to 0x%08p]\n",
2378 Run
->BasePage
<< PAGE_SHIFT
,
2379 (Run
->BasePage
+ Run
->PageCount
) << PAGE_SHIFT
);
2382 // Make sure it has pages inside it
2387 // Set the bits in the PFN bitmap
2389 RtlSetBits(&MiPfnBitMap
, (ULONG
)Run
->BasePage
, (ULONG
)Run
->PageCount
);
2393 /* Look for large page cache entries that need caching */
2394 MiSyncCachedRanges();
2396 /* Loop for HAL Heap I/O device mappings that need coherency tracking */
2397 MiAddHalIoMappings();
2399 /* Set the initial resident page count */
2400 MmResidentAvailablePages
= MmAvailablePages
- 32;
2402 /* Initialize large page structures on PAE/x64, and MmProcessList on x86 */
2403 MiInitializeLargePageSupport();
2405 /* Check if the registry says any drivers should be loaded with large pages */
2406 MiInitializeDriverLargePageList();
2408 /* Relocate the boot drivers into system PTE space and fixup their PFNs */
2409 MiReloadBootLoadedDrivers(LoaderBlock
);
2411 /* FIXME: Call out into Driver Verifier for initialization */
2413 /* Check how many pages the system has */
2414 if (MmNumberOfPhysicalPages
<= ((13 * _1MB
) / PAGE_SIZE
))
2416 /* Set small system */
2417 MmSystemSize
= MmSmallSystem
;
2418 MmMaximumDeadKernelStacks
= 0;
2420 else if (MmNumberOfPhysicalPages
<= ((19 * _1MB
) / PAGE_SIZE
))
2422 /* Set small system and add 100 pages for the cache */
2423 MmSystemSize
= MmSmallSystem
;
2424 MmSystemCacheWsMinimum
+= 100;
2425 MmMaximumDeadKernelStacks
= 2;
2429 /* Set medium system and add 400 pages for the cache */
2430 MmSystemSize
= MmMediumSystem
;
2431 MmSystemCacheWsMinimum
+= 400;
2432 MmMaximumDeadKernelStacks
= 5;
2435 /* Check for less than 24MB */
2436 if (MmNumberOfPhysicalPages
< ((24 * _1MB
) / PAGE_SIZE
))
2438 /* No more than 32 pages */
2439 MmSystemCacheWsMinimum
= 32;
2442 /* Check for more than 32MB */
2443 if (MmNumberOfPhysicalPages
>= ((32 * _1MB
) / PAGE_SIZE
))
2445 /* Check for product type being "Wi" for WinNT */
2446 if (MmProductType
== '\0i\0W')
2448 /* Then this is a large system */
2449 MmSystemSize
= MmLargeSystem
;
2453 /* For servers, we need 64MB to consider this as being large */
2454 if (MmNumberOfPhysicalPages
>= ((64 * _1MB
) / PAGE_SIZE
))
2456 /* Set it as large */
2457 MmSystemSize
= MmLargeSystem
;
2462 /* Check for more than 33 MB */
2463 if (MmNumberOfPhysicalPages
> ((33 * _1MB
) / PAGE_SIZE
))
2465 /* Add another 500 pages to the cache */
2466 MmSystemCacheWsMinimum
+= 500;
2469 /* Now setup the shared user data fields */
2470 ASSERT(SharedUserData
->NumberOfPhysicalPages
== 0);
2471 SharedUserData
->NumberOfPhysicalPages
= MmNumberOfPhysicalPages
;
2472 SharedUserData
->LargePageMinimum
= 0;
2474 /* Check for workstation (Wi for WinNT) */
2475 if (MmProductType
== '\0i\0W')
2477 /* Set Windows NT Workstation product type */
2478 SharedUserData
->NtProductType
= NtProductWinNt
;
2481 /* For this product, we wait till the last moment to throttle */
2482 MmThrottleTop
= 250;
2483 MmThrottleBottom
= 30;
2487 /* Check for LanMan server (La for LanmanNT) */
2488 if (MmProductType
== '\0a\0L')
2490 /* This is a domain controller */
2491 SharedUserData
->NtProductType
= NtProductLanManNt
;
2495 /* Otherwise it must be a normal server (Se for ServerNT) */
2496 SharedUserData
->NtProductType
= NtProductServer
;
2499 /* Set the product type, and make the system more aggressive with low memory */
2501 MmMinimumFreePages
= 81;
2503 /* We will throttle earlier to preserve memory */
2504 MmThrottleTop
= 450;
2505 MmThrottleBottom
= 80;
2508 /* Update working set tuning parameters */
2509 MiAdjustWorkingSetManagerParameters(!MmProductType
);
2511 /* Finetune the page count by removing working set and NP expansion */
2512 MmResidentAvailablePages
-= MiExpansionPoolPagesInitialCharge
;
2513 MmResidentAvailablePages
-= MmSystemCacheWsMinimum
;
2514 MmResidentAvailableAtInit
= MmResidentAvailablePages
;
2515 if (MmResidentAvailablePages
<= 0)
2517 /* This should not happen */
2518 DPRINT1("System cache working set too big\n");
2522 /* Define limits for system cache */
2524 MmSizeOfSystemCacheInPages
= ((MI_SYSTEM_CACHE_END
+ 1) - MI_SYSTEM_CACHE_START
) / PAGE_SIZE
;
2526 MmSizeOfSystemCacheInPages
= ((ULONG_PTR
)MI_PAGED_POOL_START
- (ULONG_PTR
)MI_SYSTEM_CACHE_START
) / PAGE_SIZE
;
2528 MmSystemCacheEnd
= (PVOID
)((ULONG_PTR
)MmSystemCacheStart
+ (MmSizeOfSystemCacheInPages
* PAGE_SIZE
) - 1);
2530 ASSERT(MmSystemCacheEnd
== (PVOID
)MI_SYSTEM_CACHE_END
);
2532 ASSERT(MmSystemCacheEnd
== (PVOID
)((ULONG_PTR
)MI_PAGED_POOL_START
- 1));
2535 /* Initialize the system cache */
2536 //MiInitializeSystemCache(MmSystemCacheWsMinimum, MmAvailablePages);
2538 /* Update the commit limit */
2539 MmTotalCommitLimit
= MmAvailablePages
;
2540 if (MmTotalCommitLimit
> 1024) MmTotalCommitLimit
-= 1024;
2541 MmTotalCommitLimitMaximum
= MmTotalCommitLimit
;
2543 /* Size up paged pool and build the shadow system page directory */
2546 /* Debugger physical memory support is now ready to be used */
2547 MmDebugPte
= MiAddressToPte(MiDebugMapping
);
2549 /* Initialize the loaded module list */
2550 MiInitializeLoadedModuleList(LoaderBlock
);
2554 // Always return success for now