/*
 * PROJECT:     ReactOS Kernel
 * LICENSE:     BSD - See COPYING.ARM in the top level directory
 * FILE:        ntoskrnl/mm/ARM3/mminit.c
 * PURPOSE:     ARM Memory Manager Initialization
 * PROGRAMMERS: ReactOS Portable Systems Group
 */
9 /* INCLUDES *******************************************************************/
16 #define MODULE_INVOLVED_IN_ARM3
19 /* GLOBALS ********************************************************************/
22 // These are all registry-configurable, but by default, the memory manager will
23 // figure out the most appropriate values.
25 ULONG MmMaximumNonPagedPoolPercent
;
26 ULONG MmSizeOfNonPagedPoolInBytes
;
27 ULONG MmMaximumNonPagedPoolInBytes
;
29 /* Some of the same values, in pages */
30 PFN_NUMBER MmMaximumNonPagedPoolInPages
;
33 // These numbers describe the discrete equation components of the nonpaged
34 // pool sizing algorithm.
36 // They are described on http://support.microsoft.com/default.aspx/kb/126402/ja
37 // along with the algorithm that uses them, which is implemented later below.
39 ULONG MmMinimumNonPagedPoolSize
= 256 * 1024;
40 ULONG MmMinAdditionNonPagedPoolPerMb
= 32 * 1024;
41 ULONG MmDefaultMaximumNonPagedPool
= 1024 * 1024;
42 ULONG MmMaxAdditionNonPagedPoolPerMb
= 400 * 1024;
45 // The memory layout (and especially variable names) of the NT kernel mode
46 // components can be a bit hard to twig, especially when it comes to the non-paged pool.
49 // There are really two components to the non-paged pool:
51 // - The initial nonpaged pool, sized dynamically up to a maximum.
52 // - The expansion nonpaged pool, sized dynamically up to a maximum.
54 // The initial nonpaged pool is physically continuous for performance, and
55 // immediately follows the PFN database, typically sharing the same PDE. It is
56 // a very small resource (32MB on a 1GB system), and capped at 128MB.
58 // Right now we call this the "ARM³ Nonpaged Pool" and it begins somewhere after
59 // the PFN database (which starts at 0xB0000000).
61 // The expansion nonpaged pool, on the other hand, can grow much bigger (400MB
62 // for a 1GB system). On ARM³ however, it is currently capped at 128MB.
64 // The address where the initial nonpaged pool starts is aptly named
65 // MmNonPagedPoolStart, and it describes a range of MmSizeOfNonPagedPoolInBytes
68 // Expansion nonpaged pool starts at an address described by the variable called
69 // MmNonPagedPoolExpansionStart, and it goes on for MmMaximumNonPagedPoolInBytes
70 // minus MmSizeOfNonPagedPoolInBytes bytes, always reaching MmNonPagedPoolEnd
71 // (because of the way it's calculated) at 0xFFBE0000.
73 // Initial nonpaged pool is allocated and mapped early-on during boot, but what
74 // about the expansion nonpaged pool? It is instead composed of special pages
75 // which belong to what are called System PTEs. These PTEs are the matter of a
76 // later discussion, but they are also considered part of the "nonpaged" OS, due
77 // to the fact that they are never paged out -- once an address is described by
78 // a System PTE, it is always valid, until the System PTE is torn down.
80 // System PTEs are actually composed of two "spaces", the system space proper,
81 // and the nonpaged pool expansion space. The latter, as we've already seen,
82 // begins at MmNonPagedPoolExpansionStart. Based on the number of System PTEs
83 // that the system will support, the remaining address space below this address
84 // is used to hold the system space PTEs. This address, in turn, is held in the
85 // variable named MmNonPagedSystemStart, which itself is never allowed to go
86 // below 0xEB000000 (thus creating an upper bound on the number of System PTEs).
88 // This means that 330MB are reserved for total nonpaged system VA, on top of
89 // whatever the initial nonpaged pool allocation is.
91 // The following URLs, valid as of April 23rd, 2008, support this evidence:
93 // http://www.cs.miami.edu/~burt/journal/NT/memory.html
94 // http://www.ditii.com/2007/09/28/windows-memory-management-x86-virtual-address-space/
96 PVOID MmNonPagedSystemStart
;
97 PVOID MmNonPagedPoolStart
;
98 PVOID MmNonPagedPoolExpansionStart
;
99 PVOID MmNonPagedPoolEnd
= MI_NONPAGED_POOL_END
;
102 // This is where paged pool starts by default
104 PVOID MmPagedPoolStart
= MI_PAGED_POOL_START
;
105 PVOID MmPagedPoolEnd
;
108 // And this is its default size
110 ULONG MmSizeOfPagedPoolInBytes
= MI_MIN_INIT_PAGED_POOLSIZE
;
111 PFN_NUMBER MmSizeOfPagedPoolInPages
= MI_MIN_INIT_PAGED_POOLSIZE
/ PAGE_SIZE
;
114 // Session space starts at 0xBFFFFFFF and grows downwards
115 // By default, it includes an 8MB image area where we map win32k and video card
116 // drivers, followed by a 4MB area containing the session's working set. This is
117 // then followed by a 20MB mapped view area and finally by the session's paged
118 // pool, by default 16MB.
120 // On a normal system, this results in session space occupying the region from
121 // 0xBD000000 to 0xC0000000
123 // See miarm.h for the defines that determine the sizing of this region. On an
124 // NT system, some of these can be configured through the registry, but we don't.
127 PVOID MiSessionSpaceEnd
; // 0xC0000000
128 PVOID MiSessionImageEnd
; // 0xC0000000
129 PVOID MiSessionImageStart
; // 0xBF800000
130 PVOID MiSessionViewStart
; // 0xBE000000
131 PVOID MiSessionPoolEnd
; // 0xBE000000
132 PVOID MiSessionPoolStart
; // 0xBD000000
133 PVOID MmSessionBase
; // 0xBD000000
135 ULONG MmSessionViewSize
;
136 ULONG MmSessionPoolSize
;
137 ULONG MmSessionImageSize
;
140 // The system view space, on the other hand, is where sections that are memory
141 // mapped into "system space" end up.
143 // By default, it is a 16MB region.
145 PVOID MiSystemViewStart
;
146 ULONG MmSystemViewSize
;
149 // A copy of the system page directory (the page directory associated with the
150 // System process) is kept (double-mapped) by the manager in order to lazily
151 // map paged pool PDEs into external processes when they fault on a paged pool
154 PFN_NUMBER MmSystemPageDirectory
;
155 PMMPTE MmSystemPagePtes
;
158 // The system cache starts right after hyperspace. The first few pages are for
159 // keeping track of the system working set list.
161 // This should be 0xC0C00000 -- the cache itself starts at 0xC1000000
163 PMMWSL MmSystemCacheWorkingSetList
= MI_SYSTEM_CACHE_WS_START
;
166 // Windows NT seems to choose between 7000, 11000 and 50000
167 // On systems with more than 32MB, this number is then doubled, and further
168 // aligned up to a PDE boundary (4MB).
170 ULONG MmNumberOfSystemPtes
;
173 // This is how many pages the PFN database will take up
174 // In Windows, this includes the Quark Color Table, but not in ARM³
176 ULONG MxPfnAllocation
;
179 // Unlike the old ReactOS Memory Manager, ARM³ (and Windows) does not keep track
180 // of pages that are not actually valid physical memory, such as ACPI reserved
181 // regions, BIOS address ranges, or holes in physical memory address space which
182 // could indicate device-mapped I/O memory.
184 // In fact, the lack of a PFN entry for a page usually indicates that this is
185 // I/O space instead.
187 // A bitmap, called the PFN bitmap, keeps track of all page frames by assigning
188 // a bit to each. If the bit is set, then the page is valid physical RAM.
190 RTL_BITMAP MiPfnBitMap
;
193 // This structure describes the different pieces of RAM-backed address space
195 PPHYSICAL_MEMORY_DESCRIPTOR MmPhysicalMemoryBlock
;
198 // This is where we keep track of the most basic physical layout markers
200 ULONG MmNumberOfPhysicalPages
, MmHighestPhysicalPage
, MmLowestPhysicalPage
= -1;
203 // The total number of pages mapped by the boot loader, which include the kernel
204 // HAL, boot drivers, registry, NLS files and other loader data structures is
205 // kept track of here. This depends on "LoaderPagesSpanned" being correct when
206 // coming from the loader.
208 // This number is later aligned up to a PDE boundary.
210 ULONG MmBootImageSize
;
213 // These three variables keep track of the core separation of address space that
214 // exists between kernel mode and user mode.
216 ULONG MmUserProbeAddress
;
217 PVOID MmHighestUserAddress
;
218 PVOID MmSystemRangeStart
;
220 PVOID MmSystemCacheStart
;
221 PVOID MmSystemCacheEnd
;
222 MMSUPPORT MmSystemCacheWs
;
225 // This is where hyperspace ends (followed by the system cache working set)
227 PVOID MmHyperSpaceEnd
;
230 // Page coloring algorithm data
232 ULONG MmSecondaryColors
;
233 ULONG MmSecondaryColorMask
;
236 // Actual (registry-configurable) size of a GUI thread's stack
238 ULONG MmLargeStackSize
= KERNEL_LARGE_STACK_SIZE
;
241 // Before we have a PFN database, memory comes straight from our physical memory
242 // blocks, which is nice because it's guaranteed contiguous and also because once
243 // we take a page from here, the system doesn't see it anymore.
244 // However, once the fun is over, those pages must be re-integrated back into
245 // PFN society life, and that requires us keeping a copy of the original layout
246 // so that we can parse it later.
248 PMEMORY_ALLOCATION_DESCRIPTOR MxFreeDescriptor
;
249 MEMORY_ALLOCATION_DESCRIPTOR MxOldFreeDescriptor
;
252 * For each page's worth bytes of L2 cache in a given set/way line, the zero and
253 * free lists are organized in what is called a "color".
255 * This array points to the two lists, so it can be thought of as a multi-dimensional
256 * array of MmFreePagesByColor[2][MmSecondaryColors]. Since the number is dynamic,
257 * we describe the array in pointer form instead.
259 * On a final note, the color tables themselves are right after the PFN database.
261 C_ASSERT(FreePageList
== 1);
262 PMMCOLOR_TABLES MmFreePagesByColor
[FreePageList
+ 1];
264 /* An event used in Phase 0 before the rest of the system is ready to go */
267 /* All the events used for memory threshold notifications */
268 PKEVENT MiLowMemoryEvent
;
269 PKEVENT MiHighMemoryEvent
;
270 PKEVENT MiLowPagedPoolEvent
;
271 PKEVENT MiHighPagedPoolEvent
;
272 PKEVENT MiLowNonPagedPoolEvent
;
273 PKEVENT MiHighNonPagedPoolEvent
;
275 /* The actual thresholds themselves, in page numbers */
276 PFN_NUMBER MmLowMemoryThreshold
;
277 PFN_NUMBER MmHighMemoryThreshold
;
278 PFN_NUMBER MiLowPagedPoolThreshold
;
279 PFN_NUMBER MiHighPagedPoolThreshold
;
280 PFN_NUMBER MiLowNonPagedPoolThreshold
;
281 PFN_NUMBER MiHighNonPagedPoolThreshold
;
284 * This number determines how many free pages must exist, at minimum, until we
285 * start trimming working sets and flushing modified pages to obtain more free pages.
288 * This number changes if the system detects that this is a server product
290 PFN_NUMBER MmMinimumFreePages
= 26;
293 * This number indicates how many pages we consider to be a low limit of having
294 * "plenty" of free memory.
296 * It is doubled on systems that have more than 63MB of memory
298 PFN_NUMBER MmPlentyFreePages
= 400;
300 /* These values store the type of system this is (small, med, large) and if server */
302 MM_SYSTEMSIZE MmSystemSize
;
304 /* PRIVATE FUNCTIONS **********************************************************/
/*
 * MiSyncARM3WithROS
 * Copies the kernel page-directory entries covering [AddressStart, AddressEnd]
 * from the live page directory (PDE_BASE) into the legacy ReactOS
 * MmGlobalKernelPageDirectory array — presumably so the old Mm's cached copy
 * of the kernel PDEs stays in sync with mappings ARM3 creates (TODO: confirm
 * against upstream).
 * NOTE(review): this chunk is a garbled extraction — the AddressEnd parameter
 * declaration, the function braces, and the loop increment were dropped by the
 * extraction tool. Restore from upstream before building.
 */
307 // In Bavaria, this is probably a hate crime
311 MiSyncARM3WithROS(IN PVOID AddressStart
,
315 // Puerile piece of junk-grade carbonized horseshit puss sold to the lowest bidder
/* Compute the PDE index range covered by the requested address span */
317 ULONG Pde
= ADDR_TO_PDE_OFFSET(AddressStart
);
318 while (Pde
<= ADDR_TO_PDE_OFFSET(AddressEnd
))
321 // This is both odious and heinous
323 extern ULONG MmGlobalKernelPageDirectory
[1024];
/* Mirror the live PDE into the ROS-side cache */
324 MmGlobalKernelPageDirectory
[Pde
] = ((PULONG
)PDE_BASE
)[Pde
];
/*
 * MxGetNextPage
 * Boot-time page allocator: carves PageCount consecutive pages out of the
 * free-memory descriptor (MxFreeDescriptor) by bumping its BasePage and
 * shrinking its PageCount. Bugchecks with INSTALL_MORE_MEMORY when the
 * descriptor cannot satisfy the request.
 * NOTE(review): garbled extraction — the return type lines, braces, the Pfn
 * local declaration, the last KeBugCheckEx argument, and the trailing
 * `return Pfn;` were dropped; presumably the function returns the first PFN
 * of the carved range (TODO: confirm against upstream).
 */
331 MxGetNextPage(IN PFN_NUMBER PageCount
)
335 /* Make sure we have enough pages */
336 if (PageCount
> MxFreeDescriptor
->PageCount
)
338 /* Crash the system */
339 KeBugCheckEx(INSTALL_MORE_MEMORY
,
340 MmNumberOfPhysicalPages
,
341 MxFreeDescriptor
->PageCount
,
342 MxOldFreeDescriptor
.PageCount
,
346 /* Use our lowest usable free pages */
347 Pfn
= MxFreeDescriptor
->BasePage
;
/* Consume the pages from the descriptor */
348 MxFreeDescriptor
->BasePage
+= PageCount
;
349 MxFreeDescriptor
->PageCount
-= PageCount
;
/*
 * MiComputeColorInformation
 * Derives the number of page-coloring "secondary colors" from the L2 cache
 * geometry reported in the KPCR (cache size divided by associativity, then
 * converted to pages), unless a value was already configured. The result is
 * clamped to [MI_MIN_SECONDARY_COLORS, MI_MAX_SECONDARY_COLORS], forced to a
 * power of two, and the derived mask is stored both in MmSecondaryColorMask
 * and in the current PRCB.
 * NOTE(review): garbled extraction — braces and some structural lines were
 * dropped by the tool; code tokens below are otherwise preserved verbatim.
 */
355 MiComputeColorInformation(VOID
)
357 ULONG L2Associativity
;
359 /* Check if no setting was provided already */
360 if (!MmSecondaryColors
)
362 /* Get L2 cache information */
363 L2Associativity
= KeGetPcr()->SecondLevelCacheAssociativity
;
365 /* The number of colors is the number of cache bytes by set/way */
366 MmSecondaryColors
= KeGetPcr()->SecondLevelCacheSize
;
367 if (L2Associativity
) MmSecondaryColors
/= L2Associativity
;
370 /* Now convert cache bytes into pages */
371 MmSecondaryColors
>>= PAGE_SHIFT
;
372 if (!MmSecondaryColors
)
374 /* If there was no cache data from the KPCR, use the default colors */
375 MmSecondaryColors
= MI_SECONDARY_COLORS
;
379 /* Otherwise, make sure there aren't too many colors */
380 if (MmSecondaryColors
> MI_MAX_SECONDARY_COLORS
)
382 /* Set the maximum */
383 MmSecondaryColors
= MI_MAX_SECONDARY_COLORS
;
386 /* Make sure there aren't too few colors */
387 if (MmSecondaryColors
< MI_MIN_SECONDARY_COLORS
)
389 /* Set the default */
390 MmSecondaryColors
= MI_SECONDARY_COLORS
;
393 /* Finally make sure the colors are a power of two */
394 if (MmSecondaryColors
& (MmSecondaryColors
- 1))
396 /* Set the default */
397 MmSecondaryColors
= MI_SECONDARY_COLORS
;
401 /* Compute the mask and store it */
402 MmSecondaryColorMask
= MmSecondaryColors
- 1;
403 KeGetCurrentPrcb()->SecondaryColorMask
= MmSecondaryColorMask
;
/*
 * MiInitializeColorTables
 * Places the color tables immediately after the ARM3 PFN database, maps any
 * not-yet-valid PTEs in that range with fresh pages from MxGetNextPage
 * (zeroing each new page), then initializes both the zeroed-page and
 * free-page list heads for every secondary color to the empty-list sentinel
 * (0xFFFFFFFF) with a zero count.
 * NOTE(review): garbled extraction — braces, the PointerPte++ advance, and
 * the `i` declaration were dropped by the tool; code tokens below are
 * otherwise preserved verbatim.
 */
408 MiInitializeColorTables(VOID
)
411 PMMPTE PointerPte
, LastPte
;
412 MMPTE TempPte
= ValidKernelPte
;
414 /* The color table starts after the ARM3 PFN database */
415 MmFreePagesByColor
[0] = (PMMCOLOR_TABLES
)&MmPfnDatabase
[1][MmHighestPhysicalPage
+ 1];
417 /* Loop the PTEs. We have two color tables for each secondary color */
418 PointerPte
= MiAddressToPte(&MmFreePagesByColor
[0][0]);
419 LastPte
= MiAddressToPte((ULONG_PTR
)MmFreePagesByColor
[0] +
420 (2 * MmSecondaryColors
* sizeof(MMCOLOR_TABLES
))
422 while (PointerPte
<= LastPte
)
424 /* Check for valid PTE */
425 if (PointerPte
->u
.Hard
.Valid
== 0)
427 /* Get a page and map it */
428 TempPte
.u
.Hard
.PageFrameNumber
= MxGetNextPage(1);
429 ASSERT(TempPte
.u
.Hard
.Valid
== 1);
430 *PointerPte
= TempPte
;
432 /* Zero out the page */
433 RtlZeroMemory(MiPteToAddress(PointerPte
), PAGE_SIZE
);
440 /* Now set the address of the next list, right after this one */
441 MmFreePagesByColor
[1] = &MmFreePagesByColor
[0][MmSecondaryColors
];
443 /* Now loop the lists to set them up */
444 for (i
= 0; i
< MmSecondaryColors
; i
++)
446 /* Set both free and zero lists for each color: empty sentinel, count 0 */
447 MmFreePagesByColor
[ZeroedPageList
][i
].Flink
= 0xFFFFFFFF;
448 MmFreePagesByColor
[ZeroedPageList
][i
].Blink
= (PVOID
)0xFFFFFFFF;
449 MmFreePagesByColor
[ZeroedPageList
][i
].Count
= 0;
450 MmFreePagesByColor
[FreePageList
][i
].Flink
= 0xFFFFFFFF;
451 MmFreePagesByColor
[FreePageList
][i
].Blink
= (PVOID
)0xFFFFFFFF;
452 MmFreePagesByColor
[FreePageList
][i
].Count
= 0;
/*
 * MiIsRegularMemory
 * Walks the loader block's (sorted) memory descriptor list to decide whether
 * the given Pfn is RAM that deserves a PFN database entry. Descriptors of
 * type LoaderFirmwarePermanent, LoaderBBTMemory, or LoaderSpecialMemory are
 * treated as memory we do not map. A PFN falling inside the original free
 * descriptor (MxOldFreeDescriptor) is also counted, since those pages were
 * consumed for initial mappings.
 * NOTE(review): garbled extraction — the Pfn parameter line, braces, the
 * CONTAINING_RECORD field argument, and all `return TRUE/FALSE` statements
 * were dropped by the tool; the surviving comments indicate the intended
 * results (TODO: confirm against upstream).
 */
458 MiIsRegularMemory(IN PLOADER_PARAMETER_BLOCK LoaderBlock
,
461 PLIST_ENTRY NextEntry
;
462 PMEMORY_ALLOCATION_DESCRIPTOR MdBlock
;
464 /* Loop the memory descriptors */
465 NextEntry
= LoaderBlock
->MemoryDescriptorListHead
.Flink
;
466 while (NextEntry
!= &LoaderBlock
->MemoryDescriptorListHead
)
468 /* Get the memory descriptor */
469 MdBlock
= CONTAINING_RECORD(NextEntry
,
470 MEMORY_ALLOCATION_DESCRIPTOR
,
473 /* Check if this PFN could be part of the block */
474 if (Pfn
>= (MdBlock
->BasePage
))
476 /* Check if it really is part of the block */
477 if (Pfn
< (MdBlock
->BasePage
+ MdBlock
->PageCount
))
479 /* Check if the block is actually memory we don't map */
480 if ((MdBlock
->MemoryType
== LoaderFirmwarePermanent
) ||
481 (MdBlock
->MemoryType
== LoaderBBTMemory
) ||
482 (MdBlock
->MemoryType
== LoaderSpecialMemory
))
484 /* We don't need PFN database entries for this memory */
488 /* This is memory we want to map */
494 /* Blocks are ordered, so if it's not here, it doesn't exist */
498 /* Get to the next descriptor */
499 NextEntry
= MdBlock
->ListEntry
.Flink
;
502 /* Check if this PFN is actually from our free memory descriptor */
503 if ((Pfn
>= MxOldFreeDescriptor
.BasePage
) &&
504 (Pfn
< MxOldFreeDescriptor
.BasePage
+ MxOldFreeDescriptor
.PageCount
))
506 /* We use these pages for initial mappings, so we do want to count them */
510 /* Otherwise this isn't memory that we describe or care about */
516 MiMapPfnDatabase(IN PLOADER_PARAMETER_BLOCK LoaderBlock
)
518 ULONG FreePage
, FreePageCount
, PagesLeft
, BasePage
, PageCount
;
519 PLIST_ENTRY NextEntry
;
520 PMEMORY_ALLOCATION_DESCRIPTOR MdBlock
;
521 PMMPTE PointerPte
, LastPte
;
522 MMPTE TempPte
= ValidKernelPte
;
524 /* Get current page data, since we won't be using MxGetNextPage as it would corrupt our state */
525 FreePage
= MxFreeDescriptor
->BasePage
;
526 FreePageCount
= MxFreeDescriptor
->PageCount
;
529 /* Loop the memory descriptors */
530 NextEntry
= LoaderBlock
->MemoryDescriptorListHead
.Flink
;
531 while (NextEntry
!= &LoaderBlock
->MemoryDescriptorListHead
)
533 /* Get the descriptor */
534 MdBlock
= CONTAINING_RECORD(NextEntry
,
535 MEMORY_ALLOCATION_DESCRIPTOR
,
537 if ((MdBlock
->MemoryType
== LoaderFirmwarePermanent
) ||
538 (MdBlock
->MemoryType
== LoaderBBTMemory
) ||
539 (MdBlock
->MemoryType
== LoaderSpecialMemory
))
541 /* These pages are not part of the PFN database */
542 NextEntry
= MdBlock
->ListEntry
.Flink
;
546 /* Next, check if this is our special free descriptor we've found */
547 if (MdBlock
== MxFreeDescriptor
)
549 /* Use the real numbers instead */
550 BasePage
= MxOldFreeDescriptor
.BasePage
;
551 PageCount
= MxOldFreeDescriptor
.PageCount
;
555 /* Use the descriptor's numbers */
556 BasePage
= MdBlock
->BasePage
;
557 PageCount
= MdBlock
->PageCount
;
560 /* Get the PTEs for this range */
561 PointerPte
= MiAddressToPte(&MmPfnDatabase
[0][BasePage
]);
562 LastPte
= MiAddressToPte(((ULONG_PTR
)&MmPfnDatabase
[0][BasePage
+ PageCount
]) - 1);
563 DPRINT("MD Type: %lx Base: %lx Count: %lx\n", MdBlock
->MemoryType
, BasePage
, PageCount
);
566 while (PointerPte
<= LastPte
)
568 /* We'll only touch PTEs that aren't already valid */
569 if (PointerPte
->u
.Hard
.Valid
== 0)
571 /* Use the next free page */
572 TempPte
.u
.Hard
.PageFrameNumber
= FreePage
;
573 ASSERT(FreePageCount
!= 0);
575 /* Consume free pages */
581 KeBugCheckEx(INSTALL_MORE_MEMORY
,
582 MmNumberOfPhysicalPages
,
584 MxOldFreeDescriptor
.PageCount
,
588 /* Write out this PTE */
590 ASSERT(PointerPte
->u
.Hard
.Valid
== 0);
591 ASSERT(TempPte
.u
.Hard
.Valid
== 1);
592 *PointerPte
= TempPte
;
595 RtlZeroMemory(MiPteToAddress(PointerPte
), PAGE_SIZE
);
602 /* Get the PTEs for this range */
603 PointerPte
= MiAddressToPte(&MmPfnDatabase
[1][BasePage
]);
604 LastPte
= MiAddressToPte(((ULONG_PTR
)&MmPfnDatabase
[1][BasePage
+ PageCount
]) - 1);
605 DPRINT("MD Type: %lx Base: %lx Count: %lx\n", MdBlock
->MemoryType
, BasePage
, PageCount
);
608 while (PointerPte
<= LastPte
)
610 /* We'll only touch PTEs that aren't already valid */
611 if (PointerPte
->u
.Hard
.Valid
== 0)
613 /* Use the next free page */
614 TempPte
.u
.Hard
.PageFrameNumber
= FreePage
;
615 ASSERT(FreePageCount
!= 0);
617 /* Consume free pages */
623 KeBugCheckEx(INSTALL_MORE_MEMORY
,
624 MmNumberOfPhysicalPages
,
626 MxOldFreeDescriptor
.PageCount
,
630 /* Write out this PTE */
632 ASSERT(PointerPte
->u
.Hard
.Valid
== 0);
633 ASSERT(TempPte
.u
.Hard
.Valid
== 1);
634 *PointerPte
= TempPte
;
637 RtlZeroMemory(MiPteToAddress(PointerPte
), PAGE_SIZE
);
644 /* Do the next address range */
645 NextEntry
= MdBlock
->ListEntry
.Flink
;
648 /* Now update the free descriptors to consume the pages we used up during the PFN allocation loop */
649 MxFreeDescriptor
->BasePage
= FreePage
;
650 MxFreeDescriptor
->PageCount
= FreePageCount
;
655 MiBuildPfnDatabaseFromPages(IN PLOADER_PARAMETER_BLOCK LoaderBlock
)
660 PFN_NUMBER PageFrameIndex
, StartupPdIndex
, PtePageIndex
;
662 ULONG_PTR BaseAddress
= 0;
664 /* PFN of the startup page directory */
665 StartupPdIndex
= PFN_FROM_PTE(MiAddressToPde(PDE_BASE
));
667 /* Start with the first PDE and scan them all */
668 PointerPde
= MiAddressToPde(NULL
);
669 Count
= PD_COUNT
* PDE_COUNT
;
670 for (i
= 0; i
< Count
; i
++)
672 /* Check for valid PDE */
673 if (PointerPde
->u
.Hard
.Valid
== 1)
675 /* Get the PFN from it */
676 PageFrameIndex
= PFN_FROM_PTE(PointerPde
);
678 /* Do we want a PFN entry for this page? */
679 if (MiIsRegularMemory(LoaderBlock
, PageFrameIndex
))
681 /* Yes we do, set it up */
682 Pfn1
= MI_PFN_TO_PFNENTRY(PageFrameIndex
);
683 Pfn1
->u4
.PteFrame
= StartupPdIndex
;
684 Pfn1
->PteAddress
= PointerPde
;
685 Pfn1
->u2
.ShareCount
++;
686 Pfn1
->u3
.e2
.ReferenceCount
= 1;
687 Pfn1
->u3
.e1
.PageLocation
= ActiveAndValid
;
688 Pfn1
->u3
.e1
.CacheAttribute
= MiNonCached
;
696 /* Now get the PTE and scan the pages */
697 PointerPte
= MiAddressToPte(BaseAddress
);
698 for (j
= 0; j
< PTE_COUNT
; j
++)
700 /* Check for a valid PTE */
701 if (PointerPte
->u
.Hard
.Valid
== 1)
703 /* Increase the shared count of the PFN entry for the PDE */
704 ASSERT(Pfn1
!= NULL
);
705 Pfn1
->u2
.ShareCount
++;
707 /* Now check if the PTE is valid memory too */
708 PtePageIndex
= PFN_FROM_PTE(PointerPte
);
709 if (MiIsRegularMemory(LoaderBlock
, PtePageIndex
))
712 * Only add pages above the end of system code or pages
713 * that are part of nonpaged pool
715 if ((BaseAddress
>= 0xA0000000) ||
716 ((BaseAddress
>= (ULONG_PTR
)MmNonPagedPoolStart
) &&
717 (BaseAddress
< (ULONG_PTR
)MmNonPagedPoolStart
+
718 MmSizeOfNonPagedPoolInBytes
)))
720 /* Get the PFN entry and make sure it too is valid */
721 Pfn2
= MI_PFN_TO_PFNENTRY(PtePageIndex
);
722 if ((MmIsAddressValid(Pfn2
)) &&
723 (MmIsAddressValid(Pfn2
+ 1)))
725 /* Setup the PFN entry */
726 Pfn2
->u4
.PteFrame
= PageFrameIndex
;
727 Pfn2
->PteAddress
= PointerPte
;
728 Pfn2
->u2
.ShareCount
++;
729 Pfn2
->u3
.e2
.ReferenceCount
= 1;
730 Pfn2
->u3
.e1
.PageLocation
= ActiveAndValid
;
731 Pfn2
->u3
.e1
.CacheAttribute
= MiNonCached
;
739 BaseAddress
+= PAGE_SIZE
;
744 /* Next PDE mapped address */
745 BaseAddress
+= PTE_COUNT
* PAGE_SIZE
;
/*
 * MiBuildPfnDatabaseZeroPage
 * If the lowest physical page is page zero and its PFN entry has no real
 * references, initialize that entry as a deliberately bogus, in-use page
 * (PteAddress pointing at the PDE for 0xFFFFFFFF, reference count 0xFFF0)
 * so that accidental uses of page zero are caught rather than handed out.
 * NOTE(review): garbled extraction — the Pfn1/PointerPde local declarations
 * and the braces were dropped by the tool.
 */
755 MiBuildPfnDatabaseZeroPage(VOID
)
760 /* Grab the lowest page and check if it has no real references */
761 Pfn1
= MI_PFN_TO_PFNENTRY(MmLowestPhysicalPage
);
762 if (!(MmLowestPhysicalPage
) && !(Pfn1
->u3
.e2
.ReferenceCount
))
764 /* Make it a bogus page to catch errors */
765 PointerPde
= MiAddressToPde(0xFFFFFFFF);
766 Pfn1
->u4
.PteFrame
= PFN_FROM_PTE(PointerPde
);
767 Pfn1
->PteAddress
= PointerPde
;
768 Pfn1
->u2
.ShareCount
++;
/* Huge reference count to keep the page pinned and unusable */
769 Pfn1
->u3
.e2
.ReferenceCount
= 0xFFF0;
770 Pfn1
->u3
.e1
.PageLocation
= ActiveAndValid
;
771 Pfn1
->u3
.e1
.CacheAttribute
= MiNonCached
;
777 MiBuildPfnDatabaseFromLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock
)
779 PLIST_ENTRY NextEntry
;
780 PFN_NUMBER PageCount
= 0;
781 PMEMORY_ALLOCATION_DESCRIPTOR MdBlock
;
782 PFN_NUMBER PageFrameIndex
;
787 /* Now loop through the descriptors */
788 NextEntry
= LoaderBlock
->MemoryDescriptorListHead
.Flink
;
789 while (NextEntry
!= &LoaderBlock
->MemoryDescriptorListHead
)
791 /* Get the current descriptor */
792 MdBlock
= CONTAINING_RECORD(NextEntry
,
793 MEMORY_ALLOCATION_DESCRIPTOR
,
797 PageCount
= MdBlock
->PageCount
;
798 PageFrameIndex
= MdBlock
->BasePage
;
800 /* Don't allow memory above what the PFN database is mapping */
801 if (PageFrameIndex
> MmHighestPhysicalPage
)
803 /* Since they are ordered, everything past here will be larger */
807 /* On the other hand, the end page might be higher up... */
808 if ((PageFrameIndex
+ PageCount
) > (MmHighestPhysicalPage
+ 1))
810 /* In which case we'll trim the descriptor to go as high as we can */
811 PageCount
= MmHighestPhysicalPage
+ 1 - PageFrameIndex
;
812 MdBlock
->PageCount
= PageCount
;
814 /* But if there's nothing left to trim, we got too high, so quit */
815 if (!PageCount
) break;
818 /* Now check the descriptor type */
819 switch (MdBlock
->MemoryType
)
821 /* Check for bad RAM */
824 DPRINT1("You have damaged RAM modules. Stopping boot\n");
828 /* Check for free RAM */
830 case LoaderLoadedProgram
:
831 case LoaderFirmwareTemporary
:
832 case LoaderOsloaderStack
:
834 /* Get the last page of this descriptor. Note we loop backwards */
835 PageFrameIndex
+= PageCount
- 1;
836 Pfn1
= MI_PFN_TO_PFNENTRY(PageFrameIndex
);
839 /* If the page really has no references, mark it as free */
840 if (!Pfn1
->u3
.e2
.ReferenceCount
)
842 Pfn1
->u3
.e1
.CacheAttribute
= MiNonCached
;
843 //MiInsertPageInFreeList(PageFrameIndex);
846 /* Go to the next page */
851 /* Done with this block */
854 /* Check for pages that are invisible to us */
855 case LoaderFirmwarePermanent
:
856 case LoaderSpecialMemory
:
857 case LoaderBBTMemory
:
864 /* Map these pages with the KSEG0 mapping that adds 0x80000000 */
865 PointerPte
= MiAddressToPte(KSEG0_BASE
+ (PageFrameIndex
<< PAGE_SHIFT
));
866 Pfn1
= MI_PFN_TO_PFNENTRY(PageFrameIndex
);
869 /* Check if the page is really unused */
870 PointerPde
= MiAddressToPde(KSEG0_BASE
+ (PageFrameIndex
<< PAGE_SHIFT
));
871 if (!Pfn1
->u3
.e2
.ReferenceCount
)
873 /* Mark it as being in-use */
874 Pfn1
->u4
.PteFrame
= PFN_FROM_PTE(PointerPde
);
875 Pfn1
->PteAddress
= PointerPte
;
876 Pfn1
->u2
.ShareCount
++;
877 Pfn1
->u3
.e2
.ReferenceCount
= 1;
878 Pfn1
->u3
.e1
.PageLocation
= ActiveAndValid
;
879 Pfn1
->u3
.e1
.CacheAttribute
= MiNonCached
;
881 /* Check for RAM disk page */
882 if (MdBlock
->MemoryType
== LoaderXIPRom
)
884 /* Make it a pseudo-I/O ROM mapping */
886 Pfn1
->u2
.ShareCount
= 0;
887 Pfn1
->u3
.e2
.ReferenceCount
= 0;
888 Pfn1
->u3
.e1
.PageLocation
= 0;
890 Pfn1
->u4
.InPageError
= 0;
891 Pfn1
->u3
.e1
.PrototypePte
= 1;
895 /* Advance page structures */
903 /* Next descriptor entry */
904 NextEntry
= MdBlock
->ListEntry
.Flink
;
/*
 * MiBuildPfnDatabaseSelf
 * Accounts for the PFN database's own pages: walks the PTEs mapping the PFN
 * entries from the lowest to the highest physical page and, for each valid
 * PTE, marks the backing page's PFN entry with a share count and reference
 * count of 1.
 * NOTE(review): garbled extraction — the Pfn1 declaration, braces, and the
 * PointerPte++ advance were dropped by the tool.
 */
910 MiBuildPfnDatabaseSelf(VOID
)
912 PMMPTE PointerPte
, LastPte
;
915 /* Loop the PFN database page */
916 PointerPte
= MiAddressToPte(MI_PFN_TO_PFNENTRY(MmLowestPhysicalPage
));
917 LastPte
= MiAddressToPte(MI_PFN_TO_PFNENTRY(MmHighestPhysicalPage
));
918 while (PointerPte
<= LastPte
)
920 /* Make sure the page is valid */
921 if (PointerPte
->u
.Hard
.Valid
== 1)
923 /* Get the PFN entry and just mark it referenced */
924 Pfn1
= MI_PFN_TO_PFNENTRY(PointerPte
->u
.Hard
.PageFrameNumber
);
925 Pfn1
->u2
.ShareCount
= 1;
926 Pfn1
->u3
.e2
.ReferenceCount
= 1;
/*
 * MiInitializePfnDatabase
 * Top-level PFN database construction, in four passes: (1) scan the live
 * page tables for mapped pages, (2) set up the bogus zero page, (3) walk the
 * loader block's memory descriptors for everything else, (4) account for the
 * PFN database's own pages.
 * NOTE(review): garbled extraction — the function's type lines and braces
 * were dropped by the tool.
 */
936 MiInitializePfnDatabase(IN PLOADER_PARAMETER_BLOCK LoaderBlock
)
938 /* Scan memory and start setting up PFN entries */
939 MiBuildPfnDatabaseFromPages(LoaderBlock
);
941 /* Add the zero page */
942 MiBuildPfnDatabaseZeroPage();
944 /* Scan the loader block and build the rest of the PFN database */
945 MiBuildPfnDatabaseFromLoaderBlock(LoaderBlock
);
947 /* Finally add the pages for the PFN database itself */
948 MiBuildPfnDatabaseSelf();
/*
 * MiAdjustWorkingSetManagerParameters
 * Tunes working-set manager page minimums. Currently only doubles
 * MmPlentyFreePages on systems with roughly 64MB of RAM or more; the Client
 * parameter is not yet consulted here.
 * NOTE(review): garbled extraction — the function's type lines and braces
 * were dropped by the tool.
 */
953 MiAdjustWorkingSetManagerParameters(IN BOOLEAN Client
)
955 /* This function needs to do more work, for now, we tune page minimums */
957 /* Check for a system with around 64MB RAM or more */
958 if (MmNumberOfPhysicalPages
>= (63 * _1MB
) / PAGE_SIZE
)
960 /* Double the minimum amount of pages we consider for a "plenty free" scenario */
961 MmPlentyFreePages
*= 2;
/*
 * MiNotifyMemoryEvents
 * Updates the \KernelObjects low/high memory notification events based on
 * MmAvailablePages versus MmLowMemoryThreshold and MmHighMemoryThreshold:
 * below low -> signal low/clear high; between -> clear both; otherwise ->
 * signal high/clear low. Each event is read before clearing/setting to avoid
 * redundant state transitions.
 * NOTE(review): garbled extraction — the function's type lines, braces, and
 * the final `else` keyword were dropped by the tool.
 */
967 MiNotifyMemoryEvents(VOID
)
969 /* Are we in a low-memory situation? */
970 if (MmAvailablePages
< MmLowMemoryThreshold
)
972 /* Clear high, set low */
973 if (KeReadStateEvent(MiHighMemoryEvent
)) KeClearEvent(MiHighMemoryEvent
);
974 if (!KeReadStateEvent(MiLowMemoryEvent
)) KeSetEvent(MiLowMemoryEvent
, 0, FALSE
);
976 else if (MmAvailablePages
< MmHighMemoryThreshold
)
978 /* We are in between, clear both */
979 if (KeReadStateEvent(MiHighMemoryEvent
)) KeClearEvent(MiHighMemoryEvent
);
980 if (KeReadStateEvent(MiLowMemoryEvent
)) KeClearEvent(MiLowMemoryEvent
);
984 /* Clear low, set high */
985 if (KeReadStateEvent(MiLowMemoryEvent
)) KeClearEvent(MiLowMemoryEvent
);
986 if (!KeReadStateEvent(MiHighMemoryEvent
)) KeSetEvent(MiHighMemoryEvent
, 0, FALSE
);
992 MiCreateMemoryEvent(IN PUNICODE_STRING Name
,
999 OBJECT_ATTRIBUTES ObjectAttributes
;
1000 SECURITY_DESCRIPTOR SecurityDescriptor
;
1003 Status
= RtlCreateSecurityDescriptor(&SecurityDescriptor
,
1004 SECURITY_DESCRIPTOR_REVISION
);
1005 if (!NT_SUCCESS(Status
)) return Status
;
1007 /* One ACL with 3 ACEs, containing each one SID */
1008 DaclLength
= sizeof(ACL
) +
1009 3 * sizeof(ACCESS_ALLOWED_ACE
) +
1010 RtlLengthSid(SeLocalSystemSid
) +
1011 RtlLengthSid(SeAliasAdminsSid
) +
1012 RtlLengthSid(SeWorldSid
);
1014 /* Allocate space for the DACL */
1015 Dacl
= ExAllocatePoolWithTag(PagedPool
, DaclLength
, 'lcaD');
1016 if (!Dacl
) return STATUS_INSUFFICIENT_RESOURCES
;
1018 /* Setup the ACL inside it */
1019 Status
= RtlCreateAcl(Dacl
, DaclLength
, ACL_REVISION
);
1020 if (!NT_SUCCESS(Status
)) goto CleanUp
;
1022 /* Add query rights for everyone */
1023 Status
= RtlAddAccessAllowedAce(Dacl
,
1025 SYNCHRONIZE
| EVENT_QUERY_STATE
| READ_CONTROL
,
1027 if (!NT_SUCCESS(Status
)) goto CleanUp
;
1029 /* Full rights for the admin */
1030 Status
= RtlAddAccessAllowedAce(Dacl
,
1034 if (!NT_SUCCESS(Status
)) goto CleanUp
;
1036 /* As well as full rights for the system */
1037 Status
= RtlAddAccessAllowedAce(Dacl
,
1041 if (!NT_SUCCESS(Status
)) goto CleanUp
;
1043 /* Set this DACL inside the SD */
1044 Status
= RtlSetDaclSecurityDescriptor(&SecurityDescriptor
,
1048 if (!NT_SUCCESS(Status
)) goto CleanUp
;
1050 /* Setup the event attributes, making sure it's a permanent one */
1051 InitializeObjectAttributes(&ObjectAttributes
,
1053 OBJ_KERNEL_HANDLE
| OBJ_PERMANENT
,
1055 &SecurityDescriptor
);
1057 /* Create the event */
1058 Status
= ZwCreateEvent(&EventHandle
,
1067 /* Check if this is the success path */
1068 if (NT_SUCCESS(Status
))
1070 /* Add a reference to the object, then close the handle we had */
1071 Status
= ObReferenceObjectByHandle(EventHandle
,
1077 ZwClose (EventHandle
);
1086 MiInitializeMemoryEvents(VOID
)
1088 UNICODE_STRING LowString
= RTL_CONSTANT_STRING(L
"\\KernelObjects\\LowMemoryCondition");
1089 UNICODE_STRING HighString
= RTL_CONSTANT_STRING(L
"\\KernelObjects\\HighMemoryCondition");
1090 UNICODE_STRING LowPagedPoolString
= RTL_CONSTANT_STRING(L
"\\KernelObjects\\LowPagedPoolCondition");
1091 UNICODE_STRING HighPagedPoolString
= RTL_CONSTANT_STRING(L
"\\KernelObjects\\HighPagedPoolCondition");
1092 UNICODE_STRING LowNonPagedPoolString
= RTL_CONSTANT_STRING(L
"\\KernelObjects\\LowNonPagedPoolCondition");
1093 UNICODE_STRING HighNonPagedPoolString
= RTL_CONSTANT_STRING(L
"\\KernelObjects\\HighNonPagedPoolCondition");
1096 /* Check if we have a registry setting */
1097 if (MmLowMemoryThreshold
)
1099 /* Convert it to pages */
1100 MmLowMemoryThreshold
*= (_1MB
/ PAGE_SIZE
);
1104 /* The low memory threshold is hit when we don't consider that we have "plenty" of free pages anymore */
1105 MmLowMemoryThreshold
= MmPlentyFreePages
;
1107 /* More than one GB of memory? */
1108 if (MmNumberOfPhysicalPages
> 0x40000)
1110 /* Start at 32MB, and add another 16MB for each GB */
1111 MmLowMemoryThreshold
= (32 * _1MB
) / PAGE_SIZE
;
1112 MmLowMemoryThreshold
+= ((MmNumberOfPhysicalPages
- 0x40000) >> 7);
1114 else if (MmNumberOfPhysicalPages
> 0x8000)
1116 /* For systems with > 128MB RAM, add another 4MB for each 128MB */
1117 MmLowMemoryThreshold
+= ((MmNumberOfPhysicalPages
- 0x8000) >> 5);
1120 /* Don't let the minimum threshold go past 64MB */
1121 MmLowMemoryThreshold
= min(MmLowMemoryThreshold
, (64 * _1MB
) / PAGE_SIZE
);
1124 /* Check if we have a registry setting */
1125 if (MmHighMemoryThreshold
)
1127 /* Convert it into pages */
1128 MmHighMemoryThreshold
*= (_1MB
/ PAGE_SIZE
);
1132 /* Otherwise, the default is three times the low memory threshold */
1133 MmHighMemoryThreshold
= 3 * MmLowMemoryThreshold
;
1134 ASSERT(MmHighMemoryThreshold
> MmLowMemoryThreshold
);
1137 /* Make sure high threshold is actually higher than the low */
1138 MmHighMemoryThreshold
= max(MmHighMemoryThreshold
, MmLowMemoryThreshold
);
1140 /* Create the memory events for all the thresholds */
1141 Status
= MiCreateMemoryEvent(&LowString
, &MiLowMemoryEvent
);
1142 if (!NT_SUCCESS(Status
)) return FALSE
;
1143 Status
= MiCreateMemoryEvent(&HighString
, &MiHighMemoryEvent
);
1144 if (!NT_SUCCESS(Status
)) return FALSE
;
1145 Status
= MiCreateMemoryEvent(&LowPagedPoolString
, &MiLowPagedPoolEvent
);
1146 if (!NT_SUCCESS(Status
)) return FALSE
;
1147 Status
= MiCreateMemoryEvent(&HighPagedPoolString
, &MiHighPagedPoolEvent
);
1148 if (!NT_SUCCESS(Status
)) return FALSE
;
1149 Status
= MiCreateMemoryEvent(&LowNonPagedPoolString
, &MiLowNonPagedPoolEvent
);
1150 if (!NT_SUCCESS(Status
)) return FALSE
;
1151 Status
= MiCreateMemoryEvent(&HighNonPagedPoolString
, &MiHighNonPagedPoolEvent
);
1152 if (!NT_SUCCESS(Status
)) return FALSE
;
1154 /* Now setup the pool events */
1155 MiInitializePoolEvents();
1157 /* Set the initial event state */
1158 MiNotifyMemoryEvents();
/*
 * MmDumpArmPfnDatabase
 *
 * Debug helper: walks the entire ARM3 PFN database at HIGH_LEVEL IRQL and
 * DbgPrint()s each entry's consumer classification plus summary totals for
 * Active/Free/Other pages.
 *
 * NOTE(review): this chunk is a lossy extraction — statement fragments are
 * split across lines and several tokens appear to be missing (the switch's
 * non-Active case labels, the ActivePages/FreePages/OtherPages increments,
 * and the leading DbgPrint arguments). Code below is intentionally left
 * byte-identical; recover the pristine text from version control before
 * making functional changes.
 */
1164 MmDumpArmPfnDatabase(VOID
)
1168 PCHAR Consumer
= "Unknown";
1170 ULONG ActivePages
= 0, FreePages
= 0, OtherPages
= 0;
/* Raise to HIGH_LEVEL so the PFN database cannot change underneath the dump. */
1172 KeRaiseIrql(HIGH_LEVEL
, &OldIrql
);
1175 // Loop the PFN database
1177 for (i
= 0; i
<= MmHighestPhysicalPage
; i
++)
1179 Pfn1
= MI_PFN_TO_PFNENTRY(i
);
1180 if (!Pfn1
) continue;
1183 // Get the page location
1185 switch (Pfn1
->u3
.e1
.PageLocation
)
1187 case ActiveAndValid
:
1189 Consumer
= "Active and Valid";
/* NOTE(review): case label for the free list appears to be missing here. */
1195 Consumer
= "Free Page List";
/* NOTE(review): presumably the default arm — any other location is unexpected. */
1201 Consumer
= "Other (ASSERT!)";
1207 // Pretty-print the page
1209 DbgPrint("0x%08p:\t%20s\t(%02d.%02d) [%08p-%08p])\n",
1212 Pfn1
->u3
.e2
.ReferenceCount
,
1213 Pfn1
->u2
.ShareCount
,
/* Summary totals; page counts converted to KB via PAGE_SHIFT. */
1218 DbgPrint("Active: %d pages\t[%d KB]\n", ActivePages
, (ActivePages
<< PAGE_SHIFT
) / 1024);
1219 DbgPrint("Free: %d pages\t[%d KB]\n", FreePages
, (FreePages
<< PAGE_SHIFT
) / 1024);
1220 DbgPrint("Other: %d pages\t[%d KB]\n", OtherPages
, (OtherPages
<< PAGE_SHIFT
) / 1024);
/* Restore the caller's IRQL. */
1222 KeLowerIrql(OldIrql
);
1227 MiPagesInLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock
,
1228 IN PBOOLEAN IncludeType
)
1230 PLIST_ENTRY NextEntry
;
1231 PFN_NUMBER PageCount
= 0;
1232 PMEMORY_ALLOCATION_DESCRIPTOR MdBlock
;
1235 // Now loop through the descriptors
1237 NextEntry
= LoaderBlock
->MemoryDescriptorListHead
.Flink
;
1238 while (NextEntry
!= &LoaderBlock
->MemoryDescriptorListHead
)
1241 // Grab each one, and check if it's one we should include
1243 MdBlock
= CONTAINING_RECORD(NextEntry
,
1244 MEMORY_ALLOCATION_DESCRIPTOR
,
1246 if ((MdBlock
->MemoryType
< LoaderMaximum
) &&
1247 (IncludeType
[MdBlock
->MemoryType
]))
1250 // Add this to our running total
1252 PageCount
+= MdBlock
->PageCount
;
1256 // Try the next descriptor
1258 NextEntry
= MdBlock
->ListEntry
.Flink
;
/*
 * MmInitializeMemoryLimits
 *
 * Builds a PHYSICAL_MEMORY_DESCRIPTOR from the boot loader's memory
 * descriptor list, merging physically adjacent descriptors of included
 * types into single runs, then (if the initial run estimate was too large)
 * reallocating a right-sized buffer before returning it.
 *
 * NOTE(review): lossy extraction — several tokens are missing below (the
 * allocation-size tails of both ExAllocatePoolWithTag calls, the Run++
 * increment, the free of the oversized buffer, the final `return Buffer;`).
 * Code is left byte-identical; restore from version control before editing.
 */
1267 PPHYSICAL_MEMORY_DESCRIPTOR
1269 MmInitializeMemoryLimits(IN PLOADER_PARAMETER_BLOCK LoaderBlock
,
1270 IN PBOOLEAN IncludeType
)
1272 PLIST_ENTRY NextEntry
;
1273 ULONG Run
= 0, InitialRuns
= 0;
/* NextPage starts at -1 so the first descriptor can never merge with "previous". */
1274 PFN_NUMBER NextPage
= -1, PageCount
= 0;
1275 PPHYSICAL_MEMORY_DESCRIPTOR Buffer
, NewBuffer
;
1276 PMEMORY_ALLOCATION_DESCRIPTOR MdBlock
;
1279 // Scan the memory descriptors
1281 NextEntry
= LoaderBlock
->MemoryDescriptorListHead
.Flink
;
1282 while (NextEntry
!= &LoaderBlock
->MemoryDescriptorListHead
)
1285 // For each one, increase the memory allocation estimate
1288 NextEntry
= NextEntry
->Flink
;
1292 // Allocate the maximum we'll ever need
1294 Buffer
= ExAllocatePoolWithTag(NonPagedPool
,
1295 sizeof(PHYSICAL_MEMORY_DESCRIPTOR
) +
1296 sizeof(PHYSICAL_MEMORY_RUN
) *
1299 if (!Buffer
) return NULL
;
1302 // For now that's how many runs we have
1304 Buffer
->NumberOfRuns
= InitialRuns
;
1307 // Now loop through the descriptors again
1309 NextEntry
= LoaderBlock
->MemoryDescriptorListHead
.Flink
;
1310 while (NextEntry
!= &LoaderBlock
->MemoryDescriptorListHead
)
1313 // Grab each one, and check if it's one we should include
1315 MdBlock
= CONTAINING_RECORD(NextEntry
,
1316 MEMORY_ALLOCATION_DESCRIPTOR
,
1318 if ((MdBlock
->MemoryType
< LoaderMaximum
) &&
1319 (IncludeType
[MdBlock
->MemoryType
]))
1322 // Add this to our running total
1324 PageCount
+= MdBlock
->PageCount
;
1327 // Check if the next page is described by the next descriptor
1329 if (MdBlock
->BasePage
== NextPage
)
1332 // Combine it into the same physical run
1334 ASSERT(MdBlock
->PageCount
!= 0);
1335 Buffer
->Run
[Run
- 1].PageCount
+= MdBlock
->PageCount
;
1336 NextPage
+= MdBlock
->PageCount
;
1341 // Otherwise just duplicate the descriptor's contents
1343 Buffer
->Run
[Run
].BasePage
= MdBlock
->BasePage
;
1344 Buffer
->Run
[Run
].PageCount
= MdBlock
->PageCount
;
1345 NextPage
= Buffer
->Run
[Run
].BasePage
+ Buffer
->Run
[Run
].PageCount
;
1348 // And in this case, increase the number of runs
/* NOTE(review): the Run++ that belongs to the comment above was dropped by the extraction. */
1355 // Try the next descriptor
1357 NextEntry
= MdBlock
->ListEntry
.Flink
;
1361 // We should not have been able to go past our initial estimate
1363 ASSERT(Run
<= Buffer
->NumberOfRuns
);
1366 // Our guess was probably exaggerated...
1368 if (InitialRuns
> Run
)
1371 // Allocate a more accurately sized buffer
1373 NewBuffer
= ExAllocatePoolWithTag(NonPagedPool
,
1374 sizeof(PHYSICAL_MEMORY_DESCRIPTOR
) +
1375 sizeof(PHYSICAL_MEMORY_RUN
) *
1381 // Copy the old buffer into the new, then free it
1383 RtlCopyMemory(NewBuffer
->Run
,
1385 sizeof(PHYSICAL_MEMORY_RUN
) * Run
);
1389 // Now use the new buffer
1396 // Write the final numbers, and return it
1398 Buffer
->NumberOfRuns
= Run
;
1399 Buffer
->NumberOfPages
= PageCount
;
/*
 * MiBuildPagedPool
 *
 * Sizes and initializes the paged pool: double-maps the system page
 * directory through a system PTE (for lazy PDE evaluation across process
 * switches), computes the paged-pool VA range (default 2x nonpaged pool,
 * clamped so it cannot overlap nonpaged system VA, minimum 32MB), maps the
 * first paged-pool PDE, sets up the allocation/end-of-allocation bitmaps,
 * calls InitializePool, and derives the low/high paged-pool thresholds.
 *
 * NOTE(review): lossy extraction — pool-tag and size arguments of both
 * ExAllocatePoolWithTag calls, the bitmap SizeOfBitMap arguments, and
 * several closing braces are missing below. Code is left byte-identical;
 * restore the pristine text from version control before editing.
 */
1405 MiBuildPagedPool(VOID
)
1407 PMMPTE PointerPte
, PointerPde
;
1408 MMPTE TempPte
= ValidKernelPte
;
1409 PFN_NUMBER PageFrameIndex
;
1411 ULONG Size
, BitMapSize
;
1414 // Get the page frame number for the system page directory
1416 PointerPte
= MiAddressToPte(PDE_BASE
);
1417 MmSystemPageDirectory
= PFN_FROM_PTE(PointerPte
);
1420 // Allocate a system PTE which will hold a copy of the page directory
1422 PointerPte
= MiReserveSystemPtes(1, SystemPteSpace
);
1424 MmSystemPagePtes
= MiPteToAddress(PointerPte
);
1427 // Make this system PTE point to the system page directory.
1428 // It is now essentially double-mapped. This will be used later for lazy
1429 // evaluation of PDEs accross process switches, similarly to how the Global
1430 // page directory array in the old ReactOS Mm is used (but in a less hacky
1433 TempPte
= ValidKernelPte
;
1434 TempPte
.u
.Hard
.PageFrameNumber
= MmSystemPageDirectory
;
1435 ASSERT(PointerPte
->u
.Hard
.Valid
== 0);
1436 ASSERT(TempPte
.u
.Hard
.Valid
== 1);
1437 *PointerPte
= TempPte
;
1440 // Let's get back to paged pool work: size it up.
1441 // By default, it should be twice as big as nonpaged pool.
1443 MmSizeOfPagedPoolInBytes
= 2 * MmMaximumNonPagedPoolInBytes
;
1444 if (MmSizeOfPagedPoolInBytes
> ((ULONG_PTR
)MmNonPagedSystemStart
-
1445 (ULONG_PTR
)MmPagedPoolStart
))
1448 // On the other hand, we have limited VA space, so make sure that the VA
1449 // for paged pool doesn't overflow into nonpaged pool VA. Otherwise, set
1450 // whatever maximum is possible.
1452 MmSizeOfPagedPoolInBytes
= (ULONG_PTR
)MmNonPagedSystemStart
-
1453 (ULONG_PTR
)MmPagedPoolStart
;
1457 // Get the size in pages and make sure paged pool is at least 32MB.
1459 Size
= MmSizeOfPagedPoolInBytes
;
1460 if (Size
< MI_MIN_INIT_PAGED_POOLSIZE
) Size
= MI_MIN_INIT_PAGED_POOLSIZE
;
1461 Size
= BYTES_TO_PAGES(Size
);
1464 // Now check how many PTEs will be required for these many pages.
/* One page table (1024 PTEs on x86) covers 1024 pages; round up. */
1466 Size
= (Size
+ (1024 - 1)) / 1024;
1469 // Recompute the page-aligned size of the paged pool, in bytes and pages.
1471 MmSizeOfPagedPoolInBytes
= Size
* PAGE_SIZE
* 1024;
1472 MmSizeOfPagedPoolInPages
= MmSizeOfPagedPoolInBytes
>> PAGE_SHIFT
;
1475 // Let's be really sure this doesn't overflow into nonpaged system VA
1477 ASSERT((MmSizeOfPagedPoolInBytes
+ (ULONG_PTR
)MmPagedPoolStart
) <=
1478 (ULONG_PTR
)MmNonPagedSystemStart
);
1481 // This is where paged pool ends
1483 MmPagedPoolEnd
= (PVOID
)(((ULONG_PTR
)MmPagedPoolStart
+
1484 MmSizeOfPagedPoolInBytes
) - 1);
1487 // So now get the PDE for paged pool and zero it out
1489 PointerPde
= MiAddressToPde(MmPagedPoolStart
);
1490 RtlZeroMemory(PointerPde
,
1491 (1 + MiAddressToPde(MmPagedPoolEnd
) - PointerPde
) * sizeof(MMPTE
));
1494 // Next, get the first and last PTE
1496 PointerPte
= MiAddressToPte(MmPagedPoolStart
);
1497 MmPagedPoolInfo
.FirstPteForPagedPool
= PointerPte
;
1498 MmPagedPoolInfo
.LastPteForPagedPool
= MiAddressToPte(MmPagedPoolEnd
);
1501 // Lock the PFN database
1503 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
1506 // Allocate a page and map the first paged pool PDE
1508 PageFrameIndex
= MmAllocPage(MC_NPPOOL
);
1509 TempPte
.u
.Hard
.PageFrameNumber
= PageFrameIndex
;
1510 ASSERT(PointerPde
->u
.Hard
.Valid
== 0);
1511 ASSERT(TempPte
.u
.Hard
.Valid
== 1);
1512 *PointerPde
= TempPte
;
1515 // Release the PFN database lock
1517 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
1520 // We only have one PDE mapped for now... at fault time, additional PDEs
1521 // will be allocated to handle paged pool growth. This is where they'll have
1524 MmPagedPoolInfo
.NextPdeForPagedPoolExpansion
= PointerPde
+ 1;
1527 // We keep track of each page via a bit, so check how big the bitmap will
1528 // have to be (make sure to align our page count such that it fits nicely
1529 // into a 4-byte aligned bitmap.
1531 // We'll also allocate the bitmap header itself part of the same buffer.
/* NOTE(review): a `Size = MmSizeOfPagedPoolInPages;` style assignment likely
 * preceded this assert in the pristine source; it was dropped by the extraction. */
1534 ASSERT(Size
== MmSizeOfPagedPoolInPages
);
1536 Size
= sizeof(RTL_BITMAP
) + (((Size
+ 31) / 32) * sizeof(ULONG
));
1539 // Allocate the allocation bitmap, which tells us which regions have not yet
1540 // been mapped into memory
1542 MmPagedPoolInfo
.PagedPoolAllocationMap
= ExAllocatePoolWithTag(NonPagedPool
,
1545 ASSERT(MmPagedPoolInfo
.PagedPoolAllocationMap
);
1548 // Initialize it such that at first, only the first page's worth of PTEs is
1549 // marked as allocated (incidentially, the first PDE we allocated earlier).
1551 RtlInitializeBitMap(MmPagedPoolInfo
.PagedPoolAllocationMap
,
1552 (PULONG
)(MmPagedPoolInfo
.PagedPoolAllocationMap
+ 1),
1554 RtlSetAllBits(MmPagedPoolInfo
.PagedPoolAllocationMap
);
1555 RtlClearBits(MmPagedPoolInfo
.PagedPoolAllocationMap
, 0, 1024);
1558 // We have a second bitmap, which keeps track of where allocations end.
1559 // Given the allocation bitmap and a base address, we can therefore figure
1560 // out which page is the last page of that allocation, and thus how big the
1561 // entire allocation is.
1563 MmPagedPoolInfo
.EndOfPagedPoolBitmap
= ExAllocatePoolWithTag(NonPagedPool
,
1566 ASSERT(MmPagedPoolInfo
.EndOfPagedPoolBitmap
);
1567 RtlInitializeBitMap(MmPagedPoolInfo
.EndOfPagedPoolBitmap
,
1568 (PULONG
)(MmPagedPoolInfo
.EndOfPagedPoolBitmap
+ 1),
1572 // Since no allocations have been made yet, there are no bits set as the end
1574 RtlClearAllBits(MmPagedPoolInfo
.EndOfPagedPoolBitmap
);
1577 // Initialize paged pool.
1579 InitializePool(PagedPool
, 0);
1581 /* Default low threshold of 30MB or one fifth of paged pool */
1582 MiLowPagedPoolThreshold
= (30 * _1MB
) >> PAGE_SHIFT
;
1583 MiLowPagedPoolThreshold
= min(MiLowPagedPoolThreshold
, Size
/ 5);
1585 /* Default high threshold of 60MB or 25% */
1586 MiHighPagedPoolThreshold
= (60 * _1MB
) >> PAGE_SHIFT
;
1587 MiHighPagedPoolThreshold
= min(MiHighPagedPoolThreshold
, (Size
* 2) / 5);
1588 ASSERT(MiLowPagedPoolThreshold
< MiHighPagedPoolThreshold
);
/*
 * MmArmInitSystem
 *
 * Phase 0/1 ARM3 memory manager initialization entry point: builds the
 * usable-memory-type filter, wires the memory-condition events to a
 * temporary event, lays out the kernel VA map (session space, system view,
 * user/kernel split), tunes the system PTE count from the physical page
 * count, calls the machine-dependent init, builds the physical memory
 * block and PFN bitmap, classifies the system size, and fills in the
 * SharedUserData product/physical-page fields.
 *
 * NOTE(review): lossy extraction — return-type lines, many closing braces,
 * the PFN-bitmap buffer assignment, the MiBuildPagedPool() call site, and
 * several `else` arms are missing below. Code is left byte-identical;
 * restore the pristine text from version control before editing.
 */
1593 MmArmInitSystem(IN ULONG Phase
,
1594 IN PLOADER_PARAMETER_BLOCK LoaderBlock
)
1597 BOOLEAN IncludeType
[LoaderMaximum
];
1599 PPHYSICAL_MEMORY_RUN Run
;
1600 PFN_NUMBER PageCount
;
1603 // Instantiate memory that we don't consider RAM/usable
1604 // We use the same exclusions that Windows does, in order to try to be
1605 // compatible with WinLDR-style booting
1607 for (i
= 0; i
< LoaderMaximum
; i
++) IncludeType
[i
] = TRUE
;
1608 IncludeType
[LoaderBad
] = FALSE
;
1609 IncludeType
[LoaderFirmwarePermanent
] = FALSE
;
1610 IncludeType
[LoaderSpecialMemory
] = FALSE
;
1611 IncludeType
[LoaderBBTMemory
] = FALSE
;
1614 /* Initialize the phase 0 temporary event */
1615 KeInitializeEvent(&MiTempEvent
, NotificationEvent
, FALSE
);
1617 /* Set all the events to use the temporary event for now */
1618 MiLowMemoryEvent
= &MiTempEvent
;
1619 MiHighMemoryEvent
= &MiTempEvent
;
1620 MiLowPagedPoolEvent
= &MiTempEvent
;
1621 MiHighPagedPoolEvent
= &MiTempEvent
;
1622 MiLowNonPagedPoolEvent
= &MiTempEvent
;
1623 MiHighNonPagedPoolEvent
= &MiTempEvent
;
1626 // Define the basic user vs. kernel address space separation
1628 MmSystemRangeStart
= (PVOID
)KSEG0_BASE
;
/* 64KB no-access gap below the kernel, per NT convention. */
1629 MmUserProbeAddress
= (ULONG_PTR
)MmSystemRangeStart
- 0x10000;
1630 MmHighestUserAddress
= (PVOID
)(MmUserProbeAddress
- 1);
1633 // Get the size of the boot loader's image allocations and then round
1634 // that region up to a PDE size, so that any PDEs we might create for
1635 // whatever follows are separate from the PDEs that boot loader might've
1636 // already created (and later, we can blow all that away if we want to).
1638 MmBootImageSize
= KeLoaderBlock
->Extension
->LoaderPagesSpanned
;
1639 MmBootImageSize
*= PAGE_SIZE
;
/* Round up to a 4MB (PDE-mapped region) boundary. */
1640 MmBootImageSize
= (MmBootImageSize
+ (4 * 1024 * 1024) - 1) & ~((4 * 1024 * 1024) - 1);
1641 ASSERT((MmBootImageSize
% (4 * 1024 * 1024)) == 0);
1644 // Set the size of session view, pool, and image
1646 MmSessionSize
= MI_SESSION_SIZE
;
1647 MmSessionViewSize
= MI_SESSION_VIEW_SIZE
;
1648 MmSessionPoolSize
= MI_SESSION_POOL_SIZE
;
1649 MmSessionImageSize
= MI_SESSION_IMAGE_SIZE
;
1652 // Set the size of system view
1654 MmSystemViewSize
= MI_SYSTEM_VIEW_SIZE
;
1657 // This is where it all ends
1659 MiSessionImageEnd
= (PVOID
)PTE_BASE
;
1662 // This is where we will load Win32k.sys and the video driver
1664 MiSessionImageStart
= (PVOID
)((ULONG_PTR
)MiSessionImageEnd
-
1665 MmSessionImageSize
);
1668 // So the view starts right below the session working set (itself below
1671 MiSessionViewStart
= (PVOID
)((ULONG_PTR
)MiSessionImageEnd
-
1672 MmSessionImageSize
-
1673 MI_SESSION_WORKING_SET_SIZE
-
1677 // Session pool follows
1679 MiSessionPoolEnd
= MiSessionViewStart
;
1680 MiSessionPoolStart
= (PVOID
)((ULONG_PTR
)MiSessionPoolEnd
-
1684 // And it all begins here
1686 MmSessionBase
= MiSessionPoolStart
;
1689 // Sanity check that our math is correct
1691 ASSERT((ULONG_PTR
)MmSessionBase
+ MmSessionSize
== PTE_BASE
);
1694 // Session space ends wherever image session space ends
1696 MiSessionSpaceEnd
= MiSessionImageEnd
;
1699 // System view space ends at session space, so now that we know where
1700 // this is, we can compute the base address of system view space itself.
1702 MiSystemViewStart
= (PVOID
)((ULONG_PTR
)MmSessionBase
-
1706 /* Initialize the user mode image list */
1707 InitializeListHead(&MmLoadedUserImageList
);
1709 /* Initialize the paged pool mutex */
1710 KeInitializeGuardedMutex(&MmPagedPoolMutex
);
1712 /* Initialize the Loader Lock */
1713 KeInitializeMutant(&MmSystemLoadLock
, FALSE
);
1716 // Count physical pages on the system
1718 PageCount
= MiPagesInLoaderBlock(LoaderBlock
, IncludeType
);
1721 // Check if this is a machine with less than 19MB of RAM
1723 if (PageCount
< MI_MIN_PAGES_FOR_SYSPTE_TUNING
)
1726 // Use the very minimum of system PTEs
1728 MmNumberOfSystemPtes
= 7000;
1733 // Use the default, but check if we have more than 32MB of RAM
1735 MmNumberOfSystemPtes
= 11000;
1736 if (PageCount
> MI_MIN_PAGES_FOR_SYSPTE_BOOST
)
1739 // Double the amount of system PTEs
1741 MmNumberOfSystemPtes
<<= 1;
1745 DPRINT("System PTE count has been tuned to %d (%d bytes)\n",
1746 MmNumberOfSystemPtes
, MmNumberOfSystemPtes
* PAGE_SIZE
);
1748 /* Initialize the platform-specific parts */
1749 MiInitMachineDependent(LoaderBlock
);
1752 // Sync us up with ReactOS Mm
1754 MiSyncARM3WithROS(MmNonPagedSystemStart
, (PVOID
)((ULONG_PTR
)MmNonPagedPoolEnd
- 1));
1755 MiSyncARM3WithROS(MmPfnDatabase
[0], (PVOID
)((ULONG_PTR
)MmNonPagedPoolStart
+ MmSizeOfNonPagedPoolInBytes
- 1));
1756 MiSyncARM3WithROS((PVOID
)HYPER_SPACE
, (PVOID
)(HYPER_SPACE
+ PAGE_SIZE
- 1));
1759 // Build the physical memory block
1761 MmPhysicalMemoryBlock
= MmInitializeMemoryLimits(LoaderBlock
,
1765 // Allocate enough buffer for the PFN bitmap
1766 // Align it up to a 32-bit boundary
1768 Bitmap
= ExAllocatePoolWithTag(NonPagedPool
,
1769 (((MmHighestPhysicalPage
+ 1) + 31) / 32) * 4,
/* NOTE(review): the failure check (if (!Bitmap)) guarding this bugcheck was
 * dropped by the extraction. */
1776 KeBugCheckEx(INSTALL_MORE_MEMORY
,
1777 MmNumberOfPhysicalPages
,
1778 MmLowestPhysicalPage
,
1779 MmHighestPhysicalPage
,
1784 // Initialize it and clear all the bits to begin with
1786 RtlInitializeBitMap(&MiPfnBitMap
,
1788 MmHighestPhysicalPage
+ 1);
1789 RtlClearAllBits(&MiPfnBitMap
);
1792 // Loop physical memory runs
1794 for (i
= 0; i
< MmPhysicalMemoryBlock
->NumberOfRuns
; i
++)
1799 Run
= &MmPhysicalMemoryBlock
->Run
[i
];
1800 DPRINT("PHYSICAL RAM [0x%08p to 0x%08p]\n",
1801 Run
->BasePage
<< PAGE_SHIFT
,
1802 (Run
->BasePage
+ Run
->PageCount
) << PAGE_SHIFT
);
1805 // Make sure it has pages inside it
1810 // Set the bits in the PFN bitmap
1812 RtlSetBits(&MiPfnBitMap
, Run
->BasePage
, Run
->PageCount
);
1817 // Size up paged pool and build the shadow system page directory
/* NOTE(review): the MiBuildPagedPool() call belonging to the comment above
 * is missing from this extraction. */
1821 /* Check how many pages the system has */
/* NOTE(review): MmNumberOfPhysicalPages is a page count but 13*_1MB reads as a
 * byte constant — a trailing "/ PAGE_SIZE" was likely lost here (compare the
 * 32MB/64MB checks below, which do divide by PAGE_SIZE). Confirm against the
 * pristine source. */
1822 if (MmNumberOfPhysicalPages
<= (13 * _1MB
))
1824 /* Set small system */
1825 MmSystemSize
= MmSmallSystem
;
1827 else if (MmNumberOfPhysicalPages
<= (19 * _1MB
))
1829 /* Set small system */
1830 MmSystemSize
= MmSmallSystem
;
1834 /* Set medium system */
1835 MmSystemSize
= MmMediumSystem
;
1838 /* Check for more than 32MB */
1839 if (MmNumberOfPhysicalPages
>= ((32 * _1MB
) / PAGE_SIZE
))
1841 /* Check for product type being "Wi" for WinNT */
1842 if (MmProductType
== '\0i\0W')
1844 /* Then this is a large system */
1845 MmSystemSize
= MmLargeSystem
;
1849 /* For servers, we need 64MB to consider this as being large */
1850 if (MmNumberOfPhysicalPages
>= ((64 * _1MB
) / PAGE_SIZE
))
1852 /* Set it as large */
1853 MmSystemSize
= MmLargeSystem
;
1858 /* Now setup the shared user data fields */
1859 ASSERT(SharedUserData
->NumberOfPhysicalPages
== 0);
1860 SharedUserData
->NumberOfPhysicalPages
= MmNumberOfPhysicalPages
;
1861 SharedUserData
->LargePageMinimum
= 0;
1863 /* Check for workstation (Wi for WinNT) */
1864 if (MmProductType
== '\0i\0W')
1866 /* Set Windows NT Workstation product type */
1867 SharedUserData
->NtProductType
= NtProductWinNt
;
1872 /* Check for LanMan server */
1873 if (MmProductType
== '\0a\0L')
1875 /* This is a domain controller */
1876 SharedUserData
->NtProductType
= NtProductLanManNt
;
1880 /* Otherwise it must be a normal server */
1881 SharedUserData
->NtProductType
= NtProductServer
;
1884 /* Set the product type, and make the system more aggressive with low memory */
1886 MmMinimumFreePages
= 81;
1889 /* Update working set tuning parameters */
1890 MiAdjustWorkingSetManagerParameters(!MmProductType
);
1894 // Always return success for now
1896 return STATUS_SUCCESS
;