2 * PROJECT: ReactOS Kernel
3 * LICENSE: BSD - See COPYING.ARM in the top level directory
4 * FILE: ntoskrnl/mm/ARM3/mminit.c
5 * PURPOSE: ARM Memory Manager Initialization
6 * PROGRAMMERS: ReactOS Portable Systems Group
9 /* INCLUDES *******************************************************************/
16 #define MODULE_INVOLVED_IN_ARM3
19 /* GLOBALS ********************************************************************/
22 // These are all registry-configurable, but by default, the memory manager will
23 // figure out the most appropriate values.
25 ULONG MmMaximumNonPagedPoolPercent
;
26 ULONG MmSizeOfNonPagedPoolInBytes
;
27 ULONG MmMaximumNonPagedPoolInBytes
;
30 // These numbers describe the discrete equation components of the nonpaged
31 // pool sizing algorithm.
33 // They are described on http://support.microsoft.com/default.aspx/kb/126402/ja
34 // along with the algorithm that uses them, which is implemented later below.
36 ULONG MmMinimumNonPagedPoolSize
= 256 * 1024;
37 ULONG MmMinAdditionNonPagedPoolPerMb
= 32 * 1024;
38 ULONG MmDefaultMaximumNonPagedPool
= 1024 * 1024;
39 ULONG MmMaxAdditionNonPagedPoolPerMb
= 400 * 1024;
42 // The memory layout (and especially variable names) of the NT kernel mode
43 // components can be a bit hard to twig, especially when it comes to the non
46 // There are really two components to the non-paged pool:
48 // - The initial nonpaged pool, sized dynamically up to a maximum.
49 // - The expansion nonpaged pool, sized dynamically up to a maximum.
51 // The initial nonpaged pool is physically continuous for performance, and
52 // immediately follows the PFN database, typically sharing the same PDE. It is
53 // a very small resource (32MB on a 1GB system), and capped at 128MB.
55 // Right now we call this the "ARM³ Nonpaged Pool" and it begins somewhere after
56 // the PFN database (which starts at 0xB0000000).
58 // The expansion nonpaged pool, on the other hand, can grow much bigger (400MB
59 // for a 1GB system). On ARM³ however, it is currently capped at 128MB.
61 // The address where the initial nonpaged pool starts is aptly named
62 // MmNonPagedPoolStart, and it describes a range of MmSizeOfNonPagedPoolInBytes
65 // Expansion nonpaged pool starts at an address described by the variable called
66 // MmNonPagedPoolExpansionStart, and it goes on for MmMaximumNonPagedPoolInBytes
67 // minus MmSizeOfNonPagedPoolInBytes bytes, always reaching MmNonPagedPoolEnd
68 // (because of the way it's calculated) at 0xFFBE0000.
70 // Initial nonpaged pool is allocated and mapped early-on during boot, but what
71 // about the expansion nonpaged pool? It is instead composed of special pages
72 // which belong to what are called System PTEs. These PTEs are the matter of a
73 // later discussion, but they are also considered part of the "nonpaged" OS, due
74 // to the fact that they are never paged out -- once an address is described by
75 // a System PTE, it is always valid, until the System PTE is torn down.
77 // System PTEs are actually composed of two "spaces", the system space proper,
78 // and the nonpaged pool expansion space. The latter, as we've already seen,
79 // begins at MmNonPagedPoolExpansionStart. Based on the number of System PTEs
80 // that the system will support, the remaining address space below this address
81 // is used to hold the system space PTEs. This address, in turn, is held in the
82 // variable named MmNonPagedSystemStart, which itself is never allowed to go
83 // below 0xEB000000 (thus creating an upper bound on the number of System PTEs).
85 // This means that 330MB are reserved for total nonpaged system VA, on top of
86 // whatever the initial nonpaged pool allocation is.
88 // The following URLs, valid as of April 23rd, 2008, support this evidence:
90 // http://www.cs.miami.edu/~burt/journal/NT/memory.html
91 // http://www.ditii.com/2007/09/28/windows-memory-management-x86-virtual-address-space/
93 PVOID MmNonPagedSystemStart
;
94 PVOID MmNonPagedPoolStart
;
95 PVOID MmNonPagedPoolExpansionStart
;
96 PVOID MmNonPagedPoolEnd
= MI_NONPAGED_POOL_END
;
99 // This is where paged pool starts by default
101 PVOID MmPagedPoolStart
= MI_PAGED_POOL_START
;
102 PVOID MmPagedPoolEnd
;
105 // And this is its default size
107 ULONG MmSizeOfPagedPoolInBytes
= MI_MIN_INIT_PAGED_POOLSIZE
;
108 PFN_NUMBER MmSizeOfPagedPoolInPages
= MI_MIN_INIT_PAGED_POOLSIZE
/ PAGE_SIZE
;
111 // Session space starts at 0xBFFFFFFF and grows downwards
112 // By default, it includes an 8MB image area where we map win32k and video card
113 // drivers, followed by a 4MB area containing the session's working set. This is
114 // then followed by a 20MB mapped view area and finally by the session's paged
115 // pool, by default 16MB.
117 // On a normal system, this results in session space occupying the region from
118 // 0xBD000000 to 0xC0000000
120 // See miarm.h for the defines that determine the sizing of this region. On an
121 // NT system, some of these can be configured through the registry, but we don't
124 PVOID MiSessionSpaceEnd
; // 0xC0000000
125 PVOID MiSessionImageEnd
; // 0xC0000000
126 PVOID MiSessionImageStart
; // 0xBF800000
127 PVOID MiSessionViewStart
; // 0xBE000000
128 PVOID MiSessionPoolEnd
; // 0xBE000000
129 PVOID MiSessionPoolStart
; // 0xBD000000
130 PVOID MmSessionBase
; // 0xBD000000
132 ULONG MmSessionViewSize
;
133 ULONG MmSessionPoolSize
;
134 ULONG MmSessionImageSize
;
137 // The system view space, on the other hand, is where sections that are memory
138 // mapped into "system space" end up.
140 // By default, it is a 16MB region.
142 PVOID MiSystemViewStart
;
143 ULONG MmSystemViewSize
;
146 // A copy of the system page directory (the page directory associated with the
147 // System process) is kept (double-mapped) by the manager in order to lazily
148 // map paged pool PDEs into external processes when they fault on a paged pool
151 PFN_NUMBER MmSystemPageDirectory
;
152 PMMPTE MmSystemPagePtes
;
155 // The system cache starts right after hyperspace. The first few pages are for
156 // keeping track of the system working set list.
158 // This should be 0xC0C00000 -- the cache itself starts at 0xC1000000
160 PMMWSL MmSystemCacheWorkingSetList
= MI_SYSTEM_CACHE_WS_START
;
163 // Windows NT seems to choose between 7000, 11000 and 50000
164 // On systems with more than 32MB, this number is then doubled, and further
165 // aligned up to a PDE boundary (4MB).
167 ULONG MmNumberOfSystemPtes
;
170 // This is how many pages the PFN database will take up
171 // In Windows, this includes the Quark Color Table, but not in ARM³
173 ULONG MxPfnAllocation
;
176 // Unlike the old ReactOS Memory Manager, ARM³ (and Windows) does not keep track
177 // of pages that are not actually valid physical memory, such as ACPI reserved
178 // regions, BIOS address ranges, or holes in physical memory address space which
179 // could indicate device-mapped I/O memory.
181 // In fact, the lack of a PFN entry for a page usually indicates that this is
182 // I/O space instead.
184 // A bitmap, called the PFN bitmap, keeps track of all page frames by assigning
185 // a bit to each. If the bit is set, then the page is valid physical RAM.
187 RTL_BITMAP MiPfnBitMap
;
190 // This structure describes the different pieces of RAM-backed address space
192 PPHYSICAL_MEMORY_DESCRIPTOR MmPhysicalMemoryBlock
;
195 // This is where we keep track of the most basic physical layout markers
197 ULONG MmNumberOfPhysicalPages
, MmHighestPhysicalPage
, MmLowestPhysicalPage
= -1;
200 // The total number of pages mapped by the boot loader, which include the kernel
201 // HAL, boot drivers, registry, NLS files and other loader data structures is
202 // kept track of here. This depends on "LoaderPagesSpanned" being correct when
203 // coming from the loader.
205 // This number is later aligned up to a PDE boundary.
207 ULONG MmBootImageSize
;
210 // These three variables keep track of the core separation of address space that
211 // exists between kernel mode and user mode.
213 ULONG MmUserProbeAddress
;
214 PVOID MmHighestUserAddress
;
215 PVOID MmSystemRangeStart
;
217 PVOID MmSystemCacheStart
;
218 PVOID MmSystemCacheEnd
;
219 MMSUPPORT MmSystemCacheWs
;
222 // This is where hyperspace ends (followed by the system cache working set)
224 PVOID MmHyperSpaceEnd
;
227 // Page coloring algorithm data
229 ULONG MmSecondaryColors
;
230 ULONG MmSecondaryColorMask
;
233 // Actual (registry-configurable) size of a GUI thread's stack
235 ULONG MmLargeStackSize
= KERNEL_LARGE_STACK_SIZE
;
238 // Before we have a PFN database, memory comes straight from our physical memory
239 // blocks, which is nice because it's guaranteed contiguous and also because once
240 // we take a page from here, the system doesn't see it anymore.
241 // However, once the fun is over, those pages must be re-integrated back into
242 // PFN society life, and that requires us keeping a copy of the original layout
243 // so that we can parse it later.
245 PMEMORY_ALLOCATION_DESCRIPTOR MxFreeDescriptor
;
246 MEMORY_ALLOCATION_DESCRIPTOR MxOldFreeDescriptor
;
249 * For each page's worth bytes of L2 cache in a given set/way line, the zero and
250 * free lists are organized in what is called a "color".
252 * This array points to the two lists, so it can be thought of as a multi-dimensional
253 * array of MmFreePagesByColor[2][MmSecondaryColors]. Since the number is dynamic,
254 * we describe the array in pointer form instead.
256 * On a final note, the color tables themselves are right after the PFN database.
258 C_ASSERT(FreePageList
== 1);
259 PMMCOLOR_TABLES MmFreePagesByColor
[FreePageList
+ 1];
261 /* PRIVATE FUNCTIONS **********************************************************/
264 // In Bavaria, this is probably a hate crime
268 MiSyncARM3WithROS(IN PVOID AddressStart
,
272 // Puerile piece of junk-grade carbonized horseshit puss sold to the lowest bidder
274 ULONG Pde
= ADDR_TO_PDE_OFFSET(AddressStart
);
275 while (Pde
<= ADDR_TO_PDE_OFFSET(AddressEnd
))
278 // This both odious and heinous
280 extern ULONG MmGlobalKernelPageDirectory
[1024];
281 MmGlobalKernelPageDirectory
[Pde
] = ((PULONG
)PDE_BASE
)[Pde
];
288 MxGetNextPage(IN PFN_NUMBER PageCount
)
292 /* Make sure we have enough pages */
293 if (PageCount
> MxFreeDescriptor
->PageCount
)
295 /* Crash the system */
296 KeBugCheckEx(INSTALL_MORE_MEMORY
,
297 MmNumberOfPhysicalPages
,
298 MxFreeDescriptor
->PageCount
,
299 MxOldFreeDescriptor
.PageCount
,
303 /* Use our lowest usable free pages */
304 Pfn
= MxFreeDescriptor
->BasePage
;
305 MxFreeDescriptor
->BasePage
+= PageCount
;
306 MxFreeDescriptor
->PageCount
-= PageCount
;
312 MiComputeColorInformation(VOID
)
314 ULONG L2Associativity
;
316 /* Check if no setting was provided already */
317 if (!MmSecondaryColors
)
319 /* Get L2 cache information */
320 L2Associativity
= KeGetPcr()->SecondLevelCacheAssociativity
;
322 /* The number of colors is the number of cache bytes by set/way */
323 MmSecondaryColors
= KeGetPcr()->SecondLevelCacheSize
;
324 if (L2Associativity
) MmSecondaryColors
/= L2Associativity
;
327 /* Now convert cache bytes into pages */
328 MmSecondaryColors
>>= PAGE_SHIFT
;
329 if (!MmSecondaryColors
)
331 /* If there was no cache data from the KPCR, use the default colors */
332 MmSecondaryColors
= MI_SECONDARY_COLORS
;
336 /* Otherwise, make sure there aren't too many colors */
337 if (MmSecondaryColors
> MI_MAX_SECONDARY_COLORS
)
339 /* Set the maximum */
340 MmSecondaryColors
= MI_MAX_SECONDARY_COLORS
;
343 /* Make sure there aren't too little colors */
344 if (MmSecondaryColors
< MI_MIN_SECONDARY_COLORS
)
346 /* Set the default */
347 MmSecondaryColors
= MI_SECONDARY_COLORS
;
350 /* Finally make sure the colors are a power of two */
351 if (MmSecondaryColors
& (MmSecondaryColors
- 1))
353 /* Set the default */
354 MmSecondaryColors
= MI_SECONDARY_COLORS
;
358 /* Compute the mask and store it */
359 MmSecondaryColorMask
= MmSecondaryColors
- 1;
360 KeGetCurrentPrcb()->SecondaryColorMask
= MmSecondaryColorMask
;
365 MiInitializeColorTables(VOID
)
368 PMMPTE PointerPte
, LastPte
;
369 MMPTE TempPte
= ValidKernelPte
;
371 /* The color table starts after the ARM3 PFN database */
372 MmFreePagesByColor
[0] = (PMMCOLOR_TABLES
)&MmPfnDatabase
[1][MmHighestPhysicalPage
+ 1];
374 /* Loop the PTEs. We have two color tables for each secondary color */
375 PointerPte
= MiAddressToPte(&MmFreePagesByColor
[0][0]);
376 LastPte
= MiAddressToPte((ULONG_PTR
)MmFreePagesByColor
[0] +
377 (2 * MmSecondaryColors
* sizeof(MMCOLOR_TABLES
))
379 while (PointerPte
<= LastPte
)
381 /* Check for valid PTE */
382 if (PointerPte
->u
.Hard
.Valid
== 0)
384 /* Get a page and map it */
385 TempPte
.u
.Hard
.PageFrameNumber
= MxGetNextPage(1);
386 ASSERT(TempPte
.u
.Hard
.Valid
== 1);
387 *PointerPte
= TempPte
;
389 /* Zero out the page */
390 RtlZeroMemory(MiPteToAddress(PointerPte
), PAGE_SIZE
);
397 /* Now set the address of the next list, right after this one */
398 MmFreePagesByColor
[1] = &MmFreePagesByColor
[0][MmSecondaryColors
];
400 /* Now loop the lists to set them up */
401 for (i
= 0; i
< MmSecondaryColors
; i
++)
403 /* Set both free and zero lists for each color */
404 MmFreePagesByColor
[ZeroedPageList
][i
].Flink
= 0xFFFFFFFF;
405 MmFreePagesByColor
[ZeroedPageList
][i
].Blink
= (PVOID
)0xFFFFFFFF;
406 MmFreePagesByColor
[ZeroedPageList
][i
].Count
= 0;
407 MmFreePagesByColor
[FreePageList
][i
].Flink
= 0xFFFFFFFF;
408 MmFreePagesByColor
[FreePageList
][i
].Blink
= (PVOID
)0xFFFFFFFF;
409 MmFreePagesByColor
[FreePageList
][i
].Count
= 0;
415 MiIsRegularMemory(IN PLOADER_PARAMETER_BLOCK LoaderBlock
,
418 PLIST_ENTRY NextEntry
;
419 PMEMORY_ALLOCATION_DESCRIPTOR MdBlock
;
421 /* Loop the memory descriptors */
422 NextEntry
= LoaderBlock
->MemoryDescriptorListHead
.Flink
;
423 while (NextEntry
!= &LoaderBlock
->MemoryDescriptorListHead
)
425 /* Get the memory descriptor */
426 MdBlock
= CONTAINING_RECORD(NextEntry
,
427 MEMORY_ALLOCATION_DESCRIPTOR
,
430 /* Check if this PFN could be part of the block */
431 if (Pfn
>= (MdBlock
->BasePage
))
433 /* Check if it really is part of the block */
434 if (Pfn
< (MdBlock
->BasePage
+ MdBlock
->PageCount
))
436 /* Check if the block is actually memory we don't map */
437 if ((MdBlock
->MemoryType
== LoaderFirmwarePermanent
) ||
438 (MdBlock
->MemoryType
== LoaderBBTMemory
) ||
439 (MdBlock
->MemoryType
== LoaderSpecialMemory
))
441 /* We don't need PFN database entries for this memory */
445 /* This is memory we want to map */
451 /* Blocks are ordered, so if it's not here, it doesn't exist */
455 /* Get to the next descriptor */
456 NextEntry
= MdBlock
->ListEntry
.Flink
;
459 /* Check if this PFN is actually from our free memory descriptor */
460 if ((Pfn
>= MxOldFreeDescriptor
.BasePage
) &&
461 (Pfn
< MxOldFreeDescriptor
.BasePage
+ MxOldFreeDescriptor
.PageCount
))
463 /* We use these pages for initial mappings, so we do want to count them */
467 /* Otherwise this isn't memory that we describe or care about */
473 MiMapPfnDatabase(IN PLOADER_PARAMETER_BLOCK LoaderBlock
)
475 ULONG FreePage
, FreePageCount
, PagesLeft
, BasePage
, PageCount
;
476 PLIST_ENTRY NextEntry
;
477 PMEMORY_ALLOCATION_DESCRIPTOR MdBlock
;
478 PMMPTE PointerPte
, LastPte
;
479 MMPTE TempPte
= ValidKernelPte
;
481 /* Get current page data, since we won't be using MxGetNextPage as it would corrupt our state */
482 FreePage
= MxFreeDescriptor
->BasePage
;
483 FreePageCount
= MxFreeDescriptor
->PageCount
;
486 /* Loop the memory descriptors */
487 NextEntry
= LoaderBlock
->MemoryDescriptorListHead
.Flink
;
488 while (NextEntry
!= &LoaderBlock
->MemoryDescriptorListHead
)
490 /* Get the descriptor */
491 MdBlock
= CONTAINING_RECORD(NextEntry
,
492 MEMORY_ALLOCATION_DESCRIPTOR
,
494 if ((MdBlock
->MemoryType
== LoaderFirmwarePermanent
) ||
495 (MdBlock
->MemoryType
== LoaderBBTMemory
) ||
496 (MdBlock
->MemoryType
== LoaderSpecialMemory
))
498 /* These pages are not part of the PFN database */
499 NextEntry
= MdBlock
->ListEntry
.Flink
;
503 /* Next, check if this is our special free descriptor we've found */
504 if (MdBlock
== MxFreeDescriptor
)
506 /* Use the real numbers instead */
507 BasePage
= MxOldFreeDescriptor
.BasePage
;
508 PageCount
= MxOldFreeDescriptor
.PageCount
;
512 /* Use the descriptor's numbers */
513 BasePage
= MdBlock
->BasePage
;
514 PageCount
= MdBlock
->PageCount
;
517 /* Get the PTEs for this range */
518 PointerPte
= MiAddressToPte(&MmPfnDatabase
[0][BasePage
]);
519 LastPte
= MiAddressToPte(((ULONG_PTR
)&MmPfnDatabase
[0][BasePage
+ PageCount
]) - 1);
520 DPRINT("MD Type: %lx Base: %lx Count: %lx\n", MdBlock
->MemoryType
, BasePage
, PageCount
);
523 while (PointerPte
<= LastPte
)
525 /* We'll only touch PTEs that aren't already valid */
526 if (PointerPte
->u
.Hard
.Valid
== 0)
528 /* Use the next free page */
529 TempPte
.u
.Hard
.PageFrameNumber
= FreePage
;
530 ASSERT(FreePageCount
!= 0);
532 /* Consume free pages */
538 KeBugCheckEx(INSTALL_MORE_MEMORY
,
539 MmNumberOfPhysicalPages
,
541 MxOldFreeDescriptor
.PageCount
,
545 /* Write out this PTE */
547 ASSERT(PointerPte
->u
.Hard
.Valid
== 0);
548 ASSERT(TempPte
.u
.Hard
.Valid
== 1);
549 *PointerPte
= TempPte
;
552 RtlZeroMemory(MiPteToAddress(PointerPte
), PAGE_SIZE
);
559 /* Get the PTEs for this range */
560 PointerPte
= MiAddressToPte(&MmPfnDatabase
[1][BasePage
]);
561 LastPte
= MiAddressToPte(((ULONG_PTR
)&MmPfnDatabase
[1][BasePage
+ PageCount
]) - 1);
562 DPRINT("MD Type: %lx Base: %lx Count: %lx\n", MdBlock
->MemoryType
, BasePage
, PageCount
);
565 while (PointerPte
<= LastPte
)
567 /* We'll only touch PTEs that aren't already valid */
568 if (PointerPte
->u
.Hard
.Valid
== 0)
570 /* Use the next free page */
571 TempPte
.u
.Hard
.PageFrameNumber
= FreePage
;
572 ASSERT(FreePageCount
!= 0);
574 /* Consume free pages */
580 KeBugCheckEx(INSTALL_MORE_MEMORY
,
581 MmNumberOfPhysicalPages
,
583 MxOldFreeDescriptor
.PageCount
,
587 /* Write out this PTE */
589 ASSERT(PointerPte
->u
.Hard
.Valid
== 0);
590 ASSERT(TempPte
.u
.Hard
.Valid
== 1);
591 *PointerPte
= TempPte
;
594 RtlZeroMemory(MiPteToAddress(PointerPte
), PAGE_SIZE
);
601 /* Do the next address range */
602 NextEntry
= MdBlock
->ListEntry
.Flink
;
605 /* Now update the free descriptors to consume the pages we used up during the PFN allocation loop */
606 MxFreeDescriptor
->BasePage
= FreePage
;
607 MxFreeDescriptor
->PageCount
= FreePageCount
;
612 MiBuildPfnDatabaseFromPages(IN PLOADER_PARAMETER_BLOCK LoaderBlock
)
617 PFN_NUMBER PageFrameIndex
, StartupPdIndex
, PtePageIndex
;
619 ULONG_PTR BaseAddress
= 0;
621 /* PFN of the startup page directory */
622 StartupPdIndex
= PFN_FROM_PTE(MiAddressToPde(PDE_BASE
));
624 /* Start with the first PDE and scan them all */
625 PointerPde
= MiAddressToPde(NULL
);
626 Count
= PD_COUNT
* PDE_COUNT
;
627 for (i
= 0; i
< Count
; i
++)
629 /* Check for valid PDE */
630 if (PointerPde
->u
.Hard
.Valid
== 1)
632 /* Get the PFN from it */
633 PageFrameIndex
= PFN_FROM_PTE(PointerPde
);
635 /* Do we want a PFN entry for this page? */
636 if (MiIsRegularMemory(LoaderBlock
, PageFrameIndex
))
638 /* Yes we do, set it up */
639 Pfn1
= MI_PFN_TO_PFNENTRY(PageFrameIndex
);
640 Pfn1
->u4
.PteFrame
= StartupPdIndex
;
641 Pfn1
->PteAddress
= PointerPde
;
642 Pfn1
->u2
.ShareCount
++;
643 Pfn1
->u3
.e2
.ReferenceCount
= 1;
644 Pfn1
->u3
.e1
.PageLocation
= ActiveAndValid
;
645 Pfn1
->u3
.e1
.CacheAttribute
= MiNonCached
;
653 /* Now get the PTE and scan the pages */
654 PointerPte
= MiAddressToPte(BaseAddress
);
655 for (j
= 0; j
< PTE_COUNT
; j
++)
657 /* Check for a valid PTE */
658 if (PointerPte
->u
.Hard
.Valid
== 1)
660 /* Increase the shared count of the PFN entry for the PDE */
661 ASSERT(Pfn1
!= NULL
);
662 Pfn1
->u2
.ShareCount
++;
664 /* Now check if the PTE is valid memory too */
665 PtePageIndex
= PFN_FROM_PTE(PointerPte
);
666 if (MiIsRegularMemory(LoaderBlock
, PtePageIndex
))
669 * Only add pages above the end of system code or pages
670 * that are part of nonpaged pool
672 if ((BaseAddress
>= 0xA0000000) ||
673 ((BaseAddress
>= (ULONG_PTR
)MmNonPagedPoolStart
) &&
674 (BaseAddress
< (ULONG_PTR
)MmNonPagedPoolStart
+
675 MmSizeOfNonPagedPoolInBytes
)))
677 /* Get the PFN entry and make sure it too is valid */
678 Pfn2
= MI_PFN_TO_PFNENTRY(PtePageIndex
);
679 if ((MmIsAddressValid(Pfn2
)) &&
680 (MmIsAddressValid(Pfn2
+ 1)))
682 /* Setup the PFN entry */
683 Pfn2
->u4
.PteFrame
= PageFrameIndex
;
684 Pfn2
->PteAddress
= PointerPte
;
685 Pfn2
->u2
.ShareCount
++;
686 Pfn2
->u3
.e2
.ReferenceCount
= 1;
687 Pfn2
->u3
.e1
.PageLocation
= ActiveAndValid
;
688 Pfn2
->u3
.e1
.CacheAttribute
= MiNonCached
;
696 BaseAddress
+= PAGE_SIZE
;
701 /* Next PDE mapped address */
702 BaseAddress
+= PTE_COUNT
* PAGE_SIZE
;
712 MiBuildPfnDatabaseZeroPage(VOID
)
717 /* Grab the lowest page and check if it has no real references */
718 Pfn1
= MI_PFN_TO_PFNENTRY(MmLowestPhysicalPage
);
719 if (!(MmLowestPhysicalPage
) && !(Pfn1
->u3
.e2
.ReferenceCount
))
721 /* Make it a bogus page to catch errors */
722 PointerPde
= MiAddressToPde(0xFFFFFFFF);
723 Pfn1
->u4
.PteFrame
= PFN_FROM_PTE(PointerPde
);
724 Pfn1
->PteAddress
= PointerPde
;
725 Pfn1
->u2
.ShareCount
++;
726 Pfn1
->u3
.e2
.ReferenceCount
= 0xFFF0;
727 Pfn1
->u3
.e1
.PageLocation
= ActiveAndValid
;
728 Pfn1
->u3
.e1
.CacheAttribute
= MiNonCached
;
734 MiBuildPfnDatabaseFromLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock
)
736 PLIST_ENTRY NextEntry
;
737 PFN_NUMBER PageCount
= 0;
738 PMEMORY_ALLOCATION_DESCRIPTOR MdBlock
;
739 PFN_NUMBER PageFrameIndex
;
744 /* Now loop through the descriptors */
745 NextEntry
= LoaderBlock
->MemoryDescriptorListHead
.Flink
;
746 while (NextEntry
!= &LoaderBlock
->MemoryDescriptorListHead
)
748 /* Get the current descriptor */
749 MdBlock
= CONTAINING_RECORD(NextEntry
,
750 MEMORY_ALLOCATION_DESCRIPTOR
,
754 PageCount
= MdBlock
->PageCount
;
755 PageFrameIndex
= MdBlock
->BasePage
;
757 /* Don't allow memory above what the PFN database is mapping */
758 if (PageFrameIndex
> MmHighestPhysicalPage
)
760 /* Since they are ordered, everything past here will be larger */
764 /* On the other hand, the end page might be higher up... */
765 if ((PageFrameIndex
+ PageCount
) > (MmHighestPhysicalPage
+ 1))
767 /* In which case we'll trim the descriptor to go as high as we can */
768 PageCount
= MmHighestPhysicalPage
+ 1 - PageFrameIndex
;
769 MdBlock
->PageCount
= PageCount
;
771 /* But if there's nothing left to trim, we got too high, so quit */
772 if (!PageCount
) break;
775 /* Now check the descriptor type */
776 switch (MdBlock
->MemoryType
)
778 /* Check for bad RAM */
781 DPRINT1("You have damaged RAM modules. Stopping boot\n");
785 /* Check for free RAM */
787 case LoaderLoadedProgram
:
788 case LoaderFirmwareTemporary
:
789 case LoaderOsloaderStack
:
791 /* Get the last page of this descriptor. Note we loop backwards */
792 PageFrameIndex
+= PageCount
- 1;
793 Pfn1
= MI_PFN_TO_PFNENTRY(PageFrameIndex
);
796 /* If the page really has no references, mark it as free */
797 if (!Pfn1
->u3
.e2
.ReferenceCount
)
799 Pfn1
->u3
.e1
.CacheAttribute
= MiNonCached
;
800 //MiInsertPageInFreeList(PageFrameIndex);
803 /* Go to the next page */
808 /* Done with this block */
811 /* Check for pages that are invisible to us */
812 case LoaderFirmwarePermanent
:
813 case LoaderSpecialMemory
:
814 case LoaderBBTMemory
:
821 /* Map these pages with the KSEG0 mapping that adds 0x80000000 */
822 PointerPte
= MiAddressToPte(KSEG0_BASE
+ (PageFrameIndex
<< PAGE_SHIFT
));
823 Pfn1
= MI_PFN_TO_PFNENTRY(PageFrameIndex
);
826 /* Check if the page is really unused */
827 PointerPde
= MiAddressToPde(KSEG0_BASE
+ (PageFrameIndex
<< PAGE_SHIFT
));
828 if (!Pfn1
->u3
.e2
.ReferenceCount
)
830 /* Mark it as being in-use */
831 Pfn1
->u4
.PteFrame
= PFN_FROM_PTE(PointerPde
);
832 Pfn1
->PteAddress
= PointerPte
;
833 Pfn1
->u2
.ShareCount
++;
834 Pfn1
->u3
.e2
.ReferenceCount
= 1;
835 Pfn1
->u3
.e1
.PageLocation
= ActiveAndValid
;
836 Pfn1
->u3
.e1
.CacheAttribute
= MiNonCached
;
838 /* Check for RAM disk page */
839 if (MdBlock
->MemoryType
== LoaderXIPRom
)
841 /* Make it a pseudo-I/O ROM mapping */
843 Pfn1
->u2
.ShareCount
= 0;
844 Pfn1
->u3
.e2
.ReferenceCount
= 0;
845 Pfn1
->u3
.e1
.PageLocation
= 0;
847 Pfn1
->u4
.InPageError
= 0;
848 Pfn1
->u3
.e1
.PrototypePte
= 1;
852 /* Advance page structures */
860 /* Next descriptor entry */
861 NextEntry
= MdBlock
->ListEntry
.Flink
;
867 MiBuildPfnDatabaseSelf(VOID
)
869 PMMPTE PointerPte
, LastPte
;
872 /* Loop the PFN database page */
873 PointerPte
= MiAddressToPte(MI_PFN_TO_PFNENTRY(MmLowestPhysicalPage
));
874 LastPte
= MiAddressToPte(MI_PFN_TO_PFNENTRY(MmHighestPhysicalPage
));
875 while (PointerPte
<= LastPte
)
877 /* Make sure the page is valid */
878 if (PointerPte
->u
.Hard
.Valid
== 1)
880 /* Get the PFN entry and just mark it referenced */
881 Pfn1
= MI_PFN_TO_PFNENTRY(PointerPte
->u
.Hard
.PageFrameNumber
);
882 Pfn1
->u2
.ShareCount
= 1;
883 Pfn1
->u3
.e2
.ReferenceCount
= 1;
893 MiInitializePfnDatabase(IN PLOADER_PARAMETER_BLOCK LoaderBlock
)
895 /* Scan memory and start setting up PFN entries */
896 MiBuildPfnDatabaseFromPages(LoaderBlock
);
898 /* Add the zero page */
899 MiBuildPfnDatabaseZeroPage();
901 /* Scan the loader block and build the rest of the PFN database */
902 MiBuildPfnDatabaseFromLoaderBlock(LoaderBlock
);
904 /* Finally add the pages for the PFN database itself */
905 MiBuildPfnDatabaseSelf();
910 MmDumpArmPfnDatabase(VOID
)
914 PCHAR Consumer
= "Unknown";
916 ULONG ActivePages
= 0, FreePages
= 0, OtherPages
= 0;
918 KeRaiseIrql(HIGH_LEVEL
, &OldIrql
);
921 // Loop the PFN database
923 for (i
= 0; i
<= MmHighestPhysicalPage
; i
++)
925 Pfn1
= MI_PFN_TO_PFNENTRY(i
);
929 // Get the page location
931 switch (Pfn1
->u3
.e1
.PageLocation
)
935 Consumer
= "Active and Valid";
941 Consumer
= "Free Page List";
947 Consumer
= "Other (ASSERT!)";
953 // Pretty-print the page
955 DbgPrint("0x%08p:\t%20s\t(%02d.%02d) [%08p-%08p])\n",
958 Pfn1
->u3
.e2
.ReferenceCount
,
964 DbgPrint("Active: %d pages\t[%d KB]\n", ActivePages
, (ActivePages
<< PAGE_SHIFT
) / 1024);
965 DbgPrint("Free: %d pages\t[%d KB]\n", FreePages
, (FreePages
<< PAGE_SHIFT
) / 1024);
966 DbgPrint("Other: %d pages\t[%d KB]\n", OtherPages
, (OtherPages
<< PAGE_SHIFT
) / 1024);
968 KeLowerIrql(OldIrql
);
973 MiPagesInLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock
,
974 IN PBOOLEAN IncludeType
)
976 PLIST_ENTRY NextEntry
;
977 PFN_NUMBER PageCount
= 0;
978 PMEMORY_ALLOCATION_DESCRIPTOR MdBlock
;
981 // Now loop through the descriptors
983 NextEntry
= LoaderBlock
->MemoryDescriptorListHead
.Flink
;
984 while (NextEntry
!= &LoaderBlock
->MemoryDescriptorListHead
)
987 // Grab each one, and check if it's one we should include
989 MdBlock
= CONTAINING_RECORD(NextEntry
,
990 MEMORY_ALLOCATION_DESCRIPTOR
,
992 if ((MdBlock
->MemoryType
< LoaderMaximum
) &&
993 (IncludeType
[MdBlock
->MemoryType
]))
996 // Add this to our running total
998 PageCount
+= MdBlock
->PageCount
;
1002 // Try the next descriptor
1004 NextEntry
= MdBlock
->ListEntry
.Flink
;
1013 PPHYSICAL_MEMORY_DESCRIPTOR
1015 MmInitializeMemoryLimits(IN PLOADER_PARAMETER_BLOCK LoaderBlock
,
1016 IN PBOOLEAN IncludeType
)
1018 PLIST_ENTRY NextEntry
;
1019 ULONG Run
= 0, InitialRuns
= 0;
1020 PFN_NUMBER NextPage
= -1, PageCount
= 0;
1021 PPHYSICAL_MEMORY_DESCRIPTOR Buffer
, NewBuffer
;
1022 PMEMORY_ALLOCATION_DESCRIPTOR MdBlock
;
1025 // Scan the memory descriptors
1027 NextEntry
= LoaderBlock
->MemoryDescriptorListHead
.Flink
;
1028 while (NextEntry
!= &LoaderBlock
->MemoryDescriptorListHead
)
1031 // For each one, increase the memory allocation estimate
1034 NextEntry
= NextEntry
->Flink
;
1038 // Allocate the maximum we'll ever need
1040 Buffer
= ExAllocatePoolWithTag(NonPagedPool
,
1041 sizeof(PHYSICAL_MEMORY_DESCRIPTOR
) +
1042 sizeof(PHYSICAL_MEMORY_RUN
) *
1045 if (!Buffer
) return NULL
;
1048 // For now that's how many runs we have
1050 Buffer
->NumberOfRuns
= InitialRuns
;
1053 // Now loop through the descriptors again
1055 NextEntry
= LoaderBlock
->MemoryDescriptorListHead
.Flink
;
1056 while (NextEntry
!= &LoaderBlock
->MemoryDescriptorListHead
)
1059 // Grab each one, and check if it's one we should include
1061 MdBlock
= CONTAINING_RECORD(NextEntry
,
1062 MEMORY_ALLOCATION_DESCRIPTOR
,
1064 if ((MdBlock
->MemoryType
< LoaderMaximum
) &&
1065 (IncludeType
[MdBlock
->MemoryType
]))
1068 // Add this to our running total
1070 PageCount
+= MdBlock
->PageCount
;
1073 // Check if the next page is described by the next descriptor
1075 if (MdBlock
->BasePage
== NextPage
)
1078 // Combine it into the same physical run
1080 ASSERT(MdBlock
->PageCount
!= 0);
1081 Buffer
->Run
[Run
- 1].PageCount
+= MdBlock
->PageCount
;
1082 NextPage
+= MdBlock
->PageCount
;
1087 // Otherwise just duplicate the descriptor's contents
1089 Buffer
->Run
[Run
].BasePage
= MdBlock
->BasePage
;
1090 Buffer
->Run
[Run
].PageCount
= MdBlock
->PageCount
;
1091 NextPage
= Buffer
->Run
[Run
].BasePage
+ Buffer
->Run
[Run
].PageCount
;
1094 // And in this case, increase the number of runs
1101 // Try the next descriptor
1103 NextEntry
= MdBlock
->ListEntry
.Flink
;
1107 // We should not have been able to go past our initial estimate
1109 ASSERT(Run
<= Buffer
->NumberOfRuns
);
1112 // Our guess was probably exaggerated...
1114 if (InitialRuns
> Run
)
1117 // Allocate a more accurately sized buffer
1119 NewBuffer
= ExAllocatePoolWithTag(NonPagedPool
,
1120 sizeof(PHYSICAL_MEMORY_DESCRIPTOR
) +
1121 sizeof(PHYSICAL_MEMORY_RUN
) *
1127 // Copy the old buffer into the new, then free it
1129 RtlCopyMemory(NewBuffer
->Run
,
1131 sizeof(PHYSICAL_MEMORY_RUN
) * Run
);
1135 // Now use the new buffer
1142 // Write the final numbers, and return it
1144 Buffer
->NumberOfRuns
= Run
;
1145 Buffer
->NumberOfPages
= PageCount
;
1151 MiBuildPagedPool(VOID
)
1153 PMMPTE PointerPte
, PointerPde
;
1154 MMPTE TempPte
= ValidKernelPte
;
1155 PFN_NUMBER PageFrameIndex
;
1157 ULONG Size
, BitMapSize
;
1160 // Get the page frame number for the system page directory
1162 PointerPte
= MiAddressToPte(PDE_BASE
);
1163 MmSystemPageDirectory
= PFN_FROM_PTE(PointerPte
);
1166 // Allocate a system PTE which will hold a copy of the page directory
1168 PointerPte
= MiReserveSystemPtes(1, SystemPteSpace
);
1170 MmSystemPagePtes
= MiPteToAddress(PointerPte
);
1173 // Make this system PTE point to the system page directory.
1174 // It is now essentially double-mapped. This will be used later for lazy
1175 // evaluation of PDEs accross process switches, similarly to how the Global
1176 // page directory array in the old ReactOS Mm is used (but in a less hacky
1179 TempPte
= ValidKernelPte
;
1180 TempPte
.u
.Hard
.PageFrameNumber
= MmSystemPageDirectory
;
1181 ASSERT(PointerPte
->u
.Hard
.Valid
== 0);
1182 ASSERT(TempPte
.u
.Hard
.Valid
== 1);
1183 *PointerPte
= TempPte
;
1186 // Let's get back to paged pool work: size it up.
1187 // By default, it should be twice as big as nonpaged pool.
1189 MmSizeOfPagedPoolInBytes
= 2 * MmMaximumNonPagedPoolInBytes
;
1190 if (MmSizeOfPagedPoolInBytes
> ((ULONG_PTR
)MmNonPagedSystemStart
-
1191 (ULONG_PTR
)MmPagedPoolStart
))
1194 // On the other hand, we have limited VA space, so make sure that the VA
1195 // for paged pool doesn't overflow into nonpaged pool VA. Otherwise, set
1196 // whatever maximum is possible.
1198 MmSizeOfPagedPoolInBytes
= (ULONG_PTR
)MmNonPagedSystemStart
-
1199 (ULONG_PTR
)MmPagedPoolStart
;
1203 // Get the size in pages and make sure paged pool is at least 32MB.
1205 Size
= MmSizeOfPagedPoolInBytes
;
1206 if (Size
< MI_MIN_INIT_PAGED_POOLSIZE
) Size
= MI_MIN_INIT_PAGED_POOLSIZE
;
1207 Size
= BYTES_TO_PAGES(Size
);
1210 // Now check how many PTEs will be required for these many pages.
1212 Size
= (Size
+ (1024 - 1)) / 1024;
1215 // Recompute the page-aligned size of the paged pool, in bytes and pages.
1217 MmSizeOfPagedPoolInBytes
= Size
* PAGE_SIZE
* 1024;
1218 MmSizeOfPagedPoolInPages
= MmSizeOfPagedPoolInBytes
>> PAGE_SHIFT
;
1221 // Let's be really sure this doesn't overflow into nonpaged system VA
1223 ASSERT((MmSizeOfPagedPoolInBytes
+ (ULONG_PTR
)MmPagedPoolStart
) <=
1224 (ULONG_PTR
)MmNonPagedSystemStart
);
1227 // This is where paged pool ends
1229 MmPagedPoolEnd
= (PVOID
)(((ULONG_PTR
)MmPagedPoolStart
+
1230 MmSizeOfPagedPoolInBytes
) - 1);
1233 // So now get the PDE for paged pool and zero it out
1235 PointerPde
= MiAddressToPde(MmPagedPoolStart
);
1236 RtlZeroMemory(PointerPde
,
1237 (1 + MiAddressToPde(MmPagedPoolEnd
) - PointerPde
) * sizeof(MMPTE
));
1240 // Next, get the first and last PTE
1242 PointerPte
= MiAddressToPte(MmPagedPoolStart
);
1243 MmPagedPoolInfo
.FirstPteForPagedPool
= PointerPte
;
1244 MmPagedPoolInfo
.LastPteForPagedPool
= MiAddressToPte(MmPagedPoolEnd
);
1247 // Lock the PFN database
1249 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
1252 // Allocate a page and map the first paged pool PDE
1254 PageFrameIndex
= MmAllocPage(MC_NPPOOL
, 0);
1255 TempPte
.u
.Hard
.PageFrameNumber
= PageFrameIndex
;
1256 ASSERT(PointerPde
->u
.Hard
.Valid
== 0);
1257 ASSERT(TempPte
.u
.Hard
.Valid
== 1);
1258 *PointerPde
= TempPte
;
1261 // Release the PFN database lock
1263 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
1266 // We only have one PDE mapped for now... at fault time, additional PDEs
1267 // will be allocated to handle paged pool growth. This is where they'll have
1268 // to be created.
1270 MmPagedPoolInfo
.NextPdeForPagedPoolExpansion
= PointerPde
+ 1;
1273 // We keep track of each page via a bit, so check how big the bitmap will
1274 // have to be (make sure to align our page count such that it fits nicely
1275 // into a 4-byte aligned bitmap).
1277 // We'll also allocate the bitmap header itself as part of the same buffer.
1280 ASSERT(Size
== MmSizeOfPagedPoolInPages
);
1282 Size
= sizeof(RTL_BITMAP
) + (((Size
+ 31) / 32) * sizeof(ULONG
));
1285 // Allocate the allocation bitmap, which tells us which regions have not yet
1286 // been mapped into memory
1288 MmPagedPoolInfo
.PagedPoolAllocationMap
= ExAllocatePoolWithTag(NonPagedPool
,
1291 ASSERT(MmPagedPoolInfo
.PagedPoolAllocationMap
);
1294 // Initialize it such that at first, only the first page's worth of PTEs is
1295 // marked as allocated (incidentally, the first PDE we allocated earlier).
1297 RtlInitializeBitMap(MmPagedPoolInfo
.PagedPoolAllocationMap
,
1298 (PULONG
)(MmPagedPoolInfo
.PagedPoolAllocationMap
+ 1),
1300 RtlSetAllBits(MmPagedPoolInfo
.PagedPoolAllocationMap
);
1301 RtlClearBits(MmPagedPoolInfo
.PagedPoolAllocationMap
, 0, 1024);
1304 // We have a second bitmap, which keeps track of where allocations end.
1305 // Given the allocation bitmap and a base address, we can therefore figure
1306 // out which page is the last page of that allocation, and thus how big the
1307 // entire allocation is.
1309 MmPagedPoolInfo
.EndOfPagedPoolBitmap
= ExAllocatePoolWithTag(NonPagedPool
,
1312 ASSERT(MmPagedPoolInfo
.EndOfPagedPoolBitmap
);
1313 RtlInitializeBitMap(MmPagedPoolInfo
.EndOfPagedPoolBitmap
,
1314 (PULONG
)(MmPagedPoolInfo
.EndOfPagedPoolBitmap
+ 1),
1318 // Since no allocations have been made yet, there are no bits set as the end
1320 RtlClearAllBits(MmPagedPoolInfo
.EndOfPagedPoolBitmap
);
1323 // Initialize paged pool.
1325 InitializePool(PagedPool
, 0);
1328 // Initialize the paged pool mutex
1330 KeInitializeGuardedMutex(&MmPagedPoolMutex
);
1335 MmArmInitSystem(IN ULONG Phase
,
1336 IN PLOADER_PARAMETER_BLOCK LoaderBlock
)
1339 BOOLEAN IncludeType
[LoaderMaximum
];
1341 PPHYSICAL_MEMORY_RUN Run
;
1342 PFN_NUMBER PageCount
;
1345 // Instantiate memory that we don't consider RAM/usable
1346 // We use the same exclusions that Windows does, in order to try to be
1347 // compatible with WinLDR-style booting
1349 for (i
= 0; i
< LoaderMaximum
; i
++) IncludeType
[i
] = TRUE
;
1350 IncludeType
[LoaderBad
] = FALSE
;
1351 IncludeType
[LoaderFirmwarePermanent
] = FALSE
;
1352 IncludeType
[LoaderSpecialMemory
] = FALSE
;
1353 IncludeType
[LoaderBBTMemory
] = FALSE
;
1357 // Define the basic user vs. kernel address space separation
1359 MmSystemRangeStart
= (PVOID
)KSEG0_BASE
;
1360 MmUserProbeAddress
= (ULONG_PTR
)MmSystemRangeStart
- 0x10000;
1361 MmHighestUserAddress
= (PVOID
)(MmUserProbeAddress
- 1);
1364 // Get the size of the boot loader's image allocations and then round
1365 // that region up to a PDE size, so that any PDEs we might create for
1366 // whatever follows are separate from the PDEs that boot loader might've
1367 // already created (and later, we can blow all that away if we want to).
1369 MmBootImageSize
= KeLoaderBlock
->Extension
->LoaderPagesSpanned
;
1370 MmBootImageSize
*= PAGE_SIZE
;
1371 MmBootImageSize
= (MmBootImageSize
+ (4 * 1024 * 1024) - 1) & ~((4 * 1024 * 1024) - 1);
1372 ASSERT((MmBootImageSize
% (4 * 1024 * 1024)) == 0);
1375 // Set the size of session view, pool, and image
1377 MmSessionSize
= MI_SESSION_SIZE
;
1378 MmSessionViewSize
= MI_SESSION_VIEW_SIZE
;
1379 MmSessionPoolSize
= MI_SESSION_POOL_SIZE
;
1380 MmSessionImageSize
= MI_SESSION_IMAGE_SIZE
;
1383 // Set the size of system view
1385 MmSystemViewSize
= MI_SYSTEM_VIEW_SIZE
;
1388 // This is where it all ends
1390 MiSessionImageEnd
= (PVOID
)PTE_BASE
;
1393 // This is where we will load Win32k.sys and the video driver
1395 MiSessionImageStart
= (PVOID
)((ULONG_PTR
)MiSessionImageEnd
-
1396 MmSessionImageSize
);
1399 // So the view starts right below the session working set (itself below
1402 MiSessionViewStart
= (PVOID
)((ULONG_PTR
)MiSessionImageEnd
-
1403 MmSessionImageSize
-
1404 MI_SESSION_WORKING_SET_SIZE
-
1408 // Session pool follows
1410 MiSessionPoolEnd
= MiSessionViewStart
;
1411 MiSessionPoolStart
= (PVOID
)((ULONG_PTR
)MiSessionPoolEnd
-
1415 // And it all begins here
1417 MmSessionBase
= MiSessionPoolStart
;
1420 // Sanity check that our math is correct
1422 ASSERT((ULONG_PTR
)MmSessionBase
+ MmSessionSize
== PTE_BASE
);
1425 // Session space ends wherever image session space ends
1427 MiSessionSpaceEnd
= MiSessionImageEnd
;
1430 // System view space ends at session space, so now that we know where
1431 // this is, we can compute the base address of system view space itself.
1433 MiSystemViewStart
= (PVOID
)((ULONG_PTR
)MmSessionBase
-
1437 // Count physical pages on the system
1439 PageCount
= MiPagesInLoaderBlock(LoaderBlock
, IncludeType
);
1442 // Check if this is a machine with less than 19MB of RAM
1444 if (PageCount
< MI_MIN_PAGES_FOR_SYSPTE_TUNING
)
1447 // Use the very minimum of system PTEs
1449 MmNumberOfSystemPtes
= 7000;
1454 // Use the default, but check if we have more than 32MB of RAM
1456 MmNumberOfSystemPtes
= 11000;
1457 if (PageCount
> MI_MIN_PAGES_FOR_SYSPTE_BOOST
)
1460 // Double the amount of system PTEs
1462 MmNumberOfSystemPtes
<<= 1;
1466 DPRINT("System PTE count has been tuned to %d (%d bytes)\n",
1467 MmNumberOfSystemPtes
, MmNumberOfSystemPtes
* PAGE_SIZE
);
1469 /* Initialize the platform-specific parts */
1470 MiInitMachineDependent(LoaderBlock
);
1473 // Sync us up with ReactOS Mm
1475 MiSyncARM3WithROS(MmNonPagedSystemStart
, (PVOID
)((ULONG_PTR
)MmNonPagedPoolEnd
- 1));
1476 MiSyncARM3WithROS(MmPfnDatabase
[0], (PVOID
)((ULONG_PTR
)MmNonPagedPoolStart
+ MmSizeOfNonPagedPoolInBytes
- 1));
1477 MiSyncARM3WithROS((PVOID
)HYPER_SPACE
, (PVOID
)(HYPER_SPACE
+ PAGE_SIZE
- 1));
1480 // Build the physical memory block
1482 MmPhysicalMemoryBlock
= MmInitializeMemoryLimits(LoaderBlock
,
1486 // Allocate enough buffer for the PFN bitmap
1487 // Align it up to a 32-bit boundary
1489 Bitmap
= ExAllocatePoolWithTag(NonPagedPool
,
1490 (((MmHighestPhysicalPage
+ 1) + 31) / 32) * 4,
1497 KeBugCheckEx(INSTALL_MORE_MEMORY
,
1498 MmNumberOfPhysicalPages
,
1499 MmLowestPhysicalPage
,
1500 MmHighestPhysicalPage
,
1505 // Initialize it and clear all the bits to begin with
1507 RtlInitializeBitMap(&MiPfnBitMap
,
1509 MmHighestPhysicalPage
+ 1);
1510 RtlClearAllBits(&MiPfnBitMap
);
1513 // Loop physical memory runs
1515 for (i
= 0; i
< MmPhysicalMemoryBlock
->NumberOfRuns
; i
++)
1520 Run
= &MmPhysicalMemoryBlock
->Run
[i
];
1521 DPRINT("PHYSICAL RAM [0x%08p to 0x%08p]\n",
1522 Run
->BasePage
<< PAGE_SHIFT
,
1523 (Run
->BasePage
+ Run
->PageCount
) << PAGE_SHIFT
);
1526 // Make sure it has pages inside it
1531 // Set the bits in the PFN bitmap
1533 RtlSetBits(&MiPfnBitMap
, Run
->BasePage
, Run
->PageCount
);
1538 // Size up paged pool and build the shadow system page directory
1544 // Always return success for now
1546 return STATUS_SUCCESS
;