[NTOS]: Fix build, my bad (missing file).
[reactos.git] / reactos / ntoskrnl / mm / ARM3 / mminit.c
1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: BSD - See COPYING.ARM in the top level directory
4 * FILE: ntoskrnl/mm/ARM3/mminit.c
5 * PURPOSE: ARM Memory Manager Initialization
6 * PROGRAMMERS: ReactOS Portable Systems Group
7 */
8
9 /* INCLUDES *******************************************************************/
10
11 #include <ntoskrnl.h>
12 #define NDEBUG
13 #include <debug.h>
14
15 #line 15 "ARM³::INIT"
16 #define MODULE_INVOLVED_IN_ARM3
17 #include "miarm.h"
18
19 /* GLOBALS ********************************************************************/
20
21 //
22 // These are all registry-configurable, but by default, the memory manager will
23 // figure out the most appropriate values.
24 //
25 ULONG MmMaximumNonPagedPoolPercent;
26 ULONG MmSizeOfNonPagedPoolInBytes;
27 ULONG MmMaximumNonPagedPoolInBytes;
28
29 //
30 // These numbers describe the discrete equation components of the nonpaged
31 // pool sizing algorithm.
32 //
33 // They are described on http://support.microsoft.com/default.aspx/kb/126402/ja
34 // along with the algorithm that uses them, which is implemented later below.
35 //
36 ULONG MmMinimumNonPagedPoolSize = 256 * 1024;
37 ULONG MmMinAdditionNonPagedPoolPerMb = 32 * 1024;
38 ULONG MmDefaultMaximumNonPagedPool = 1024 * 1024;
39 ULONG MmMaxAdditionNonPagedPoolPerMb = 400 * 1024;
40
41 //
42 // The memory layout (and especially variable names) of the NT kernel mode
43 // components can be a bit hard to twig, especially when it comes to the non
44 // paged area.
45 //
46 // There are really two components to the non-paged pool:
47 //
48 // - The initial nonpaged pool, sized dynamically up to a maximum.
49 // - The expansion nonpaged pool, sized dynamically up to a maximum.
50 //
51 // The initial nonpaged pool is physically continuous for performance, and
52 // immediately follows the PFN database, typically sharing the same PDE. It is
53 // a very small resource (32MB on a 1GB system), and capped at 128MB.
54 //
55 // Right now we call this the "ARM³ Nonpaged Pool" and it begins somewhere after
56 // the PFN database (which starts at 0xB0000000).
57 //
58 // The expansion nonpaged pool, on the other hand, can grow much bigger (400MB
59 // for a 1GB system). On ARM³ however, it is currently capped at 128MB.
60 //
61 // The address where the initial nonpaged pool starts is aptly named
62 // MmNonPagedPoolStart, and it describes a range of MmSizeOfNonPagedPoolInBytes
63 // bytes.
64 //
65 // Expansion nonpaged pool starts at an address described by the variable called
66 // MmNonPagedPoolExpansionStart, and it goes on for MmMaximumNonPagedPoolInBytes
67 // minus MmSizeOfNonPagedPoolInBytes bytes, always reaching MmNonPagedPoolEnd
68 // (because of the way it's calculated) at 0xFFBE0000.
69 //
70 // Initial nonpaged pool is allocated and mapped early-on during boot, but what
71 // about the expansion nonpaged pool? It is instead composed of special pages
72 // which belong to what are called System PTEs. These PTEs are the matter of a
73 // later discussion, but they are also considered part of the "nonpaged" OS, due
74 // to the fact that they are never paged out -- once an address is described by
75 // a System PTE, it is always valid, until the System PTE is torn down.
76 //
77 // System PTEs are actually composed of two "spaces", the system space proper,
78 // and the nonpaged pool expansion space. The latter, as we've already seen,
79 // begins at MmNonPagedPoolExpansionStart. Based on the number of System PTEs
80 // that the system will support, the remaining address space below this address
81 // is used to hold the system space PTEs. This address, in turn, is held in the
82 // variable named MmNonPagedSystemStart, which itself is never allowed to go
83 // below 0xEB000000 (thus creating an upper bound on the number of System PTEs).
84 //
85 // This means that 330MB are reserved for total nonpaged system VA, on top of
86 // whatever the initial nonpaged pool allocation is.
87 //
88 // The following URLs, valid as of April 23rd, 2008, support this evidence:
89 //
90 // http://www.cs.miami.edu/~burt/journal/NT/memory.html
91 // http://www.ditii.com/2007/09/28/windows-memory-management-x86-virtual-address-space/
92 //
93 PVOID MmNonPagedSystemStart;
94 PVOID MmNonPagedPoolStart;
95 PVOID MmNonPagedPoolExpansionStart;
96 PVOID MmNonPagedPoolEnd = MI_NONPAGED_POOL_END;
97
98 //
99 // This is where paged pool starts by default
100 //
101 PVOID MmPagedPoolStart = MI_PAGED_POOL_START;
102 PVOID MmPagedPoolEnd;
103
104 //
105 // And this is its default size
106 //
107 ULONG MmSizeOfPagedPoolInBytes = MI_MIN_INIT_PAGED_POOLSIZE;
108 PFN_NUMBER MmSizeOfPagedPoolInPages = MI_MIN_INIT_PAGED_POOLSIZE / PAGE_SIZE;
109
110 //
111 // Session space starts at 0xBFFFFFFF and grows downwards
112 // By default, it includes an 8MB image area where we map win32k and video card
113 // drivers, followed by a 4MB area containing the session's working set. This is
114 // then followed by a 20MB mapped view area and finally by the session's paged
115 // pool, by default 16MB.
116 //
117 // On a normal system, this results in session space occupying the region from
118 // 0xBD000000 to 0xC0000000
119 //
120 // See miarm.h for the defines that determine the sizing of this region. On an
121 // NT system, some of these can be configured through the registry, but we don't
122 // support that yet.
123 //
124 PVOID MiSessionSpaceEnd; // 0xC0000000
125 PVOID MiSessionImageEnd; // 0xC0000000
126 PVOID MiSessionImageStart; // 0xBF800000
127 PVOID MiSessionViewStart; // 0xBE000000
128 PVOID MiSessionPoolEnd; // 0xBE000000
129 PVOID MiSessionPoolStart; // 0xBD000000
130 PVOID MmSessionBase; // 0xBD000000
131 ULONG MmSessionSize;
132 ULONG MmSessionViewSize;
133 ULONG MmSessionPoolSize;
134 ULONG MmSessionImageSize;
135
136 //
137 // The system view space, on the other hand, is where sections that are memory
138 // mapped into "system space" end up.
139 //
140 // By default, it is a 16MB region.
141 //
142 PVOID MiSystemViewStart;
143 ULONG MmSystemViewSize;
144
145 //
146 // A copy of the system page directory (the page directory associated with the
147 // System process) is kept (double-mapped) by the manager in order to lazily
148 // map paged pool PDEs into external processes when they fault on a paged pool
149 // address.
150 //
151 PFN_NUMBER MmSystemPageDirectory;
152 PMMPTE MmSystemPagePtes;
153
154 //
155 // The system cache starts right after hyperspace. The first few pages are for
156 // keeping track of the system working set list.
157 //
158 // This should be 0xC0C00000 -- the cache itself starts at 0xC1000000
159 //
160 PMMWSL MmSystemCacheWorkingSetList = MI_SYSTEM_CACHE_WS_START;
161
162 //
163 // Windows NT seems to choose between 7000, 11000 and 50000
164 // On systems with more than 32MB, this number is then doubled, and further
165 // aligned up to a PDE boundary (4MB).
166 //
167 ULONG MmNumberOfSystemPtes;
168
169 //
170 // This is how many pages the PFN database will take up
171 // In Windows, this includes the Quark Color Table, but not in ARM³
172 //
173 ULONG MxPfnAllocation;
174
175 //
176 // Unlike the old ReactOS Memory Manager, ARM³ (and Windows) does not keep track
177 // of pages that are not actually valid physical memory, such as ACPI reserved
178 // regions, BIOS address ranges, or holes in physical memory address space which
179 // could indicate device-mapped I/O memory.
180 //
181 // In fact, the lack of a PFN entry for a page usually indicates that this is
182 // I/O space instead.
183 //
184 // A bitmap, called the PFN bitmap, keeps track of all page frames by assigning
185 // a bit to each. If the bit is set, then the page is valid physical RAM.
186 //
187 RTL_BITMAP MiPfnBitMap;
188
189 //
190 // This structure describes the different pieces of RAM-backed address space
191 //
192 PPHYSICAL_MEMORY_DESCRIPTOR MmPhysicalMemoryBlock;
193
194 //
195 // This is where we keep track of the most basic physical layout markers
196 //
197 ULONG MmNumberOfPhysicalPages, MmHighestPhysicalPage, MmLowestPhysicalPage = -1;
198
199 //
200 // The total number of pages mapped by the boot loader, which include the kernel
201 // HAL, boot drivers, registry, NLS files and other loader data structures is
202 // kept track of here. This depends on "LoaderPagesSpanned" being correct when
203 // coming from the loader.
204 //
205 // This number is later aligned up to a PDE boundary.
206 //
207 ULONG MmBootImageSize;
208
209 //
210 // These three variables keep track of the core separation of address space that
211 // exists between kernel mode and user mode.
212 //
213 ULONG MmUserProbeAddress;
214 PVOID MmHighestUserAddress;
215 PVOID MmSystemRangeStart;
216
217 PVOID MmSystemCacheStart;
218 PVOID MmSystemCacheEnd;
219 MMSUPPORT MmSystemCacheWs;
220
221 //
222 // This is where hyperspace ends (followed by the system cache working set)
223 //
224 PVOID MmHyperSpaceEnd;
225
226 //
227 // Page coloring algorithm data
228 //
229 ULONG MmSecondaryColors;
230 ULONG MmSecondaryColorMask;
231
232 //
233 // Actual (registry-configurable) size of a GUI thread's stack
234 //
235 ULONG MmLargeStackSize = KERNEL_LARGE_STACK_SIZE;
236
237 //
238 // Before we have a PFN database, memory comes straight from our physical memory
239 // blocks, which is nice because it's guaranteed contiguous and also because once
240 // we take a page from here, the system doesn't see it anymore.
241 // However, once the fun is over, those pages must be re-integrated back into
242 // PFN society life, and that requires us keeping a copy of the original layout
243 // so that we can parse it later.
244 //
245 PMEMORY_ALLOCATION_DESCRIPTOR MxFreeDescriptor;
246 MEMORY_ALLOCATION_DESCRIPTOR MxOldFreeDescriptor;
247
248 /*
249 * For each page's worth bytes of L2 cache in a given set/way line, the zero and
250 * free lists are organized in what is called a "color".
251 *
252 * This array points to the two lists, so it can be thought of as a multi-dimensional
253 * array of MmFreePagesByColor[2][MmSecondaryColors]. Since the number is dynamic,
254 * we describe the array in pointer form instead.
255 *
256 * On a final note, the color tables themselves are right after the PFN database.
257 */
258 C_ASSERT(FreePageList == 1);
259 PMMCOLOR_TABLES MmFreePagesByColor[FreePageList + 1];
260
261 /* PRIVATE FUNCTIONS **********************************************************/
262
263 //
264 // In Bavaria, this is probably a hate crime
265 //
266 VOID
267 FASTCALL
268 MiSyncARM3WithROS(IN PVOID AddressStart,
269 IN PVOID AddressEnd)
270 {
271 //
272 // Puerile piece of junk-grade carbonized horseshit puss sold to the lowest bidder
273 //
274 ULONG Pde = ADDR_TO_PDE_OFFSET(AddressStart);
275 while (Pde <= ADDR_TO_PDE_OFFSET(AddressEnd))
276 {
277 //
278 // This both odious and heinous
279 //
280 extern ULONG MmGlobalKernelPageDirectory[1024];
281 MmGlobalKernelPageDirectory[Pde] = ((PULONG)PDE_BASE)[Pde];
282 Pde++;
283 }
284 }
285
286 PFN_NUMBER
287 NTAPI
288 MxGetNextPage(IN PFN_NUMBER PageCount)
289 {
290 PFN_NUMBER Pfn;
291
292 /* Make sure we have enough pages */
293 if (PageCount > MxFreeDescriptor->PageCount)
294 {
295 /* Crash the system */
296 KeBugCheckEx(INSTALL_MORE_MEMORY,
297 MmNumberOfPhysicalPages,
298 MxFreeDescriptor->PageCount,
299 MxOldFreeDescriptor.PageCount,
300 PageCount);
301 }
302
303 /* Use our lowest usable free pages */
304 Pfn = MxFreeDescriptor->BasePage;
305 MxFreeDescriptor->BasePage += PageCount;
306 MxFreeDescriptor->PageCount -= PageCount;
307 return Pfn;
308 }
309
310 VOID
311 NTAPI
312 MiComputeColorInformation(VOID)
313 {
314 ULONG L2Associativity;
315
316 /* Check if no setting was provided already */
317 if (!MmSecondaryColors)
318 {
319 /* Get L2 cache information */
320 L2Associativity = KeGetPcr()->SecondLevelCacheAssociativity;
321
322 /* The number of colors is the number of cache bytes by set/way */
323 MmSecondaryColors = KeGetPcr()->SecondLevelCacheSize;
324 if (L2Associativity) MmSecondaryColors /= L2Associativity;
325 }
326
327 /* Now convert cache bytes into pages */
328 MmSecondaryColors >>= PAGE_SHIFT;
329 if (!MmSecondaryColors)
330 {
331 /* If there was no cache data from the KPCR, use the default colors */
332 MmSecondaryColors = MI_SECONDARY_COLORS;
333 }
334 else
335 {
336 /* Otherwise, make sure there aren't too many colors */
337 if (MmSecondaryColors > MI_MAX_SECONDARY_COLORS)
338 {
339 /* Set the maximum */
340 MmSecondaryColors = MI_MAX_SECONDARY_COLORS;
341 }
342
343 /* Make sure there aren't too little colors */
344 if (MmSecondaryColors < MI_MIN_SECONDARY_COLORS)
345 {
346 /* Set the default */
347 MmSecondaryColors = MI_SECONDARY_COLORS;
348 }
349
350 /* Finally make sure the colors are a power of two */
351 if (MmSecondaryColors & (MmSecondaryColors - 1))
352 {
353 /* Set the default */
354 MmSecondaryColors = MI_SECONDARY_COLORS;
355 }
356 }
357
358 /* Compute the mask and store it */
359 MmSecondaryColorMask = MmSecondaryColors - 1;
360 KeGetCurrentPrcb()->SecondaryColorMask = MmSecondaryColorMask;
361 }
362
/*
 * Places the page-color tables immediately after the second PFN database
 * slice, maps and zeroes any pages backing them that are not yet valid
 * (drawing pages from MxGetNextPage), and initializes every color's zeroed
 * and free list head to the empty-list sentinel (0xFFFFFFFF).
 *
 * Must run after MmSecondaryColors has been computed and after
 * MmHighestPhysicalPage is known.
 */
VOID
NTAPI
MiInitializeColorTables(VOID)
{
    ULONG i;
    PMMPTE PointerPte, LastPte;
    MMPTE TempPte = ValidKernelPte;

    /* The color table starts after the ARM3 PFN database */
    MmFreePagesByColor[0] = (PMMCOLOR_TABLES)&MmPfnDatabase[1][MmHighestPhysicalPage + 1];

    /* Loop the PTEs. We have two color tables for each secondary color */
    PointerPte = MiAddressToPte(&MmFreePagesByColor[0][0]);
    LastPte = MiAddressToPte((ULONG_PTR)MmFreePagesByColor[0] +
                             (2 * MmSecondaryColors * sizeof(MMCOLOR_TABLES))
                             - 1);
    while (PointerPte <= LastPte)
    {
        /* Check for valid PTE -- pages already mapped are left untouched */
        if (PointerPte->u.Hard.Valid == 0)
        {
            /* Get a page and map it */
            TempPte.u.Hard.PageFrameNumber = MxGetNextPage(1);
            ASSERT(TempPte.u.Hard.Valid == 1);
            *PointerPte = TempPte;

            /* Zero out the page */
            RtlZeroMemory(MiPteToAddress(PointerPte), PAGE_SIZE);
        }

        /* Next */
        PointerPte++;
    }

    /* Now set the address of the next list, right after this one */
    MmFreePagesByColor[1] = &MmFreePagesByColor[0][MmSecondaryColors];

    /* Now loop the lists to set them up */
    for (i = 0; i < MmSecondaryColors; i++)
    {
        /* Set both free and zero lists for each color to the empty sentinel */
        MmFreePagesByColor[ZeroedPageList][i].Flink = 0xFFFFFFFF;
        MmFreePagesByColor[ZeroedPageList][i].Blink = (PVOID)0xFFFFFFFF;
        MmFreePagesByColor[ZeroedPageList][i].Count = 0;
        MmFreePagesByColor[FreePageList][i].Flink = 0xFFFFFFFF;
        MmFreePagesByColor[FreePageList][i].Blink = (PVOID)0xFFFFFFFF;
        MmFreePagesByColor[FreePageList][i].Count = 0;
    }
}
412
413 BOOLEAN
414 NTAPI
415 MiIsRegularMemory(IN PLOADER_PARAMETER_BLOCK LoaderBlock,
416 IN PFN_NUMBER Pfn)
417 {
418 PLIST_ENTRY NextEntry;
419 PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
420
421 /* Loop the memory descriptors */
422 NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
423 while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
424 {
425 /* Get the memory descriptor */
426 MdBlock = CONTAINING_RECORD(NextEntry,
427 MEMORY_ALLOCATION_DESCRIPTOR,
428 ListEntry);
429
430 /* Check if this PFN could be part of the block */
431 if (Pfn >= (MdBlock->BasePage))
432 {
433 /* Check if it really is part of the block */
434 if (Pfn < (MdBlock->BasePage + MdBlock->PageCount))
435 {
436 /* Check if the block is actually memory we don't map */
437 if ((MdBlock->MemoryType == LoaderFirmwarePermanent) ||
438 (MdBlock->MemoryType == LoaderBBTMemory) ||
439 (MdBlock->MemoryType == LoaderSpecialMemory))
440 {
441 /* We don't need PFN database entries for this memory */
442 break;
443 }
444
445 /* This is memory we want to map */
446 return TRUE;
447 }
448 }
449 else
450 {
451 /* Blocks are ordered, so if it's not here, it doesn't exist */
452 break;
453 }
454
455 /* Get to the next descriptor */
456 NextEntry = MdBlock->ListEntry.Flink;
457 }
458
459 /* Check if this PFN is actually from our free memory descriptor */
460 if ((Pfn >= MxOldFreeDescriptor.BasePage) &&
461 (Pfn < MxOldFreeDescriptor.BasePage + MxOldFreeDescriptor.PageCount))
462 {
463 /* We use these pages for initial mappings, so we do want to count them */
464 return TRUE;
465 }
466
467 /* Otherwise this isn't memory that we describe or care about */
468 return FALSE;
469 }
470
471 VOID
472 NTAPI
473 MiMapPfnDatabase(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
474 {
475 ULONG FreePage, FreePageCount, PagesLeft, BasePage, PageCount;
476 PLIST_ENTRY NextEntry;
477 PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
478 PMMPTE PointerPte, LastPte;
479 MMPTE TempPte = ValidKernelPte;
480
481 /* Get current page data, since we won't be using MxGetNextPage as it would corrupt our state */
482 FreePage = MxFreeDescriptor->BasePage;
483 FreePageCount = MxFreeDescriptor->PageCount;
484 PagesLeft = 0;
485
486 /* Loop the memory descriptors */
487 NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
488 while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
489 {
490 /* Get the descriptor */
491 MdBlock = CONTAINING_RECORD(NextEntry,
492 MEMORY_ALLOCATION_DESCRIPTOR,
493 ListEntry);
494 if ((MdBlock->MemoryType == LoaderFirmwarePermanent) ||
495 (MdBlock->MemoryType == LoaderBBTMemory) ||
496 (MdBlock->MemoryType == LoaderSpecialMemory))
497 {
498 /* These pages are not part of the PFN database */
499 NextEntry = MdBlock->ListEntry.Flink;
500 continue;
501 }
502
503 /* Next, check if this is our special free descriptor we've found */
504 if (MdBlock == MxFreeDescriptor)
505 {
506 /* Use the real numbers instead */
507 BasePage = MxOldFreeDescriptor.BasePage;
508 PageCount = MxOldFreeDescriptor.PageCount;
509 }
510 else
511 {
512 /* Use the descriptor's numbers */
513 BasePage = MdBlock->BasePage;
514 PageCount = MdBlock->PageCount;
515 }
516
517 /* Get the PTEs for this range */
518 PointerPte = MiAddressToPte(&MmPfnDatabase[0][BasePage]);
519 LastPte = MiAddressToPte(((ULONG_PTR)&MmPfnDatabase[0][BasePage + PageCount]) - 1);
520 DPRINT("MD Type: %lx Base: %lx Count: %lx\n", MdBlock->MemoryType, BasePage, PageCount);
521
522 /* Loop them */
523 while (PointerPte <= LastPte)
524 {
525 /* We'll only touch PTEs that aren't already valid */
526 if (PointerPte->u.Hard.Valid == 0)
527 {
528 /* Use the next free page */
529 TempPte.u.Hard.PageFrameNumber = FreePage;
530 ASSERT(FreePageCount != 0);
531
532 /* Consume free pages */
533 FreePage++;
534 FreePageCount--;
535 if (!FreePageCount)
536 {
537 /* Out of memory */
538 KeBugCheckEx(INSTALL_MORE_MEMORY,
539 MmNumberOfPhysicalPages,
540 FreePageCount,
541 MxOldFreeDescriptor.PageCount,
542 1);
543 }
544
545 /* Write out this PTE */
546 PagesLeft++;
547 ASSERT(PointerPte->u.Hard.Valid == 0);
548 ASSERT(TempPte.u.Hard.Valid == 1);
549 *PointerPte = TempPte;
550
551 /* Zero this page */
552 RtlZeroMemory(MiPteToAddress(PointerPte), PAGE_SIZE);
553 }
554
555 /* Next! */
556 PointerPte++;
557 }
558
559 /* Get the PTEs for this range */
560 PointerPte = MiAddressToPte(&MmPfnDatabase[1][BasePage]);
561 LastPte = MiAddressToPte(((ULONG_PTR)&MmPfnDatabase[1][BasePage + PageCount]) - 1);
562 DPRINT("MD Type: %lx Base: %lx Count: %lx\n", MdBlock->MemoryType, BasePage, PageCount);
563
564 /* Loop them */
565 while (PointerPte <= LastPte)
566 {
567 /* We'll only touch PTEs that aren't already valid */
568 if (PointerPte->u.Hard.Valid == 0)
569 {
570 /* Use the next free page */
571 TempPte.u.Hard.PageFrameNumber = FreePage;
572 ASSERT(FreePageCount != 0);
573
574 /* Consume free pages */
575 FreePage++;
576 FreePageCount--;
577 if (!FreePageCount)
578 {
579 /* Out of memory */
580 KeBugCheckEx(INSTALL_MORE_MEMORY,
581 MmNumberOfPhysicalPages,
582 FreePageCount,
583 MxOldFreeDescriptor.PageCount,
584 1);
585 }
586
587 /* Write out this PTE */
588 PagesLeft++;
589 ASSERT(PointerPte->u.Hard.Valid == 0);
590 ASSERT(TempPte.u.Hard.Valid == 1);
591 *PointerPte = TempPte;
592
593 /* Zero this page */
594 RtlZeroMemory(MiPteToAddress(PointerPte), PAGE_SIZE);
595 }
596
597 /* Next! */
598 PointerPte++;
599 }
600
601 /* Do the next address range */
602 NextEntry = MdBlock->ListEntry.Flink;
603 }
604
605 /* Now update the free descriptors to consume the pages we used up during the PFN allocation loop */
606 MxFreeDescriptor->BasePage = FreePage;
607 MxFreeDescriptor->PageCount = FreePageCount;
608 }
609
/*
 * Seeds the PFN database by walking the current (boot) page tables: every
 * valid PDE whose frame is regular memory gets an active PFN entry, and each
 * valid PTE beneath it bumps the PDE frame's share count and, when the PTE
 * frame is regular memory in an interesting range, gets its own PFN entry.
 *
 * NOTE(review): the 0xA0000000 cutoff below appears to be an x86 layout
 * assumption ("above end of system code") -- confirm against miarm.h.
 */
VOID
NTAPI
MiBuildPfnDatabaseFromPages(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
    PMMPDE PointerPde;
    PMMPTE PointerPte;
    ULONG i, Count, j;
    PFN_NUMBER PageFrameIndex, StartupPdIndex, PtePageIndex;
    PMMPFN Pfn1, Pfn2;
    ULONG_PTR BaseAddress = 0;

    /* PFN of the startup page directory; used as the PteFrame for every PDE */
    StartupPdIndex = PFN_FROM_PTE(MiAddressToPde(PDE_BASE));

    /* Start with the first PDE and scan them all */
    PointerPde = MiAddressToPde(NULL);
    Count = PD_COUNT * PDE_COUNT;
    for (i = 0; i < Count; i++)
    {
        /* Check for valid PDE */
        if (PointerPde->u.Hard.Valid == 1)
        {
            /* Get the PFN from it */
            PageFrameIndex = PFN_FROM_PTE(PointerPde);

            /* Do we want a PFN entry for this page? */
            if (MiIsRegularMemory(LoaderBlock, PageFrameIndex))
            {
                /* Yes we do, set it up as an active page-table page */
                Pfn1 = MI_PFN_TO_PFNENTRY(PageFrameIndex);
                Pfn1->u4.PteFrame = StartupPdIndex;
                Pfn1->PteAddress = PointerPde;
                Pfn1->u2.ShareCount++;
                Pfn1->u3.e2.ReferenceCount = 1;
                Pfn1->u3.e1.PageLocation = ActiveAndValid;
                Pfn1->u3.e1.CacheAttribute = MiNonCached;
            }
            else
            {
                /* No PFN entry */
                Pfn1 = NULL;
            }

            /* Now get the PTE and scan the pages it maps */
            PointerPte = MiAddressToPte(BaseAddress);
            for (j = 0; j < PTE_COUNT; j++)
            {
                /* Check for a valid PTE */
                if (PointerPte->u.Hard.Valid == 1)
                {
                    /* Increase the shared count of the PFN entry for the PDE */
                    ASSERT(Pfn1 != NULL);
                    Pfn1->u2.ShareCount++;

                    /* Now check if the PTE is valid memory too */
                    PtePageIndex = PFN_FROM_PTE(PointerPte);
                    if (MiIsRegularMemory(LoaderBlock, PtePageIndex))
                    {
                        /*
                         * Only add pages above the end of system code or pages
                         * that are part of nonpaged pool
                         */
                        if ((BaseAddress >= 0xA0000000) ||
                            ((BaseAddress >= (ULONG_PTR)MmNonPagedPoolStart) &&
                             (BaseAddress < (ULONG_PTR)MmNonPagedPoolStart +
                                            MmSizeOfNonPagedPoolInBytes)))
                        {
                            /* Get the PFN entry and make sure it too is valid
                               (the database may not be fully mapped yet) */
                            Pfn2 = MI_PFN_TO_PFNENTRY(PtePageIndex);
                            if ((MmIsAddressValid(Pfn2)) &&
                                (MmIsAddressValid(Pfn2 + 1)))
                            {
                                /* Setup the PFN entry */
                                Pfn2->u4.PteFrame = PageFrameIndex;
                                Pfn2->PteAddress = PointerPte;
                                Pfn2->u2.ShareCount++;
                                Pfn2->u3.e2.ReferenceCount = 1;
                                Pfn2->u3.e1.PageLocation = ActiveAndValid;
                                Pfn2->u3.e1.CacheAttribute = MiNonCached;
                            }
                        }
                    }
                }

                /* Next PTE */
                PointerPte++;
                BaseAddress += PAGE_SIZE;
            }
        }
        else
        {
            /* PDE not valid: skip the whole 4MB it would have mapped */
            BaseAddress += PTE_COUNT * PAGE_SIZE;
        }

        /* Next PDE */
        PointerPde++;
    }
}
709
710 VOID
711 NTAPI
712 MiBuildPfnDatabaseZeroPage(VOID)
713 {
714 PMMPFN Pfn1;
715 PMMPDE PointerPde;
716
717 /* Grab the lowest page and check if it has no real references */
718 Pfn1 = MI_PFN_TO_PFNENTRY(MmLowestPhysicalPage);
719 if (!(MmLowestPhysicalPage) && !(Pfn1->u3.e2.ReferenceCount))
720 {
721 /* Make it a bogus page to catch errors */
722 PointerPde = MiAddressToPde(0xFFFFFFFF);
723 Pfn1->u4.PteFrame = PFN_FROM_PTE(PointerPde);
724 Pfn1->PteAddress = PointerPde;
725 Pfn1->u2.ShareCount++;
726 Pfn1->u3.e2.ReferenceCount = 0xFFF0;
727 Pfn1->u3.e1.PageLocation = ActiveAndValid;
728 Pfn1->u3.e1.CacheAttribute = MiNonCached;
729 }
730 }
731
/*
 * Completes the PFN database from the loader's memory descriptor list:
 * descriptors beyond MmHighestPhysicalPage are trimmed or skipped, free-ish
 * ranges are (eventually) released to the free list, invisible ranges are
 * ignored, and everything else is marked as an in-use active page. Bad RAM
 * halts the boot.
 */
VOID
NTAPI
MiBuildPfnDatabaseFromLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
    PLIST_ENTRY NextEntry;
    PFN_NUMBER PageCount = 0;
    PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
    PFN_NUMBER PageFrameIndex;
    PMMPFN Pfn1;
    PMMPTE PointerPte;
    PMMPDE PointerPde;

    /* Now loop through the descriptors */
    NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
    while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
    {
        /* Get the current descriptor */
        MdBlock = CONTAINING_RECORD(NextEntry,
                                    MEMORY_ALLOCATION_DESCRIPTOR,
                                    ListEntry);

        /* Read its data */
        PageCount = MdBlock->PageCount;
        PageFrameIndex = MdBlock->BasePage;

        /* Don't allow memory above what the PFN database is mapping */
        if (PageFrameIndex > MmHighestPhysicalPage)
        {
            /* Since they are ordered, everything past here will be larger */
            break;
        }

        /* On the other hand, the end page might be higher up... */
        if ((PageFrameIndex + PageCount) > (MmHighestPhysicalPage + 1))
        {
            /* In which case we'll trim the descriptor to go as high as we can */
            PageCount = MmHighestPhysicalPage + 1 - PageFrameIndex;
            MdBlock->PageCount = PageCount;

            /* But if there's nothing left to trim, we got too high, so quit */
            if (!PageCount) break;
        }

        /* Now check the descriptor type */
        switch (MdBlock->MemoryType)
        {
            /* Check for bad RAM */
            case LoaderBad:

                /* Deliberately hang: booting with bad modules is unsafe */
                DPRINT1("You have damaged RAM modules. Stopping boot\n");
                while (TRUE);
                break;

            /* Check for free RAM */
            case LoaderFree:
            case LoaderLoadedProgram:
            case LoaderFirmwareTemporary:
            case LoaderOsloaderStack:

                /* Get the last page of this descriptor. Note we loop backwards */
                PageFrameIndex += PageCount - 1;
                Pfn1 = MI_PFN_TO_PFNENTRY(PageFrameIndex);
                while (PageCount--)
                {
                    /* If the page really has no references, mark it as free */
                    if (!Pfn1->u3.e2.ReferenceCount)
                    {
                        Pfn1->u3.e1.CacheAttribute = MiNonCached;
                        /* Free-list insertion not implemented yet */
                        //MiInsertPageInFreeList(PageFrameIndex);
                    }

                    /* Go to the next page */
                    Pfn1--;
                    PageFrameIndex--;
                }

                /* Done with this block */
                break;

            /* Check for pages that are invisible to us */
            case LoaderFirmwarePermanent:
            case LoaderSpecialMemory:
            case LoaderBBTMemory:

                /* And skip them */
                break;

            default:

                /* Map these pages with the KSEG0 mapping that adds 0x80000000 */
                PointerPte = MiAddressToPte(KSEG0_BASE + (PageFrameIndex << PAGE_SHIFT));
                Pfn1 = MI_PFN_TO_PFNENTRY(PageFrameIndex);
                while (PageCount--)
                {
                    /* Check if the page is really unused */
                    PointerPde = MiAddressToPde(KSEG0_BASE + (PageFrameIndex << PAGE_SHIFT));
                    if (!Pfn1->u3.e2.ReferenceCount)
                    {
                        /* Mark it as being in-use */
                        Pfn1->u4.PteFrame = PFN_FROM_PTE(PointerPde);
                        Pfn1->PteAddress = PointerPte;
                        Pfn1->u2.ShareCount++;
                        Pfn1->u3.e2.ReferenceCount = 1;
                        Pfn1->u3.e1.PageLocation = ActiveAndValid;
                        Pfn1->u3.e1.CacheAttribute = MiNonCached;

                        /* Check for RAM disk page */
                        if (MdBlock->MemoryType == LoaderXIPRom)
                        {
                            /* Make it a pseudo-I/O ROM mapping */
                            Pfn1->u1.Flink = 0;
                            Pfn1->u2.ShareCount = 0;
                            Pfn1->u3.e2.ReferenceCount = 0;
                            Pfn1->u3.e1.PageLocation = 0;
                            Pfn1->u3.e1.Rom = 1;
                            Pfn1->u4.InPageError = 0;
                            Pfn1->u3.e1.PrototypePte = 1;
                        }
                    }

                    /* Advance page structures */
                    Pfn1++;
                    PageFrameIndex++;
                    PointerPte++;
                }
                break;
        }

        /* Next descriptor entry */
        NextEntry = MdBlock->ListEntry.Flink;
    }
}
864
865 VOID
866 NTAPI
867 MiBuildPfnDatabaseSelf(VOID)
868 {
869 PMMPTE PointerPte, LastPte;
870 PMMPFN Pfn1;
871
872 /* Loop the PFN database page */
873 PointerPte = MiAddressToPte(MI_PFN_TO_PFNENTRY(MmLowestPhysicalPage));
874 LastPte = MiAddressToPte(MI_PFN_TO_PFNENTRY(MmHighestPhysicalPage));
875 while (PointerPte <= LastPte)
876 {
877 /* Make sure the page is valid */
878 if (PointerPte->u.Hard.Valid == 1)
879 {
880 /* Get the PFN entry and just mark it referenced */
881 Pfn1 = MI_PFN_TO_PFNENTRY(PointerPte->u.Hard.PageFrameNumber);
882 Pfn1->u2.ShareCount = 1;
883 Pfn1->u3.e2.ReferenceCount = 1;
884 }
885
886 /* Next */
887 PointerPte++;
888 }
889 }
890
/*
 * Builds the entire PFN database in four strictly ordered passes:
 *   1. seed entries from the current page tables,
 *   2. poison the zero page if it is unused,
 *   3. fill in the rest from the loader's memory descriptors,
 *   4. account for the pages backing the database itself.
 * The order matters: each pass relies on the reference counts left behind
 * by the previous ones.
 */
VOID
NTAPI
MiInitializePfnDatabase(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
    /* Scan memory and start setting up PFN entries */
    MiBuildPfnDatabaseFromPages(LoaderBlock);

    /* Add the zero page */
    MiBuildPfnDatabaseZeroPage();

    /* Scan the loader block and build the rest of the PFN database */
    MiBuildPfnDatabaseFromLoaderBlock(LoaderBlock);

    /* Finally add the pages for the PFN database itself */
    MiBuildPfnDatabaseSelf();
}
907
908 VOID
909 NTAPI
910 MmDumpArmPfnDatabase(VOID)
911 {
912 ULONG i;
913 PMMPFN Pfn1;
914 PCHAR Consumer = "Unknown";
915 KIRQL OldIrql;
916 ULONG ActivePages = 0, FreePages = 0, OtherPages = 0;
917
918 KeRaiseIrql(HIGH_LEVEL, &OldIrql);
919
920 //
921 // Loop the PFN database
922 //
923 for (i = 0; i <= MmHighestPhysicalPage; i++)
924 {
925 Pfn1 = MI_PFN_TO_PFNENTRY(i);
926 if (!Pfn1) continue;
927
928 //
929 // Get the page location
930 //
931 switch (Pfn1->u3.e1.PageLocation)
932 {
933 case ActiveAndValid:
934
935 Consumer = "Active and Valid";
936 ActivePages++;
937 break;
938
939 case FreePageList:
940
941 Consumer = "Free Page List";
942 FreePages++;
943 break;
944
945 default:
946
947 Consumer = "Other (ASSERT!)";
948 OtherPages++;
949 break;
950 }
951
952 //
953 // Pretty-print the page
954 //
955 DbgPrint("0x%08p:\t%20s\t(%02d.%02d) [%08p-%08p])\n",
956 i << PAGE_SHIFT,
957 Consumer,
958 Pfn1->u3.e2.ReferenceCount,
959 Pfn1->u2.ShareCount,
960 Pfn1->PteAddress,
961 Pfn1->u4.PteFrame);
962 }
963
964 DbgPrint("Active: %d pages\t[%d KB]\n", ActivePages, (ActivePages << PAGE_SHIFT) / 1024);
965 DbgPrint("Free: %d pages\t[%d KB]\n", FreePages, (FreePages << PAGE_SHIFT) / 1024);
966 DbgPrint("Other: %d pages\t[%d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
967
968 KeLowerIrql(OldIrql);
969 }
970
971 PFN_NUMBER
972 NTAPI
973 MiPagesInLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock,
974 IN PBOOLEAN IncludeType)
975 {
976 PLIST_ENTRY NextEntry;
977 PFN_NUMBER PageCount = 0;
978 PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
979
980 //
981 // Now loop through the descriptors
982 //
983 NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
984 while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
985 {
986 //
987 // Grab each one, and check if it's one we should include
988 //
989 MdBlock = CONTAINING_RECORD(NextEntry,
990 MEMORY_ALLOCATION_DESCRIPTOR,
991 ListEntry);
992 if ((MdBlock->MemoryType < LoaderMaximum) &&
993 (IncludeType[MdBlock->MemoryType]))
994 {
995 //
996 // Add this to our running total
997 //
998 PageCount += MdBlock->PageCount;
999 }
1000
1001 //
1002 // Try the next descriptor
1003 //
1004 NextEntry = MdBlock->ListEntry.Flink;
1005 }
1006
1007 //
1008 // Return the total
1009 //
1010 return PageCount;
1011 }
1012
1013 PPHYSICAL_MEMORY_DESCRIPTOR
1014 NTAPI
1015 MmInitializeMemoryLimits(IN PLOADER_PARAMETER_BLOCK LoaderBlock,
1016 IN PBOOLEAN IncludeType)
1017 {
1018 PLIST_ENTRY NextEntry;
1019 ULONG Run = 0, InitialRuns = 0;
1020 PFN_NUMBER NextPage = -1, PageCount = 0;
1021 PPHYSICAL_MEMORY_DESCRIPTOR Buffer, NewBuffer;
1022 PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
1023
1024 //
1025 // Scan the memory descriptors
1026 //
1027 NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
1028 while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
1029 {
1030 //
1031 // For each one, increase the memory allocation estimate
1032 //
1033 InitialRuns++;
1034 NextEntry = NextEntry->Flink;
1035 }
1036
1037 //
1038 // Allocate the maximum we'll ever need
1039 //
1040 Buffer = ExAllocatePoolWithTag(NonPagedPool,
1041 sizeof(PHYSICAL_MEMORY_DESCRIPTOR) +
1042 sizeof(PHYSICAL_MEMORY_RUN) *
1043 (InitialRuns - 1),
1044 'lMmM');
1045 if (!Buffer) return NULL;
1046
1047 //
1048 // For now that's how many runs we have
1049 //
1050 Buffer->NumberOfRuns = InitialRuns;
1051
1052 //
1053 // Now loop through the descriptors again
1054 //
1055 NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
1056 while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
1057 {
1058 //
1059 // Grab each one, and check if it's one we should include
1060 //
1061 MdBlock = CONTAINING_RECORD(NextEntry,
1062 MEMORY_ALLOCATION_DESCRIPTOR,
1063 ListEntry);
1064 if ((MdBlock->MemoryType < LoaderMaximum) &&
1065 (IncludeType[MdBlock->MemoryType]))
1066 {
1067 //
1068 // Add this to our running total
1069 //
1070 PageCount += MdBlock->PageCount;
1071
1072 //
1073 // Check if the next page is described by the next descriptor
1074 //
1075 if (MdBlock->BasePage == NextPage)
1076 {
1077 //
1078 // Combine it into the same physical run
1079 //
1080 ASSERT(MdBlock->PageCount != 0);
1081 Buffer->Run[Run - 1].PageCount += MdBlock->PageCount;
1082 NextPage += MdBlock->PageCount;
1083 }
1084 else
1085 {
1086 //
1087 // Otherwise just duplicate the descriptor's contents
1088 //
1089 Buffer->Run[Run].BasePage = MdBlock->BasePage;
1090 Buffer->Run[Run].PageCount = MdBlock->PageCount;
1091 NextPage = Buffer->Run[Run].BasePage + Buffer->Run[Run].PageCount;
1092
1093 //
1094 // And in this case, increase the number of runs
1095 //
1096 Run++;
1097 }
1098 }
1099
1100 //
1101 // Try the next descriptor
1102 //
1103 NextEntry = MdBlock->ListEntry.Flink;
1104 }
1105
1106 //
1107 // We should not have been able to go past our initial estimate
1108 //
1109 ASSERT(Run <= Buffer->NumberOfRuns);
1110
1111 //
1112 // Our guess was probably exaggerated...
1113 //
1114 if (InitialRuns > Run)
1115 {
1116 //
1117 // Allocate a more accurately sized buffer
1118 //
1119 NewBuffer = ExAllocatePoolWithTag(NonPagedPool,
1120 sizeof(PHYSICAL_MEMORY_DESCRIPTOR) +
1121 sizeof(PHYSICAL_MEMORY_RUN) *
1122 (Run - 1),
1123 'lMmM');
1124 if (NewBuffer)
1125 {
1126 //
1127 // Copy the old buffer into the new, then free it
1128 //
1129 RtlCopyMemory(NewBuffer->Run,
1130 Buffer->Run,
1131 sizeof(PHYSICAL_MEMORY_RUN) * Run);
1132 ExFreePool(Buffer);
1133
1134 //
1135 // Now use the new buffer
1136 //
1137 Buffer = NewBuffer;
1138 }
1139 }
1140
1141 //
1142 // Write the final numbers, and return it
1143 //
1144 Buffer->NumberOfRuns = Run;
1145 Buffer->NumberOfPages = PageCount;
1146 return Buffer;
1147 }
1148
//
// Sizes and creates the paged pool region: double-maps the system page
// directory through a reserved system PTE, computes the paged pool size
// (at least 32MB, at most the VA space between MmPagedPoolStart and
// MmNonPagedSystemStart), maps the first paged pool PDE, and sets up the
// two tracking bitmaps plus the paged pool guarded mutex. Called once at
// boot from MmArmInitSystem during Phase 0.
//
VOID
NTAPI
MiBuildPagedPool(VOID)
{
    PMMPTE PointerPte, PointerPde;
    MMPTE TempPte = ValidKernelPte;
    PFN_NUMBER PageFrameIndex;
    KIRQL OldIrql;
    ULONG Size, BitMapSize;

    //
    // Get the page frame number for the system page directory
    //
    PointerPte = MiAddressToPte(PDE_BASE);
    MmSystemPageDirectory = PFN_FROM_PTE(PointerPte);

    //
    // Allocate a system PTE which will hold a copy of the page directory
    //
    PointerPte = MiReserveSystemPtes(1, SystemPteSpace);
    ASSERT(PointerPte);
    MmSystemPagePtes = MiPteToAddress(PointerPte);

    //
    // Make this system PTE point to the system page directory.
    // It is now essentially double-mapped. This will be used later for lazy
    // evaluation of PDEs across process switches, similarly to how the Global
    // page directory array in the old ReactOS Mm is used (but in a less hacky
    // way).
    //
    TempPte = ValidKernelPte;
    TempPte.u.Hard.PageFrameNumber = MmSystemPageDirectory;
    ASSERT(PointerPte->u.Hard.Valid == 0);
    ASSERT(TempPte.u.Hard.Valid == 1);
    *PointerPte = TempPte;

    //
    // Let's get back to paged pool work: size it up.
    // By default, it should be twice as big as nonpaged pool.
    //
    MmSizeOfPagedPoolInBytes = 2 * MmMaximumNonPagedPoolInBytes;
    if (MmSizeOfPagedPoolInBytes > ((ULONG_PTR)MmNonPagedSystemStart -
                                    (ULONG_PTR)MmPagedPoolStart))
    {
        //
        // On the other hand, we have limited VA space, so make sure that the VA
        // for paged pool doesn't overflow into nonpaged pool VA. Otherwise, set
        // whatever maximum is possible.
        //
        MmSizeOfPagedPoolInBytes = (ULONG_PTR)MmNonPagedSystemStart -
                                   (ULONG_PTR)MmPagedPoolStart;
    }

    //
    // Get the size in pages and make sure paged pool is at least 32MB.
    //
    Size = MmSizeOfPagedPoolInBytes;
    if (Size < MI_MIN_INIT_PAGED_POOLSIZE) Size = MI_MIN_INIT_PAGED_POOLSIZE;
    Size = BYTES_TO_PAGES(Size);

    //
    // Now check how many PTEs will be required for these many pages.
    // (1024 PTEs per page table here — this math assumes the x86 non-PAE
    // layout of 4-byte PTEs in a 4KB page table.)
    //
    Size = (Size + (1024 - 1)) / 1024;

    //
    // Recompute the page-aligned size of the paged pool, in bytes and pages.
    //
    MmSizeOfPagedPoolInBytes = Size * PAGE_SIZE * 1024;
    MmSizeOfPagedPoolInPages = MmSizeOfPagedPoolInBytes >> PAGE_SHIFT;

    //
    // Let's be really sure this doesn't overflow into nonpaged system VA
    //
    ASSERT((MmSizeOfPagedPoolInBytes + (ULONG_PTR)MmPagedPoolStart) <=
           (ULONG_PTR)MmNonPagedSystemStart);

    //
    // This is where paged pool ends
    //
    MmPagedPoolEnd = (PVOID)(((ULONG_PTR)MmPagedPoolStart +
                              MmSizeOfPagedPoolInBytes) - 1);

    //
    // So now get the PDE for paged pool and zero it out
    //
    PointerPde = MiAddressToPde(MmPagedPoolStart);
    RtlZeroMemory(PointerPde,
                  (1 + MiAddressToPde(MmPagedPoolEnd) - PointerPde) * sizeof(MMPTE));

    //
    // Next, get the first and last PTE
    //
    PointerPte = MiAddressToPte(MmPagedPoolStart);
    MmPagedPoolInfo.FirstPteForPagedPool = PointerPte;
    MmPagedPoolInfo.LastPteForPagedPool = MiAddressToPte(MmPagedPoolEnd);

    //
    // Lock the PFN database
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    //
    // Allocate a page and map the first paged pool PDE.
    // NOTE(review): MmAllocPage is the legacy ReactOS page allocator —
    // presumably it returns a usable zeroed page; confirm against its
    // definition before relying on the page's initial contents.
    //
    PageFrameIndex = MmAllocPage(MC_NPPOOL, 0);
    TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
    ASSERT(PointerPde->u.Hard.Valid == 0);
    ASSERT(TempPte.u.Hard.Valid == 1);
    *PointerPde = TempPte;

    //
    // Release the PFN database lock
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    //
    // We only have one PDE mapped for now... at fault time, additional PDEs
    // will be allocated to handle paged pool growth. This is where they'll have
    // to start.
    //
    MmPagedPoolInfo.NextPdeForPagedPoolExpansion = PointerPde + 1;

    //
    // We keep track of each page via a bit, so check how big the bitmap will
    // have to be (make sure to align our page count such that it fits nicely
    // into a 4-byte aligned bitmap).
    //
    // We'll also allocate the bitmap header itself as part of the same buffer.
    //
    Size = Size * 1024;
    ASSERT(Size == MmSizeOfPagedPoolInPages);
    BitMapSize = Size;
    Size = sizeof(RTL_BITMAP) + (((Size + 31) / 32) * sizeof(ULONG));

    //
    // Allocate the allocation bitmap, which tells us which regions have not yet
    // been mapped into memory
    //
    MmPagedPoolInfo.PagedPoolAllocationMap = ExAllocatePoolWithTag(NonPagedPool,
                                                                   Size,
                                                                   '  mM');
    ASSERT(MmPagedPoolInfo.PagedPoolAllocationMap);

    //
    // Initialize it such that at first, only the first page's worth of PTEs is
    // marked as allocated (incidentally, the first PDE we allocated earlier).
    // The bit storage lives immediately after the RTL_BITMAP header in the
    // same buffer, hence the "+ 1" pointer arithmetic.
    //
    RtlInitializeBitMap(MmPagedPoolInfo.PagedPoolAllocationMap,
                        (PULONG)(MmPagedPoolInfo.PagedPoolAllocationMap + 1),
                        BitMapSize);
    RtlSetAllBits(MmPagedPoolInfo.PagedPoolAllocationMap);
    RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap, 0, 1024);

    //
    // We have a second bitmap, which keeps track of where allocations end.
    // Given the allocation bitmap and a base address, we can therefore figure
    // out which page is the last page of that allocation, and thus how big the
    // entire allocation is.
    //
    MmPagedPoolInfo.EndOfPagedPoolBitmap = ExAllocatePoolWithTag(NonPagedPool,
                                                                 Size,
                                                                 '  mM');
    ASSERT(MmPagedPoolInfo.EndOfPagedPoolBitmap);
    RtlInitializeBitMap(MmPagedPoolInfo.EndOfPagedPoolBitmap,
                        (PULONG)(MmPagedPoolInfo.EndOfPagedPoolBitmap + 1),
                        BitMapSize);

    //
    // Since no allocations have been made yet, there are no bits set as the end
    //
    RtlClearAllBits(MmPagedPoolInfo.EndOfPagedPoolBitmap);

    //
    // Initialize paged pool.
    //
    InitializePool(PagedPool, 0);

    //
    // Initialize the paged pool mutex
    //
    KeInitializeGuardedMutex(&MmPagedPoolMutex);
}
1332
//
// Phase-driven initialization entry point for the ARM3 memory manager.
// During Phase 0 this lays out the kernel virtual address space (session
// space, system view, boot image region), tunes the number of system PTEs
// to the amount of physical RAM, runs the platform-specific init, builds
// the physical memory block and the PFN bitmap, and finally sizes/creates
// paged pool. Phases other than 0 currently fall straight through and
// just return success.
//
NTSTATUS
NTAPI
MmArmInitSystem(IN ULONG Phase,
                IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
    ULONG i;
    BOOLEAN IncludeType[LoaderMaximum];
    PVOID Bitmap;
    PPHYSICAL_MEMORY_RUN Run;
    PFN_NUMBER PageCount;

    //
    // Instantiate memory that we don't consider RAM/usable
    // We use the same exclusions that Windows does, in order to try to be
    // compatible with WinLDR-style booting
    //
    for (i = 0; i < LoaderMaximum; i++) IncludeType[i] = TRUE;
    IncludeType[LoaderBad] = FALSE;
    IncludeType[LoaderFirmwarePermanent] = FALSE;
    IncludeType[LoaderSpecialMemory] = FALSE;
    IncludeType[LoaderBBTMemory] = FALSE;
    if (Phase == 0)
    {
        //
        // Define the basic user vs. kernel address space separation
        // (user probe address sits 64KB below the start of system space)
        //
        MmSystemRangeStart = (PVOID)KSEG0_BASE;
        MmUserProbeAddress = (ULONG_PTR)MmSystemRangeStart - 0x10000;
        MmHighestUserAddress = (PVOID)(MmUserProbeAddress - 1);

        //
        // Get the size of the boot loader's image allocations and then round
        // that region up to a PDE size, so that any PDEs we might create for
        // whatever follows are separate from the PDEs that boot loader might've
        // already created (and later, we can blow all that away if we want to).
        //
        MmBootImageSize = KeLoaderBlock->Extension->LoaderPagesSpanned;
        MmBootImageSize *= PAGE_SIZE;
        MmBootImageSize = (MmBootImageSize + (4 * 1024 * 1024) - 1) & ~((4 * 1024 * 1024) - 1);
        ASSERT((MmBootImageSize % (4 * 1024 * 1024)) == 0);

        //
        // Set the size of session view, pool, and image
        //
        MmSessionSize = MI_SESSION_SIZE;
        MmSessionViewSize = MI_SESSION_VIEW_SIZE;
        MmSessionPoolSize = MI_SESSION_POOL_SIZE;
        MmSessionImageSize = MI_SESSION_IMAGE_SIZE;

        //
        // Set the size of system view
        //
        MmSystemViewSize = MI_SYSTEM_VIEW_SIZE;

        //
        // This is where it all ends (session space is laid out top-down
        // from the PTE base)
        //
        MiSessionImageEnd = (PVOID)PTE_BASE;

        //
        // This is where we will load Win32k.sys and the video driver
        //
        MiSessionImageStart = (PVOID)((ULONG_PTR)MiSessionImageEnd -
                                      MmSessionImageSize);

        //
        // So the view starts right below the session working set (itself below
        // the image area)
        //
        MiSessionViewStart = (PVOID)((ULONG_PTR)MiSessionImageEnd -
                                     MmSessionImageSize -
                                     MI_SESSION_WORKING_SET_SIZE -
                                     MmSessionViewSize);

        //
        // Session pool follows
        //
        MiSessionPoolEnd = MiSessionViewStart;
        MiSessionPoolStart = (PVOID)((ULONG_PTR)MiSessionPoolEnd -
                                     MmSessionPoolSize);

        //
        // And it all begins here
        //
        MmSessionBase = MiSessionPoolStart;

        //
        // Sanity check that our math is correct
        //
        ASSERT((ULONG_PTR)MmSessionBase + MmSessionSize == PTE_BASE);

        //
        // Session space ends wherever image session space ends
        //
        MiSessionSpaceEnd = MiSessionImageEnd;

        //
        // System view space ends at session space, so now that we know where
        // this is, we can compute the base address of system view space itself.
        //
        MiSystemViewStart = (PVOID)((ULONG_PTR)MmSessionBase -
                                    MmSystemViewSize);

        //
        // Count physical pages on the system
        //
        PageCount = MiPagesInLoaderBlock(LoaderBlock, IncludeType);

        //
        // Check if this is a machine with less than 19MB of RAM
        //
        if (PageCount < MI_MIN_PAGES_FOR_SYSPTE_TUNING)
        {
            //
            // Use the very minimum of system PTEs
            //
            MmNumberOfSystemPtes = 7000;
        }
        else
        {
            //
            // Use the default, but check if we have more than 32MB of RAM
            //
            MmNumberOfSystemPtes = 11000;
            if (PageCount > MI_MIN_PAGES_FOR_SYSPTE_BOOST)
            {
                //
                // Double the amount of system PTEs
                //
                MmNumberOfSystemPtes <<= 1;
            }
        }

        DPRINT("System PTE count has been tuned to %d (%d bytes)\n",
               MmNumberOfSystemPtes, MmNumberOfSystemPtes * PAGE_SIZE);

        /* Initialize the platform-specific parts */
        MiInitMachineDependent(LoaderBlock);

        //
        // Sync us up with ReactOS Mm: mark the ARM3-owned regions (nonpaged
        // system space, the PFN database + initial nonpaged pool, and
        // hyperspace) so the legacy Mm leaves them alone
        //
        MiSyncARM3WithROS(MmNonPagedSystemStart, (PVOID)((ULONG_PTR)MmNonPagedPoolEnd - 1));
        MiSyncARM3WithROS(MmPfnDatabase[0], (PVOID)((ULONG_PTR)MmNonPagedPoolStart + MmSizeOfNonPagedPoolInBytes - 1));
        MiSyncARM3WithROS((PVOID)HYPER_SPACE, (PVOID)(HYPER_SPACE + PAGE_SIZE - 1));

        //
        // Build the physical memory block
        //
        MmPhysicalMemoryBlock = MmInitializeMemoryLimits(LoaderBlock,
                                                         IncludeType);

        //
        // Allocate enough buffer for the PFN bitmap (one bit per PFN)
        // Align it up to a 32-bit boundary
        //
        Bitmap = ExAllocatePoolWithTag(NonPagedPool,
                                       (((MmHighestPhysicalPage + 1) + 31) / 32) * 4,
                                       '  mM');
        if (!Bitmap)
        {
            //
            // This is critical
            //
            KeBugCheckEx(INSTALL_MORE_MEMORY,
                         MmNumberOfPhysicalPages,
                         MmLowestPhysicalPage,
                         MmHighestPhysicalPage,
                         0x101);
        }

        //
        // Initialize it and clear all the bits to begin with
        //
        RtlInitializeBitMap(&MiPfnBitMap,
                            Bitmap,
                            MmHighestPhysicalPage + 1);
        RtlClearAllBits(&MiPfnBitMap);

        //
        // Loop physical memory runs
        //
        for (i = 0; i < MmPhysicalMemoryBlock->NumberOfRuns; i++)
        {
            //
            // Get the run
            //
            // NOTE(review): the %08p specifier below is handed shifted
            // integers, not pointers — works on x86 where sizes match, but
            // the specifier/argument types don't agree; verify/clean up.
            //
            Run = &MmPhysicalMemoryBlock->Run[i];
            DPRINT("PHYSICAL RAM [0x%08p to 0x%08p]\n",
                   Run->BasePage << PAGE_SHIFT,
                   (Run->BasePage + Run->PageCount) << PAGE_SHIFT);

            //
            // Make sure it has pages inside it
            //
            if (Run->PageCount)
            {
                //
                // Set the bits in the PFN bitmap
                //
                RtlSetBits(&MiPfnBitMap, Run->BasePage, Run->PageCount);
            }
        }

        //
        // Size up paged pool and build the shadow system page directory
        //
        MiBuildPagedPool();
    }

    //
    // Always return success for now
    //
    return STATUS_SUCCESS;
}
1548
1549 /* EOF */