[NTOS]: Factor out SecondaryColor computations into MiComputeColorInformation.
[reactos.git] / reactos / ntoskrnl / mm / ARM3 / i386 / init.c
1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: BSD - See COPYING.ARM in the top level directory
4 * FILE: ntoskrnl/mm/ARM3/i386/init.c
5 * PURPOSE: ARM Memory Manager Initialization for x86
6 * PROGRAMMERS: ReactOS Portable Systems Group
7 */
8
9 /* INCLUDES *******************************************************************/
10
11 #include <ntoskrnl.h>
12 #define NDEBUG
13 #include <debug.h>
14
15 #line 15 "ARM³::INIT:X86"
16 #define MODULE_INVOLVED_IN_ARM3
17 #include "../../ARM3/miarm.h"
18
19 /* GLOBALS ********************************************************************/
20
21 //
22 // Before we have a PFN database, memory comes straight from our physical memory
23 // blocks, which is nice because it's guaranteed contiguous and also because once
24 // we take a page from here, the system doesn't see it anymore.
25 // However, once the fun is over, those pages must be re-integrated back into
26 // PFN society life, and that requires us keeping a copy of the original layout
27 // so that we can parse it later.
28 //
29 PMEMORY_ALLOCATION_DESCRIPTOR MxFreeDescriptor;
30 MEMORY_ALLOCATION_DESCRIPTOR MxOldFreeDescriptor;
31
32 /* Template PTE and PDE for a kernel page */
33 MMPTE ValidKernelPde = {.u.Hard.Valid = 1, .u.Hard.Write = 1, .u.Hard.Dirty = 1, .u.Hard.Accessed = 1};
34 MMPTE ValidKernelPte = {.u.Hard.Valid = 1, .u.Hard.Write = 1, .u.Hard.Dirty = 1, .u.Hard.Accessed = 1};
35
36 /* Make the code cleaner with some definitions for size multiples */
37 #define _1KB (1024)
38 #define _1MB (1000 * _1KB)
39
40 /* Architecture specific size of a PDE directory, and size of a page table */
41 #define PDE_SIZE (4096 * sizeof(MMPDE))
42 #define PT_SIZE (1024 * sizeof(MMPTE))
43
44 /* PRIVATE FUNCTIONS **********************************************************/
45
46 VOID
47 NTAPI
48 MiComputeNonPagedPoolVa(IN ULONG FreePages)
49 {
50 IN PFN_NUMBER PoolPages;
51
52 /* Check if this is a machine with less than 256MB of RAM, and no overide */
53 if ((MmNumberOfPhysicalPages <= MI_MIN_PAGES_FOR_NONPAGED_POOL_TUNING) &&
54 !(MmSizeOfNonPagedPoolInBytes))
55 {
56 /* Force the non paged pool to be 2MB so we can reduce RAM usage */
57 MmSizeOfNonPagedPoolInBytes = 2 * _1MB;
58 }
59
60 /* Hyperspace ends here */
61 MmHyperSpaceEnd = (PVOID)((ULONG_PTR)MmSystemCacheWorkingSetList - 1);
62
63 /* Check if the user gave a ridicuously large nonpaged pool RAM size */
64 if ((MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT) > (FreePages * 7 / 8))
65 {
66 /* More than 7/8ths of RAM was dedicated to nonpaged pool, ignore! */
67 MmSizeOfNonPagedPoolInBytes = 0;
68 }
69
70 /* Check if no registry setting was set, or if the setting was too low */
71 if (MmSizeOfNonPagedPoolInBytes < MmMinimumNonPagedPoolSize)
72 {
73 /* Start with the minimum (256 KB) and add 32 KB for each MB above 4 */
74 MmSizeOfNonPagedPoolInBytes = MmMinimumNonPagedPoolSize;
75 MmSizeOfNonPagedPoolInBytes += (FreePages - 1024) / 256 * MmMinAdditionNonPagedPoolPerMb;
76 }
77
78 /* Check if the registy setting or our dynamic calculation was too high */
79 if (MmSizeOfNonPagedPoolInBytes > MI_MAX_INIT_NONPAGED_POOL_SIZE)
80 {
81 /* Set it to the maximum */
82 MmSizeOfNonPagedPoolInBytes = MI_MAX_INIT_NONPAGED_POOL_SIZE;
83 }
84
85 /* Check if a percentage cap was set through the registry */
86 if (MmMaximumNonPagedPoolPercent) UNIMPLEMENTED;
87
88 /* Page-align the nonpaged pool size */
89 MmSizeOfNonPagedPoolInBytes &= ~(PAGE_SIZE - 1);
90
91 /* Now, check if there was a registry size for the maximum size */
92 if (!MmMaximumNonPagedPoolInBytes)
93 {
94 /* Start with the default (1MB) */
95 MmMaximumNonPagedPoolInBytes = MmDefaultMaximumNonPagedPool;
96
97 /* Add space for PFN database */
98 MmMaximumNonPagedPoolInBytes += (ULONG)
99 PAGE_ALIGN((MmHighestPhysicalPage + 1) * sizeof(MMPFN));
100
101 /* Check if the machine has more than 512MB of free RAM */
102 if (FreePages >= 0x1F000)
103 {
104 /* Add 200KB for each MB above 4 */
105 MmMaximumNonPagedPoolInBytes += (FreePages - 1024) / 256 *
106 (MmMaxAdditionNonPagedPoolPerMb / 2);
107 if (MmMaximumNonPagedPoolInBytes < MI_MAX_NONPAGED_POOL_SIZE)
108 {
109 /* Make it at least 128MB since this machine has a lot of RAM */
110 MmMaximumNonPagedPoolInBytes = MI_MAX_NONPAGED_POOL_SIZE;
111 }
112 }
113 else
114 {
115 /* Add 400KB for each MB above 4 */
116 MmMaximumNonPagedPoolInBytes += (FreePages - 1024) / 256 *
117 MmMaxAdditionNonPagedPoolPerMb;
118 }
119 }
120
121 /* Make sure there's at least 16 pages + the PFN available for expansion */
122 PoolPages = MmSizeOfNonPagedPoolInBytes + (PAGE_SIZE * 16) +
123 ((ULONG)PAGE_ALIGN(MmHighestPhysicalPage + 1) * sizeof(MMPFN));
124 if (MmMaximumNonPagedPoolInBytes < PoolPages)
125 {
126 /* The maximum should be at least high enough to cover all the above */
127 MmMaximumNonPagedPoolInBytes = PoolPages;
128 }
129
130 /* Systems with 2GB of kernel address space get double the size */
131 PoolPages = MI_MAX_NONPAGED_POOL_SIZE * 2;
132
133 /* On the other hand, make sure that PFN + nonpaged pool doesn't get too big */
134 if (MmMaximumNonPagedPoolInBytes > PoolPages)
135 {
136 /* Trim it down to the maximum architectural limit (256MB) */
137 MmMaximumNonPagedPoolInBytes = PoolPages;
138 }
139
140 /* Check if this is a system with > 128MB of non paged pool */
141 if (MmMaximumNonPagedPoolInBytes > MI_MAX_NONPAGED_POOL_SIZE)
142 {
143 /* Check if the initial size is less than the extra 128MB boost */
144 if (MmSizeOfNonPagedPoolInBytes < (MmMaximumNonPagedPoolInBytes -
145 MI_MAX_NONPAGED_POOL_SIZE))
146 {
147 /* FIXME: Should check if the initial pool can be expanded */
148
149 /* Assume no expansion possible, check ift he maximum is too large */
150 if (MmMaximumNonPagedPoolInBytes > (MmSizeOfNonPagedPoolInBytes +
151 MI_MAX_NONPAGED_POOL_SIZE))
152 {
153 /* Set it to the initial value plus the boost */
154 MmMaximumNonPagedPoolInBytes = MmSizeOfNonPagedPoolInBytes +
155 MI_MAX_NONPAGED_POOL_SIZE;
156 }
157 }
158 }
159 }
160
161 VOID
162 NTAPI
163 MiComputeColorInformation(VOID)
164 {
165 ULONG L2Associativity;
166
167 /* Check if no setting was provided already */
168 if (!MmSecondaryColors)
169 {
170 /* Get L2 cache information */
171 L2Associativity = KeGetPcr()->SecondLevelCacheAssociativity;
172
173 /* The number of colors is the number of cache bytes by set/way */
174 MmSecondaryColors = KeGetPcr()->SecondLevelCacheSize;
175 if (L2Associativity) MmSecondaryColors /= L2Associativity;
176 }
177
178 /* Now convert cache bytes into pages */
179 MmSecondaryColors >>= PAGE_SHIFT;
180 if (!MmSecondaryColors)
181 {
182 /* If there was no cache data from the KPCR, use the default colors */
183 MmSecondaryColors = MI_SECONDARY_COLORS;
184 }
185 else
186 {
187 /* Otherwise, make sure there aren't too many colors */
188 if (MmSecondaryColors > MI_MAX_SECONDARY_COLORS)
189 {
190 /* Set the maximum */
191 MmSecondaryColors = MI_MAX_SECONDARY_COLORS;
192 }
193
194 /* Make sure there aren't too little colors */
195 if (MmSecondaryColors < MI_MIN_SECONDARY_COLORS)
196 {
197 /* Set the default */
198 MmSecondaryColors = MI_SECONDARY_COLORS;
199 }
200
201 /* Finally make sure the colors are a power of two */
202 if (MmSecondaryColors & (MmSecondaryColors - 1))
203 {
204 /* Set the default */
205 MmSecondaryColors = MI_SECONDARY_COLORS;
206 }
207 }
208
209 /* Compute the mask and store it */
210 MmSecondaryColorMask = MmSecondaryColors - 1;
211 KeGetCurrentPrcb()->SecondaryColorMask = MmSecondaryColorMask;
212 }
213
/*
 * Performs the x86-specific phase of ARM3 memory manager initialization:
 * sanitizes the large kernel stack size, scans the loader's memory
 * descriptor list to find physical memory bounds and the largest free
 * block, sizes the nonpaged pool and page colors, then physically carves
 * out the (shadow) PFN database and initial nonpaged pool, builds the page
 * tables for the nonpaged system area, system PTEs and hyperspace, and
 * reserves the zeroing PTEs.
 *
 * LoaderBlock - loader-provided parameter block whose memory descriptor
 *               list describes the machine's physical memory layout.
 *
 * Returns STATUS_SUCCESS (no failure paths return; exhaustion bugchecks).
 */
NTSTATUS
NTAPI
MiInitMachineDependent(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
    PLIST_ENTRY NextEntry;
    PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
    ULONG FreePages = 0;
    PFN_NUMBER PageFrameIndex;
    PMMPTE StartPde, EndPde, PointerPte, LastPte;
    MMPTE TempPde, TempPte;
    PVOID NonPagedPoolExpansionVa;
    /* NOTE(review): OldCount is written below but never read, and PagesLeft
     * only counts mapped PFN-database pages without being consumed -- both
     * look like leftovers; confirm before removing. */
    ULONG OldCount;
    PFN_NUMBER FreePage, FreePageCount, PagesLeft, BasePage, PageCount;

    /* Check for kernel stack size that's too big */
    if (MmLargeStackSize > (KERNEL_LARGE_STACK_SIZE / _1KB))
    {
        /* Sanitize to default value */
        MmLargeStackSize = KERNEL_LARGE_STACK_SIZE;
    }
    else
    {
        /* Take the registry setting, and convert it into bytes */
        MmLargeStackSize *= _1KB;

        /* Now align it to a page boundary */
        MmLargeStackSize = PAGE_ROUND_UP(MmLargeStackSize);

        /* Sanity checks */
        ASSERT(MmLargeStackSize <= KERNEL_LARGE_STACK_SIZE);
        ASSERT((MmLargeStackSize & (PAGE_SIZE - 1)) == 0);

        /* Make sure it's not too low */
        if (MmLargeStackSize < KERNEL_STACK_SIZE) MmLargeStackSize = KERNEL_STACK_SIZE;
    }

    /* Check for global bit */
    if (KeFeatureBits & KF_GLOBAL_PAGE)
    {
        /* Set it on the template PTE and PDE */
        ValidKernelPte.u.Hard.Global = TRUE;
        ValidKernelPde.u.Hard.Global = TRUE;
    }

    /* Now templates are ready */
    TempPte = ValidKernelPte;
    TempPde = ValidKernelPde;

    //
    // Set CR3 for the system process (physical address of the page directory)
    //
    PointerPte = MiAddressToPde(PTE_BASE);
    PageFrameIndex = PFN_FROM_PTE(PointerPte) << PAGE_SHIFT;
    PsGetCurrentProcess()->Pcb.DirectoryTableBase[0] = PageFrameIndex;

    //
    // Blow away user-mode
    //
    StartPde = MiAddressToPde(0);
    EndPde = MiAddressToPde(KSEG0_BASE);
    RtlZeroMemory(StartPde, (EndPde - StartPde) * sizeof(MMPTE));

    //
    // Loop the memory descriptors
    //
    NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
    while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
    {
        //
        // Get the memory block
        //
        MdBlock = CONTAINING_RECORD(NextEntry,
                                    MEMORY_ALLOCATION_DESCRIPTOR,
                                    ListEntry);

        //
        // Skip invisible memory
        //
        if ((MdBlock->MemoryType != LoaderFirmwarePermanent) &&
            (MdBlock->MemoryType != LoaderSpecialMemory) &&
            (MdBlock->MemoryType != LoaderHALCachedMemory) &&
            (MdBlock->MemoryType != LoaderBBTMemory))
        {
            //
            // Check if BURNMEM was used
            //
            if (MdBlock->MemoryType != LoaderBad)
            {
                //
                // Count this in the total of pages
                //
                MmNumberOfPhysicalPages += MdBlock->PageCount;
            }

            //
            // Check if this is the new lowest page
            //
            if (MdBlock->BasePage < MmLowestPhysicalPage)
            {
                //
                // Update the lowest page
                //
                MmLowestPhysicalPage = MdBlock->BasePage;
            }

            //
            // Check if this is the new highest page
            //
            PageFrameIndex = MdBlock->BasePage + MdBlock->PageCount;
            if (PageFrameIndex > MmHighestPhysicalPage)
            {
                //
                // Update the highest page
                //
                MmHighestPhysicalPage = PageFrameIndex - 1;
            }

            //
            // Check if this is free memory
            //
            if ((MdBlock->MemoryType == LoaderFree) ||
                (MdBlock->MemoryType == LoaderLoadedProgram) ||
                (MdBlock->MemoryType == LoaderFirmwareTemporary) ||
                (MdBlock->MemoryType == LoaderOsloaderStack))
            {
                //
                // Check if this is the largest memory descriptor
                //
                if (MdBlock->PageCount > FreePages)
                {
                    //
                    // For now, it is
                    //
                    MxFreeDescriptor = MdBlock;
                }

                //
                // More free pages
                //
                FreePages += MdBlock->PageCount;
            }
        }

        //
        // Keep going
        //
        NextEntry = MdBlock->ListEntry.Flink;
    }

    //
    // Save original values of the free descriptor, since it'll be
    // altered by early allocations
    //
    MxOldFreeDescriptor = *MxFreeDescriptor;

    /* Compute non paged pool limits and size */
    MiComputeNonPagedPoolVa(FreePages);

    /* Compute color information (L2 cache-separated paging lists) */
    MiComputeColorInformation();

    //
    // Calculate the number of bytes for the PFN database
    // and then convert to pages
    //
    MxPfnAllocation = (MmHighestPhysicalPage + 1) * sizeof(MMPFN);
    MxPfnAllocation >>= PAGE_SHIFT;

    //
    // We have to add one to the count here, because in the process of
    // shifting down to the page size, we actually ended up getting the
    // lower aligned size (so say, 0x5FFFF bytes is now 0x5F pages).
    // Later on, we'll shift this number back into bytes, which would cause
    // us to end up with only 0x5F000 bytes -- when we actually want to have
    // 0x60000 bytes.
    //
    MxPfnAllocation++;

    //
    // Now calculate the nonpaged pool expansion VA region
    //
    MmNonPagedPoolStart = (PVOID)((ULONG_PTR)MmNonPagedPoolEnd -
                                  MmMaximumNonPagedPoolInBytes +
                                  MmSizeOfNonPagedPoolInBytes);
    MmNonPagedPoolStart = (PVOID)PAGE_ALIGN(MmNonPagedPoolStart);
    NonPagedPoolExpansionVa = MmNonPagedPoolStart;
    DPRINT("NP Pool has been tuned to: %d bytes and %d bytes\n",
           MmSizeOfNonPagedPoolInBytes, MmMaximumNonPagedPoolInBytes);

    //
    // Now calculate the nonpaged system VA region, which includes the
    // nonpaged pool expansion (above) and the system PTEs. Note that it is
    // then aligned to a PDE boundary (4MB).
    //
    MmNonPagedSystemStart = (PVOID)((ULONG_PTR)MmNonPagedPoolStart -
                                    (MmNumberOfSystemPtes + 1) * PAGE_SIZE);
    MmNonPagedSystemStart = (PVOID)((ULONG_PTR)MmNonPagedSystemStart &
                                    ~((4 * 1024 * 1024) - 1));

    //
    // Don't let it go below the minimum
    //
    if (MmNonPagedSystemStart < (PVOID)0xEB000000)
    {
        //
        // This is a hard-coded limit in the Windows NT address space
        //
        MmNonPagedSystemStart = (PVOID)0xEB000000;

        //
        // Reduce the amount of system PTEs to reach this point
        //
        MmNumberOfSystemPtes = ((ULONG_PTR)MmNonPagedPoolStart -
                                (ULONG_PTR)MmNonPagedSystemStart) >>
                                PAGE_SHIFT;
        MmNumberOfSystemPtes--;
        ASSERT(MmNumberOfSystemPtes > 1000);
    }

    //
    // Check if we are in a situation where the size of the paged pool
    // is so large that it overflows into nonpaged pool
    //
    if (MmSizeOfPagedPoolInBytes >
        ((ULONG_PTR)MmNonPagedSystemStart - (ULONG_PTR)MmPagedPoolStart))
    {
        //
        // We need some recalculations here
        //
        DPRINT1("Paged pool is too big!\n");
    }

    //
    // Normally, the PFN database should start after the loader images.
    // This is already the case in ReactOS, but for now we want to co-exist
    // with the old memory manager, so we'll create a "Shadow PFN Database"
    // instead, and arbitrarily start it at 0xB0000000.
    //
    MmPfnDatabase = (PVOID)0xB0000000;
    ASSERT(((ULONG_PTR)MmPfnDatabase & ((4 * 1024 * 1024) - 1)) == 0);

    //
    // Non paged pool comes after the PFN database
    //
    MmNonPagedPoolStart = (PVOID)((ULONG_PTR)MmPfnDatabase +
                                  (MxPfnAllocation << PAGE_SHIFT));

    //
    // Now we actually need to get these many physical pages. Nonpaged pool
    // is actually also physically contiguous (but not the expansion)
    //
    PageFrameIndex = MxGetNextPage(MxPfnAllocation +
                                   (MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT));
    ASSERT(PageFrameIndex != 0);
    DPRINT("PFN DB PA PFN begins at: %lx\n", PageFrameIndex);
    DPRINT("NP PA PFN begins at: %lx\n", PageFrameIndex + MxPfnAllocation);

    //
    // Now we need some pages to create the page tables for the NP system VA
    // which includes system PTEs and expansion NP
    //
    StartPde = MiAddressToPde(MmNonPagedSystemStart);
    EndPde = MiAddressToPde((PVOID)((ULONG_PTR)MmNonPagedPoolEnd - 1));
    while (StartPde <= EndPde)
    {
        //
        // Sanity check
        //
        ASSERT(StartPde->u.Hard.Valid == 0);

        //
        // Get a page
        //
        TempPde.u.Hard.PageFrameNumber = MxGetNextPage(1);
        ASSERT(TempPde.u.Hard.Valid == 1);
        *StartPde = TempPde;

        //
        // Zero out the page table
        //
        PointerPte = MiPteToAddress(StartPde);
        RtlZeroMemory(PointerPte, PAGE_SIZE);

        //
        // Next
        //
        StartPde++;
    }

    //
    // Now we need pages for the page tables which will map initial NP
    //
    StartPde = MiAddressToPde(MmPfnDatabase);
    EndPde = MiAddressToPde((PVOID)((ULONG_PTR)MmNonPagedPoolStart +
                                    MmSizeOfNonPagedPoolInBytes - 1));
    while (StartPde <= EndPde)
    {
        //
        // Sanity check
        //
        ASSERT(StartPde->u.Hard.Valid == 0);

        //
        // Get a page
        //
        TempPde.u.Hard.PageFrameNumber = MxGetNextPage(1);
        ASSERT(TempPde.u.Hard.Valid == 1);
        *StartPde = TempPde;

        //
        // Zero out the page table
        //
        PointerPte = MiPteToAddress(StartPde);
        RtlZeroMemory(PointerPte, PAGE_SIZE);

        //
        // Next
        //
        StartPde++;
    }

    //
    // Now remember where the expansion starts
    //
    MmNonPagedPoolExpansionStart = NonPagedPoolExpansionVa;

    //
    // Last step is to actually map the nonpaged pool
    //
    PointerPte = MiAddressToPte(MmNonPagedPoolStart);
    LastPte = MiAddressToPte((PVOID)((ULONG_PTR)MmNonPagedPoolStart +
                                     MmSizeOfNonPagedPoolInBytes - 1));
    while (PointerPte <= LastPte)
    {
        //
        // Use one of our contiguous pages
        //
        TempPte.u.Hard.PageFrameNumber = PageFrameIndex++;
        ASSERT(PointerPte->u.Hard.Valid == 0);
        ASSERT(TempPte.u.Hard.Valid == 1);
        *PointerPte++ = TempPte;
    }

    //
    // Sanity check: make sure we have properly defined the system PTE space
    //
    ASSERT(MiAddressToPte(MmNonPagedSystemStart) <
           MiAddressToPte(MmNonPagedPoolExpansionStart));

    //
    // Now go ahead and initialize the ARM³ nonpaged pool
    //
    MiInitializeArmPool();

    //
    // Get current page data, since we won't be using MxGetNextPage as it
    // would corrupt our state
    //
    FreePage = MxFreeDescriptor->BasePage;
    FreePageCount = MxFreeDescriptor->PageCount;
    PagesLeft = 0;

    //
    // Loop the memory descriptors
    //
    NextEntry = KeLoaderBlock->MemoryDescriptorListHead.Flink;
    while (NextEntry != &KeLoaderBlock->MemoryDescriptorListHead)
    {
        //
        // Get the descriptor
        //
        MdBlock = CONTAINING_RECORD(NextEntry,
                                    MEMORY_ALLOCATION_DESCRIPTOR,
                                    ListEntry);
        if ((MdBlock->MemoryType == LoaderFirmwarePermanent) ||
            (MdBlock->MemoryType == LoaderBBTMemory) ||
            (MdBlock->MemoryType == LoaderSpecialMemory))
        {
            //
            // These pages are not part of the PFN database
            //
            NextEntry = MdBlock->ListEntry.Flink;
            continue;
        }

        //
        // Next, check if this is our special free descriptor we've found
        //
        if (MdBlock == MxFreeDescriptor)
        {
            //
            // Use the real numbers instead
            //
            BasePage = MxOldFreeDescriptor.BasePage;
            PageCount = MxOldFreeDescriptor.PageCount;
        }
        else
        {
            //
            // Use the descriptor's numbers
            //
            BasePage = MdBlock->BasePage;
            PageCount = MdBlock->PageCount;
        }

        //
        // Get the PTEs for this range
        //
        PointerPte = MiAddressToPte(&MmPfnDatabase[BasePage]);
        LastPte = MiAddressToPte(((ULONG_PTR)&MmPfnDatabase[BasePage + PageCount]) - 1);
        DPRINT("MD Type: %lx Base: %lx Count: %lx\n", MdBlock->MemoryType, BasePage, PageCount);

        //
        // Loop them
        //
        while (PointerPte <= LastPte)
        {
            //
            // We'll only touch PTEs that aren't already valid
            //
            if (PointerPte->u.Hard.Valid == 0)
            {
                //
                // Use the next free page
                //
                TempPte.u.Hard.PageFrameNumber = FreePage;
                ASSERT(FreePageCount != 0);

                //
                // Consume free pages
                //
                FreePage++;
                FreePageCount--;
                if (!FreePageCount)
                {
                    //
                    // Out of memory
                    //
                    KeBugCheckEx(INSTALL_MORE_MEMORY,
                                 MmNumberOfPhysicalPages,
                                 FreePageCount,
                                 MxOldFreeDescriptor.PageCount,
                                 1);
                }

                //
                // Write out this PTE
                //
                PagesLeft++;
                ASSERT(PointerPte->u.Hard.Valid == 0);
                ASSERT(TempPte.u.Hard.Valid == 1);
                *PointerPte = TempPte;

                //
                // Zero this page
                //
                RtlZeroMemory(MiPteToAddress(PointerPte), PAGE_SIZE);
            }

            //
            // Next!
            //
            PointerPte++;
        }

        //
        // Do the next address range
        //
        NextEntry = MdBlock->ListEntry.Flink;
    }

    //
    // Now update the free descriptors to consume the pages we used up during
    // the PFN allocation loop
    //
    MxFreeDescriptor->BasePage = FreePage;
    MxFreeDescriptor->PageCount = FreePageCount;

    /* Call back into the legacy Mm to set up the PFN database */
    MmInitializePageList();

    //
    // Reset the descriptor back so we can create the correct memory blocks
    //
    *MxFreeDescriptor = MxOldFreeDescriptor;

    //
    // Initialize the nonpaged pool
    //
    InitializePool(NonPagedPool, 0);

    //
    // We PDE-aligned the nonpaged system start VA, so haul some extra PTEs!
    //
    PointerPte = MiAddressToPte(MmNonPagedSystemStart);
    OldCount = MmNumberOfSystemPtes;
    MmNumberOfSystemPtes = MiAddressToPte(MmNonPagedPoolExpansionStart) -
                           PointerPte;
    MmNumberOfSystemPtes--;
    DPRINT("Final System PTE count: %d (%d bytes)\n",
           MmNumberOfSystemPtes, MmNumberOfSystemPtes * PAGE_SIZE);

    //
    // Create the system PTE space
    //
    MiInitializeSystemPtes(PointerPte, MmNumberOfSystemPtes, SystemPteSpace);

    //
    // Get the PDE for hyperspace
    //
    StartPde = MiAddressToPde(HYPER_SPACE);

    //
    // Allocate a page for it and create it
    //
    PageFrameIndex = MmAllocPage(MC_SYSTEM, 0);
    TempPde.u.Hard.PageFrameNumber = PageFrameIndex;
    TempPde.u.Hard.Global = FALSE; // Hyperspace is local!
    ASSERT(StartPde->u.Hard.Valid == 0);
    ASSERT(TempPde.u.Hard.Valid == 1);
    *StartPde = TempPde;

    //
    // Zero out the page table now
    //
    PointerPte = MiAddressToPte(HYPER_SPACE);
    RtlZeroMemory(PointerPte, PAGE_SIZE);

    //
    // Setup the mapping PTEs
    //
    MmFirstReservedMappingPte = MiAddressToPte(MI_MAPPING_RANGE_START);
    MmLastReservedMappingPte = MiAddressToPte(MI_MAPPING_RANGE_END);
    // The first mapping PTE's frame-number field doubles as the free counter
    MmFirstReservedMappingPte->u.Hard.PageFrameNumber = MI_HYPERSPACE_PTES;

    //
    // Reserve system PTEs for zeroing PTEs and clear them
    //
    MiFirstReservedZeroingPte = MiReserveSystemPtes(MI_ZERO_PTES,
                                                    SystemPteSpace);
    RtlZeroMemory(MiFirstReservedZeroingPte, MI_ZERO_PTES * sizeof(MMPTE));

    //
    // Set the counter to maximum to boot with
    //
    MiFirstReservedZeroingPte->u.Hard.PageFrameNumber = MI_ZERO_PTES - 1;

    return STATUS_SUCCESS;
}
763
764 /* EOF */