[NTOS]: MxGetNextPage is not platform-specific, so share it.
[reactos.git] / reactos / ntoskrnl / mm / ARM3 / i386 / init.c
1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: BSD - See COPYING.ARM in the top level directory
4 * FILE: ntoskrnl/mm/ARM3/i386/init.c
5 * PURPOSE: ARM Memory Manager Initialization for x86
6 * PROGRAMMERS: ReactOS Portable Systems Group
7 */
8
9 /* INCLUDES *******************************************************************/
10
11 #include <ntoskrnl.h>
12 #define NDEBUG
13 #include <debug.h>
14
15 #line 15 "ARMĀ³::INIT:X86"
16 #define MODULE_INVOLVED_IN_ARM3
17 #include "../../ARM3/miarm.h"
18
19 /* GLOBALS ********************************************************************/
20
21 //
22 // Before we have a PFN database, memory comes straight from our physical memory
23 // blocks, which is nice because it's guaranteed contiguous and also because once
24 // we take a page from here, the system doesn't see it anymore.
25 // However, once the fun is over, those pages must be re-integrated back into
26 // PFN society life, and that requires us keeping a copy of the original layout
27 // so that we can parse it later.
28 //
29 PMEMORY_ALLOCATION_DESCRIPTOR MxFreeDescriptor;
30 MEMORY_ALLOCATION_DESCRIPTOR MxOldFreeDescriptor;
31
32 /* Template PTE and PDE for a kernel page */
33 MMPTE ValidKernelPde = {.u.Hard.Valid = 1, .u.Hard.Write = 1, .u.Hard.Dirty = 1, .u.Hard.Accessed = 1};
34 MMPTE ValidKernelPte = {.u.Hard.Valid = 1, .u.Hard.Write = 1, .u.Hard.Dirty = 1, .u.Hard.Accessed = 1};
35
36 /* Make the code cleaner with some definitions for size multiples */
37 #define _1KB (1024)
38 #define _1MB (1000 * _1KB)
39
40 /* Architecture specific size of a PDE directory, and size of a page table */
41 #define PDE_SIZE (4096 * sizeof(MMPDE))
42 #define PT_SIZE (1024 * sizeof(MMPTE))
43
44 /* PRIVATE FUNCTIONS **********************************************************/
45
46 VOID
47 NTAPI
48 MiComputeNonPagedPoolVa(IN ULONG FreePages)
49 {
50 IN PFN_NUMBER PoolPages;
51
52 /* Check if this is a machine with less than 256MB of RAM, and no overide */
53 if ((MmNumberOfPhysicalPages <= MI_MIN_PAGES_FOR_NONPAGED_POOL_TUNING) &&
54 !(MmSizeOfNonPagedPoolInBytes))
55 {
56 /* Force the non paged pool to be 2MB so we can reduce RAM usage */
57 MmSizeOfNonPagedPoolInBytes = 2 * _1MB;
58 }
59
60 /* Hyperspace ends here */
61 MmHyperSpaceEnd = (PVOID)((ULONG_PTR)MmSystemCacheWorkingSetList - 1);
62
63 /* Check if the user gave a ridicuously large nonpaged pool RAM size */
64 if ((MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT) > (FreePages * 7 / 8))
65 {
66 /* More than 7/8ths of RAM was dedicated to nonpaged pool, ignore! */
67 MmSizeOfNonPagedPoolInBytes = 0;
68 }
69
70 /* Check if no registry setting was set, or if the setting was too low */
71 if (MmSizeOfNonPagedPoolInBytes < MmMinimumNonPagedPoolSize)
72 {
73 /* Start with the minimum (256 KB) and add 32 KB for each MB above 4 */
74 MmSizeOfNonPagedPoolInBytes = MmMinimumNonPagedPoolSize;
75 MmSizeOfNonPagedPoolInBytes += (FreePages - 1024) / 256 * MmMinAdditionNonPagedPoolPerMb;
76 }
77
78 /* Check if the registy setting or our dynamic calculation was too high */
79 if (MmSizeOfNonPagedPoolInBytes > MI_MAX_INIT_NONPAGED_POOL_SIZE)
80 {
81 /* Set it to the maximum */
82 MmSizeOfNonPagedPoolInBytes = MI_MAX_INIT_NONPAGED_POOL_SIZE;
83 }
84
85 /* Check if a percentage cap was set through the registry */
86 if (MmMaximumNonPagedPoolPercent) UNIMPLEMENTED;
87
88 /* Page-align the nonpaged pool size */
89 MmSizeOfNonPagedPoolInBytes &= ~(PAGE_SIZE - 1);
90
91 /* Now, check if there was a registry size for the maximum size */
92 if (!MmMaximumNonPagedPoolInBytes)
93 {
94 /* Start with the default (1MB) */
95 MmMaximumNonPagedPoolInBytes = MmDefaultMaximumNonPagedPool;
96
97 /* Add space for PFN database */
98 MmMaximumNonPagedPoolInBytes += (ULONG)
99 PAGE_ALIGN((MmHighestPhysicalPage + 1) * sizeof(MMPFN));
100
101 /* Check if the machine has more than 512MB of free RAM */
102 if (FreePages >= 0x1F000)
103 {
104 /* Add 200KB for each MB above 4 */
105 MmMaximumNonPagedPoolInBytes += (FreePages - 1024) / 256 *
106 (MmMaxAdditionNonPagedPoolPerMb / 2);
107 if (MmMaximumNonPagedPoolInBytes < MI_MAX_NONPAGED_POOL_SIZE)
108 {
109 /* Make it at least 128MB since this machine has a lot of RAM */
110 MmMaximumNonPagedPoolInBytes = MI_MAX_NONPAGED_POOL_SIZE;
111 }
112 }
113 else
114 {
115 /* Add 400KB for each MB above 4 */
116 MmMaximumNonPagedPoolInBytes += (FreePages - 1024) / 256 *
117 MmMaxAdditionNonPagedPoolPerMb;
118 }
119 }
120
121 /* Make sure there's at least 16 pages + the PFN available for expansion */
122 PoolPages = MmSizeOfNonPagedPoolInBytes + (PAGE_SIZE * 16) +
123 ((ULONG)PAGE_ALIGN(MmHighestPhysicalPage + 1) * sizeof(MMPFN));
124 if (MmMaximumNonPagedPoolInBytes < PoolPages)
125 {
126 /* The maximum should be at least high enough to cover all the above */
127 MmMaximumNonPagedPoolInBytes = PoolPages;
128 }
129
130 /* Systems with 2GB of kernel address space get double the size */
131 PoolPages = MI_MAX_NONPAGED_POOL_SIZE * 2;
132
133 /* On the other hand, make sure that PFN + nonpaged pool doesn't get too big */
134 if (MmMaximumNonPagedPoolInBytes > PoolPages)
135 {
136 /* Trim it down to the maximum architectural limit (256MB) */
137 MmMaximumNonPagedPoolInBytes = PoolPages;
138 }
139
140 /* Check if this is a system with > 128MB of non paged pool */
141 if (MmMaximumNonPagedPoolInBytes > MI_MAX_NONPAGED_POOL_SIZE)
142 {
143 /* Check if the initial size is less than the extra 128MB boost */
144 if (MmSizeOfNonPagedPoolInBytes < (MmMaximumNonPagedPoolInBytes -
145 MI_MAX_NONPAGED_POOL_SIZE))
146 {
147 /* FIXME: Should check if the initial pool can be expanded */
148
149 /* Assume no expansion possible, check ift he maximum is too large */
150 if (MmMaximumNonPagedPoolInBytes > (MmSizeOfNonPagedPoolInBytes +
151 MI_MAX_NONPAGED_POOL_SIZE))
152 {
153 /* Set it to the initial value plus the boost */
154 MmMaximumNonPagedPoolInBytes = MmSizeOfNonPagedPoolInBytes +
155 MI_MAX_NONPAGED_POOL_SIZE;
156 }
157 }
158 }
159 }
160
/*
 * MiInitMachineDependent
 *
 * Performs the x86-specific portion of ARM3 memory manager initialization:
 * sanitizes the kernel large-stack size, builds the kernel page tables for
 * the nonpaged system VA, PFN database and initial nonpaged pool, maps the
 * (shadow) PFN database, sets up system PTEs, hyperspace and the reserved
 * mapping/zeroing PTEs.
 *
 * LoaderBlock - loader parameter block containing the physical memory
 *               descriptor list.
 *
 * Returns STATUS_SUCCESS (no failure paths other than bugcheck).
 */
NTSTATUS
NTAPI
MiInitMachineDependent(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
    PLIST_ENTRY NextEntry;
    PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
    ULONG FreePages = 0;
    PFN_NUMBER PageFrameIndex;
    PMMPTE StartPde, EndPde, PointerPte, LastPte;
    MMPTE TempPde, TempPte;
    PVOID NonPagedPoolExpansionVa;
    ULONG OldCount, L2Associativity;
    PFN_NUMBER FreePage, FreePageCount, PagesLeft, BasePage, PageCount;

    /* Check for kernel stack size that's too big */
    if (MmLargeStackSize > (KERNEL_LARGE_STACK_SIZE / _1KB))
    {
        /* Sanitize to default value */
        MmLargeStackSize = KERNEL_LARGE_STACK_SIZE;
    }
    else
    {
        /* Take the registry setting, and convert it into bytes */
        MmLargeStackSize *= _1KB;

        /* Now align it to a page boundary */
        MmLargeStackSize = PAGE_ROUND_UP(MmLargeStackSize);

        /* Sanity checks */
        ASSERT(MmLargeStackSize <= KERNEL_LARGE_STACK_SIZE);
        ASSERT((MmLargeStackSize & (PAGE_SIZE - 1)) == 0);

        /* Make sure it's not too low */
        if (MmLargeStackSize < KERNEL_STACK_SIZE) MmLargeStackSize = KERNEL_STACK_SIZE;
    }

    /* Check for global bit */
    if (KeFeatureBits & KF_GLOBAL_PAGE)
    {
        /* Set it on the template PTE and PDE */
        ValidKernelPte.u.Hard.Global = TRUE;
        ValidKernelPde.u.Hard.Global = TRUE;
    }

    /* Now templates are ready */
    TempPte = ValidKernelPte;
    TempPde = ValidKernelPde;

    //
    // Set CR3 for the system process
    //
    // NOTE: PageFrameIndex here is actually a physical ADDRESS
    // (PFN << PAGE_SHIFT), which is the form DirectoryTableBase expects.
    //
    PointerPte = MiAddressToPde(PTE_BASE);
    PageFrameIndex = PFN_FROM_PTE(PointerPte) << PAGE_SHIFT;
    PsGetCurrentProcess()->Pcb.DirectoryTableBase[0] = PageFrameIndex;

    //
    // Blow away user-mode (zero every PDE below the kernel base)
    //
    StartPde = MiAddressToPde(0);
    EndPde = MiAddressToPde(KSEG0_BASE);
    RtlZeroMemory(StartPde, (EndPde - StartPde) * sizeof(MMPTE));

    //
    // Loop the memory descriptors to compute the physical page counts and
    // bounds, and locate the largest free descriptor for early allocations
    //
    NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
    while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
    {
        //
        // Get the memory block
        //
        MdBlock = CONTAINING_RECORD(NextEntry,
                                    MEMORY_ALLOCATION_DESCRIPTOR,
                                    ListEntry);

        //
        // Skip invisible memory (firmware/special/HAL-cached/BBT regions are
        // not managed by Mm at all)
        //
        if ((MdBlock->MemoryType != LoaderFirmwarePermanent) &&
            (MdBlock->MemoryType != LoaderSpecialMemory) &&
            (MdBlock->MemoryType != LoaderHALCachedMemory) &&
            (MdBlock->MemoryType != LoaderBBTMemory))
        {
            //
            // Check if BURNMEM was used (bad pages don't count as physical)
            //
            if (MdBlock->MemoryType != LoaderBad)
            {
                //
                // Count this in the total of pages
                //
                MmNumberOfPhysicalPages += MdBlock->PageCount;
            }

            //
            // Check if this is the new lowest page
            //
            if (MdBlock->BasePage < MmLowestPhysicalPage)
            {
                //
                // Update the lowest page
                //
                MmLowestPhysicalPage = MdBlock->BasePage;
            }

            //
            // Check if this is the new highest page
            //
            PageFrameIndex = MdBlock->BasePage + MdBlock->PageCount;
            if (PageFrameIndex > MmHighestPhysicalPage)
            {
                //
                // Update the highest page
                //
                MmHighestPhysicalPage = PageFrameIndex - 1;
            }

            //
            // Check if this is free memory
            //
            if ((MdBlock->MemoryType == LoaderFree) ||
                (MdBlock->MemoryType == LoaderLoadedProgram) ||
                (MdBlock->MemoryType == LoaderFirmwareTemporary) ||
                (MdBlock->MemoryType == LoaderOsloaderStack))
            {
                //
                // Check if this is the largest memory descriptor
                //
                if (MdBlock->PageCount > FreePages)
                {
                    //
                    // For now, it is
                    //
                    MxFreeDescriptor = MdBlock;
                }

                //
                // More free pages
                //
                FreePages += MdBlock->PageCount;
            }
        }

        //
        // Keep going
        //
        NextEntry = MdBlock->ListEntry.Flink;
    }

    //
    // Save original values of the free descriptor, since it'll be
    // altered by early allocations
    //
    MxOldFreeDescriptor = *MxFreeDescriptor;

    /* Compute non paged pool limits and size */
    MiComputeNonPagedPoolVa(FreePages);

    //
    // Get L2 cache information (cache size / associativity = bytes per way)
    //
    L2Associativity = KeGetPcr()->SecondLevelCacheAssociativity;
    MmSecondaryColors = KeGetPcr()->SecondLevelCacheSize;
    if (L2Associativity) MmSecondaryColors /= L2Associativity;

    //
    // Compute final color mask and count (at least one color)
    //
    MmSecondaryColors >>= PAGE_SHIFT;
    if (!MmSecondaryColors) MmSecondaryColors = 1;
    MmSecondaryColorMask = MmSecondaryColors - 1;

    //
    // Store it in the PRCB
    //
    KeGetCurrentPrcb()->SecondaryColorMask = MmSecondaryColorMask;

    //
    // Calculate the number of bytes for the PFN database
    // and then convert to pages
    //
    MxPfnAllocation = (MmHighestPhysicalPage + 1) * sizeof(MMPFN);
    MxPfnAllocation >>= PAGE_SHIFT;

    //
    // We have to add one to the count here, because in the process of
    // shifting down to the page size, we actually ended up getting the
    // lower aligned size (so say, 0x5FFFF bytes is now 0x5F pages).
    // Later on, we'll shift this number back into bytes, which would cause
    // us to end up with only 0x5F000 bytes -- when we actually want to have
    // 0x60000 bytes.
    //
    MxPfnAllocation++;

    //
    // Now calculate the nonpaged pool expansion VA region
    //
    MmNonPagedPoolStart = (PVOID)((ULONG_PTR)MmNonPagedPoolEnd -
                                  MmMaximumNonPagedPoolInBytes +
                                  MmSizeOfNonPagedPoolInBytes);
    MmNonPagedPoolStart = (PVOID)PAGE_ALIGN(MmNonPagedPoolStart);
    NonPagedPoolExpansionVa = MmNonPagedPoolStart;
    DPRINT("NP Pool has been tuned to: %d bytes and %d bytes\n",
           MmSizeOfNonPagedPoolInBytes, MmMaximumNonPagedPoolInBytes);

    //
    // Now calculate the nonpaged system VA region, which includes the
    // nonpaged pool expansion (above) and the system PTEs. Note that it is
    // then aligned to a PDE boundary (4MB).
    //
    MmNonPagedSystemStart = (PVOID)((ULONG_PTR)MmNonPagedPoolStart -
                                    (MmNumberOfSystemPtes + 1) * PAGE_SIZE);
    MmNonPagedSystemStart = (PVOID)((ULONG_PTR)MmNonPagedSystemStart &
                                    ~((4 * 1024 * 1024) - 1));

    //
    // Don't let it go below the minimum
    //
    if (MmNonPagedSystemStart < (PVOID)0xEB000000)
    {
        //
        // This is a hard-coded limit in the Windows NT address space
        //
        MmNonPagedSystemStart = (PVOID)0xEB000000;

        //
        // Reduce the amount of system PTEs to reach this point
        //
        MmNumberOfSystemPtes = ((ULONG_PTR)MmNonPagedPoolStart -
                                (ULONG_PTR)MmNonPagedSystemStart) >>
                                PAGE_SHIFT;
        MmNumberOfSystemPtes--;
        ASSERT(MmNumberOfSystemPtes > 1000);
    }

    //
    // Check if we are in a situation where the size of the paged pool
    // is so large that it overflows into nonpaged pool
    //
    if (MmSizeOfPagedPoolInBytes >
        ((ULONG_PTR)MmNonPagedSystemStart - (ULONG_PTR)MmPagedPoolStart))
    {
        //
        // We need some recalculations here (not handled yet, just warn)
        //
        DPRINT1("Paged pool is too big!\n");
    }

    //
    // Normally, the PFN database should start after the loader images.
    // This is already the case in ReactOS, but for now we want to co-exist
    // with the old memory manager, so we'll create a "Shadow PFN Database"
    // instead, and arbitrarily start it at 0xB0000000.
    //
    MmPfnDatabase = (PVOID)0xB0000000;
    ASSERT(((ULONG_PTR)MmPfnDatabase & ((4 * 1024 * 1024) - 1)) == 0);

    //
    // Non paged pool comes after the PFN database
    //
    MmNonPagedPoolStart = (PVOID)((ULONG_PTR)MmPfnDatabase +
                                  (MxPfnAllocation << PAGE_SHIFT));

    //
    // Now we actually need to get these many physical pages. Nonpaged pool
    // is actually also physically contiguous (but not the expansion)
    //
    PageFrameIndex = MxGetNextPage(MxPfnAllocation +
                                   (MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT));
    ASSERT(PageFrameIndex != 0);
    DPRINT("PFN DB PA PFN begins at: %lx\n", PageFrameIndex);
    DPRINT("NP PA PFN begins at: %lx\n", PageFrameIndex + MxPfnAllocation);

    //
    // Now we need some pages to create the page tables for the NP system VA
    // which includes system PTEs and expansion NP
    //
    StartPde = MiAddressToPde(MmNonPagedSystemStart);
    EndPde = MiAddressToPde((PVOID)((ULONG_PTR)MmNonPagedPoolEnd - 1));
    while (StartPde <= EndPde)
    {
        //
        // Sanity check: the PDE must not already be mapped
        //
        ASSERT(StartPde->u.Hard.Valid == 0);

        //
        // Get a page for the page table and make the PDE valid
        //
        TempPde.u.Hard.PageFrameNumber = MxGetNextPage(1);
        ASSERT(TempPde.u.Hard.Valid == 1);
        *StartPde = TempPde;

        //
        // Zero out the page table
        //
        PointerPte = MiPteToAddress(StartPde);
        RtlZeroMemory(PointerPte, PAGE_SIZE);

        //
        // Next
        //
        StartPde++;
    }

    //
    // Now we need pages for the page tables which will map initial NP
    // (from the PFN database through the end of initial nonpaged pool)
    //
    StartPde = MiAddressToPde(MmPfnDatabase);
    EndPde = MiAddressToPde((PVOID)((ULONG_PTR)MmNonPagedPoolStart +
                                    MmSizeOfNonPagedPoolInBytes - 1));
    while (StartPde <= EndPde)
    {
        //
        // Sanity check: the PDE must not already be mapped
        //
        ASSERT(StartPde->u.Hard.Valid == 0);

        //
        // Get a page for the page table and make the PDE valid
        //
        TempPde.u.Hard.PageFrameNumber = MxGetNextPage(1);
        ASSERT(TempPde.u.Hard.Valid == 1);
        *StartPde = TempPde;

        //
        // Zero out the page table
        //
        PointerPte = MiPteToAddress(StartPde);
        RtlZeroMemory(PointerPte, PAGE_SIZE);

        //
        // Next
        //
        StartPde++;
    }

    //
    // Now remember where the expansion starts
    //
    MmNonPagedPoolExpansionStart = NonPagedPoolExpansionVa;

    //
    // Last step is to actually map the nonpaged pool, consuming the
    // physically contiguous run obtained from MxGetNextPage above
    //
    PointerPte = MiAddressToPte(MmNonPagedPoolStart);
    LastPte = MiAddressToPte((PVOID)((ULONG_PTR)MmNonPagedPoolStart +
                                     MmSizeOfNonPagedPoolInBytes - 1));
    while (PointerPte <= LastPte)
    {
        //
        // Use one of our contiguous pages
        //
        TempPte.u.Hard.PageFrameNumber = PageFrameIndex++;
        ASSERT(PointerPte->u.Hard.Valid == 0);
        ASSERT(TempPte.u.Hard.Valid == 1);
        *PointerPte++ = TempPte;
    }

    //
    // Sanity check: make sure we have properly defined the system PTE space
    //
    ASSERT(MiAddressToPte(MmNonPagedSystemStart) <
           MiAddressToPte(MmNonPagedPoolExpansionStart));

    //
    // Now go ahead and initialize the ARM³ nonpaged pool
    //
    MiInitializeArmPool();

    //
    // Get current page data, since we won't be using MxGetNextPage as it
    // would corrupt our state
    //
    FreePage = MxFreeDescriptor->BasePage;
    FreePageCount = MxFreeDescriptor->PageCount;
    PagesLeft = 0;

    //
    // Loop the memory descriptors and map PFN database pages for each
    // range that Mm manages
    //
    NextEntry = KeLoaderBlock->MemoryDescriptorListHead.Flink;
    while (NextEntry != &KeLoaderBlock->MemoryDescriptorListHead)
    {
        //
        // Get the descriptor
        //
        MdBlock = CONTAINING_RECORD(NextEntry,
                                    MEMORY_ALLOCATION_DESCRIPTOR,
                                    ListEntry);
        if ((MdBlock->MemoryType == LoaderFirmwarePermanent) ||
            (MdBlock->MemoryType == LoaderBBTMemory) ||
            (MdBlock->MemoryType == LoaderSpecialMemory))
        {
            //
            // These pages are not part of the PFN database
            //
            NextEntry = MdBlock->ListEntry.Flink;
            continue;
        }

        //
        // Next, check if this is our special free descriptor we've found
        //
        if (MdBlock == MxFreeDescriptor)
        {
            //
            // Use the real numbers instead (the live descriptor has been
            // shrunk by our early allocations)
            //
            BasePage = MxOldFreeDescriptor.BasePage;
            PageCount = MxOldFreeDescriptor.PageCount;
        }
        else
        {
            //
            // Use the descriptor's numbers
            //
            BasePage = MdBlock->BasePage;
            PageCount = MdBlock->PageCount;
        }

        //
        // Get the PTEs for this range of PFN database entries
        //
        PointerPte = MiAddressToPte(&MmPfnDatabase[BasePage]);
        LastPte = MiAddressToPte(((ULONG_PTR)&MmPfnDatabase[BasePage + PageCount]) - 1);
        DPRINT("MD Type: %lx Base: %lx Count: %lx\n", MdBlock->MemoryType, BasePage, PageCount);

        //
        // Loop them
        //
        while (PointerPte <= LastPte)
        {
            //
            // We'll only touch PTEs that aren't already valid
            // (adjacent descriptors may share a PFN database page)
            //
            if (PointerPte->u.Hard.Valid == 0)
            {
                //
                // Use the next free page
                //
                TempPte.u.Hard.PageFrameNumber = FreePage;
                ASSERT(FreePageCount != 0);

                //
                // Consume free pages
                //
                FreePage++;
                FreePageCount--;
                if (!FreePageCount)
                {
                    //
                    // Out of memory
                    //
                    KeBugCheckEx(INSTALL_MORE_MEMORY,
                                 MmNumberOfPhysicalPages,
                                 FreePageCount,
                                 MxOldFreeDescriptor.PageCount,
                                 1);
                }

                //
                // Write out this PTE
                //
                PagesLeft++;
                ASSERT(PointerPte->u.Hard.Valid == 0);
                ASSERT(TempPte.u.Hard.Valid == 1);
                *PointerPte = TempPte;

                //
                // Zero this page
                //
                RtlZeroMemory(MiPteToAddress(PointerPte), PAGE_SIZE);
            }

            //
            // Next!
            //
            PointerPte++;
        }

        //
        // Do the next address range
        //
        NextEntry = MdBlock->ListEntry.Flink;
    }

    //
    // Now update the free descriptors to consume the pages we used up during
    // the PFN allocation loop
    //
    MxFreeDescriptor->BasePage = FreePage;
    MxFreeDescriptor->PageCount = FreePageCount;

    /* Call back into the legacy Mm to set up the PFN database */
    MmInitializePageList();

    //
    // Reset the descriptor back so we can create the correct memory blocks
    //
    *MxFreeDescriptor = MxOldFreeDescriptor;

    //
    // Initialize the nonpaged pool
    //
    InitializePool(NonPagedPool, 0);

    //
    // We PDE-aligned the nonpaged system start VA, so haul some extra PTEs!
    //
    PointerPte = MiAddressToPte(MmNonPagedSystemStart);
    OldCount = MmNumberOfSystemPtes;
    MmNumberOfSystemPtes = MiAddressToPte(MmNonPagedPoolExpansionStart) -
                           PointerPte;
    MmNumberOfSystemPtes--;
    DPRINT("Final System PTE count: %d (%d bytes)\n",
           MmNumberOfSystemPtes, MmNumberOfSystemPtes * PAGE_SIZE);

    //
    // Create the system PTE space
    //
    MiInitializeSystemPtes(PointerPte, MmNumberOfSystemPtes, SystemPteSpace);

    //
    // Get the PDE for hyperspace
    //
    StartPde = MiAddressToPde(HYPER_SPACE);

    //
    // Allocate a page for it and create it
    //
    PageFrameIndex = MmAllocPage(MC_SYSTEM, 0);
    TempPde.u.Hard.PageFrameNumber = PageFrameIndex;
    TempPde.u.Hard.Global = FALSE; // Hyperspace is local!
    ASSERT(StartPde->u.Hard.Valid == 0);
    ASSERT(TempPde.u.Hard.Valid == 1);
    *StartPde = TempPde;

    //
    // Zero out the page table now
    //
    PointerPte = MiAddressToPte(HYPER_SPACE);
    RtlZeroMemory(PointerPte, PAGE_SIZE);

    //
    // Setup the mapping PTEs
    //
    MmFirstReservedMappingPte = MiAddressToPte(MI_MAPPING_RANGE_START);
    MmLastReservedMappingPte = MiAddressToPte(MI_MAPPING_RANGE_END);
    MmFirstReservedMappingPte->u.Hard.PageFrameNumber = MI_HYPERSPACE_PTES;

    //
    // Reserve system PTEs for zeroing PTEs and clear them
    //
    MiFirstReservedZeroingPte = MiReserveSystemPtes(MI_ZERO_PTES,
                                                    SystemPteSpace);
    RtlZeroMemory(MiFirstReservedZeroingPte, MI_ZERO_PTES * sizeof(MMPTE));

    //
    // Set the counter to maximum to boot with
    //
    MiFirstReservedZeroingPte->u.Hard.PageFrameNumber = MI_ZERO_PTES - 1;

    return STATUS_SUCCESS;
}
726
727 /* EOF */