Merge from amd64-branch:
[reactos.git] / reactos / ntoskrnl / mm / ARM3 / i386 / init.c
1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: BSD - See COPYING.ARM in the top level directory
4 * FILE: ntoskrnl/mm/ARM3/i386/init.c
5 * PURPOSE: ARM Memory Manager Initialization for x86
6 * PROGRAMMERS: ReactOS Portable Systems Group
7 */
8
9 /* INCLUDES *******************************************************************/
10
11 #include <ntoskrnl.h>
12 #define NDEBUG
13 #include <debug.h>
14
15 #line 15 "ARM³::INIT"
16 #define MODULE_INVOLVED_IN_ARM3
17 #include "../../ARM3/miarm.h"
18
19 /* GLOBALS ********************************************************************/
20
//
// These are all registry-configurable, but by default, the memory manager will
// figure out the most appropriate values.
//
ULONG MmMaximumNonPagedPoolPercent;
ULONG MmSizeOfNonPagedPoolInBytes;
ULONG MmMaximumNonPagedPoolInBytes;

//
// These numbers describe the discrete equation components of the nonpaged
// pool sizing algorithm.
//
// They are described on http://support.microsoft.com/default.aspx/kb/126402/ja
// along with the algorithm that uses them, which is implemented later below.
//
ULONG MmMinimumNonPagedPoolSize = 256 * 1024;
ULONG MmMinAdditionNonPagedPoolPerMb = 32 * 1024;
ULONG MmDefaultMaximumNonPagedPool = 1024 * 1024;
ULONG MmMaxAdditionNonPagedPoolPerMb = 400 * 1024;

//
// The memory layout (and especially variable names) of the NT kernel mode
// components can be a bit hard to twig, especially when it comes to the non
// paged area.
//
// There are really two components to the non-paged pool:
//
// - The initial nonpaged pool, sized dynamically up to a maximum.
// - The expansion nonpaged pool, sized dynamically up to a maximum.
//
// The initial nonpaged pool is physically contiguous for performance, and
// immediately follows the PFN database, typically sharing the same PDE. It is
// a very small resource (32MB on a 1GB system), and capped at 128MB.
//
// Right now we call this the "ARM³ Nonpaged Pool" and it begins somewhere after
// the PFN database (which starts at 0xB0000000).
//
// The expansion nonpaged pool, on the other hand, can grow much bigger (400MB
// for a 1GB system). On ARM³ however, it is currently capped at 128MB.
//
// The address where the initial nonpaged pool starts is aptly named
// MmNonPagedPoolStart, and it describes a range of MmSizeOfNonPagedPoolInBytes
// bytes.
//
// Expansion nonpaged pool starts at an address described by the variable called
// MmNonPagedPoolExpansionStart, and it goes on for MmMaximumNonPagedPoolInBytes
// minus MmSizeOfNonPagedPoolInBytes bytes, always reaching MmNonPagedPoolEnd
// (because of the way it's calculated) at 0xFFBE0000.
//
// Initial nonpaged pool is allocated and mapped early-on during boot, but what
// about the expansion nonpaged pool? It is instead composed of special pages
// which belong to what are called System PTEs. These PTEs are the matter of a
// later discussion, but they are also considered part of the "nonpaged" OS, due
// to the fact that they are never paged out -- once an address is described by
// a System PTE, it is always valid, until the System PTE is torn down.
//
// System PTEs are actually composed of two "spaces", the system space proper,
// and the nonpaged pool expansion space. The latter, as we've already seen,
// begins at MmNonPagedPoolExpansionStart. Based on the number of System PTEs
// that the system will support, the remaining address space below this address
// is used to hold the system space PTEs. This address, in turn, is held in the
// variable named MmNonPagedSystemStart, which itself is never allowed to go
// below 0xEB000000 (thus creating an upper bound on the number of System PTEs).
//
// This means that 330MB are reserved for total nonpaged system VA, on top of
// whatever the initial nonpaged pool allocation is.
//
// The following URLs, valid as of April 23rd, 2008, support this evidence:
//
// http://www.cs.miami.edu/~burt/journal/NT/memory.html
// http://www.ditii.com/2007/09/28/windows-memory-management-x86-virtual-address-space/
//
PVOID MmNonPagedSystemStart;
PVOID MmNonPagedPoolStart;
PVOID MmNonPagedPoolExpansionStart;
PVOID MmNonPagedPoolEnd = MI_NONPAGED_POOL_END;

//
// This is where paged pool starts by default
//
PVOID MmPagedPoolStart = MI_PAGED_POOL_START;
PVOID MmPagedPoolEnd;

//
// And this is its default size
//
ULONG MmSizeOfPagedPoolInBytes = MI_MIN_INIT_PAGED_POOLSIZE;
PFN_NUMBER MmSizeOfPagedPoolInPages = MI_MIN_INIT_PAGED_POOLSIZE / PAGE_SIZE;

//
// Session space starts at 0xBFFFFFFF and grows downwards
// By default, it includes an 8MB image area where we map win32k and video card
// drivers, followed by a 4MB area containing the session's working set. This is
// then followed by a 20MB mapped view area and finally by the session's paged
// pool, by default 16MB.
//
// On a normal system, this results in session space occupying the region from
// 0xBD000000 to 0xC0000000
//
// See miarm.h for the defines that determine the sizing of this region. On an
// NT system, some of these can be configured through the registry, but we don't
// support that yet.
//
PVOID MiSessionSpaceEnd;    // 0xC0000000
PVOID MiSessionImageEnd;    // 0xC0000000
PVOID MiSessionImageStart;  // 0xBF800000
PVOID MiSessionViewStart;   // 0xBE000000
PVOID MiSessionPoolEnd;     // 0xBE000000
PVOID MiSessionPoolStart;   // 0xBD000000
PVOID MmSessionBase;        // 0xBD000000
ULONG MmSessionSize;
ULONG MmSessionViewSize;
ULONG MmSessionPoolSize;
ULONG MmSessionImageSize;

//
// The system view space, on the other hand, is where sections that are memory
// mapped into "system space" end up.
//
// By default, it is a 16MB region.
//
PVOID MiSystemViewStart;
ULONG MmSystemViewSize;

//
// A copy of the system page directory (the page directory associated with the
// System process) is kept (double-mapped) by the manager in order to lazily
// map paged pool PDEs into external processes when they fault on a paged pool
// address.
//
PFN_NUMBER MmSystemPageDirectory;
PMMPTE MmSystemPagePtes;

//
// The system cache starts right after hyperspace. The first few pages are for
// keeping track of the system working set list.
//
// This should be 0xC0C00000 -- the cache itself starts at 0xC1000000
//
PMMWSL MmSystemCacheWorkingSetList = MI_SYSTEM_CACHE_WS_START;

//
// Windows NT seems to choose between 7000, 11000 and 50000
// On systems with more than 32MB, this number is then doubled, and further
// aligned up to a PDE boundary (4MB).
//
ULONG MmNumberOfSystemPtes;

//
// This is how many pages the PFN database will take up
// In Windows, this includes the Quark Color Table, but not in ARM³
//
ULONG MxPfnAllocation;

//
// Unlike the old ReactOS Memory Manager, ARM³ (and Windows) does not keep track
// of pages that are not actually valid physical memory, such as ACPI reserved
// regions, BIOS address ranges, or holes in physical memory address space which
// could indicate device-mapped I/O memory.
//
// In fact, the lack of a PFN entry for a page usually indicates that this is
// I/O space instead.
//
// A bitmap, called the PFN bitmap, keeps track of all page frames by assigning
// a bit to each. If the bit is set, then the page is valid physical RAM.
//
RTL_BITMAP MiPfnBitMap;

//
// This structure describes the different pieces of RAM-backed address space
//
PPHYSICAL_MEMORY_DESCRIPTOR MmPhysicalMemoryBlock;

//
// Before we have a PFN database, memory comes straight from our physical memory
// blocks, which is nice because it's guaranteed contiguous and also because once
// we take a page from here, the system doesn't see it anymore.
// However, once the fun is over, those pages must be re-integrated back into
// PFN society life, and that requires us keeping a copy of the original layout
// so that we can parse it later.
//
PMEMORY_ALLOCATION_DESCRIPTOR MxFreeDescriptor;
MEMORY_ALLOCATION_DESCRIPTOR MxOldFreeDescriptor;

//
// This is where we keep track of the most basic physical layout markers
//
ULONG MmNumberOfPhysicalPages, MmHighestPhysicalPage, MmLowestPhysicalPage = -1;

//
// The total number of pages mapped by the boot loader, which include the kernel
// HAL, boot drivers, registry, NLS files and other loader data structures is
// kept track of here. This depends on "LoaderPagesSpanned" being correct when
// coming from the loader.
//
// This number is later aligned up to a PDE boundary.
//
ULONG MmBootImageSize;

//
// These three variables keep track of the core separation of address space that
// exists between kernel mode and user mode.
//
ULONG MmUserProbeAddress;
PVOID MmHighestUserAddress;
PVOID MmSystemRangeStart;

//
// System cache region markers and its working-set bookkeeping
//
PVOID MmSystemCacheStart;
PVOID MmSystemCacheEnd;
MMSUPPORT MmSystemCacheWs;

//
// This is where hyperspace ends (followed by the system cache working set)
//
PVOID MmHyperSpaceEnd;

//
// Page coloring algorithm data
//
ULONG MmSecondaryColors;
ULONG MmSecondaryColorMask;

//
// Actual (registry-configurable) size of a GUI thread's stack
//
ULONG MmLargeStackSize;
249
250 /* PRIVATE FUNCTIONS **********************************************************/
251
//
// Mirrors the kernel PDEs for the given VA range into the legacy ReactOS
// global kernel page directory, so that the old Mm sees any page directory
// entries ARM3 has created. Temporary bridge while both managers coexist.
//
VOID
FASTCALL
MiSyncARM3WithROS(IN PVOID AddressStart,
                  IN PVOID AddressEnd)
{
    //
    // Copy each live PDE in [AddressStart, AddressEnd] into the legacy array
    //
    ULONG Pde = ADDR_TO_PDE_OFFSET(AddressStart);
    while (Pde <= ADDR_TO_PDE_OFFSET(AddressEnd))
    {
        //
        // MmGlobalKernelPageDirectory belongs to the old ReactOS Mm; reading
        // the current PDE through the PDE_BASE self-map keeps both in sync
        //
        extern ULONG MmGlobalKernelPageDirectory[1024];
        MmGlobalKernelPageDirectory[Pde] = ((PULONG)PDE_BASE)[Pde];
        Pde++;
    }
}
274
275 PFN_NUMBER
276 NTAPI
277 MxGetNextPage(IN PFN_NUMBER PageCount)
278 {
279 PFN_NUMBER Pfn;
280
281 //
282 // Make sure we have enough pages
283 //
284 if (PageCount > MxFreeDescriptor->PageCount)
285 {
286 //
287 // Crash the system
288 //
289 KeBugCheckEx(INSTALL_MORE_MEMORY,
290 MmNumberOfPhysicalPages,
291 MxFreeDescriptor->PageCount,
292 MxOldFreeDescriptor.PageCount,
293 PageCount);
294 }
295
296 //
297 // Use our lowest usable free pages
298 //
299 Pfn = MxFreeDescriptor->BasePage;
300 MxFreeDescriptor->BasePage += PageCount;
301 MxFreeDescriptor->PageCount -= PageCount;
302 return Pfn;
303 }
304
305 PFN_NUMBER
306 NTAPI
307 MiPagesInLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock,
308 IN PBOOLEAN IncludeType)
309 {
310 PLIST_ENTRY NextEntry;
311 PFN_NUMBER PageCount = 0;
312 PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
313
314 //
315 // Now loop through the descriptors
316 //
317 NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
318 while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
319 {
320 //
321 // Grab each one, and check if it's one we should include
322 //
323 MdBlock = CONTAINING_RECORD(NextEntry,
324 MEMORY_ALLOCATION_DESCRIPTOR,
325 ListEntry);
326 if ((MdBlock->MemoryType < LoaderMaximum) &&
327 (IncludeType[MdBlock->MemoryType]))
328 {
329 //
330 // Add this to our running total
331 //
332 PageCount += MdBlock->PageCount;
333 }
334
335 //
336 // Try the next descriptor
337 //
338 NextEntry = MdBlock->ListEntry.Flink;
339 }
340
341 //
342 // Return the total
343 //
344 return PageCount;
345 }
346
347 PPHYSICAL_MEMORY_DESCRIPTOR
348 NTAPI
349 MmInitializeMemoryLimits(IN PLOADER_PARAMETER_BLOCK LoaderBlock,
350 IN PBOOLEAN IncludeType)
351 {
352 PLIST_ENTRY NextEntry;
353 ULONG Run = 0, InitialRuns = 0;
354 PFN_NUMBER NextPage = -1, PageCount = 0;
355 PPHYSICAL_MEMORY_DESCRIPTOR Buffer, NewBuffer;
356 PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
357
358 //
359 // Scan the memory descriptors
360 //
361 NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
362 while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
363 {
364 //
365 // For each one, increase the memory allocation estimate
366 //
367 InitialRuns++;
368 NextEntry = NextEntry->Flink;
369 }
370
371 //
372 // Allocate the maximum we'll ever need
373 //
374 Buffer = ExAllocatePoolWithTag(NonPagedPool,
375 sizeof(PHYSICAL_MEMORY_DESCRIPTOR) +
376 sizeof(PHYSICAL_MEMORY_RUN) *
377 (InitialRuns - 1),
378 'lMmM');
379 if (!Buffer) return NULL;
380
381 //
382 // For now that's how many runs we have
383 //
384 Buffer->NumberOfRuns = InitialRuns;
385
386 //
387 // Now loop through the descriptors again
388 //
389 NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
390 while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
391 {
392 //
393 // Grab each one, and check if it's one we should include
394 //
395 MdBlock = CONTAINING_RECORD(NextEntry,
396 MEMORY_ALLOCATION_DESCRIPTOR,
397 ListEntry);
398 if ((MdBlock->MemoryType < LoaderMaximum) &&
399 (IncludeType[MdBlock->MemoryType]))
400 {
401 //
402 // Add this to our running total
403 //
404 PageCount += MdBlock->PageCount;
405
406 //
407 // Check if the next page is described by the next descriptor
408 //
409 if (MdBlock->BasePage == NextPage)
410 {
411 //
412 // Combine it into the same physical run
413 //
414 ASSERT(MdBlock->PageCount != 0);
415 Buffer->Run[Run - 1].PageCount += MdBlock->PageCount;
416 NextPage += MdBlock->PageCount;
417 }
418 else
419 {
420 //
421 // Otherwise just duplicate the descriptor's contents
422 //
423 Buffer->Run[Run].BasePage = MdBlock->BasePage;
424 Buffer->Run[Run].PageCount = MdBlock->PageCount;
425 NextPage = Buffer->Run[Run].BasePage + Buffer->Run[Run].PageCount;
426
427 //
428 // And in this case, increase the number of runs
429 //
430 Run++;
431 }
432 }
433
434 //
435 // Try the next descriptor
436 //
437 NextEntry = MdBlock->ListEntry.Flink;
438 }
439
440 //
441 // We should not have been able to go past our initial estimate
442 //
443 ASSERT(Run <= Buffer->NumberOfRuns);
444
445 //
446 // Our guess was probably exaggerated...
447 //
448 if (InitialRuns > Run)
449 {
450 //
451 // Allocate a more accurately sized buffer
452 //
453 NewBuffer = ExAllocatePoolWithTag(NonPagedPool,
454 sizeof(PHYSICAL_MEMORY_DESCRIPTOR) +
455 sizeof(PHYSICAL_MEMORY_RUN) *
456 (Run - 1),
457 'lMmM');
458 if (NewBuffer)
459 {
460 //
461 // Copy the old buffer into the new, then free it
462 //
463 RtlCopyMemory(NewBuffer->Run,
464 Buffer->Run,
465 sizeof(PHYSICAL_MEMORY_RUN) * Run);
466 ExFreePool(Buffer);
467
468 //
469 // Now use the new buffer
470 //
471 Buffer = NewBuffer;
472 }
473 }
474
475 //
476 // Write the final numbers, and return it
477 //
478 Buffer->NumberOfRuns = Run;
479 Buffer->NumberOfPages = PageCount;
480 return Buffer;
481 }
482
//
// Sets up the initial paged pool: double-maps the system page directory
// through a system PTE, sizes the paged pool VA range, maps its first PDE,
// and initializes the allocation/end-of-allocation bitmaps, the pool
// allocator state, and the paged pool mutex.
//
VOID
NTAPI
MiBuildPagedPool(VOID)
{
    PMMPTE PointerPte, PointerPde;
    MMPTE TempPte = HyperTemplatePte;
    PFN_NUMBER PageFrameIndex;
    KIRQL OldIrql;
    ULONG Size, BitMapSize;

    //
    // Get the page frame number for the system page directory
    //
    PointerPte = MiAddressToPte(PDE_BASE);
    MmSystemPageDirectory = PFN_FROM_PTE(PointerPte);

    //
    // Allocate a system PTE which will hold a copy of the page directory
    //
    PointerPte = MiReserveSystemPtes(1, SystemPteSpace);
    ASSERT(PointerPte);
    MmSystemPagePtes = MiPteToAddress(PointerPte);

    //
    // Make this system PTE point to the system page directory.
    // It is now essentially double-mapped. This will be used later for lazy
    // evaluation of PDEs across process switches, similarly to how the Global
    // page directory array in the old ReactOS Mm is used (but in a less hacky
    // way).
    //
    TempPte = HyperTemplatePte;
    TempPte.u.Hard.PageFrameNumber = MmSystemPageDirectory;
    ASSERT(PointerPte->u.Hard.Valid == 0);
    ASSERT(TempPte.u.Hard.Valid == 1);
    *PointerPte = TempPte;

    //
    // Let's get back to paged pool work: size it up.
    // By default, it should be twice as big as nonpaged pool.
    //
    MmSizeOfPagedPoolInBytes = 2 * MmMaximumNonPagedPoolInBytes;
    if (MmSizeOfPagedPoolInBytes > ((ULONG_PTR)MmNonPagedSystemStart -
                                    (ULONG_PTR)MmPagedPoolStart))
    {
        //
        // On the other hand, we have limited VA space, so make sure that the VA
        // for paged pool doesn't overflow into nonpaged pool VA. Otherwise, set
        // whatever maximum is possible.
        //
        MmSizeOfPagedPoolInBytes = (ULONG_PTR)MmNonPagedSystemStart -
                                   (ULONG_PTR)MmPagedPoolStart;
    }

    //
    // Get the size in pages and make sure paged pool is at least 32MB.
    //
    Size = MmSizeOfPagedPoolInBytes;
    if (Size < MI_MIN_INIT_PAGED_POOLSIZE) Size = MI_MIN_INIT_PAGED_POOLSIZE;
    Size = BYTES_TO_PAGES(Size);

    //
    // Now check how many PTEs will be required for these many pages.
    // (1024 PTEs fit in one page-table page on x86, so round up to PDEs.)
    //
    Size = (Size + (1024 - 1)) / 1024;

    //
    // Recompute the page-aligned size of the paged pool, in bytes and pages.
    //
    MmSizeOfPagedPoolInBytes = Size * PAGE_SIZE * 1024;
    MmSizeOfPagedPoolInPages = MmSizeOfPagedPoolInBytes >> PAGE_SHIFT;

    //
    // Let's be really sure this doesn't overflow into nonpaged system VA
    //
    ASSERT((MmSizeOfPagedPoolInBytes + (ULONG_PTR)MmPagedPoolStart) <=
           (ULONG_PTR)MmNonPagedSystemStart);

    //
    // This is where paged pool ends
    //
    MmPagedPoolEnd = (PVOID)(((ULONG_PTR)MmPagedPoolStart +
                              MmSizeOfPagedPoolInBytes) - 1);

    //
    // So now get the PDE for paged pool and zero it out
    //
    PointerPde = MiAddressToPde(MmPagedPoolStart);
    RtlZeroMemory(PointerPde,
                  (1 + MiAddressToPde(MmPagedPoolEnd) - PointerPde) * sizeof(MMPTE));

    //
    // Next, get the first and last PTE
    //
    PointerPte = MiAddressToPte(MmPagedPoolStart);
    MmPagedPoolInfo.FirstPteForPagedPool = PointerPte;
    MmPagedPoolInfo.LastPteForPagedPool = MiAddressToPte(MmPagedPoolEnd);

    //
    // Lock the PFN database
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    //
    // Allocate a page and map the first paged pool PDE
    //
    PageFrameIndex = MmAllocPage(MC_NPPOOL, 0);
    TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
    ASSERT(PointerPde->u.Hard.Valid == 0);
    ASSERT(TempPte.u.Hard.Valid == 1);
    *PointerPde = TempPte;

    //
    // Release the PFN database lock
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    //
    // We only have one PDE mapped for now... at fault time, additional PDEs
    // will be allocated to handle paged pool growth. This is where they'll have
    // to start.
    //
    MmPagedPoolInfo.NextPdeForPagedPoolExpansion = PointerPde + 1;

    //
    // We keep track of each page via a bit, so check how big the bitmap will
    // have to be (make sure to align our page count such that it fits nicely
    // into a 4-byte aligned bitmap).
    //
    // We'll also allocate the bitmap header itself as part of the same buffer.
    //
    Size = Size * 1024;
    ASSERT(Size == MmSizeOfPagedPoolInPages);
    BitMapSize = Size;
    Size = sizeof(RTL_BITMAP) + (((Size + 31) / 32) * sizeof(ULONG));

    //
    // Allocate the allocation bitmap, which tells us which regions have not yet
    // been mapped into memory
    //
    MmPagedPoolInfo.PagedPoolAllocationMap = ExAllocatePoolWithTag(NonPagedPool,
                                                                   Size,
                                                                   '  mM');
    ASSERT(MmPagedPoolInfo.PagedPoolAllocationMap);

    //
    // Initialize it such that at first, only the first page's worth of PTEs is
    // marked as allocated (incidentally, the first PDE we allocated earlier).
    //
    RtlInitializeBitMap(MmPagedPoolInfo.PagedPoolAllocationMap,
                        (PULONG)(MmPagedPoolInfo.PagedPoolAllocationMap + 1),
                        BitMapSize);
    RtlSetAllBits(MmPagedPoolInfo.PagedPoolAllocationMap);
    RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap, 0, 1024);

    //
    // We have a second bitmap, which keeps track of where allocations end.
    // Given the allocation bitmap and a base address, we can therefore figure
    // out which page is the last page of that allocation, and thus how big the
    // entire allocation is.
    //
    MmPagedPoolInfo.EndOfPagedPoolBitmap = ExAllocatePoolWithTag(NonPagedPool,
                                                                 Size,
                                                                 '  mM');
    ASSERT(MmPagedPoolInfo.EndOfPagedPoolBitmap);
    RtlInitializeBitMap(MmPagedPoolInfo.EndOfPagedPoolBitmap,
                        (PULONG)(MmPagedPoolInfo.EndOfPagedPoolBitmap + 1),
                        BitMapSize);

    //
    // Since no allocations have been made yet, there are no bits set as the end
    //
    RtlClearAllBits(MmPagedPoolInfo.EndOfPagedPoolBitmap);

    //
    // Initialize paged pool.
    //
    InitializePool(PagedPool, 0);

    //
    // Initialize the paged pool mutex
    //
    KeInitializeGuardedMutex(&MmPagedPoolMutex);
}
666
667 NTSTATUS
668 NTAPI
669 MmArmInitSystem(IN ULONG Phase,
670 IN PLOADER_PARAMETER_BLOCK LoaderBlock)
671 {
672 PLIST_ENTRY NextEntry;
673 PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
674 ULONG FreePages = 0;
675 PFN_NUMBER PageFrameIndex, PoolPages;
676 PMMPTE StartPde, EndPde, PointerPte, LastPte;
677 MMPTE TempPde = HyperTemplatePte, TempPte = HyperTemplatePte;
678 PVOID NonPagedPoolExpansionVa;
679 ULONG OldCount, i, L2Associativity;
680 BOOLEAN IncludeType[LoaderMaximum];
681 PVOID Bitmap;
682 PPHYSICAL_MEMORY_RUN Run;
683 PFN_NUMBER FreePage, FreePageCount, PagesLeft, BasePage, PageCount;
684
685 //
686 // Instantiate memory that we don't consider RAM/usable
687 // We use the same exclusions that Windows does, in order to try to be
688 // compatible with WinLDR-style booting
689 //
690 for (i = 0; i < LoaderMaximum; i++) IncludeType[i] = TRUE;
691 IncludeType[LoaderBad] = FALSE;
692 IncludeType[LoaderFirmwarePermanent] = FALSE;
693 IncludeType[LoaderSpecialMemory] = FALSE;
694 IncludeType[LoaderBBTMemory] = FALSE;
695 if (Phase == 0)
696 {
697 //
698 // Define the basic user vs. kernel address space separation
699 //
700 MmSystemRangeStart = (PVOID)KSEG0_BASE;
701 MmUserProbeAddress = (ULONG_PTR)MmSystemRangeStart - 0x10000;
702 MmHighestUserAddress = (PVOID)(MmUserProbeAddress - 1);
703
704 //
705 // Get the size of the boot loader's image allocations and then round
706 // that region up to a PDE size, so that any PDEs we might create for
707 // whatever follows are separate from the PDEs that boot loader might've
708 // already created (and later, we can blow all that away if we want to).
709 //
710 MmBootImageSize = KeLoaderBlock->Extension->LoaderPagesSpanned;
711 MmBootImageSize *= PAGE_SIZE;
712 MmBootImageSize = (MmBootImageSize + (4 * 1024 * 1024) - 1) & ~((4 * 1024 * 1024) - 1);
713 ASSERT((MmBootImageSize % (4 * 1024 * 1024)) == 0);
714
715 //
716 // Set the size of session view, pool, and image
717 //
718 MmSessionSize = MI_SESSION_SIZE;
719 MmSessionViewSize = MI_SESSION_VIEW_SIZE;
720 MmSessionPoolSize = MI_SESSION_POOL_SIZE;
721 MmSessionImageSize = MI_SESSION_IMAGE_SIZE;
722
723 //
724 // Set the size of system view
725 //
726 MmSystemViewSize = MI_SYSTEM_VIEW_SIZE;
727
728 //
729 // This is where it all ends
730 //
731 MiSessionImageEnd = (PVOID)PTE_BASE;
732
733 //
734 // This is where we will load Win32k.sys and the video driver
735 //
736 MiSessionImageStart = (PVOID)((ULONG_PTR)MiSessionImageEnd -
737 MmSessionImageSize);
738
739 //
740 // So the view starts right below the session working set (itself below
741 // the image area)
742 //
743 MiSessionViewStart = (PVOID)((ULONG_PTR)MiSessionImageEnd -
744 MmSessionImageSize -
745 MI_SESSION_WORKING_SET_SIZE -
746 MmSessionViewSize);
747
748 //
749 // Session pool follows
750 //
751 MiSessionPoolEnd = MiSessionViewStart;
752 MiSessionPoolStart = (PVOID)((ULONG_PTR)MiSessionPoolEnd -
753 MmSessionPoolSize);
754
755 //
756 // And it all begins here
757 //
758 MmSessionBase = MiSessionPoolStart;
759
760 //
761 // Sanity check that our math is correct
762 //
763 ASSERT((ULONG_PTR)MmSessionBase + MmSessionSize == PTE_BASE);
764
765 //
766 // Session space ends wherever image session space ends
767 //
768 MiSessionSpaceEnd = MiSessionImageEnd;
769
770 //
771 // System view space ends at session space, so now that we know where
772 // this is, we can compute the base address of system view space itself.
773 //
774 MiSystemViewStart = (PVOID)((ULONG_PTR)MmSessionBase -
775 MmSystemViewSize);
776
777 //
778 // Count physical pages on the system
779 //
780 PageCount = MiPagesInLoaderBlock(LoaderBlock, IncludeType);
781
782 //
783 // Check if this is a machine with less than 19MB of RAM
784 //
785 if (PageCount < MI_MIN_PAGES_FOR_SYSPTE_TUNING)
786 {
787 //
788 // Use the very minimum of system PTEs
789 //
790 MmNumberOfSystemPtes = 7000;
791 }
792 else
793 {
794 //
795 // Use the default, but check if we have more than 32MB of RAM
796 //
797 MmNumberOfSystemPtes = 11000;
798 if (PageCount > MI_MIN_PAGES_FOR_SYSPTE_BOOST)
799 {
800 //
801 // Double the amount of system PTEs
802 //
803 MmNumberOfSystemPtes <<= 1;
804 }
805 }
806
807 DPRINT("System PTE count has been tuned to %d (%d bytes)\n",
808 MmNumberOfSystemPtes, MmNumberOfSystemPtes * PAGE_SIZE);
809
810 //
811 //
812 // Start of Architecture Specific Initialization Code
813 //
814 //
815
816 //
817 // The large kernel stack is cutomizable, but use default value for now
818 //
819 MmLargeStackSize = KERNEL_LARGE_STACK_SIZE;
820
821 //
822 // Setup template
823 //
824 HyperTemplatePte.u.Long = 0;
825 HyperTemplatePte.u.Hard.Valid = 1;
826 HyperTemplatePte.u.Hard.Write = 1;
827 HyperTemplatePte.u.Hard.Dirty = 1;
828 HyperTemplatePte.u.Hard.Accessed = 1;
829 if (Ke386GlobalPagesEnabled) HyperTemplatePte.u.Hard.Global = 1;
830
831 //
832 // Set CR3 for the system process
833 //
834 PointerPte = MiAddressToPde(PTE_BASE);
835 PageFrameIndex = PFN_FROM_PTE(PointerPte) << PAGE_SHIFT;
836 PsGetCurrentProcess()->Pcb.DirectoryTableBase[0] = PageFrameIndex;
837
838 //
839 // Blow away user-mode
840 //
841 StartPde = MiAddressToPde(0);
842 EndPde = MiAddressToPde(KSEG0_BASE);
843 RtlZeroMemory(StartPde, (EndPde - StartPde) * sizeof(MMPTE));
844
845 //
846 // Loop the memory descriptors
847 //
848 NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
849 while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
850 {
851 //
852 // Get the memory block
853 //
854 MdBlock = CONTAINING_RECORD(NextEntry,
855 MEMORY_ALLOCATION_DESCRIPTOR,
856 ListEntry);
857
858 //
859 // Skip invisible memory
860 //
861 if ((MdBlock->MemoryType != LoaderFirmwarePermanent) &&
862 (MdBlock->MemoryType != LoaderSpecialMemory) &&
863 (MdBlock->MemoryType != LoaderHALCachedMemory) &&
864 (MdBlock->MemoryType != LoaderBBTMemory))
865 {
866 //
867 // Check if BURNMEM was used
868 //
869 if (MdBlock->MemoryType != LoaderBad)
870 {
871 //
872 // Count this in the total of pages
873 //
874 MmNumberOfPhysicalPages += MdBlock->PageCount;
875 }
876
877 //
878 // Check if this is the new lowest page
879 //
880 if (MdBlock->BasePage < MmLowestPhysicalPage)
881 {
882 //
883 // Update the lowest page
884 //
885 MmLowestPhysicalPage = MdBlock->BasePage;
886 }
887
888 //
889 // Check if this is the new highest page
890 //
891 PageFrameIndex = MdBlock->BasePage + MdBlock->PageCount;
892 if (PageFrameIndex > MmHighestPhysicalPage)
893 {
894 //
895 // Update the highest page
896 //
897 MmHighestPhysicalPage = PageFrameIndex - 1;
898 }
899
900 //
901 // Check if this is free memory
902 //
903 if ((MdBlock->MemoryType == LoaderFree) ||
904 (MdBlock->MemoryType == LoaderLoadedProgram) ||
905 (MdBlock->MemoryType == LoaderFirmwareTemporary) ||
906 (MdBlock->MemoryType == LoaderOsloaderStack))
907 {
908 //
909 // Check if this is the largest memory descriptor
910 //
911 if (MdBlock->PageCount > FreePages)
912 {
913 //
914 // For now, it is
915 //
916 MxFreeDescriptor = MdBlock;
917 }
918
919 //
920 // More free pages
921 //
922 FreePages += MdBlock->PageCount;
923 }
924 }
925
926 //
927 // Keep going
928 //
929 NextEntry = MdBlock->ListEntry.Flink;
930 }
931
932 //
933 // Save original values of the free descriptor, since it'll be
934 // altered by early allocations
935 //
936 MxOldFreeDescriptor = *MxFreeDescriptor;
937
938 //
939 // Check if this is a machine with less than 256MB of RAM, and no overide
940 //
941 if ((MmNumberOfPhysicalPages <= MI_MIN_PAGES_FOR_NONPAGED_POOL_TUNING) &&
942 !(MmSizeOfNonPagedPoolInBytes))
943 {
944 //
945 // Force the non paged pool to be 2MB so we can reduce RAM usage
946 //
947 MmSizeOfNonPagedPoolInBytes = 2 * 1024 * 1024;
948 }
949
950 //
951 // Hyperspace ends here
952 //
953 MmHyperSpaceEnd = (PVOID)((ULONG_PTR)MmSystemCacheWorkingSetList - 1);
954
955 //
956 // Check if the user gave a ridicuously large nonpaged pool RAM size
957 //
958 if ((MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT) >
959 (MmNumberOfPhysicalPages * 7 / 8))
960 {
961 //
962 // More than 7/8ths of RAM was dedicated to nonpaged pool, ignore!
963 //
964 MmSizeOfNonPagedPoolInBytes = 0;
965 }
966
967 //
968 // Check if no registry setting was set, or if the setting was too low
969 //
970 if (MmSizeOfNonPagedPoolInBytes < MmMinimumNonPagedPoolSize)
971 {
972 //
973 // Start with the minimum (256 KB) and add 32 KB for each MB above 4
974 //
975 MmSizeOfNonPagedPoolInBytes = MmMinimumNonPagedPoolSize;
976 MmSizeOfNonPagedPoolInBytes += (MmNumberOfPhysicalPages - 1024) /
977 256 * MmMinAdditionNonPagedPoolPerMb;
978 }
979
980 //
981 // Check if the registy setting or our dynamic calculation was too high
982 //
983 if (MmSizeOfNonPagedPoolInBytes > MI_MAX_INIT_NONPAGED_POOL_SIZE)
984 {
985 //
986 // Set it to the maximum
987 //
988 MmSizeOfNonPagedPoolInBytes = MI_MAX_INIT_NONPAGED_POOL_SIZE;
989 }
990
991 //
992 // Check if a percentage cap was set through the registry
993 //
994 if (MmMaximumNonPagedPoolPercent)
995 {
996 //
997 // Don't feel like supporting this right now
998 //
999 UNIMPLEMENTED;
1000 }
1001
1002 //
1003 // Page-align the nonpaged pool size
1004 //
1005 MmSizeOfNonPagedPoolInBytes &= ~(PAGE_SIZE - 1);
1006
1007 //
1008 // Now, check if there was a registry size for the maximum size
1009 //
1010 if (!MmMaximumNonPagedPoolInBytes)
1011 {
1012 //
1013 // Start with the default (1MB)
1014 //
1015 MmMaximumNonPagedPoolInBytes = MmDefaultMaximumNonPagedPool;
1016
1017 //
1018 // Add space for PFN database
1019 //
1020 MmMaximumNonPagedPoolInBytes += (ULONG)
1021 PAGE_ALIGN((MmHighestPhysicalPage + 1) * sizeof(MMPFN));
1022
1023 //
1024 // Add 400KB for each MB above 4
1025 //
1026 MmMaximumNonPagedPoolInBytes += (FreePages - 1024) / 256 *
1027 MmMaxAdditionNonPagedPoolPerMb;
1028 }
1029
1030 //
1031 // Make sure there's at least 16 pages + the PFN available for expansion
1032 //
1033 PoolPages = MmSizeOfNonPagedPoolInBytes + (PAGE_SIZE * 16) +
1034 ((ULONG)PAGE_ALIGN(MmHighestPhysicalPage + 1) *
1035 sizeof(MMPFN));
1036 if (MmMaximumNonPagedPoolInBytes < PoolPages)
1037 {
1038 //
1039 // Set it to the minimum value for the maximum (yuck!)
1040 //
1041 MmMaximumNonPagedPoolInBytes = PoolPages;
1042 }
1043
1044 //
1045 // Systems with 2GB of kernel address space get double the size
1046 //
1047 PoolPages = MI_MAX_NONPAGED_POOL_SIZE * 2;
1048
1049 //
1050 // Don't let the maximum go too high
1051 //
1052 if (MmMaximumNonPagedPoolInBytes > PoolPages)
1053 {
1054 //
1055 // Set it to the upper limit
1056 //
1057 MmMaximumNonPagedPoolInBytes = PoolPages;
1058 }
1059
1060 //
1061 // Check if this is a system with > 128MB of non paged pool
1062 //
1063 if (MmMaximumNonPagedPoolInBytes > MI_MAX_NONPAGED_POOL_SIZE)
1064 {
1065 //
1066 // FIXME: Unsure about additional checks needed
1067 //
1068 DPRINT1("Untested path\n");
1069 }
1070
1071 //
1072 // Get L2 cache information
1073 //
1074 L2Associativity = KeGetPcr()->SecondLevelCacheAssociativity;
1075 MmSecondaryColors = KeGetPcr()->SecondLevelCacheSize;
1076 if (L2Associativity) MmSecondaryColors /= L2Associativity;
1077
1078 //
1079 // Compute final color mask and count
1080 //
1081 MmSecondaryColors >>= PAGE_SHIFT;
1082 if (!MmSecondaryColors) MmSecondaryColors = 1;
1083 MmSecondaryColorMask = MmSecondaryColors - 1;
1084
1085 //
1086 // Store it
1087 //
1088 KeGetCurrentPrcb()->SecondaryColorMask = MmSecondaryColorMask;
1089
1090 //
1091 // Calculate the number of bytes for the PFN database
1092 // and then convert to pages
1093 //
1094 MxPfnAllocation = (MmHighestPhysicalPage + 1) * sizeof(MMPFN);
1095 MxPfnAllocation >>= PAGE_SHIFT;
1096
1097 //
1098 // We have to add one to the count here, because in the process of
1099 // shifting down to the page size, we actually ended up getting the
1100 // lower aligned size (so say, 0x5FFFF bytes is now 0x5F pages).
1101 // Later on, we'll shift this number back into bytes, which would cause
1102 // us to end up with only 0x5F000 bytes -- when we actually want to have
1103 // 0x60000 bytes.
1104 //
1105 MxPfnAllocation++;
1106
1107 //
1108 // Now calculate the nonpaged pool expansion VA region
1109 //
1110 MmNonPagedPoolStart = (PVOID)((ULONG_PTR)MmNonPagedPoolEnd -
1111 MmMaximumNonPagedPoolInBytes +
1112 MmSizeOfNonPagedPoolInBytes);
1113 MmNonPagedPoolStart = (PVOID)PAGE_ALIGN(MmNonPagedPoolStart);
1114 NonPagedPoolExpansionVa = MmNonPagedPoolStart;
1115 DPRINT("NP Pool has been tuned to: %d bytes and %d bytes\n",
1116 MmSizeOfNonPagedPoolInBytes, MmMaximumNonPagedPoolInBytes);
1117
1118 //
1119 // Now calculate the nonpaged system VA region, which includes the
1120 // nonpaged pool expansion (above) and the system PTEs. Note that it is
1121 // then aligned to a PDE boundary (4MB).
1122 //
1123 MmNonPagedSystemStart = (PVOID)((ULONG_PTR)MmNonPagedPoolStart -
1124 (MmNumberOfSystemPtes + 1) * PAGE_SIZE);
1125 MmNonPagedSystemStart = (PVOID)((ULONG_PTR)MmNonPagedSystemStart &
1126 ~((4 * 1024 * 1024) - 1));
1127
1128 //
1129 // Don't let it go below the minimum
1130 //
1131 if (MmNonPagedSystemStart < (PVOID)0xEB000000)
1132 {
1133 //
1134 // This is a hard-coded limit in the Windows NT address space
1135 //
1136 MmNonPagedSystemStart = (PVOID)0xEB000000;
1137
1138 //
1139 // Reduce the amount of system PTEs to reach this point
1140 //
1141 MmNumberOfSystemPtes = ((ULONG_PTR)MmNonPagedPoolStart -
1142 (ULONG_PTR)MmNonPagedSystemStart) >>
1143 PAGE_SHIFT;
1144 MmNumberOfSystemPtes--;
1145 ASSERT(MmNumberOfSystemPtes > 1000);
1146 }
1147
1148 //
1149 // Check if we are in a situation where the size of the paged pool
1150 // is so large that it overflows into nonpaged pool
1151 //
1152 if (MmSizeOfPagedPoolInBytes >
1153 ((ULONG_PTR)MmNonPagedSystemStart - (ULONG_PTR)MmPagedPoolStart))
1154 {
1155 //
1156 // We need some recalculations here
1157 //
1158 DPRINT1("Paged pool is too big!\n");
1159 }
1160
1161 //
1162 // Normally, the PFN database should start after the loader images.
1163 // This is already the case in ReactOS, but for now we want to co-exist
1164 // with the old memory manager, so we'll create a "Shadow PFN Database"
1165 // instead, and arbitrarily start it at 0xB0000000.
1166 //
1167 MmPfnDatabase = (PVOID)0xB0000000;
1168 ASSERT(((ULONG_PTR)MmPfnDatabase & ((4 * 1024 * 1024) - 1)) == 0);
1169
1170 //
1171 // Non paged pool comes after the PFN database
1172 //
1173 MmNonPagedPoolStart = (PVOID)((ULONG_PTR)MmPfnDatabase +
1174 (MxPfnAllocation << PAGE_SHIFT));
1175
1176 //
1177 // Now we actually need to get these many physical pages. Nonpaged pool
1178 // is actually also physically contiguous (but not the expansion)
1179 //
1180 PageFrameIndex = MxGetNextPage(MxPfnAllocation +
1181 (MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT));
1182 ASSERT(PageFrameIndex != 0);
1183 DPRINT("PFN DB PA PFN begins at: %lx\n", PageFrameIndex);
1184 DPRINT("NP PA PFN begins at: %lx\n", PageFrameIndex + MxPfnAllocation);
1185
1186 //
1187 // Now we need some pages to create the page tables for the NP system VA
1188 // which includes system PTEs and expansion NP
1189 //
1190 StartPde = MiAddressToPde(MmNonPagedSystemStart);
1191 EndPde = MiAddressToPde((PVOID)((ULONG_PTR)MmNonPagedPoolEnd - 1));
1192 while (StartPde <= EndPde)
1193 {
1194 //
1195 // Sanity check
1196 //
1197 ASSERT(StartPde->u.Hard.Valid == 0);
1198
1199 //
1200 // Get a page
1201 //
1202 TempPde.u.Hard.PageFrameNumber = MxGetNextPage(1);
1203 ASSERT(TempPde.u.Hard.Valid == 1);
1204 *StartPde = TempPde;
1205
1206 //
1207 // Zero out the page table
1208 //
1209 PointerPte = MiPteToAddress(StartPde);
1210 RtlZeroMemory(PointerPte, PAGE_SIZE);
1211
1212 //
1213 // Next
1214 //
1215 StartPde++;
1216 }
1217
1218 //
1219 // Now we need pages for the page tables which will map initial NP
1220 //
1221 StartPde = MiAddressToPde(MmPfnDatabase);
1222 EndPde = MiAddressToPde((PVOID)((ULONG_PTR)MmNonPagedPoolStart +
1223 MmSizeOfNonPagedPoolInBytes - 1));
1224 while (StartPde <= EndPde)
1225 {
1226 //
1227 // Sanity check
1228 //
1229 ASSERT(StartPde->u.Hard.Valid == 0);
1230
1231 //
1232 // Get a page
1233 //
1234 TempPde.u.Hard.PageFrameNumber = MxGetNextPage(1);
1235 ASSERT(TempPde.u.Hard.Valid == 1);
1236 *StartPde = TempPde;
1237
1238 //
1239 // Zero out the page table
1240 //
1241 PointerPte = MiPteToAddress(StartPde);
1242 RtlZeroMemory(PointerPte, PAGE_SIZE);
1243
1244 //
1245 // Next
1246 //
1247 StartPde++;
1248 }
1249
1250 //
1251 // Now remember where the expansion starts
1252 //
1253 MmNonPagedPoolExpansionStart = NonPagedPoolExpansionVa;
1254
1255 //
1256 // Last step is to actually map the nonpaged pool
1257 //
1258 PointerPte = MiAddressToPte(MmNonPagedPoolStart);
1259 LastPte = MiAddressToPte((PVOID)((ULONG_PTR)MmNonPagedPoolStart +
1260 MmSizeOfNonPagedPoolInBytes - 1));
1261 while (PointerPte <= LastPte)
1262 {
1263 //
1264 // Use one of our contiguous pages
1265 //
1266 TempPte.u.Hard.PageFrameNumber = PageFrameIndex++;
1267 ASSERT(PointerPte->u.Hard.Valid == 0);
1268 ASSERT(TempPte.u.Hard.Valid == 1);
1269 *PointerPte++ = TempPte;
1270 }
1271
1272 //
1273 // Sanity check: make sure we have properly defined the system PTE space
1274 //
1275 ASSERT(MiAddressToPte(MmNonPagedSystemStart) <
1276 MiAddressToPte(MmNonPagedPoolExpansionStart));
1277
1278 //
1279 // Now go ahead and initialize the ARM³ nonpaged pool
1280 //
1281 MiInitializeArmPool();
1282
1283 //
1284 // Get current page data, since we won't be using MxGetNextPage as it
1285 // would corrupt our state
1286 //
1287 FreePage = MxFreeDescriptor->BasePage;
1288 FreePageCount = MxFreeDescriptor->PageCount;
1289 PagesLeft = 0;
1290
1291 //
1292 // Loop the memory descriptors
1293 //
1294 NextEntry = KeLoaderBlock->MemoryDescriptorListHead.Flink;
1295 while (NextEntry != &KeLoaderBlock->MemoryDescriptorListHead)
1296 {
1297 //
1298 // Get the descriptor
1299 //
1300 MdBlock = CONTAINING_RECORD(NextEntry,
1301 MEMORY_ALLOCATION_DESCRIPTOR,
1302 ListEntry);
1303 if ((MdBlock->MemoryType == LoaderFirmwarePermanent) ||
1304 (MdBlock->MemoryType == LoaderBBTMemory) ||
1305 (MdBlock->MemoryType == LoaderSpecialMemory))
1306 {
1307 //
1308 // These pages are not part of the PFN database
1309 //
1310 NextEntry = MdBlock->ListEntry.Flink;
1311 continue;
1312 }
1313
1314 //
1315 // Next, check if this is our special free descriptor we've found
1316 //
1317 if (MdBlock == MxFreeDescriptor)
1318 {
1319 //
1320 // Use the real numbers instead
1321 //
1322 BasePage = MxOldFreeDescriptor.BasePage;
1323 PageCount = MxOldFreeDescriptor.PageCount;
1324 }
1325 else
1326 {
1327 //
1328 // Use the descriptor's numbers
1329 //
1330 BasePage = MdBlock->BasePage;
1331 PageCount = MdBlock->PageCount;
1332 }
1333
1334 //
1335 // Get the PTEs for this range
1336 //
1337 PointerPte = MiAddressToPte(&MmPfnDatabase[BasePage]);
1338 LastPte = MiAddressToPte(((ULONG_PTR)&MmPfnDatabase[BasePage + PageCount]) - 1);
1339 DPRINT("MD Type: %lx Base: %lx Count: %lx\n", MdBlock->MemoryType, BasePage, PageCount);
1340
1341 //
1342 // Loop them
1343 //
1344 while (PointerPte <= LastPte)
1345 {
1346 //
1347 // We'll only touch PTEs that aren't already valid
1348 //
1349 if (PointerPte->u.Hard.Valid == 0)
1350 {
1351 //
1352 // Use the next free page
1353 //
1354 TempPte.u.Hard.PageFrameNumber = FreePage;
1355 ASSERT(FreePageCount != 0);
1356
1357 //
1358 // Consume free pages
1359 //
1360 FreePage++;
1361 FreePageCount--;
1362 if (!FreePageCount)
1363 {
1364 //
1365 // Out of memory
1366 //
1367 KeBugCheckEx(INSTALL_MORE_MEMORY,
1368 MmNumberOfPhysicalPages,
1369 FreePageCount,
1370 MxOldFreeDescriptor.PageCount,
1371 1);
1372 }
1373
1374 //
1375 // Write out this PTE
1376 //
1377 PagesLeft++;
1378 ASSERT(PointerPte->u.Hard.Valid == 0);
1379 ASSERT(TempPte.u.Hard.Valid == 1);
1380 *PointerPte = TempPte;
1381
1382 //
1383 // Zero this page
1384 //
1385 RtlZeroMemory(MiPteToAddress(PointerPte), PAGE_SIZE);
1386 }
1387
1388 //
1389 // Next!
1390 //
1391 PointerPte++;
1392 }
1393
1394 //
1395 // Do the next address range
1396 //
1397 NextEntry = MdBlock->ListEntry.Flink;
1398 }
1399
1400 //
1401 // Now update the free descriptors to consume the pages we used up during
1402 // the PFN allocation loop
1403 //
1404 MxFreeDescriptor->BasePage = FreePage;
1405 MxFreeDescriptor->PageCount = FreePageCount;
1406 }
1407 else if (Phase == 1) // IN BETWEEN, THE PFN DATABASE IS NOW CREATED
1408 {
1409 //
1410 // Reset the descriptor back so we can create the correct memory blocks
1411 //
1412 *MxFreeDescriptor = MxOldFreeDescriptor;
1413
1414 //
1415 // Initialize the nonpaged pool
1416 //
1417 InitializePool(NonPagedPool, 0);
1418
1419 //
1420 // We PDE-aligned the nonpaged system start VA, so haul some extra PTEs!
1421 //
1422 PointerPte = MiAddressToPte(MmNonPagedSystemStart);
1423 OldCount = MmNumberOfSystemPtes;
1424 MmNumberOfSystemPtes = MiAddressToPte(MmNonPagedPoolExpansionStart) -
1425 PointerPte;
1426 MmNumberOfSystemPtes--;
1427 DPRINT("Final System PTE count: %d (%d bytes)\n",
1428 MmNumberOfSystemPtes, MmNumberOfSystemPtes * PAGE_SIZE);
1429
1430 //
1431 // Create the system PTE space
1432 //
1433 MiInitializeSystemPtes(PointerPte, MmNumberOfSystemPtes, SystemPteSpace);
1434
1435 //
1436 // Get the PDE for hyperspace
1437 //
1438 StartPde = MiAddressToPde(HYPER_SPACE);
1439
1440 //
1441 // Allocate a page for it and create it
1442 //
1443 PageFrameIndex = MmAllocPage(MC_SYSTEM, 0);
1444 TempPde.u.Hard.PageFrameNumber = PageFrameIndex;
1445 TempPde.u.Hard.Global = FALSE; // Hyperspace is local!
1446 ASSERT(StartPde->u.Hard.Valid == 0);
1447 ASSERT(TempPde.u.Hard.Valid == 1);
1448 *StartPde = TempPde;
1449
1450 //
1451 // Zero out the page table now
1452 //
1453 PointerPte = MiAddressToPte(HYPER_SPACE);
1454 RtlZeroMemory(PointerPte, PAGE_SIZE);
1455
1456 //
1457 // Setup the mapping PTEs
1458 //
1459 MmFirstReservedMappingPte = MiAddressToPte(MI_MAPPING_RANGE_START);
1460 MmLastReservedMappingPte = MiAddressToPte(MI_MAPPING_RANGE_END);
1461 MmFirstReservedMappingPte->u.Hard.PageFrameNumber = MI_HYPERSPACE_PTES;
1462
1463 //
1464 // Reserve system PTEs for zeroing PTEs and clear them
1465 //
1466 MiFirstReservedZeroingPte = MiReserveSystemPtes(MI_ZERO_PTES,
1467 SystemPteSpace);
1468 RtlZeroMemory(MiFirstReservedZeroingPte, MI_ZERO_PTES * sizeof(MMPTE));
1469
1470 //
1471 // Set the counter to maximum to boot with
1472 //
1473 MiFirstReservedZeroingPte->u.Hard.PageFrameNumber = MI_ZERO_PTES - 1;
1474
1475 //
1476 // Sync us up with ReactOS Mm
1477 //
1478 MiSyncARM3WithROS(MmNonPagedSystemStart, (PVOID)((ULONG_PTR)MmNonPagedPoolEnd - 1));
1479 MiSyncARM3WithROS(MmPfnDatabase, (PVOID)((ULONG_PTR)MmNonPagedPoolStart + MmSizeOfNonPagedPoolInBytes - 1));
1480 MiSyncARM3WithROS((PVOID)HYPER_SPACE, (PVOID)(HYPER_SPACE + PAGE_SIZE - 1));
1481
1482 //
1483 // Build the physical memory block
1484 //
1485 MmPhysicalMemoryBlock = MmInitializeMemoryLimits(LoaderBlock,
1486 IncludeType);
1487
1488 //
1489 // Allocate enough buffer for the PFN bitmap
1490 // Align it up to a 32-bit boundary
1491 //
1492 Bitmap = ExAllocatePoolWithTag(NonPagedPool,
1493 (((MmHighestPhysicalPage + 1) + 31) / 32) * 4,
1494 ' mM');
1495 if (!Bitmap)
1496 {
1497 //
1498 // This is critical
1499 //
1500 KeBugCheckEx(INSTALL_MORE_MEMORY,
1501 MmNumberOfPhysicalPages,
1502 MmLowestPhysicalPage,
1503 MmHighestPhysicalPage,
1504 0x101);
1505 }
1506
1507 //
1508 // Initialize it and clear all the bits to begin with
1509 //
1510 RtlInitializeBitMap(&MiPfnBitMap,
1511 Bitmap,
1512 MmHighestPhysicalPage + 1);
1513 RtlClearAllBits(&MiPfnBitMap);
1514
1515 //
1516 // Loop physical memory runs
1517 //
1518 for (i = 0; i < MmPhysicalMemoryBlock->NumberOfRuns; i++)
1519 {
1520 //
1521 // Get the run
1522 //
1523 Run = &MmPhysicalMemoryBlock->Run[i];
1524 DPRINT("PHYSICAL RAM [0x%08p to 0x%08p]\n",
1525 Run->BasePage << PAGE_SHIFT,
1526 (Run->BasePage + Run->PageCount) << PAGE_SHIFT);
1527
1528 //
1529 // Make sure it has pages inside it
1530 //
1531 if (Run->PageCount)
1532 {
1533 //
1534 // Set the bits in the PFN bitmap
1535 //
1536 RtlSetBits(&MiPfnBitMap, Run->BasePage, Run->PageCount);
1537 }
1538 }
1539
1540 //
1541 // Size up paged pool and build the shadow system page directory
1542 //
1543 MiBuildPagedPool();
1544 }
1545
1546 //
1547 // Always return success for now
1548 //
1549 return STATUS_SUCCESS;
1550 }
1551
1552 /* EOF */