1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: BSD - See COPYING.ARM in the top level directory
4 * FILE: ntoskrnl/mm/ARM3/mminit.c
5 * PURPOSE: ARM Memory Manager Initialization
6 * PROGRAMMERS: ReactOS Portable Systems Group
7 */
8
9 /* INCLUDES *******************************************************************/
10
11 #include <ntoskrnl.h>
12 #define NDEBUG
13 #include <debug.h>
14
15 #line 15 "ARM³::INIT"
16 #define MODULE_INVOLVED_IN_ARM3
17 #include "miarm.h"
18
19 /* GLOBALS ********************************************************************/
20
21 //
22 // These are all registry-configurable, but by default, the memory manager will
23 // figure out the most appropriate values.
24 //
25 ULONG MmMaximumNonPagedPoolPercent;
26 ULONG MmSizeOfNonPagedPoolInBytes;
27 ULONG MmMaximumNonPagedPoolInBytes;
28
29 //
30 // These numbers describe the discrete equation components of the nonpaged
31 // pool sizing algorithm.
32 //
33 // They are described on http://support.microsoft.com/default.aspx/kb/126402/ja
34 // along with the algorithm that uses them, which is implemented later below.
35 //
36 ULONG MmMinimumNonPagedPoolSize = 256 * 1024;
37 ULONG MmMinAdditionNonPagedPoolPerMb = 32 * 1024;
38 ULONG MmDefaultMaximumNonPagedPool = 1024 * 1024;
39 ULONG MmMaxAdditionNonPagedPoolPerMb = 400 * 1024;
40
41 //
42 // The memory layout (and especially variable names) of the NT kernel mode
43 // components can be a bit hard to twig, especially when it comes to the
44 // nonpaged area.
45 //
46 // There are really two components to the non-paged pool:
47 //
48 // - The initial nonpaged pool, sized dynamically up to a maximum.
49 // - The expansion nonpaged pool, sized dynamically up to a maximum.
50 //
51 // The initial nonpaged pool is physically contiguous for performance, and
52 // immediately follows the PFN database, typically sharing the same PDE. It is
53 // a very small resource (32MB on a 1GB system), and capped at 128MB.
54 //
55 // Right now we call this the "ARM³ Nonpaged Pool" and it begins somewhere after
56 // the PFN database (which starts at 0xB0000000).
57 //
58 // The expansion nonpaged pool, on the other hand, can grow much bigger (400MB
59 // for a 1GB system). On ARM³ however, it is currently capped at 128MB.
60 //
61 // The address where the initial nonpaged pool starts is aptly named
62 // MmNonPagedPoolStart, and it describes a range of MmSizeOfNonPagedPoolInBytes
63 // bytes.
64 //
65 // Expansion nonpaged pool starts at an address described by the variable called
66 // MmNonPagedPoolExpansionStart, and it goes on for MmMaximumNonPagedPoolInBytes
67 // minus MmSizeOfNonPagedPoolInBytes bytes, always reaching MmNonPagedPoolEnd
68 // (because of the way it's calculated) at 0xFFBE0000.
69 //
70 // Initial nonpaged pool is allocated and mapped early-on during boot, but what
71 // about the expansion nonpaged pool? It is instead composed of special pages
72 // which belong to what are called System PTEs. These PTEs are the subject of a
73 // later discussion, but they are also considered part of the "nonpaged" OS, due
74 // to the fact that they are never paged out -- once an address is described by
75 // a System PTE, it is always valid, until the System PTE is torn down.
76 //
77 // System PTEs are actually composed of two "spaces", the system space proper,
78 // and the nonpaged pool expansion space. The latter, as we've already seen,
79 // begins at MmNonPagedPoolExpansionStart. Based on the number of System PTEs
80 // that the system will support, the remaining address space below this address
81 // is used to hold the system space PTEs. This address, in turn, is held in the
82 // variable named MmNonPagedSystemStart, which itself is never allowed to go
83 // below 0xEB000000 (thus creating an upper bound on the number of System PTEs).
84 //
85 // This means that 330MB are reserved for total nonpaged system VA, on top of
86 // whatever the initial nonpaged pool allocation is.
87 //
88 // The following URLs, valid as of April 23rd, 2008, support this evidence:
89 //
90 // http://www.cs.miami.edu/~burt/journal/NT/memory.html
91 // http://www.ditii.com/2007/09/28/windows-memory-management-x86-virtual-address-space/
92 //
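//
// A quick sanity check of that figure (illustrative arithmetic only): the
// lowest allowed MmNonPagedSystemStart is 0xEB000000 and MmNonPagedPoolEnd is
// 0xFFBE0000, so the reserved range is 0xFFBE0000 - 0xEB000000 = 0x14BE0000
// bytes, i.e. roughly 332MB, which matches the ~330MB figure above.
//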
93 PVOID MmNonPagedSystemStart;
94 PVOID MmNonPagedPoolStart;
95 PVOID MmNonPagedPoolExpansionStart;
96 PVOID MmNonPagedPoolEnd = MI_NONPAGED_POOL_END;
97
98 //
99 // This is where paged pool starts by default
100 //
101 PVOID MmPagedPoolStart = MI_PAGED_POOL_START;
102 PVOID MmPagedPoolEnd;
103
104 //
105 // And this is its default size
106 //
107 ULONG MmSizeOfPagedPoolInBytes = MI_MIN_INIT_PAGED_POOLSIZE;
108 PFN_NUMBER MmSizeOfPagedPoolInPages = MI_MIN_INIT_PAGED_POOLSIZE / PAGE_SIZE;
109
110 //
111 // Session space starts at 0xBFFFFFFF and grows downwards
112 // By default, it includes an 8MB image area where we map win32k and video card
113 // drivers, followed by a 4MB area containing the session's working set. This is
114 // then followed by a 20MB mapped view area and finally by the session's paged
115 // pool, by default 16MB.
116 //
117 // On a normal system, this results in session space occupying the region from
118 // 0xBD000000 to 0xC0000000
119 //
120 // See miarm.h for the defines that determine the sizing of this region. On an
121 // NT system, some of these can be configured through the registry, but we don't
122 // support that yet.
123 //
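//
// A quick worked example using the default sizes above: 8MB (image) + 4MB
// (working set) + 20MB (mapped views) + 16MB (session pool) = 48MB, and
// 0xC0000000 - 48MB = 0xBD000000, which matches the layout annotated below.
//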
124 PVOID MiSessionSpaceEnd; // 0xC0000000
125 PVOID MiSessionImageEnd; // 0xC0000000
126 PVOID MiSessionImageStart; // 0xBF800000
127 PVOID MiSessionViewStart; // 0xBE000000
128 PVOID MiSessionPoolEnd; // 0xBE000000
129 PVOID MiSessionPoolStart; // 0xBD000000
130 PVOID MmSessionBase; // 0xBD000000
131 ULONG MmSessionSize;
132 ULONG MmSessionViewSize;
133 ULONG MmSessionPoolSize;
134 ULONG MmSessionImageSize;
135
136 //
137 // The system view space, on the other hand, is where sections that are memory
138 // mapped into "system space" end up.
139 //
140 // By default, it is a 16MB region.
141 //
142 PVOID MiSystemViewStart;
143 ULONG MmSystemViewSize;
144
145 //
146 // A copy of the system page directory (the page directory associated with the
147 // System process) is kept (double-mapped) by the manager in order to lazily
148 // map paged pool PDEs into external processes when they fault on a paged pool
149 // address.
150 //
151 PFN_NUMBER MmSystemPageDirectory;
152 PMMPTE MmSystemPagePtes;
153
154 //
155 // The system cache starts right after hyperspace. The first few pages are for
156 // keeping track of the system working set list.
157 //
158 // This should be 0xC0C00000 -- the cache itself starts at 0xC1000000
159 //
160 PMMWSL MmSystemCacheWorkingSetList = MI_SYSTEM_CACHE_WS_START;
161
162 //
163 // Windows NT seems to choose between 7000, 11000 and 50000
164 // On systems with more than 32MB, this number is then doubled, and further
165 // aligned up to a PDE boundary (4MB).
166 //
167 ULONG MmNumberOfSystemPtes;
168
169 //
170 // This is how many pages the PFN database will take up
171 // In Windows, this includes the Quark Color Table, but not in ARM³
172 //
173 ULONG MxPfnAllocation;
174
175 //
176 // Unlike the old ReactOS Memory Manager, ARM³ (and Windows) does not keep track
177 // of pages that are not actually valid physical memory, such as ACPI reserved
178 // regions, BIOS address ranges, or holes in physical memory address space which
179 // could indicate device-mapped I/O memory.
180 //
181 // In fact, the lack of a PFN entry for a page usually indicates that this is
182 // I/O space instead.
183 //
184 // A bitmap, called the PFN bitmap, keeps track of all page frames by assigning
185 // a bit to each. If the bit is set, then the page is valid physical RAM.
186 //
187 RTL_BITMAP MiPfnBitMap;
188
189 //
190 // This structure describes the different pieces of RAM-backed address space
191 //
192 PPHYSICAL_MEMORY_DESCRIPTOR MmPhysicalMemoryBlock;
193
194 //
195 // This is where we keep track of the most basic physical layout markers
196 //
197 ULONG MmNumberOfPhysicalPages, MmHighestPhysicalPage, MmLowestPhysicalPage = -1;
198
199 //
200 // The total number of pages mapped by the boot loader, which includes the kernel,
201 // HAL, boot drivers, registry, NLS files and other loader data structures, is
202 // kept track of here. This depends on "LoaderPagesSpanned" being correct when
203 // coming from the loader.
204 //
205 // This number is later aligned up to a PDE boundary.
206 //
207 ULONG MmBootImageSize;
208
209 //
210 // These three variables keep track of the core separation of address space that
211 // exists between kernel mode and user mode.
212 //
213 ULONG MmUserProbeAddress;
214 PVOID MmHighestUserAddress;
215 PVOID MmSystemRangeStart;
216
217 PVOID MmSystemCacheStart;
218 PVOID MmSystemCacheEnd;
219 MMSUPPORT MmSystemCacheWs;
220
221 //
222 // This is where hyperspace ends (followed by the system cache working set)
223 //
224 PVOID MmHyperSpaceEnd;
225
226 //
227 // Page coloring algorithm data
228 //
229 ULONG MmSecondaryColors;
230 ULONG MmSecondaryColorMask;
231
232 //
233 // Actual (registry-configurable) size of a GUI thread's stack
234 //
235 ULONG MmLargeStackSize = KERNEL_LARGE_STACK_SIZE;
236
237 /* PRIVATE FUNCTIONS **********************************************************/
238
239 //
240 // In Bavaria, this is probably a hate crime
241 //
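//
// What this routine actually does: for every PDE covering the given address
// range, copy the entry from the live page directory (PDE_BASE) into
// MmGlobalKernelPageDirectory, the array the legacy ReactOS Mm uses to
// propagate kernel-space PDEs into process page directories. This keeps
// mappings created by ARM3 visible to the old memory manager.
//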
242 VOID
243 FASTCALL
244 MiSyncARM3WithROS(IN PVOID AddressStart,
245 IN PVOID AddressEnd)
246 {
247 //
248 // Puerile piece of junk-grade carbonized horseshit puss sold to the lowest bidder
249 //
250 ULONG Pde = ADDR_TO_PDE_OFFSET(AddressStart);
251 while (Pde <= ADDR_TO_PDE_OFFSET(AddressEnd))
252 {
253 //
254 // This both odious and heinous
255 //
256 extern ULONG MmGlobalKernelPageDirectory[1024];
257 MmGlobalKernelPageDirectory[Pde] = ((PULONG)PDE_BASE)[Pde];
258 Pde++;
259 }
260 }
261
262 PFN_NUMBER
263 NTAPI
264 MiPagesInLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock,
265 IN PBOOLEAN IncludeType)
266 {
267 PLIST_ENTRY NextEntry;
268 PFN_NUMBER PageCount = 0;
269 PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
270
271 //
272 // Now loop through the descriptors
273 //
274 NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
275 while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
276 {
277 //
278 // Grab each one, and check if it's one we should include
279 //
280 MdBlock = CONTAINING_RECORD(NextEntry,
281 MEMORY_ALLOCATION_DESCRIPTOR,
282 ListEntry);
283 if ((MdBlock->MemoryType < LoaderMaximum) &&
284 (IncludeType[MdBlock->MemoryType]))
285 {
286 //
287 // Add this to our running total
288 //
289 PageCount += MdBlock->PageCount;
290 }
291
292 //
293 // Try the next descriptor
294 //
295 NextEntry = MdBlock->ListEntry.Flink;
296 }
297
298 //
299 // Return the total
300 //
301 return PageCount;
302 }
303
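//
// Illustrative example of the run coalescing done below (hypothetical
// descriptor values): two adjacent descriptors (BasePage 0x100, PageCount 0x50)
// and (BasePage 0x150, PageCount 0x30) are merged into a single physical run
// (BasePage 0x100, PageCount 0x80), while a gap in base pages starts a new run.
//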
304 PPHYSICAL_MEMORY_DESCRIPTOR
305 NTAPI
306 MmInitializeMemoryLimits(IN PLOADER_PARAMETER_BLOCK LoaderBlock,
307 IN PBOOLEAN IncludeType)
308 {
309 PLIST_ENTRY NextEntry;
310 ULONG Run = 0, InitialRuns = 0;
311 PFN_NUMBER NextPage = -1, PageCount = 0;
312 PPHYSICAL_MEMORY_DESCRIPTOR Buffer, NewBuffer;
313 PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
314
315 //
316 // Scan the memory descriptors
317 //
318 NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
319 while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
320 {
321 //
322 // For each one, increase the memory allocation estimate
323 //
324 InitialRuns++;
325 NextEntry = NextEntry->Flink;
326 }
327
328 //
329 // Allocate the maximum we'll ever need
330 //
331 Buffer = ExAllocatePoolWithTag(NonPagedPool,
332 sizeof(PHYSICAL_MEMORY_DESCRIPTOR) +
333 sizeof(PHYSICAL_MEMORY_RUN) *
334 (InitialRuns - 1),
335 'lMmM');
336 if (!Buffer) return NULL;
337
338 //
339 // For now that's how many runs we have
340 //
341 Buffer->NumberOfRuns = InitialRuns;
342
343 //
344 // Now loop through the descriptors again
345 //
346 NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
347 while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
348 {
349 //
350 // Grab each one, and check if it's one we should include
351 //
352 MdBlock = CONTAINING_RECORD(NextEntry,
353 MEMORY_ALLOCATION_DESCRIPTOR,
354 ListEntry);
355 if ((MdBlock->MemoryType < LoaderMaximum) &&
356 (IncludeType[MdBlock->MemoryType]))
357 {
358 //
359 // Add this to our running total
360 //
361 PageCount += MdBlock->PageCount;
362
363 //
364 // Check if the next page is described by the next descriptor
365 //
366 if (MdBlock->BasePage == NextPage)
367 {
368 //
369 // Combine it into the same physical run
370 //
371 ASSERT(MdBlock->PageCount != 0);
372 Buffer->Run[Run - 1].PageCount += MdBlock->PageCount;
373 NextPage += MdBlock->PageCount;
374 }
375 else
376 {
377 //
378 // Otherwise just duplicate the descriptor's contents
379 //
380 Buffer->Run[Run].BasePage = MdBlock->BasePage;
381 Buffer->Run[Run].PageCount = MdBlock->PageCount;
382 NextPage = Buffer->Run[Run].BasePage + Buffer->Run[Run].PageCount;
383
384 //
385 // And in this case, increase the number of runs
386 //
387 Run++;
388 }
389 }
390
391 //
392 // Try the next descriptor
393 //
394 NextEntry = MdBlock->ListEntry.Flink;
395 }
396
397 //
398 // We should not have been able to go past our initial estimate
399 //
400 ASSERT(Run <= Buffer->NumberOfRuns);
401
402 //
403 // Our guess was probably exaggerated...
404 //
405 if (InitialRuns > Run)
406 {
407 //
408 // Allocate a more accurately sized buffer
409 //
410 NewBuffer = ExAllocatePoolWithTag(NonPagedPool,
411 sizeof(PHYSICAL_MEMORY_DESCRIPTOR) +
412 sizeof(PHYSICAL_MEMORY_RUN) *
413 (Run - 1),
414 'lMmM');
415 if (NewBuffer)
416 {
417 //
418 // Copy the old buffer into the new, then free it
419 //
420 RtlCopyMemory(NewBuffer->Run,
421 Buffer->Run,
422 sizeof(PHYSICAL_MEMORY_RUN) * Run);
423 ExFreePool(Buffer);
424
425 //
426 // Now use the new buffer
427 //
428 Buffer = NewBuffer;
429 }
430 }
431
432 //
433 // Write the final numbers, and return it
434 //
435 Buffer->NumberOfRuns = Run;
436 Buffer->NumberOfPages = PageCount;
437 return Buffer;
438 }
439
440 VOID
441 NTAPI
442 MiBuildPagedPool(VOID)
443 {
444 PMMPTE PointerPte, PointerPde;
445 MMPTE TempPte = ValidKernelPte;
446 PFN_NUMBER PageFrameIndex;
447 KIRQL OldIrql;
448 ULONG Size, BitMapSize;
449
450 //
451 // Get the page frame number for the system page directory
452 //
453 PointerPte = MiAddressToPte(PDE_BASE);
454 MmSystemPageDirectory = PFN_FROM_PTE(PointerPte);
455
456 //
457 // Allocate a system PTE which will hold a copy of the page directory
458 //
459 PointerPte = MiReserveSystemPtes(1, SystemPteSpace);
460 ASSERT(PointerPte);
461 MmSystemPagePtes = MiPteToAddress(PointerPte);
462
463 //
464 // Make this system PTE point to the system page directory.
465 // It is now essentially double-mapped. This will be used later for lazy
466 // evaluation of PDEs across process switches, similarly to how the Global
467 // page directory array in the old ReactOS Mm is used (but in a less hacky
468 // way).
469 //
470 TempPte = ValidKernelPte;
471 TempPte.u.Hard.PageFrameNumber = MmSystemPageDirectory;
472 ASSERT(PointerPte->u.Hard.Valid == 0);
473 ASSERT(TempPte.u.Hard.Valid == 1);
474 *PointerPte = TempPte;
475
476 //
477 // Let's get back to paged pool work: size it up.
478 // By default, it should be twice as big as nonpaged pool.
479 //
480 MmSizeOfPagedPoolInBytes = 2 * MmMaximumNonPagedPoolInBytes;
481 if (MmSizeOfPagedPoolInBytes > ((ULONG_PTR)MmNonPagedSystemStart -
482 (ULONG_PTR)MmPagedPoolStart))
483 {
484 //
485 // On the other hand, we have limited VA space, so make sure that the VA
486 // for paged pool doesn't overflow into nonpaged pool VA. Otherwise, set
487 // whatever maximum is possible.
488 //
489 MmSizeOfPagedPoolInBytes = (ULONG_PTR)MmNonPagedSystemStart -
490 (ULONG_PTR)MmPagedPoolStart;
491 }
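//
// Illustrative numbers (assuming, say, a 128MB nonpaged pool maximum): the
// initial target would be 256MB of paged pool, which is then clamped so that
// the paged pool VA range does not extend past MmNonPagedSystemStart.
//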
492
493 //
494 // Get the size in pages and make sure paged pool is at least 32MB.
495 //
496 Size = MmSizeOfPagedPoolInBytes;
497 if (Size < MI_MIN_INIT_PAGED_POOLSIZE) Size = MI_MIN_INIT_PAGED_POOLSIZE;
498 Size = BYTES_TO_PAGES(Size);
499
500 //
501 // Now compute how many page tables (PDE entries) are needed to map this many pages.
502 //
503 Size = (Size + (1024 - 1)) / 1024;
504
505 //
506 // Recompute the page-aligned size of the paged pool, in bytes and pages.
507 //
508 MmSizeOfPagedPoolInBytes = Size * PAGE_SIZE * 1024;
509 MmSizeOfPagedPoolInPages = MmSizeOfPagedPoolInBytes >> PAGE_SHIFT;
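//
// Worked example (illustrative request size, assuming 4KB pages): 50MB of
// paged pool is 12800 pages; (12800 + 1023) / 1024 = 13 page tables, so the
// size is rounded up to 13 * 1024 * PAGE_SIZE = 52MB.
//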
510
511 //
512 // Let's be really sure this doesn't overflow into nonpaged system VA
513 //
514 ASSERT((MmSizeOfPagedPoolInBytes + (ULONG_PTR)MmPagedPoolStart) <=
515 (ULONG_PTR)MmNonPagedSystemStart);
516
517 //
518 // This is where paged pool ends
519 //
520 MmPagedPoolEnd = (PVOID)(((ULONG_PTR)MmPagedPoolStart +
521 MmSizeOfPagedPoolInBytes) - 1);
522
523 //
524 // So now get the PDE for paged pool and zero it out
525 //
526 PointerPde = MiAddressToPde(MmPagedPoolStart);
527 RtlZeroMemory(PointerPde,
528 (1 + MiAddressToPde(MmPagedPoolEnd) - PointerPde) * sizeof(MMPTE));
529
530 //
531 // Next, get the first and last PTE
532 //
533 PointerPte = MiAddressToPte(MmPagedPoolStart);
534 MmPagedPoolInfo.FirstPteForPagedPool = PointerPte;
535 MmPagedPoolInfo.LastPteForPagedPool = MiAddressToPte(MmPagedPoolEnd);
536
537 //
538 // Lock the PFN database
539 //
540 OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
541
542 //
543 // Allocate a page and map the first paged pool PDE
544 //
545 PageFrameIndex = MmAllocPage(MC_NPPOOL, 0);
546 TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
547 ASSERT(PointerPde->u.Hard.Valid == 0);
548 ASSERT(TempPte.u.Hard.Valid == 1);
549 *PointerPde = TempPte;
550
551 //
552 // Release the PFN database lock
553 //
554 KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
555
556 //
557 // We only have one PDE mapped for now... at fault time, additional PDEs
558 // will be allocated to handle paged pool growth. This is where they'll have
559 // to start.
560 //
561 MmPagedPoolInfo.NextPdeForPagedPoolExpansion = PointerPde + 1;
562
563 //
564 // We keep track of each page via a bit, so check how big the bitmap will
565 // have to be (make sure to align our page count such that it fits nicely
566 // into a 4-byte aligned bitmap).
567 //
568 // We'll also allocate the bitmap header itself as part of the same buffer.
569 //
570 Size = Size * 1024;
571 ASSERT(Size == MmSizeOfPagedPoolInPages);
572 BitMapSize = Size;
573 Size = sizeof(RTL_BITMAP) + (((Size + 31) / 32) * sizeof(ULONG));
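//
// Worked example (illustrative): for 8192 paged pool pages (32MB with 4KB
// pages), the bitmap needs (8192 + 31) / 32 = 256 ULONGs = 1KB of bits, plus
// sizeof(RTL_BITMAP) (8 bytes on 32-bit builds) for the header.
//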
574
575 //
576 // Allocate the allocation bitmap, which tells us which regions have not yet
577 // been mapped into memory
578 //
579 MmPagedPoolInfo.PagedPoolAllocationMap = ExAllocatePoolWithTag(NonPagedPool,
580 Size,
581 ' mM');
582 ASSERT(MmPagedPoolInfo.PagedPoolAllocationMap);
583
584 //
585 // Initialize it such that, at first, only the first page table's worth of pages
586 // is marked as available (incidentally, the range covered by the PDE we just mapped).
587 //
588 RtlInitializeBitMap(MmPagedPoolInfo.PagedPoolAllocationMap,
589 (PULONG)(MmPagedPoolInfo.PagedPoolAllocationMap + 1),
590 BitMapSize);
591 RtlSetAllBits(MmPagedPoolInfo.PagedPoolAllocationMap);
592 RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap, 0, 1024);
593
594 //
595 // We have a second bitmap, which keeps track of where allocations end.
596 // Given the allocation bitmap and a base address, we can therefore figure
597 // out which page is the last page of that allocation, and thus how big the
598 // entire allocation is.
599 //
600 MmPagedPoolInfo.EndOfPagedPoolBitmap = ExAllocatePoolWithTag(NonPagedPool,
601 Size,
602 ' mM');
603 ASSERT(MmPagedPoolInfo.EndOfPagedPoolBitmap);
604 RtlInitializeBitMap(MmPagedPoolInfo.EndOfPagedPoolBitmap,
605 (PULONG)(MmPagedPoolInfo.EndOfPagedPoolBitmap + 1),
606 BitMapSize);
607
608 //
609 // Since no allocations have been made yet, there are no bits set as the end
610 //
611 RtlClearAllBits(MmPagedPoolInfo.EndOfPagedPoolBitmap);
612
613 //
614 // Initialize paged pool.
615 //
616 InitializePool(PagedPool, 0);
617
618 //
619 // Initialize the paged pool mutex
620 //
621 KeInitializeGuardedMutex(&MmPagedPoolMutex);
622 }
623
624 NTSTATUS
625 NTAPI
626 MmArmInitSystem(IN ULONG Phase,
627 IN PLOADER_PARAMETER_BLOCK LoaderBlock)
628 {
629 ULONG i;
630 BOOLEAN IncludeType[LoaderMaximum];
631 PVOID Bitmap;
632 PPHYSICAL_MEMORY_RUN Run;
633 PFN_NUMBER PageCount;
634
635 //
636 // Mark the memory types that we don't consider usable RAM
637 // We use the same exclusions that Windows does, in order to try to be
638 // compatible with WinLDR-style booting
639 //
640 for (i = 0; i < LoaderMaximum; i++) IncludeType[i] = TRUE;
641 IncludeType[LoaderBad] = FALSE;
642 IncludeType[LoaderFirmwarePermanent] = FALSE;
643 IncludeType[LoaderSpecialMemory] = FALSE;
644 IncludeType[LoaderBBTMemory] = FALSE;
645 if (Phase == 0)
646 {
647 //
648 // Define the basic user vs. kernel address space separation
649 //
650 MmSystemRangeStart = (PVOID)KSEG0_BASE;
651 MmUserProbeAddress = (ULONG_PTR)MmSystemRangeStart - 0x10000;
652 MmHighestUserAddress = (PVOID)(MmUserProbeAddress - 1);
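//
// For example, with the usual x86 2GB/2GB split (KSEG0_BASE at 0x80000000),
// this yields MmUserProbeAddress = 0x7FFF0000 and
// MmHighestUserAddress = 0x7FFEFFFF.
//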
653
654 //
655 // Get the size of the boot loader's image allocations and then round
656 // that region up to a PDE size, so that any PDEs we might create for
657 // whatever follows are separate from the PDEs that the boot loader might've
658 // already created (and later, we can blow all that away if we want to).
659 //
660 MmBootImageSize = KeLoaderBlock->Extension->LoaderPagesSpanned;
661 MmBootImageSize *= PAGE_SIZE;
662 MmBootImageSize = (MmBootImageSize + (4 * 1024 * 1024) - 1) & ~((4 * 1024 * 1024) - 1);
663 ASSERT((MmBootImageSize % (4 * 1024 * 1024)) == 0);
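//
// Illustrative example: if LoaderPagesSpanned were 0x1680 pages (22.5MB with
// 4KB pages), the boot image region would be rounded up to 24MB, i.e. six
// 4MB PDEs.
//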
664
665 //
666 // Set the size of session view, pool, and image
667 //
668 MmSessionSize = MI_SESSION_SIZE;
669 MmSessionViewSize = MI_SESSION_VIEW_SIZE;
670 MmSessionPoolSize = MI_SESSION_POOL_SIZE;
671 MmSessionImageSize = MI_SESSION_IMAGE_SIZE;
672
673 //
674 // Set the size of system view
675 //
676 MmSystemViewSize = MI_SYSTEM_VIEW_SIZE;
677
678 //
679 // This is where it all ends
680 //
681 MiSessionImageEnd = (PVOID)PTE_BASE;
682
683 //
684 // This is where we will load Win32k.sys and the video driver
685 //
686 MiSessionImageStart = (PVOID)((ULONG_PTR)MiSessionImageEnd -
687 MmSessionImageSize);
688
689 //
690 // So the view starts right below the session working set (itself below
691 // the image area)
692 //
693 MiSessionViewStart = (PVOID)((ULONG_PTR)MiSessionImageEnd -
694 MmSessionImageSize -
695 MI_SESSION_WORKING_SET_SIZE -
696 MmSessionViewSize);
697
698 //
699 // Session pool follows
700 //
701 MiSessionPoolEnd = MiSessionViewStart;
702 MiSessionPoolStart = (PVOID)((ULONG_PTR)MiSessionPoolEnd -
703 MmSessionPoolSize);
704
705 //
706 // And it all begins here
707 //
708 MmSessionBase = MiSessionPoolStart;
709
710 //
711 // Sanity check that our math is correct
712 //
713 ASSERT((ULONG_PTR)MmSessionBase + MmSessionSize == PTE_BASE);
714
715 //
716 // Session space ends wherever image session space ends
717 //
718 MiSessionSpaceEnd = MiSessionImageEnd;
719
720 //
721 // System view space ends at session space, so now that we know where
722 // this is, we can compute the base address of system view space itself.
723 //
724 MiSystemViewStart = (PVOID)((ULONG_PTR)MmSessionBase -
725 MmSystemViewSize);
726
727 //
728 // Count physical pages on the system
729 //
730 PageCount = MiPagesInLoaderBlock(LoaderBlock, IncludeType);
731
732 //
733 // Check if this is a machine with less than 19MB of RAM
734 //
735 if (PageCount < MI_MIN_PAGES_FOR_SYSPTE_TUNING)
736 {
737 //
738 // Use the very minimum of system PTEs
739 //
740 MmNumberOfSystemPtes = 7000;
741 }
742 else
743 {
744 //
745 // Use the default, but check if we have more than 32MB of RAM
746 //
747 MmNumberOfSystemPtes = 11000;
748 if (PageCount > MI_MIN_PAGES_FOR_SYSPTE_BOOST)
749 {
750 //
751 // Double the amount of system PTEs
752 //
753 MmNumberOfSystemPtes <<= 1;
754 }
755 }
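
//
// For instance, on a machine with more than 32MB of RAM this gives 22000
// system PTEs, which (assuming 4KB pages) describe roughly 86MB of virtual
// address space, the byte figure printed by the DPRINT below.
//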
756
757 DPRINT("System PTE count has been tuned to %d (%d bytes)\n",
758 MmNumberOfSystemPtes, MmNumberOfSystemPtes * PAGE_SIZE);
759
760 /* Initialize the platform-specific parts */
761 MiInitMachineDependent(LoaderBlock);
762
763 //
764 // Sync us up with ReactOS Mm
765 //
766 MiSyncARM3WithROS(MmNonPagedSystemStart, (PVOID)((ULONG_PTR)MmNonPagedPoolEnd - 1));
767 MiSyncARM3WithROS(MmPfnDatabase, (PVOID)((ULONG_PTR)MmNonPagedPoolStart + MmSizeOfNonPagedPoolInBytes - 1));
768 MiSyncARM3WithROS((PVOID)HYPER_SPACE, (PVOID)(HYPER_SPACE + PAGE_SIZE - 1));
769
770 //
771 // Build the physical memory block
772 //
773 MmPhysicalMemoryBlock = MmInitializeMemoryLimits(LoaderBlock,
774 IncludeType);
775
776 //
777 // Allocate enough buffer for the PFN bitmap
778 // Align it up to a 32-bit boundary
779 //
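//
// Illustrative sizing: with 512MB of RAM and no memory holes, the highest
// physical page is 0x1FFFF, so the bitmap needs (0x20000 + 31) / 32 = 4096
// ULONGs, i.e. a 16KB allocation.
//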
780 Bitmap = ExAllocatePoolWithTag(NonPagedPool,
781 (((MmHighestPhysicalPage + 1) + 31) / 32) * 4,
782 ' mM');
783 if (!Bitmap)
784 {
785 //
786 // This is critical
787 //
788 KeBugCheckEx(INSTALL_MORE_MEMORY,
789 MmNumberOfPhysicalPages,
790 MmLowestPhysicalPage,
791 MmHighestPhysicalPage,
792 0x101);
793 }
794
795 //
796 // Initialize it and clear all the bits to begin with
797 //
798 RtlInitializeBitMap(&MiPfnBitMap,
799 Bitmap,
800 MmHighestPhysicalPage + 1);
801 RtlClearAllBits(&MiPfnBitMap);
802
803 //
804 // Loop physical memory runs
805 //
806 for (i = 0; i < MmPhysicalMemoryBlock->NumberOfRuns; i++)
807 {
808 //
809 // Get the run
810 //
811 Run = &MmPhysicalMemoryBlock->Run[i];
812 DPRINT("PHYSICAL RAM [0x%08p to 0x%08p]\n",
813 Run->BasePage << PAGE_SHIFT,
814 (Run->BasePage + Run->PageCount) << PAGE_SHIFT);
815
816 //
817 // Make sure it has pages inside it
818 //
819 if (Run->PageCount)
820 {
821 //
822 // Set the bits in the PFN bitmap
823 //
824 RtlSetBits(&MiPfnBitMap, Run->BasePage, Run->PageCount);
825 }
826 }
827
828 //
829 // Size up paged pool and build the shadow system page directory
830 //
831 MiBuildPagedPool();
832 }
833
834 //
835 // Always return success for now
836 //
837 return STATUS_SUCCESS;
838 }
839
840 /* EOF */