[NTOSKRNL]
[reactos.git] / reactos / ntoskrnl / mm / ARM3 / mminit.c
1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: BSD - See COPYING.ARM in the top level directory
4 * FILE: ntoskrnl/mm/ARM3/mminit.c
5 * PURPOSE: ARM Memory Manager Initialization
6 * PROGRAMMERS: ReactOS Portable Systems Group
7 */
8
9 /* INCLUDES *******************************************************************/
10
11 #include <ntoskrnl.h>
12 #define NDEBUG
13 #include <debug.h>
14
15 #define MODULE_INVOLVED_IN_ARM3
16 #include "miarm.h"
17
18 /* GLOBALS ********************************************************************/
19
20 //
21 // These are all registry-configurable, but by default, the memory manager will
22 // figure out the most appropriate values.
23 //
24 ULONG MmMaximumNonPagedPoolPercent;
25 SIZE_T MmSizeOfNonPagedPoolInBytes;
26 SIZE_T MmMaximumNonPagedPoolInBytes;
27
28 /* Some of the same values, in pages */
29 PFN_NUMBER MmMaximumNonPagedPoolInPages;
30
31 //
32 // These numbers describe the discrete equation components of the nonpaged
33 // pool sizing algorithm.
34 //
35 // They are described on http://support.microsoft.com/default.aspx/kb/126402/ja
36 // along with the algorithm that uses them, which is implemented later below.
37 //
38 SIZE_T MmMinimumNonPagedPoolSize = 256 * 1024;
39 ULONG MmMinAdditionNonPagedPoolPerMb = 32 * 1024;
40 SIZE_T MmDefaultMaximumNonPagedPool = 1024 * 1024;
41 ULONG MmMaxAdditionNonPagedPoolPerMb = 400 * 1024;
42
43 //
44 // The memory layout (and especially variable names) of the NT kernel mode
45 // components can be a bit hard to twig, especially when it comes to the non
46 // paged area.
47 //
48 // There are really two components to the non-paged pool:
49 //
50 // - The initial nonpaged pool, sized dynamically up to a maximum.
51 // - The expansion nonpaged pool, sized dynamically up to a maximum.
52 //
53 // The initial nonpaged pool is physically contiguous for performance, and
54 // immediately follows the PFN database, typically sharing the same PDE. It is
55 // a very small resource (32MB on a 1GB system), and capped at 128MB.
56 //
57 // Right now we call this the "ARM³ Nonpaged Pool" and it begins somewhere after
58 // the PFN database (which starts at 0xB0000000).
59 //
60 // The expansion nonpaged pool, on the other hand, can grow much bigger (400MB
61 // for a 1GB system). On ARM³ however, it is currently capped at 128MB.
62 //
63 // The address where the initial nonpaged pool starts is aptly named
64 // MmNonPagedPoolStart, and it describes a range of MmSizeOfNonPagedPoolInBytes
65 // bytes.
66 //
67 // Expansion nonpaged pool starts at an address described by the variable called
68 // MmNonPagedPoolExpansionStart, and it goes on for MmMaximumNonPagedPoolInBytes
69 // minus MmSizeOfNonPagedPoolInBytes bytes, always reaching MmNonPagedPoolEnd
70 // (because of the way it's calculated) at 0xFFBE0000.
71 //
72 // Initial nonpaged pool is allocated and mapped early-on during boot, but what
73 // about the expansion nonpaged pool? It is instead composed of special pages
74 // which belong to what are called System PTEs. These PTEs are the matter of a
75 // later discussion, but they are also considered part of the "nonpaged" OS, due
76 // to the fact that they are never paged out -- once an address is described by
77 // a System PTE, it is always valid, until the System PTE is torn down.
78 //
79 // System PTEs are actually composed of two "spaces", the system space proper,
80 // and the nonpaged pool expansion space. The latter, as we've already seen,
81 // begins at MmNonPagedPoolExpansionStart. Based on the number of System PTEs
82 // that the system will support, the remaining address space below this address
83 // is used to hold the system space PTEs. This address, in turn, is held in the
84 // variable named MmNonPagedSystemStart, which itself is never allowed to go
85 // below 0xEB000000 (thus creating an upper bound on the number of System PTEs).
86 //
87 // This means that 330MB are reserved for total nonpaged system VA, on top of
88 // whatever the initial nonpaged pool allocation is.
89 //
90 // The following URLs, valid as of April 23rd, 2008, support this evidence:
91 //
92 // http://www.cs.miami.edu/~burt/journal/NT/memory.html
93 // http://www.ditii.com/2007/09/28/windows-memory-management-x86-virtual-address-space/
94 //
95 PVOID MmNonPagedSystemStart;
96 PVOID MmNonPagedPoolStart;
97 PVOID MmNonPagedPoolExpansionStart;
98 PVOID MmNonPagedPoolEnd = MI_NONPAGED_POOL_END;
99
100 //
101 // This is where paged pool starts by default
102 //
103 PVOID MmPagedPoolStart = MI_PAGED_POOL_START;
104 PVOID MmPagedPoolEnd;
105
106 //
107 // And this is its default size
108 //
109 SIZE_T MmSizeOfPagedPoolInBytes = MI_MIN_INIT_PAGED_POOLSIZE;
110 PFN_NUMBER MmSizeOfPagedPoolInPages = MI_MIN_INIT_PAGED_POOLSIZE / PAGE_SIZE;
111
112 //
113 // Session space starts at 0xBFFFFFFF and grows downwards
114 // By default, it includes an 8MB image area where we map win32k and video card
115 // drivers, followed by a 4MB area containing the session's working set. This is
116 // then followed by a 20MB mapped view area and finally by the session's paged
117 // pool, by default 16MB.
118 //
119 // On a normal system, this results in session space occupying the region from
120 // 0xBD000000 to 0xC0000000
121 //
122 // See miarm.h for the defines that determine the sizing of this region. On an
123 // NT system, some of these can be configured through the registry, but we don't
124 // support that yet.
125 //
126 PVOID MiSessionSpaceEnd; // 0xC0000000
127 PVOID MiSessionImageEnd; // 0xC0000000
128 PVOID MiSessionImageStart; // 0xBF800000
129 PVOID MiSessionViewStart; // 0xBE000000
130 PVOID MiSessionPoolEnd; // 0xBE000000
131 PVOID MiSessionPoolStart; // 0xBD000000
132 PVOID MmSessionBase; // 0xBD000000
133 SIZE_T MmSessionSize;
134 SIZE_T MmSessionViewSize;
135 SIZE_T MmSessionPoolSize;
136 SIZE_T MmSessionImageSize;
137
138 /*
139 * These are the PTE addresses of the boundaries carved out above
140 */
141 PMMPTE MiSessionImagePteStart;
142 PMMPTE MiSessionImagePteEnd;
143 PMMPTE MiSessionBasePte;
144 PMMPTE MiSessionLastPte;
145
146 //
147 // The system view space, on the other hand, is where sections that are memory
148 // mapped into "system space" end up.
149 //
150 // By default, it is a 16MB region.
151 //
152 PVOID MiSystemViewStart;
153 SIZE_T MmSystemViewSize;
154
155 #if (_MI_PAGING_LEVELS == 2)
156 //
157 // A copy of the system page directory (the page directory associated with the
158 // System process) is kept (double-mapped) by the manager in order to lazily
159 // map paged pool PDEs into external processes when they fault on a paged pool
160 // address.
161 //
162 PFN_NUMBER MmSystemPageDirectory[PD_COUNT];
163 PMMPDE MmSystemPagePtes;
164 #endif
165
166 //
167 // The system cache starts right after hyperspace. The first few pages are for
168 // keeping track of the system working set list.
169 //
170 // This should be 0xC0C00000 -- the cache itself starts at 0xC1000000
171 //
172 PMMWSL MmSystemCacheWorkingSetList = MI_SYSTEM_CACHE_WS_START;
173
174 //
175 // Windows NT seems to choose between 7000, 11000 and 50000
176 // On systems with more than 32MB, this number is then doubled, and further
177 // aligned up to a PDE boundary (4MB).
178 //
179 ULONG_PTR MmNumberOfSystemPtes;
180
181 //
182 // This is how many pages the PFN database will take up
183 // In Windows, this includes the Quark Color Table, but not in ARM³
184 //
185 PFN_NUMBER MxPfnAllocation;
186
187 //
188 // Unlike the old ReactOS Memory Manager, ARM³ (and Windows) does not keep track
189 // of pages that are not actually valid physical memory, such as ACPI reserved
190 // regions, BIOS address ranges, or holes in physical memory address space which
191 // could indicate device-mapped I/O memory.
192 //
193 // In fact, the lack of a PFN entry for a page usually indicates that this is
194 // I/O space instead.
195 //
196 // A bitmap, called the PFN bitmap, keeps track of all page frames by assigning
197 // a bit to each. If the bit is set, then the page is valid physical RAM.
198 //
199 RTL_BITMAP MiPfnBitMap;
200
201 //
202 // This structure describes the different pieces of RAM-backed address space
203 //
204 PPHYSICAL_MEMORY_DESCRIPTOR MmPhysicalMemoryBlock;
205
206 //
207 // This is where we keep track of the most basic physical layout markers
208 //
209 PFN_NUMBER MmNumberOfPhysicalPages, MmHighestPhysicalPage, MmLowestPhysicalPage = -1;
210
211 //
212 // The total number of pages mapped by the boot loader, which include the kernel
213 // HAL, boot drivers, registry, NLS files and other loader data structures is
214 // kept track of here. This depends on "LoaderPagesSpanned" being correct when
215 // coming from the loader.
216 //
217 // This number is later aligned up to a PDE boundary.
218 //
219 SIZE_T MmBootImageSize;
220
221 //
222 // These three variables keep track of the core separation of address space that
223 // exists between kernel mode and user mode.
224 //
225 ULONG_PTR MmUserProbeAddress;
226 PVOID MmHighestUserAddress;
227 PVOID MmSystemRangeStart;
228
229 /* And these store the respective highest PTE/PDE address */
230 PMMPTE MiHighestUserPte;
231 PMMPDE MiHighestUserPde;
232 #if (_MI_PAGING_LEVELS >= 3)
233 /* We need the highest PPE and PXE addresses */
234 #endif
235
236 /* These variables define the system cache address space */
237 PVOID MmSystemCacheStart;
238 PVOID MmSystemCacheEnd;
239 MMSUPPORT MmSystemCacheWs;
240
241 //
242 // This is where hyperspace ends (followed by the system cache working set)
243 //
244 PVOID MmHyperSpaceEnd;
245
246 //
247 // Page coloring algorithm data
248 //
249 ULONG MmSecondaryColors;
250 ULONG MmSecondaryColorMask;
251
252 //
253 // Actual (registry-configurable) size of a GUI thread's stack
254 //
255 ULONG MmLargeStackSize = KERNEL_LARGE_STACK_SIZE;
256
257 //
258 // Before we have a PFN database, memory comes straight from our physical memory
259 // blocks, which is nice because it's guaranteed contiguous and also because once
260 // we take a page from here, the system doesn't see it anymore.
261 // However, once the fun is over, those pages must be re-integrated back into
262 // PFN society life, and that requires us keeping a copy of the original layout
263 // so that we can parse it later.
264 //
265 PMEMORY_ALLOCATION_DESCRIPTOR MxFreeDescriptor;
266 MEMORY_ALLOCATION_DESCRIPTOR MxOldFreeDescriptor;
267
268 /*
269 * For each page's worth of bytes of L2 cache in a given set/way line, the zero and
270 * free lists are organized in what is called a "color".
271 *
272 * This array points to the two lists, so it can be thought of as a multi-dimensional
273 * array of MmFreePagesByColor[2][MmSecondaryColors]. Since the number is dynamic,
274 * we describe the array in pointer form instead.
275 *
276 * On a final note, the color tables themselves are right after the PFN database.
277 */
278 C_ASSERT(FreePageList == 1);
279 PMMCOLOR_TABLES MmFreePagesByColor[FreePageList + 1];
280
281 /* An event used in Phase 0 before the rest of the system is ready to go */
282 KEVENT MiTempEvent;
283
284 /* All the events used for memory threshold notifications */
285 PKEVENT MiLowMemoryEvent;
286 PKEVENT MiHighMemoryEvent;
287 PKEVENT MiLowPagedPoolEvent;
288 PKEVENT MiHighPagedPoolEvent;
289 PKEVENT MiLowNonPagedPoolEvent;
290 PKEVENT MiHighNonPagedPoolEvent;
291
292 /* The actual thresholds themselves, in page numbers */
293 PFN_NUMBER MmLowMemoryThreshold;
294 PFN_NUMBER MmHighMemoryThreshold;
295 PFN_NUMBER MiLowPagedPoolThreshold;
296 PFN_NUMBER MiHighPagedPoolThreshold;
297 PFN_NUMBER MiLowNonPagedPoolThreshold;
298 PFN_NUMBER MiHighNonPagedPoolThreshold;
299
300 /*
301 * This number determines how many free pages must exist, at minimum, until we
302 * start trimming working sets and flushing modified pages to obtain more free
303 * pages.
304 *
305 * This number changes if the system detects that this is a server product
306 */
307 PFN_NUMBER MmMinimumFreePages = 26;
308
309 /*
310 * This number indicates how many pages we consider to be a low limit of having
311 * "plenty" of free memory.
312 *
313 * It is doubled on systems that have more than 63MB of memory
314 */
315 PFN_NUMBER MmPlentyFreePages = 400;
316
317 /* These values store the type of system this is (small, med, large) and if server */
318 ULONG MmProductType;
319 MM_SYSTEMSIZE MmSystemSize;
320
321 /*
322 * These values store the cache working set minimums and maximums, in pages
323 *
324 * The minimum value is boosted on systems with more than 24MB of RAM, and cut
325 * down to only 32 pages on embedded (<24MB RAM) systems.
326 *
327 * An extra boost of 2MB is given on systems with more than 33MB of RAM.
328 */
329 PFN_NUMBER MmSystemCacheWsMinimum = 288;
330 PFN_NUMBER MmSystemCacheWsMaximum = 350;
331
332 /* FIXME: Move to cache/working set code later */
333 BOOLEAN MmLargeSystemCache;
334
335 /*
336 * This value determines in how many fragments/chunks the subsection prototype
337 * PTEs should be allocated when mapping a section object. It is configurable in
338 * the registry through the MapAllocationFragment parameter.
339 *
340 * The default is 64KB on systems with more than 1GB of RAM, 32KB on systems with
341 * more than 256MB of RAM, and 16KB on systems with less than 256MB of RAM.
342 *
343 * The maximum it can be set to is 2MB, and the minimum is 4KB.
344 */
345 SIZE_T MmAllocationFragment;
346
347 /*
348 * These two values track how much virtual memory can be committed, and when
349 * expansion should happen.
350 */
351 // FIXME: They should be moved elsewhere since it's not an "init" setting?
352 SIZE_T MmTotalCommitLimit;
353 SIZE_T MmTotalCommitLimitMaximum;
354
355 /* Internal setting used for debugging memory descriptors */
356 BOOLEAN MiDbgEnableMdDump =
357 #ifdef _ARM_
358 TRUE;
359 #else
360 FALSE;
361 #endif
362
363 /* PRIVATE FUNCTIONS **********************************************************/
364
365 PFN_NUMBER
366 NTAPI
367 INIT_FUNCTION
368 MxGetNextPage(IN PFN_NUMBER PageCount)
369 {
370 PFN_NUMBER Pfn;
371
372 /* Make sure we have enough pages */
373 if (PageCount > MxFreeDescriptor->PageCount)
374 {
375 /* Crash the system */
376 KeBugCheckEx(INSTALL_MORE_MEMORY,
377 MmNumberOfPhysicalPages,
378 MxFreeDescriptor->PageCount,
379 MxOldFreeDescriptor.PageCount,
380 PageCount);
381 }
382
383 /* Use our lowest usable free pages */
384 Pfn = MxFreeDescriptor->BasePage;
385 MxFreeDescriptor->BasePage += PageCount;
386 MxFreeDescriptor->PageCount -= PageCount;
387 return Pfn;
388 }
389
390 VOID
391 NTAPI
392 INIT_FUNCTION
393 MiComputeColorInformation(VOID)
394 {
395 ULONG L2Associativity;
396
397 /* Check if no setting was provided already */
398 if (!MmSecondaryColors)
399 {
400 /* Get L2 cache information */
401 L2Associativity = KeGetPcr()->SecondLevelCacheAssociativity;
402
403 /* The number of colors is the number of cache bytes by set/way */
404 MmSecondaryColors = KeGetPcr()->SecondLevelCacheSize;
405 if (L2Associativity) MmSecondaryColors /= L2Associativity;
406 }
407
408 /* Now convert cache bytes into pages */
409 MmSecondaryColors >>= PAGE_SHIFT;
410 if (!MmSecondaryColors)
411 {
412 /* If there was no cache data from the KPCR, use the default colors */
413 MmSecondaryColors = MI_SECONDARY_COLORS;
414 }
415 else
416 {
417 /* Otherwise, make sure there aren't too many colors */
418 if (MmSecondaryColors > MI_MAX_SECONDARY_COLORS)
419 {
420 /* Set the maximum */
421 MmSecondaryColors = MI_MAX_SECONDARY_COLORS;
422 }
423
424 /* Make sure there aren't too little colors */
425 if (MmSecondaryColors < MI_MIN_SECONDARY_COLORS)
426 {
427 /* Set the default */
428 MmSecondaryColors = MI_SECONDARY_COLORS;
429 }
430
431 /* Finally make sure the colors are a power of two */
432 if (MmSecondaryColors & (MmSecondaryColors - 1))
433 {
434 /* Set the default */
435 MmSecondaryColors = MI_SECONDARY_COLORS;
436 }
437 }
438
439 /* Compute the mask and store it */
440 MmSecondaryColorMask = MmSecondaryColors - 1;
441 KeGetCurrentPrcb()->SecondaryColorMask = MmSecondaryColorMask;
442 }
443
/*
 * Allocates and initializes the page color tables that live immediately
 * after the PFN database: two MMCOLOR_TABLES arrays (one for the zeroed
 * list, one for the free list), each MmSecondaryColors entries long.
 * Backing pages are taken from the boot free descriptor via MxGetNextPage.
 */
VOID
NTAPI
INIT_FUNCTION
MiInitializeColorTables(VOID)
{
    ULONG i;
    PMMPTE PointerPte, LastPte;
    MMPTE TempPte = ValidKernelPte;

    /* The color table starts after the ARM3 PFN database */
    MmFreePagesByColor[0] = (PMMCOLOR_TABLES)&MmPfnDatabase[MmHighestPhysicalPage + 1];

    /* Loop the PTEs. We have two color tables for each secondary color */
    PointerPte = MiAddressToPte(&MmFreePagesByColor[0][0]);
    LastPte = MiAddressToPte((ULONG_PTR)MmFreePagesByColor[0] +
                             (2 * MmSecondaryColors * sizeof(MMCOLOR_TABLES))
                             - 1);
    while (PointerPte <= LastPte)
    {
        /* Check for valid PTE -- pages already mapped (e.g. shared with the
           tail of the PFN database) are left alone */
        if (PointerPte->u.Hard.Valid == 0)
        {
            /* Get a page and map it */
            TempPte.u.Hard.PageFrameNumber = MxGetNextPage(1);
            MI_WRITE_VALID_PTE(PointerPte, TempPte);

            /* Zero out the page */
            RtlZeroMemory(MiPteToAddress(PointerPte), PAGE_SIZE);
        }

        /* Next */
        PointerPte++;
    }

    /* Now set the address of the next list, right after this one */
    MmFreePagesByColor[1] = &MmFreePagesByColor[0][MmSecondaryColors];

    /* Now loop the lists to set them up */
    for (i = 0; i < MmSecondaryColors; i++)
    {
        /* Set both free and zero lists for each color.
           0xFFFFFFFF appears to act as the empty-list sentinel here --
           TODO confirm against the list insertion code in the PFN module */
        MmFreePagesByColor[ZeroedPageList][i].Flink = 0xFFFFFFFF;
        MmFreePagesByColor[ZeroedPageList][i].Blink = (PVOID)0xFFFFFFFF;
        MmFreePagesByColor[ZeroedPageList][i].Count = 0;
        MmFreePagesByColor[FreePageList][i].Flink = 0xFFFFFFFF;
        MmFreePagesByColor[FreePageList][i].Blink = (PVOID)0xFFFFFFFF;
        MmFreePagesByColor[FreePageList][i].Count = 0;
    }
}
493
494 BOOLEAN
495 NTAPI
496 INIT_FUNCTION
497 MiIsRegularMemory(IN PLOADER_PARAMETER_BLOCK LoaderBlock,
498 IN PFN_NUMBER Pfn)
499 {
500 PLIST_ENTRY NextEntry;
501 PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
502
503 /* Loop the memory descriptors */
504 NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
505 while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
506 {
507 /* Get the memory descriptor */
508 MdBlock = CONTAINING_RECORD(NextEntry,
509 MEMORY_ALLOCATION_DESCRIPTOR,
510 ListEntry);
511
512 /* Check if this PFN could be part of the block */
513 if (Pfn >= (MdBlock->BasePage))
514 {
515 /* Check if it really is part of the block */
516 if (Pfn < (MdBlock->BasePage + MdBlock->PageCount))
517 {
518 /* Check if the block is actually memory we don't map */
519 if ((MdBlock->MemoryType == LoaderFirmwarePermanent) ||
520 (MdBlock->MemoryType == LoaderBBTMemory) ||
521 (MdBlock->MemoryType == LoaderSpecialMemory))
522 {
523 /* We don't need PFN database entries for this memory */
524 break;
525 }
526
527 /* This is memory we want to map */
528 return TRUE;
529 }
530 }
531 else
532 {
533 /* Blocks are ordered, so if it's not here, it doesn't exist */
534 break;
535 }
536
537 /* Get to the next descriptor */
538 NextEntry = MdBlock->ListEntry.Flink;
539 }
540
541 /* Check if this PFN is actually from our free memory descriptor */
542 if ((Pfn >= MxOldFreeDescriptor.BasePage) &&
543 (Pfn < MxOldFreeDescriptor.BasePage + MxOldFreeDescriptor.PageCount))
544 {
545 /* We use these pages for initial mappings, so we do want to count them */
546 return TRUE;
547 }
548
549 /* Otherwise this isn't memory that we describe or care about */
550 return FALSE;
551 }
552
/*
 * Maps the virtual address range of the PFN database: for every loader
 * memory descriptor that represents describable memory, the PTEs covering
 * the corresponding slice of MmPfnDatabase are made valid and the backing
 * pages zeroed. Backing pages are consumed directly from the boot free
 * descriptor (not via MxGetNextPage, see below) and the descriptor is
 * updated once at the end.
 */
VOID
NTAPI
INIT_FUNCTION
MiMapPfnDatabase(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
    ULONG FreePage, FreePageCount, PagesLeft, BasePage, PageCount;
    PLIST_ENTRY NextEntry;
    PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
    PMMPTE PointerPte, LastPte;
    MMPTE TempPte = ValidKernelPte;

    /* Get current page data, since we won't be using MxGetNextPage as it would corrupt our state */
    FreePage = MxFreeDescriptor->BasePage;
    FreePageCount = MxFreeDescriptor->PageCount;
    PagesLeft = 0;

    /* Loop the memory descriptors */
    NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
    while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
    {
        /* Get the descriptor */
        MdBlock = CONTAINING_RECORD(NextEntry,
                                    MEMORY_ALLOCATION_DESCRIPTOR,
                                    ListEntry);
        if ((MdBlock->MemoryType == LoaderFirmwarePermanent) ||
            (MdBlock->MemoryType == LoaderBBTMemory) ||
            (MdBlock->MemoryType == LoaderSpecialMemory))
        {
            /* These pages are not part of the PFN database */
            NextEntry = MdBlock->ListEntry.Flink;
            continue;
        }

        /* Next, check if this is our special free descriptor we've found */
        if (MdBlock == MxFreeDescriptor)
        {
            /* Use the real numbers instead -- this descriptor has already
               been shrunk by earlier MxGetNextPage calls, but the PFN
               database must describe its original extent */
            BasePage = MxOldFreeDescriptor.BasePage;
            PageCount = MxOldFreeDescriptor.PageCount;
        }
        else
        {
            /* Use the descriptor's numbers */
            BasePage = MdBlock->BasePage;
            PageCount = MdBlock->PageCount;
        }

        /* Get the PTEs for this range */
        PointerPte = MiAddressToPte(&MmPfnDatabase[BasePage]);
        LastPte = MiAddressToPte(((ULONG_PTR)&MmPfnDatabase[BasePage + PageCount]) - 1);
        DPRINT("MD Type: %lx Base: %lx Count: %lx\n", MdBlock->MemoryType, BasePage, PageCount);

        /* Loop them */
        while (PointerPte <= LastPte)
        {
            /* We'll only touch PTEs that aren't already valid (adjacent
               descriptors may share a PTE's worth of PFN entries) */
            if (PointerPte->u.Hard.Valid == 0)
            {
                /* Use the next free page */
                TempPte.u.Hard.PageFrameNumber = FreePage;
                ASSERT(FreePageCount != 0);

                /* Consume free pages */
                FreePage++;
                FreePageCount--;
                if (!FreePageCount)
                {
                    /* Out of memory */
                    KeBugCheckEx(INSTALL_MORE_MEMORY,
                                 MmNumberOfPhysicalPages,
                                 FreePageCount,
                                 MxOldFreeDescriptor.PageCount,
                                 1);
                }

                /* Write out this PTE.
                   NOTE(review): PagesLeft is only ever incremented here and
                   never read -- presumably a statistics leftover; confirm */
                PagesLeft++;
                MI_WRITE_VALID_PTE(PointerPte, TempPte);

                /* Zero this page */
                RtlZeroMemory(MiPteToAddress(PointerPte), PAGE_SIZE);
            }

            /* Next! */
            PointerPte++;
        }

        /* Do the next address range */
        NextEntry = MdBlock->ListEntry.Flink;
    }

    /* Now update the free descriptors to consume the pages we used up during the PFN allocation loop */
    MxFreeDescriptor->BasePage = FreePage;
    MxFreeDescriptor->PageCount = FreePageCount;
}
648
649 VOID
650 NTAPI
651 INIT_FUNCTION
652 MiBuildPfnDatabaseFromPages(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
653 {
654 PMMPDE PointerPde;
655 PMMPTE PointerPte;
656 ULONG i, Count, j;
657 PFN_NUMBER PageFrameIndex, StartupPdIndex, PtePageIndex;
658 PMMPFN Pfn1, Pfn2;
659 ULONG_PTR BaseAddress = 0;
660
661 /* PFN of the startup page directory */
662 StartupPdIndex = PFN_FROM_PTE(MiAddressToPde(PDE_BASE));
663
664 /* Start with the first PDE and scan them all */
665 PointerPde = MiAddressToPde(NULL);
666 Count = PD_COUNT * PDE_COUNT;
667 for (i = 0; i < Count; i++)
668 {
669 /* Check for valid PDE */
670 if (PointerPde->u.Hard.Valid == 1)
671 {
672 /* Get the PFN from it */
673 PageFrameIndex = PFN_FROM_PTE(PointerPde);
674
675 /* Do we want a PFN entry for this page? */
676 if (MiIsRegularMemory(LoaderBlock, PageFrameIndex))
677 {
678 /* Yes we do, set it up */
679 Pfn1 = MiGetPfnEntry(PageFrameIndex);
680 Pfn1->u4.PteFrame = StartupPdIndex;
681 Pfn1->PteAddress = (PMMPTE)PointerPde;
682 Pfn1->u2.ShareCount++;
683 Pfn1->u3.e2.ReferenceCount = 1;
684 Pfn1->u3.e1.PageLocation = ActiveAndValid;
685 Pfn1->u3.e1.CacheAttribute = MiNonCached;
686 #if MI_TRACE_PFNS
687 Pfn1->PfnUsage = MI_USAGE_INIT_MEMORY;
688 memcpy(Pfn1->ProcessName, "Initial PDE", 16);
689 #endif
690 }
691 else
692 {
693 /* No PFN entry */
694 Pfn1 = NULL;
695 }
696
697 /* Now get the PTE and scan the pages */
698 PointerPte = MiAddressToPte(BaseAddress);
699 for (j = 0; j < PTE_COUNT; j++)
700 {
701 /* Check for a valid PTE */
702 if (PointerPte->u.Hard.Valid == 1)
703 {
704 /* Increase the shared count of the PFN entry for the PDE */
705 ASSERT(Pfn1 != NULL);
706 Pfn1->u2.ShareCount++;
707
708 /* Now check if the PTE is valid memory too */
709 PtePageIndex = PFN_FROM_PTE(PointerPte);
710 if (MiIsRegularMemory(LoaderBlock, PtePageIndex))
711 {
712 /*
713 * Only add pages above the end of system code or pages
714 * that are part of nonpaged pool
715 */
716 if ((BaseAddress >= 0xA0000000) ||
717 ((BaseAddress >= (ULONG_PTR)MmNonPagedPoolStart) &&
718 (BaseAddress < (ULONG_PTR)MmNonPagedPoolStart +
719 MmSizeOfNonPagedPoolInBytes)))
720 {
721 /* Get the PFN entry and make sure it too is valid */
722 Pfn2 = MiGetPfnEntry(PtePageIndex);
723 if ((MmIsAddressValid(Pfn2)) &&
724 (MmIsAddressValid(Pfn2 + 1)))
725 {
726 /* Setup the PFN entry */
727 Pfn2->u4.PteFrame = PageFrameIndex;
728 Pfn2->PteAddress = PointerPte;
729 Pfn2->u2.ShareCount++;
730 Pfn2->u3.e2.ReferenceCount = 1;
731 Pfn2->u3.e1.PageLocation = ActiveAndValid;
732 Pfn2->u3.e1.CacheAttribute = MiNonCached;
733 #if MI_TRACE_PFNS
734 Pfn2->PfnUsage = MI_USAGE_INIT_MEMORY;
735 memcpy(Pfn1->ProcessName, "Initial PTE", 16);
736 #endif
737 }
738 }
739 }
740 }
741
742 /* Next PTE */
743 PointerPte++;
744 BaseAddress += PAGE_SIZE;
745 }
746 }
747 else
748 {
749 /* Next PDE mapped address */
750 BaseAddress += PDE_MAPPED_VA;
751 }
752
753 /* Next PTE */
754 PointerPde++;
755 }
756 }
757
758 VOID
759 NTAPI
760 INIT_FUNCTION
761 MiBuildPfnDatabaseZeroPage(VOID)
762 {
763 PMMPFN Pfn1;
764 PMMPDE PointerPde;
765
766 /* Grab the lowest page and check if it has no real references */
767 Pfn1 = MiGetPfnEntry(MmLowestPhysicalPage);
768 if (!(MmLowestPhysicalPage) && !(Pfn1->u3.e2.ReferenceCount))
769 {
770 /* Make it a bogus page to catch errors */
771 PointerPde = MiAddressToPde(0xFFFFFFFF);
772 Pfn1->u4.PteFrame = PFN_FROM_PTE(PointerPde);
773 Pfn1->PteAddress = (PMMPTE)PointerPde;
774 Pfn1->u2.ShareCount++;
775 Pfn1->u3.e2.ReferenceCount = 0xFFF0;
776 Pfn1->u3.e1.PageLocation = ActiveAndValid;
777 Pfn1->u3.e1.CacheAttribute = MiNonCached;
778 }
779 }
780
/*
 * Completes the PFN database from the loader's memory descriptor list:
 * free-style descriptors have their unreferenced pages inserted into the
 * free page list, invisible ranges (firmware/special/BBT) are skipped, bad
 * RAM is reported, and everything else (boot drivers, loader structures,
 * etc.) is marked active-and-valid against its KSEG0 mapping. Descriptors
 * reaching past MmHighestPhysicalPage are trimmed in place.
 */
VOID
NTAPI
INIT_FUNCTION
MiBuildPfnDatabaseFromLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
    PLIST_ENTRY NextEntry;
    PFN_NUMBER PageCount = 0;
    PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
    PFN_NUMBER PageFrameIndex;
    PMMPFN Pfn1;
    PMMPTE PointerPte;
    PMMPDE PointerPde;
    KIRQL OldIrql;

    /* Now loop through the descriptors */
    NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
    while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
    {
        /* Get the current descriptor */
        MdBlock = CONTAINING_RECORD(NextEntry,
                                    MEMORY_ALLOCATION_DESCRIPTOR,
                                    ListEntry);

        /* Read its data */
        PageCount = MdBlock->PageCount;
        PageFrameIndex = MdBlock->BasePage;

        /* Don't allow memory above what the PFN database is mapping */
        if (PageFrameIndex > MmHighestPhysicalPage)
        {
            /* Since they are ordered, everything past here will be larger */
            break;
        }

        /* On the other hand, the end page might be higher up... */
        if ((PageFrameIndex + PageCount) > (MmHighestPhysicalPage + 1))
        {
            /* In which case we'll trim the descriptor to go as high as we can */
            PageCount = MmHighestPhysicalPage + 1 - PageFrameIndex;
            MdBlock->PageCount = PageCount;

            /* But if there's nothing left to trim, we got too high, so quit */
            if (!PageCount) break;
        }

        /* Now check the descriptor type */
        switch (MdBlock->MemoryType)
        {
            /* Check for bad RAM */
            case LoaderBad:

                DPRINT1("You either have specified /BURNMEMORY or damaged RAM modules.\n");
                break;

            /* Check for free RAM */
            case LoaderFree:
            case LoaderLoadedProgram:
            case LoaderFirmwareTemporary:
            case LoaderOsloaderStack:

                /* Get the last page of this descriptor. Note we loop backwards */
                PageFrameIndex += PageCount - 1;
                Pfn1 = MiGetPfnEntry(PageFrameIndex);

                /* Lock the PFN Database */
                OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
                while (PageCount--)
                {
                    /* If the page really has no references, mark it as free
                       (earlier passes may have claimed some of these pages) */
                    if (!Pfn1->u3.e2.ReferenceCount)
                    {
                        /* Add it to the free list */
                        Pfn1->u3.e1.CacheAttribute = MiNonCached;
                        MiInsertPageInFreeList(PageFrameIndex);
                    }

                    /* Go to the next page */
                    Pfn1--;
                    PageFrameIndex--;
                }

                /* Release PFN database */
                KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

                /* Done with this block */
                break;

            /* Check for pages that are invisible to us */
            case LoaderFirmwarePermanent:
            case LoaderSpecialMemory:
            case LoaderBBTMemory:

                /* And skip them */
                break;

            default:

                /* Map these pages with the KSEG0 mapping that adds 0x80000000 */
                PointerPte = MiAddressToPte(KSEG0_BASE + (PageFrameIndex << PAGE_SHIFT));
                Pfn1 = MiGetPfnEntry(PageFrameIndex);
                while (PageCount--)
                {
                    /* Check if the page is really unused */
                    PointerPde = MiAddressToPde(KSEG0_BASE + (PageFrameIndex << PAGE_SHIFT));
                    if (!Pfn1->u3.e2.ReferenceCount)
                    {
                        /* Mark it as being in-use */
                        Pfn1->u4.PteFrame = PFN_FROM_PTE(PointerPde);
                        Pfn1->PteAddress = PointerPte;
                        Pfn1->u2.ShareCount++;
                        Pfn1->u3.e2.ReferenceCount = 1;
                        Pfn1->u3.e1.PageLocation = ActiveAndValid;
                        Pfn1->u3.e1.CacheAttribute = MiNonCached;
#if MI_TRACE_PFNS
                        Pfn1->PfnUsage = MI_USAGE_BOOT_DRIVER;
#endif

                        /* Check for RAM disk page */
                        if (MdBlock->MemoryType == LoaderXIPRom)
                        {
                            /* Make it a pseudo-I/O ROM mapping */
                            Pfn1->u1.Flink = 0;
                            Pfn1->u2.ShareCount = 0;
                            Pfn1->u3.e2.ReferenceCount = 0;
                            Pfn1->u3.e1.PageLocation = 0;
                            Pfn1->u3.e1.Rom = 1;
                            Pfn1->u4.InPageError = 0;
                            Pfn1->u3.e1.PrototypePte = 1;
                        }
                    }

                    /* Advance page structures */
                    Pfn1++;
                    PageFrameIndex++;
                    PointerPte++;
                }
                break;
        }

        /* Next descriptor entry */
        NextEntry = MdBlock->ListEntry.Flink;
    }
}
924
925 VOID
926 NTAPI
927 INIT_FUNCTION
928 MiBuildPfnDatabaseSelf(VOID)
929 {
930 PMMPTE PointerPte, LastPte;
931 PMMPFN Pfn1;
932
933 /* Loop the PFN database page */
934 PointerPte = MiAddressToPte(MiGetPfnEntry(MmLowestPhysicalPage));
935 LastPte = MiAddressToPte(MiGetPfnEntry(MmHighestPhysicalPage));
936 while (PointerPte <= LastPte)
937 {
938 /* Make sure the page is valid */
939 if (PointerPte->u.Hard.Valid == 1)
940 {
941 /* Get the PFN entry and just mark it referenced */
942 Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
943 Pfn1->u2.ShareCount = 1;
944 Pfn1->u3.e2.ReferenceCount = 1;
945 #if MI_TRACE_PFNS
946 Pfn1->PfnUsage = MI_USAGE_PFN_DATABASE;
947 #endif
948 }
949
950 /* Next */
951 PointerPte++;
952 }
953 }
954
/*
 * Fully initializes the PFN database in four strictly ordered passes:
 * pages mapped by the boot page tables, the bogus zero page, everything
 * described by the loader block (which also populates the free list), and
 * finally the database's own backing pages.
 */
VOID
NTAPI
INIT_FUNCTION
MiInitializePfnDatabase(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
    /* Scan memory and start setting up PFN entries */
    MiBuildPfnDatabaseFromPages(LoaderBlock);

    /* Add the zero page */
    MiBuildPfnDatabaseZeroPage();

    /* Scan the loader block and build the rest of the PFN database */
    MiBuildPfnDatabaseFromLoaderBlock(LoaderBlock);

    /* Finally add the pages for the PFN database itself */
    MiBuildPfnDatabaseSelf();
}
972
/*
 * Tunes working-set-manager parameters at init time. Currently only bumps
 * the "plenty free" page threshold on larger machines.
 *
 * NOTE(review): the Client parameter (client vs. server SKU, presumably) is
 * not used here yet -- confirm intended tuning differences before relying
 * on it.
 */
VOID
NTAPI
INIT_FUNCTION
MiAdjustWorkingSetManagerParameters(IN BOOLEAN Client)
{
    /* This function needs to do more work, for now, we tune page minimums */

    /* Check for a system with around 64MB RAM or more */
    if (MmNumberOfPhysicalPages >= (63 * _1MB) / PAGE_SIZE)
    {
        /* Double the minimum amount of pages we consider for a "plenty free" scenario */
        MmPlentyFreePages *= 2;
    }
}
987
988 VOID
989 NTAPI
990 INIT_FUNCTION
991 MiNotifyMemoryEvents(VOID)
992 {
993 /* Are we in a low-memory situation? */
994 if (MmAvailablePages < MmLowMemoryThreshold)
995 {
996 /* Clear high, set low */
997 if (KeReadStateEvent(MiHighMemoryEvent)) KeClearEvent(MiHighMemoryEvent);
998 if (!KeReadStateEvent(MiLowMemoryEvent)) KeSetEvent(MiLowMemoryEvent, 0, FALSE);
999 }
1000 else if (MmAvailablePages < MmHighMemoryThreshold)
1001 {
1002 /* We are in between, clear both */
1003 if (KeReadStateEvent(MiHighMemoryEvent)) KeClearEvent(MiHighMemoryEvent);
1004 if (KeReadStateEvent(MiLowMemoryEvent)) KeClearEvent(MiLowMemoryEvent);
1005 }
1006 else
1007 {
1008 /* Clear low, set high */
1009 if (KeReadStateEvent(MiLowMemoryEvent)) KeClearEvent(MiLowMemoryEvent);
1010 if (!KeReadStateEvent(MiHighMemoryEvent)) KeSetEvent(MiHighMemoryEvent, 0, FALSE);
1011 }
1012 }
1013
1014 NTSTATUS
1015 NTAPI
1016 INIT_FUNCTION
1017 MiCreateMemoryEvent(IN PUNICODE_STRING Name,
1018 OUT PKEVENT *Event)
1019 {
1020 PACL Dacl;
1021 HANDLE EventHandle;
1022 ULONG DaclLength;
1023 NTSTATUS Status;
1024 OBJECT_ATTRIBUTES ObjectAttributes;
1025 SECURITY_DESCRIPTOR SecurityDescriptor;
1026
1027 /* Create the SD */
1028 Status = RtlCreateSecurityDescriptor(&SecurityDescriptor,
1029 SECURITY_DESCRIPTOR_REVISION);
1030 if (!NT_SUCCESS(Status)) return Status;
1031
1032 /* One ACL with 3 ACEs, containing each one SID */
1033 DaclLength = sizeof(ACL) +
1034 3 * sizeof(ACCESS_ALLOWED_ACE) +
1035 RtlLengthSid(SeLocalSystemSid) +
1036 RtlLengthSid(SeAliasAdminsSid) +
1037 RtlLengthSid(SeWorldSid);
1038
1039 /* Allocate space for the DACL */
1040 Dacl = ExAllocatePoolWithTag(PagedPool, DaclLength, 'lcaD');
1041 if (!Dacl) return STATUS_INSUFFICIENT_RESOURCES;
1042
1043 /* Setup the ACL inside it */
1044 Status = RtlCreateAcl(Dacl, DaclLength, ACL_REVISION);
1045 if (!NT_SUCCESS(Status)) goto CleanUp;
1046
1047 /* Add query rights for everyone */
1048 Status = RtlAddAccessAllowedAce(Dacl,
1049 ACL_REVISION,
1050 SYNCHRONIZE | EVENT_QUERY_STATE | READ_CONTROL,
1051 SeWorldSid);
1052 if (!NT_SUCCESS(Status)) goto CleanUp;
1053
1054 /* Full rights for the admin */
1055 Status = RtlAddAccessAllowedAce(Dacl,
1056 ACL_REVISION,
1057 EVENT_ALL_ACCESS,
1058 SeAliasAdminsSid);
1059 if (!NT_SUCCESS(Status)) goto CleanUp;
1060
1061 /* As well as full rights for the system */
1062 Status = RtlAddAccessAllowedAce(Dacl,
1063 ACL_REVISION,
1064 EVENT_ALL_ACCESS,
1065 SeLocalSystemSid);
1066 if (!NT_SUCCESS(Status)) goto CleanUp;
1067
1068 /* Set this DACL inside the SD */
1069 Status = RtlSetDaclSecurityDescriptor(&SecurityDescriptor,
1070 TRUE,
1071 Dacl,
1072 FALSE);
1073 if (!NT_SUCCESS(Status)) goto CleanUp;
1074
1075 /* Setup the event attributes, making sure it's a permanent one */
1076 InitializeObjectAttributes(&ObjectAttributes,
1077 Name,
1078 OBJ_KERNEL_HANDLE | OBJ_PERMANENT,
1079 NULL,
1080 &SecurityDescriptor);
1081
1082 /* Create the event */
1083 Status = ZwCreateEvent(&EventHandle,
1084 EVENT_ALL_ACCESS,
1085 &ObjectAttributes,
1086 NotificationEvent,
1087 FALSE);
1088 CleanUp:
1089 /* Free the DACL */
1090 ExFreePool(Dacl);
1091
1092 /* Check if this is the success path */
1093 if (NT_SUCCESS(Status))
1094 {
1095 /* Add a reference to the object, then close the handle we had */
1096 Status = ObReferenceObjectByHandle(EventHandle,
1097 EVENT_MODIFY_STATE,
1098 ExEventObjectType,
1099 KernelMode,
1100 (PVOID*)Event,
1101 NULL);
1102 ZwClose (EventHandle);
1103 }
1104
1105 /* Return status */
1106 return Status;
1107 }
1108
/*
 * Computes the low/high memory thresholds (from registry overrides or
 * size-based defaults), creates the six \KernelObjects memory condition
 * events, and publishes the initial event states.
 *
 * Returns TRUE on success, FALSE if any event creation failed.
 */
BOOLEAN
NTAPI
INIT_FUNCTION
MiInitializeMemoryEvents(VOID)
{
    UNICODE_STRING LowString = RTL_CONSTANT_STRING(L"\\KernelObjects\\LowMemoryCondition");
    UNICODE_STRING HighString = RTL_CONSTANT_STRING(L"\\KernelObjects\\HighMemoryCondition");
    UNICODE_STRING LowPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\LowPagedPoolCondition");
    UNICODE_STRING HighPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\HighPagedPoolCondition");
    UNICODE_STRING LowNonPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\LowNonPagedPoolCondition");
    UNICODE_STRING HighNonPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\HighNonPagedPoolCondition");
    NTSTATUS Status;

    /* Check if we have a registry setting (registry value is in MB) */
    if (MmLowMemoryThreshold)
    {
        /* Convert it to pages */
        MmLowMemoryThreshold *= (_1MB / PAGE_SIZE);
    }
    else
    {
        /* The low memory threshold is hit when we don't consider that we have "plenty" of free pages anymore */
        MmLowMemoryThreshold = MmPlentyFreePages;

        /* More than one GB of memory? */
        if (MmNumberOfPhysicalPages > 0x40000)
        {
            /* Start at 32MB, and add another 16MB for each GB */
            /* NOTE: this replaces (not adds to) the MmPlentyFreePages base */
            MmLowMemoryThreshold = (32 * _1MB) / PAGE_SIZE;
            MmLowMemoryThreshold += ((MmNumberOfPhysicalPages - 0x40000) >> 7);
        }
        else if (MmNumberOfPhysicalPages > 0x8000)
        {
            /* For systems with > 128MB RAM, add another 4MB for each 128MB */
            MmLowMemoryThreshold += ((MmNumberOfPhysicalPages - 0x8000) >> 5);
        }

        /* Don't let the minimum threshold go past 64MB */
        MmLowMemoryThreshold = min(MmLowMemoryThreshold, (64 * _1MB) / PAGE_SIZE);
    }

    /* Check if we have a registry setting (also in MB) */
    if (MmHighMemoryThreshold)
    {
        /* Convert it into pages */
        MmHighMemoryThreshold *= (_1MB / PAGE_SIZE);
    }
    else
    {
        /* Otherwise, the default is three times the low memory threshold */
        MmHighMemoryThreshold = 3 * MmLowMemoryThreshold;
        ASSERT(MmHighMemoryThreshold > MmLowMemoryThreshold);
    }

    /* Make sure high threshold is actually higher than the low */
    /* (a small registry-supplied high value could otherwise invert them) */
    MmHighMemoryThreshold = max(MmHighMemoryThreshold, MmLowMemoryThreshold);

    /* Create the memory events for all the thresholds */
    Status = MiCreateMemoryEvent(&LowString, &MiLowMemoryEvent);
    if (!NT_SUCCESS(Status)) return FALSE;
    Status = MiCreateMemoryEvent(&HighString, &MiHighMemoryEvent);
    if (!NT_SUCCESS(Status)) return FALSE;
    Status = MiCreateMemoryEvent(&LowPagedPoolString, &MiLowPagedPoolEvent);
    if (!NT_SUCCESS(Status)) return FALSE;
    Status = MiCreateMemoryEvent(&HighPagedPoolString, &MiHighPagedPoolEvent);
    if (!NT_SUCCESS(Status)) return FALSE;
    Status = MiCreateMemoryEvent(&LowNonPagedPoolString, &MiLowNonPagedPoolEvent);
    if (!NT_SUCCESS(Status)) return FALSE;
    Status = MiCreateMemoryEvent(&HighNonPagedPoolString, &MiHighNonPagedPoolEvent);
    if (!NT_SUCCESS(Status)) return FALSE;

    /* Now setup the pool events */
    MiInitializePoolEvents();

    /* Set the initial event state */
    MiNotifyMemoryEvents();
    return TRUE;
}
1187
/*
 * Scans the HAL heap region for valid small-page mappings whose physical
 * pages are outside the PFN database (i.e. device/IO memory) so they can be
 * tracked for cache-attribute coherency. Currently only warns about them.
 *
 * NOTE: the 0xFFC00000 base is the x86 HAL heap; this routine is
 * architecture-specific as written.
 */
VOID
NTAPI
INIT_FUNCTION
MiAddHalIoMappings(VOID)
{
    PVOID BaseAddress;
    PMMPDE PointerPde;
    PMMPTE PointerPte;
    ULONG i, j, PdeCount;
    PFN_NUMBER PageFrameIndex;

    /* HAL Heap address -- should be on a PDE boundary */
    BaseAddress = (PVOID)0xFFC00000;
    ASSERT(MiAddressToPteOffset(BaseAddress) == 0);

    /* Check how many PDEs the heap has (from the heap base to the top of VA space) */
    PointerPde = MiAddressToPde(BaseAddress);
    PdeCount = PDE_COUNT - MiGetPdeOffset(BaseAddress);
    for (i = 0; i < PdeCount; i++)
    {
        /* Does the HAL own this mapping? (valid and not a large page) */
        if ((PointerPde->u.Hard.Valid == 1) &&
            (MI_IS_PAGE_LARGE(PointerPde) == FALSE))
        {
            /* Get the PTE for it and scan each page */
            PointerPte = MiAddressToPte(BaseAddress);
            for (j = 0 ; j < PTE_COUNT; j++)
            {
                /* Does the HAL own this page? */
                if (PointerPte->u.Hard.Valid == 1)
                {
                    /* Is the HAL using it for device or I/O mapped memory? */
                    /* (no PFN entry means the frame is not RAM we manage) */
                    PageFrameIndex = PFN_FROM_PTE(PointerPte);
                    if (!MiGetPfnEntry(PageFrameIndex))
                    {
                        /* FIXME: For PAT, we need to track I/O cache attributes for coherency */
                        DPRINT1("HAL I/O Mapping at %p is unsafe\n", BaseAddress);
                    }
                }

                /* Move to the next page */
                BaseAddress = (PVOID)((ULONG_PTR)BaseAddress + PAGE_SIZE);
                PointerPte++;
            }
        }
        else
        {
            /* Move to the next address (skip the whole PDE's VA range) */
            BaseAddress = (PVOID)((ULONG_PTR)BaseAddress + PDE_MAPPED_VA);
        }

        /* Move to the next PDE */
        PointerPde++;
    }
}
1243
/*
 * Debug helper: walks the whole PFN database at HIGH_LEVEL and prints a
 * per-page dump (unless StatusOnly) plus summary counts per page state.
 * When MI_TRACE_PFNS is enabled, also breaks usage down by consumer bucket.
 */
VOID
NTAPI
MmDumpArmPfnDatabase(IN BOOLEAN StatusOnly)
{
    ULONG i;
    PMMPFN Pfn1;
    PCHAR Consumer = "Unknown";
    KIRQL OldIrql;
    ULONG ActivePages = 0, FreePages = 0, OtherPages = 0;
#if MI_TRACE_PFNS
    /* Per-usage page counters, indexed by MI_USAGE_* values */
    ULONG UsageBucket[MI_USAGE_FREE_PAGE + 1] = {0};
    PCHAR MI_USAGE_TEXT[MI_USAGE_FREE_PAGE + 1] =
    {
        "Not set",
        "Paged Pool",
        "Nonpaged Pool",
        "Nonpaged Pool Ex",
        "Kernel Stack",
        "Kernel Stack Ex",
        "System PTE",
        "VAD",
        "PEB/TEB",
        "Section",
        "Page Table",
        "Page Directory",
        "Old Page Table",
        "Driver Page",
        "Contiguous Alloc",
        "MDL",
        "Demand Zero",
        "Zero Loop",
        "Cache",
        "PFN Database",
        "Boot Driver",
        "Initial Memory",
        "Free Page"
    };
#endif
    //
    // Loop the PFN database (at HIGH_LEVEL so the snapshot is consistent)
    //
    KeRaiseIrql(HIGH_LEVEL, &OldIrql);
    for (i = 0; i <= MmHighestPhysicalPage; i++)
    {
        /* Holes in physical memory have no PFN entry */
        Pfn1 = MiGetPfnEntry(i);
        if (!Pfn1) continue;
#if MI_TRACE_PFNS
        ASSERT(Pfn1->PfnUsage <= MI_USAGE_FREE_PAGE);
#endif
        //
        // Get the page location
        //
        switch (Pfn1->u3.e1.PageLocation)
        {
            case ActiveAndValid:

                Consumer = "Active and Valid";
                ActivePages++;
                break;

            case ZeroedPageList:

                Consumer = "Zero Page List";
                FreePages++;
                break;//continue;

            case FreePageList:

                Consumer = "Free Page List";
                FreePages++;
                break;//continue;

            default:

                Consumer = "Other (ASSERT!)";
                OtherPages++;
                break;
        }

#if MI_TRACE_PFNS
        /* Add into bucket */
        UsageBucket[Pfn1->PfnUsage]++;
#endif

        //
        // Pretty-print the page
        //
        if (!StatusOnly)
        DbgPrint("0x%08p:\t%20s\t(%04d.%04d)\t[%16s - %16s])\n",
                 i << PAGE_SHIFT,
                 Consumer,
                 Pfn1->u3.e2.ReferenceCount,
                 Pfn1->u2.ShareCount == LIST_HEAD ? 0xFFFF : Pfn1->u2.ShareCount,
#if MI_TRACE_PFNS
                 MI_USAGE_TEXT[Pfn1->PfnUsage],
                 Pfn1->ProcessName);
#else
                 "Page tracking",
                 "is disabled");
#endif
    }

    DbgPrint("Active:         %5d pages\t[%6d KB]\n", ActivePages,  (ActivePages    << PAGE_SHIFT) / 1024);
    DbgPrint("Free:           %5d pages\t[%6d KB]\n", FreePages,    (FreePages      << PAGE_SHIFT) / 1024);
    DbgPrint("-----------------------------------------\n");
#if MI_TRACE_PFNS
    /* OtherPages is reused below as a scratch variable for bucket sums */
    OtherPages = UsageBucket[MI_USAGE_BOOT_DRIVER];
    DbgPrint("Boot Images:    %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
    OtherPages = UsageBucket[MI_USAGE_DRIVER_PAGE];
    DbgPrint("System Drivers: %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
    OtherPages = UsageBucket[MI_USAGE_PFN_DATABASE];
    DbgPrint("PFN Database:   %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
    OtherPages = UsageBucket[MI_USAGE_PAGE_TABLE] + UsageBucket[MI_USAGE_LEGACY_PAGE_DIRECTORY];
    DbgPrint("Page Tables:    %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
    OtherPages = UsageBucket[MI_USAGE_NONPAGED_POOL] + UsageBucket[MI_USAGE_NONPAGED_POOL_EXPANSION];
    DbgPrint("NonPaged Pool:  %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
    OtherPages = UsageBucket[MI_USAGE_PAGED_POOL];
    DbgPrint("Paged Pool:     %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
    OtherPages = UsageBucket[MI_USAGE_KERNEL_STACK] + UsageBucket[MI_USAGE_KERNEL_STACK_EXPANSION];
    DbgPrint("Kernel Stack:   %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
    OtherPages = UsageBucket[MI_USAGE_INIT_MEMORY];
    DbgPrint("Init Memory:    %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
    OtherPages = UsageBucket[MI_USAGE_SECTION];
    DbgPrint("Sections:       %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
    OtherPages = UsageBucket[MI_USAGE_CACHE];
    DbgPrint("Cache:          %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
#endif
    KeLowerIrql(OldIrql);
}
1373
1374 PFN_NUMBER
1375 NTAPI
1376 MiPagesInLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock,
1377 IN PBOOLEAN IncludeType)
1378 {
1379 PLIST_ENTRY NextEntry;
1380 PFN_NUMBER PageCount = 0;
1381 PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
1382
1383 //
1384 // Now loop through the descriptors
1385 //
1386 NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
1387 while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
1388 {
1389 //
1390 // Grab each one, and check if it's one we should include
1391 //
1392 MdBlock = CONTAINING_RECORD(NextEntry,
1393 MEMORY_ALLOCATION_DESCRIPTOR,
1394 ListEntry);
1395 if ((MdBlock->MemoryType < LoaderMaximum) &&
1396 (IncludeType[MdBlock->MemoryType]))
1397 {
1398 //
1399 // Add this to our running total
1400 //
1401 PageCount += MdBlock->PageCount;
1402 }
1403
1404 //
1405 // Try the next descriptor
1406 //
1407 NextEntry = MdBlock->ListEntry.Flink;
1408 }
1409
1410 //
1411 // Return the total
1412 //
1413 return PageCount;
1414 }
1415
1416 PPHYSICAL_MEMORY_DESCRIPTOR
1417 NTAPI
1418 INIT_FUNCTION
1419 MmInitializeMemoryLimits(IN PLOADER_PARAMETER_BLOCK LoaderBlock,
1420 IN PBOOLEAN IncludeType)
1421 {
1422 PLIST_ENTRY NextEntry;
1423 ULONG Run = 0, InitialRuns = 0;
1424 PFN_NUMBER NextPage = -1, PageCount = 0;
1425 PPHYSICAL_MEMORY_DESCRIPTOR Buffer, NewBuffer;
1426 PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
1427
1428 //
1429 // Scan the memory descriptors
1430 //
1431 NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
1432 while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
1433 {
1434 //
1435 // For each one, increase the memory allocation estimate
1436 //
1437 InitialRuns++;
1438 NextEntry = NextEntry->Flink;
1439 }
1440
1441 //
1442 // Allocate the maximum we'll ever need
1443 //
1444 Buffer = ExAllocatePoolWithTag(NonPagedPool,
1445 sizeof(PHYSICAL_MEMORY_DESCRIPTOR) +
1446 sizeof(PHYSICAL_MEMORY_RUN) *
1447 (InitialRuns - 1),
1448 'lMmM');
1449 if (!Buffer) return NULL;
1450
1451 //
1452 // For now that's how many runs we have
1453 //
1454 Buffer->NumberOfRuns = InitialRuns;
1455
1456 //
1457 // Now loop through the descriptors again
1458 //
1459 NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
1460 while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
1461 {
1462 //
1463 // Grab each one, and check if it's one we should include
1464 //
1465 MdBlock = CONTAINING_RECORD(NextEntry,
1466 MEMORY_ALLOCATION_DESCRIPTOR,
1467 ListEntry);
1468 if ((MdBlock->MemoryType < LoaderMaximum) &&
1469 (IncludeType[MdBlock->MemoryType]))
1470 {
1471 //
1472 // Add this to our running total
1473 //
1474 PageCount += MdBlock->PageCount;
1475
1476 //
1477 // Check if the next page is described by the next descriptor
1478 //
1479 if (MdBlock->BasePage == NextPage)
1480 {
1481 //
1482 // Combine it into the same physical run
1483 //
1484 ASSERT(MdBlock->PageCount != 0);
1485 Buffer->Run[Run - 1].PageCount += MdBlock->PageCount;
1486 NextPage += MdBlock->PageCount;
1487 }
1488 else
1489 {
1490 //
1491 // Otherwise just duplicate the descriptor's contents
1492 //
1493 Buffer->Run[Run].BasePage = MdBlock->BasePage;
1494 Buffer->Run[Run].PageCount = MdBlock->PageCount;
1495 NextPage = Buffer->Run[Run].BasePage + Buffer->Run[Run].PageCount;
1496
1497 //
1498 // And in this case, increase the number of runs
1499 //
1500 Run++;
1501 }
1502 }
1503
1504 //
1505 // Try the next descriptor
1506 //
1507 NextEntry = MdBlock->ListEntry.Flink;
1508 }
1509
1510 //
1511 // We should not have been able to go past our initial estimate
1512 //
1513 ASSERT(Run <= Buffer->NumberOfRuns);
1514
1515 //
1516 // Our guess was probably exaggerated...
1517 //
1518 if (InitialRuns > Run)
1519 {
1520 //
1521 // Allocate a more accurately sized buffer
1522 //
1523 NewBuffer = ExAllocatePoolWithTag(NonPagedPool,
1524 sizeof(PHYSICAL_MEMORY_DESCRIPTOR) +
1525 sizeof(PHYSICAL_MEMORY_RUN) *
1526 (Run - 1),
1527 'lMmM');
1528 if (NewBuffer)
1529 {
1530 //
1531 // Copy the old buffer into the new, then free it
1532 //
1533 RtlCopyMemory(NewBuffer->Run,
1534 Buffer->Run,
1535 sizeof(PHYSICAL_MEMORY_RUN) * Run);
1536 ExFreePool(Buffer);
1537
1538 //
1539 // Now use the new buffer
1540 //
1541 Buffer = NewBuffer;
1542 }
1543 }
1544
1545 //
1546 // Write the final numbers, and return it
1547 //
1548 Buffer->NumberOfRuns = Run;
1549 Buffer->NumberOfPages = PageCount;
1550 return Buffer;
1551 }
1552
/*
 * Sizes and creates the paged pool: computes its VA extent (default twice
 * the nonpaged pool maximum, clamped to the window between MmPagedPoolStart
 * and MmNonPagedSystemStart), maps its first PDE, builds the allocation and
 * end-of-allocation bitmaps, initializes the pool itself, and sets the
 * paged pool low/high thresholds.
 */
VOID
NTAPI
INIT_FUNCTION
MiBuildPagedPool(VOID)
{
    PMMPTE PointerPte;
    PMMPDE PointerPde;
    MMPTE TempPte = ValidKernelPte;
    MMPDE TempPde = ValidKernelPde;
    PFN_NUMBER PageFrameIndex;
    KIRQL OldIrql;
    ULONG Size, BitMapSize;
#if (_MI_PAGING_LEVELS == 2)
    //
    // Get the page frame number for the system page directory
    //
    PointerPte = MiAddressToPte(PDE_BASE);
    ASSERT(PD_COUNT == 1);
    MmSystemPageDirectory[0] = PFN_FROM_PTE(PointerPte);

    //
    // Allocate a system PTE which will hold a copy of the page directory
    //
    PointerPte = MiReserveSystemPtes(1, SystemPteSpace);
    ASSERT(PointerPte);
    MmSystemPagePtes = MiPteToAddress(PointerPte);

    //
    // Make this system PTE point to the system page directory.
    // It is now essentially double-mapped. This will be used later for lazy
    // evaluation of PDEs accross process switches, similarly to how the Global
    // page directory array in the old ReactOS Mm is used (but in a less hacky
    // way).
    //
    TempPte = ValidKernelPte;
    ASSERT(PD_COUNT == 1);
    TempPte.u.Hard.PageFrameNumber = MmSystemPageDirectory[0];
    MI_WRITE_VALID_PTE(PointerPte, TempPte);
#endif
    //
    // Let's get back to paged pool work: size it up.
    // By default, it should be twice as big as nonpaged pool.
    //
    MmSizeOfPagedPoolInBytes = 2 * MmMaximumNonPagedPoolInBytes;
    if (MmSizeOfPagedPoolInBytes > ((ULONG_PTR)MmNonPagedSystemStart -
                                    (ULONG_PTR)MmPagedPoolStart))
    {
        //
        // On the other hand, we have limited VA space, so make sure that the VA
        // for paged pool doesn't overflow into nonpaged pool VA. Otherwise, set
        // whatever maximum is possible.
        //
        MmSizeOfPagedPoolInBytes = (ULONG_PTR)MmNonPagedSystemStart -
                                   (ULONG_PTR)MmPagedPoolStart;
    }

    //
    // Get the size in pages and make sure paged pool is at least 32MB.
    //
    Size = MmSizeOfPagedPoolInBytes;
    if (Size < MI_MIN_INIT_PAGED_POOLSIZE) Size = MI_MIN_INIT_PAGED_POOLSIZE;
    Size = BYTES_TO_PAGES(Size);

    //
    // Now check how many PTEs will be required for these many pages.
    // (1024 PTEs per page table on x86; rounds up to whole page tables)
    //
    Size = (Size + (1024 - 1)) / 1024;

    //
    // Recompute the page-aligned size of the paged pool, in bytes and pages.
    //
    MmSizeOfPagedPoolInBytes = Size * PAGE_SIZE * 1024;
    MmSizeOfPagedPoolInPages = MmSizeOfPagedPoolInBytes >> PAGE_SHIFT;

    //
    // Let's be really sure this doesn't overflow into nonpaged system VA
    //
    ASSERT((MmSizeOfPagedPoolInBytes + (ULONG_PTR)MmPagedPoolStart) <=
           (ULONG_PTR)MmNonPagedSystemStart);

    //
    // This is where paged pool ends
    //
    MmPagedPoolEnd = (PVOID)(((ULONG_PTR)MmPagedPoolStart +
                              MmSizeOfPagedPoolInBytes) - 1);

    //
    // So now get the PDE for paged pool and zero it out
    //
    PointerPde = MiAddressToPde(MmPagedPoolStart);

#if (_MI_PAGING_LEVELS >= 3)
    /* On these systems, there's no double-mapping, so instead, the PPE and PXEs
     * are setup to span the entire paged pool area, so there's no need for the
     * system PD */
    ASSERT(FALSE);
#endif

    RtlZeroMemory(PointerPde,
                  (1 + MiAddressToPde(MmPagedPoolEnd) - PointerPde) * sizeof(MMPDE));

    //
    // Next, get the first and last PTE
    //
    PointerPte = MiAddressToPte(MmPagedPoolStart);
    MmPagedPoolInfo.FirstPteForPagedPool = PointerPte;
    MmPagedPoolInfo.LastPteForPagedPool = MiAddressToPte(MmPagedPoolEnd);

    //
    // Lock the PFN database
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    /* Allocate a page and map the first paged pool PDE */
    MI_SET_USAGE(MI_USAGE_PAGED_POOL);
    MI_SET_PROCESS2("Kernel");
    PageFrameIndex = MiRemoveZeroPage(0);
    TempPde.u.Hard.PageFrameNumber = PageFrameIndex;
    MI_WRITE_VALID_PDE(PointerPde, TempPde);
#if (_MI_PAGING_LEVELS >= 3)
    /* Use the PPE of MmPagedPoolStart that was setup above */
    // Bla = PFN_FROM_PTE(PpeAddress(MmPagedPool...));
    ASSERT(FALSE);
#else
    /* Do it this way */
    // Bla = MmSystemPageDirectory[(PointerPde - (PMMPTE)PDE_BASE) / PDE_COUNT]

    /* Initialize the PFN entry for it */
    MiInitializePfnForOtherProcess(PageFrameIndex,
                                   (PMMPTE)PointerPde,
                                   MmSystemPageDirectory[(PointerPde - (PMMPDE)PDE_BASE) / PDE_COUNT]);
#endif

    //
    // Release the PFN database lock
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    //
    // We only have one PDE mapped for now... at fault time, additional PDEs
    // will be allocated to handle paged pool growth. This is where they'll have
    // to start.
    //
    MmPagedPoolInfo.NextPdeForPagedPoolExpansion = PointerPde + 1;

    //
    // We keep track of each page via a bit, so check how big the bitmap will
    // have to be (make sure to align our page count such that it fits nicely
    // into a 4-byte aligned bitmap.
    //
    // We'll also allocate the bitmap header itself part of the same buffer.
    //
    Size = Size * 1024;
    ASSERT(Size == MmSizeOfPagedPoolInPages);
    BitMapSize = Size;
    Size = sizeof(RTL_BITMAP) + (((Size + 31) / 32) * sizeof(ULONG));

    //
    // Allocate the allocation bitmap, which tells us which regions have not yet
    // been mapped into memory
    //
    MmPagedPoolInfo.PagedPoolAllocationMap = ExAllocatePoolWithTag(NonPagedPool,
                                                                   Size,
                                                                   '  mM');
    ASSERT(MmPagedPoolInfo.PagedPoolAllocationMap);

    //
    // Initialize it such that at first, only the first page's worth of PTEs is
    // marked as allocated (incidentially, the first PDE we allocated earlier).
    //
    RtlInitializeBitMap(MmPagedPoolInfo.PagedPoolAllocationMap,
                        (PULONG)(MmPagedPoolInfo.PagedPoolAllocationMap + 1),
                        BitMapSize);
    RtlSetAllBits(MmPagedPoolInfo.PagedPoolAllocationMap);
    RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap, 0, 1024);

    //
    // We have a second bitmap, which keeps track of where allocations end.
    // Given the allocation bitmap and a base address, we can therefore figure
    // out which page is the last page of that allocation, and thus how big the
    // entire allocation is.
    //
    MmPagedPoolInfo.EndOfPagedPoolBitmap = ExAllocatePoolWithTag(NonPagedPool,
                                                                 Size,
                                                                 '  mM');
    ASSERT(MmPagedPoolInfo.EndOfPagedPoolBitmap);
    RtlInitializeBitMap(MmPagedPoolInfo.EndOfPagedPoolBitmap,
                        (PULONG)(MmPagedPoolInfo.EndOfPagedPoolBitmap + 1),
                        BitMapSize);

    //
    // Since no allocations have been made yet, there are no bits set as the end
    //
    RtlClearAllBits(MmPagedPoolInfo.EndOfPagedPoolBitmap);

    //
    // Initialize paged pool.
    //
    InitializePool(PagedPool, 0);

    /* Default low threshold of 30MB or one fifth of paged pool */
    /* NOTE: Size is in pages here (it was multiplied back above) */
    MiLowPagedPoolThreshold = (30 * _1MB) >> PAGE_SHIFT;
    MiLowPagedPoolThreshold = min(MiLowPagedPoolThreshold, Size / 5);

    /* Default high threshold of 60MB or 25% */
    MiHighPagedPoolThreshold = (60 * _1MB) >> PAGE_SHIFT;
    MiHighPagedPoolThreshold = min(MiHighPagedPoolThreshold, (Size * 2) / 5);
    ASSERT(MiLowPagedPoolThreshold < MiHighPagedPoolThreshold);

    /* Setup the global session space */
    MiInitializeSystemSpaceMap(NULL);
}
1765
1766 VOID
1767 NTAPI
1768 INIT_FUNCTION
1769 MiDbgDumpMemoryDescriptors(VOID)
1770 {
1771 PLIST_ENTRY NextEntry;
1772 PMEMORY_ALLOCATION_DESCRIPTOR Md;
1773 ULONG TotalPages = 0;
1774 PCHAR
1775 MemType[] =
1776 {
1777 "ExceptionBlock ",
1778 "SystemBlock ",
1779 "Free ",
1780 "Bad ",
1781 "LoadedProgram ",
1782 "FirmwareTemporary ",
1783 "FirmwarePermanent ",
1784 "OsloaderHeap ",
1785 "OsloaderStack ",
1786 "SystemCode ",
1787 "HalCode ",
1788 "BootDriver ",
1789 "ConsoleInDriver ",
1790 "ConsoleOutDriver ",
1791 "StartupDpcStack ",
1792 "StartupKernelStack",
1793 "StartupPanicStack ",
1794 "StartupPcrPage ",
1795 "StartupPdrPage ",
1796 "RegistryData ",
1797 "MemoryData ",
1798 "NlsData ",
1799 "SpecialMemory ",
1800 "BBTMemory ",
1801 "LoaderReserve ",
1802 "LoaderXIPRom "
1803 };
1804
1805 DPRINT1("Base\t\tLength\t\tType\n");
1806 for (NextEntry = KeLoaderBlock->MemoryDescriptorListHead.Flink;
1807 NextEntry != &KeLoaderBlock->MemoryDescriptorListHead;
1808 NextEntry = NextEntry->Flink)
1809 {
1810 Md = CONTAINING_RECORD(NextEntry, MEMORY_ALLOCATION_DESCRIPTOR, ListEntry);
1811 DPRINT1("%08lX\t%08lX\t%s\n", Md->BasePage, Md->PageCount, MemType[Md->MemoryType]);
1812 TotalPages += Md->PageCount;
1813 }
1814
1815 DPRINT1("Total: %08lX (%d MB)\n", TotalPages, (TotalPages * PAGE_SIZE) / 1024 / 1024);
1816 }
1817
1818 BOOLEAN
1819 NTAPI
1820 INIT_FUNCTION
1821 MmArmInitSystem(IN ULONG Phase,
1822 IN PLOADER_PARAMETER_BLOCK LoaderBlock)
1823 {
1824 ULONG i;
1825 BOOLEAN IncludeType[LoaderMaximum];
1826 PVOID Bitmap;
1827 PPHYSICAL_MEMORY_RUN Run;
1828 PFN_NUMBER PageCount;
1829
1830 /* Dump memory descriptors */
1831 if (MiDbgEnableMdDump) MiDbgDumpMemoryDescriptors();
1832
1833 //
1834 // Instantiate memory that we don't consider RAM/usable
1835 // We use the same exclusions that Windows does, in order to try to be
1836 // compatible with WinLDR-style booting
1837 //
1838 for (i = 0; i < LoaderMaximum; i++) IncludeType[i] = TRUE;
1839 IncludeType[LoaderBad] = FALSE;
1840 IncludeType[LoaderFirmwarePermanent] = FALSE;
1841 IncludeType[LoaderSpecialMemory] = FALSE;
1842 IncludeType[LoaderBBTMemory] = FALSE;
1843 if (Phase == 0)
1844 {
1845 /* Initialize the phase 0 temporary event */
1846 KeInitializeEvent(&MiTempEvent, NotificationEvent, FALSE);
1847
1848 /* Set all the events to use the temporary event for now */
1849 MiLowMemoryEvent = &MiTempEvent;
1850 MiHighMemoryEvent = &MiTempEvent;
1851 MiLowPagedPoolEvent = &MiTempEvent;
1852 MiHighPagedPoolEvent = &MiTempEvent;
1853 MiLowNonPagedPoolEvent = &MiTempEvent;
1854 MiHighNonPagedPoolEvent = &MiTempEvent;
1855
1856 //
1857 // Define the basic user vs. kernel address space separation
1858 //
1859 MmSystemRangeStart = (PVOID)KSEG0_BASE;
1860 MmUserProbeAddress = (ULONG_PTR)MmSystemRangeStart - 0x10000;
1861 MmHighestUserAddress = (PVOID)(MmUserProbeAddress - 1);
1862
1863 /* Highest PTE and PDE based on the addresses above */
1864 MiHighestUserPte = MiAddressToPte(MmHighestUserAddress);
1865 MiHighestUserPde = MiAddressToPde(MmHighestUserAddress);
1866 #if (_MI_PAGING_LEVELS >= 3)
1867 /* We need the highest PPE and PXE addresses */
1868 ASSERT(FALSE);
1869 #endif
1870 //
1871 // Get the size of the boot loader's image allocations and then round
1872 // that region up to a PDE size, so that any PDEs we might create for
1873 // whatever follows are separate from the PDEs that boot loader might've
1874 // already created (and later, we can blow all that away if we want to).
1875 //
1876 MmBootImageSize = KeLoaderBlock->Extension->LoaderPagesSpanned;
1877 MmBootImageSize *= PAGE_SIZE;
1878 MmBootImageSize = (MmBootImageSize + PDE_MAPPED_VA - 1) & ~(PDE_MAPPED_VA - 1);
1879 ASSERT((MmBootImageSize % PDE_MAPPED_VA) == 0);
1880
1881 //
1882 // Set the size of session view, pool, and image
1883 //
1884 MmSessionSize = MI_SESSION_SIZE;
1885 MmSessionViewSize = MI_SESSION_VIEW_SIZE;
1886 MmSessionPoolSize = MI_SESSION_POOL_SIZE;
1887 MmSessionImageSize = MI_SESSION_IMAGE_SIZE;
1888
1889 //
1890 // Set the size of system view
1891 //
1892 MmSystemViewSize = MI_SYSTEM_VIEW_SIZE;
1893
1894 //
1895 // This is where it all ends
1896 //
1897 MiSessionImageEnd = (PVOID)PTE_BASE;
1898
1899 //
1900 // This is where we will load Win32k.sys and the video driver
1901 //
1902 MiSessionImageStart = (PVOID)((ULONG_PTR)MiSessionImageEnd -
1903 MmSessionImageSize);
1904
1905 //
1906 // So the view starts right below the session working set (itself below
1907 // the image area)
1908 //
1909 MiSessionViewStart = (PVOID)((ULONG_PTR)MiSessionImageEnd -
1910 MmSessionImageSize -
1911 MI_SESSION_WORKING_SET_SIZE -
1912 MmSessionViewSize);
1913
1914 //
1915 // Session pool follows
1916 //
1917 MiSessionPoolEnd = MiSessionViewStart;
1918 MiSessionPoolStart = (PVOID)((ULONG_PTR)MiSessionPoolEnd -
1919 MmSessionPoolSize);
1920
1921 //
1922 // And it all begins here
1923 //
1924 MmSessionBase = MiSessionPoolStart;
1925
1926 //
1927 // Sanity check that our math is correct
1928 //
1929 ASSERT((ULONG_PTR)MmSessionBase + MmSessionSize == PTE_BASE);
1930
1931 //
1932 // Session space ends wherever image session space ends
1933 //
1934 MiSessionSpaceEnd = MiSessionImageEnd;
1935
1936 //
1937 // System view space ends at session space, so now that we know where
1938 // this is, we can compute the base address of system view space itself.
1939 //
1940 MiSystemViewStart = (PVOID)((ULONG_PTR)MmSessionBase -
1941 MmSystemViewSize);
1942
1943 /* Compute the PTE addresses for all the addresses we carved out */
1944 MiSessionImagePteStart = MiAddressToPte(MiSessionImageStart);
1945 MiSessionImagePteEnd = MiAddressToPte(MiSessionImageEnd);
1946 MiSessionBasePte = MiAddressToPte(MmSessionBase);
1947 MiSessionLastPte = MiAddressToPte(MiSessionSpaceEnd);
1948
1949 /* Initialize the user mode image list */
1950 InitializeListHead(&MmLoadedUserImageList);
1951
1952 /* Initialize the paged pool mutex */
1953 KeInitializeGuardedMutex(&MmPagedPoolMutex);
1954
1955 /* Initialize the Loader Lock */
1956 KeInitializeMutant(&MmSystemLoadLock, FALSE);
1957
        /* Set the zero page event; the zeroing thread is created later and
           starts out inactive */
        KeInitializeEvent(&MmZeroingPageEvent, SynchronizationEvent, FALSE);
        MmZeroingPageThreadActive = FALSE;

        //
        // Count physical pages on the system
        //
        PageCount = MiPagesInLoaderBlock(LoaderBlock, IncludeType);

        //
        // Check if this is a machine with less than 19MB of RAM
        //
        if (PageCount < MI_MIN_PAGES_FOR_SYSPTE_TUNING)
        {
            //
            // Use the very minimum of system PTEs
            //
            MmNumberOfSystemPtes = 7000;
        }
        else
        {
            //
            // Use the default, but check if we have more than 32MB of RAM
            //
            MmNumberOfSystemPtes = 11000;
            if (PageCount > MI_MIN_PAGES_FOR_SYSPTE_BOOST)
            {
                //
                // Double the amount of system PTEs
                //
                MmNumberOfSystemPtes <<= 1;
            }
        }

        DPRINT("System PTE count has been tuned to %d (%d bytes)\n",
               MmNumberOfSystemPtes, MmNumberOfSystemPtes * PAGE_SIZE);

        /* Initialize the system cache working set push lock */
        ExInitializePushLock((PULONG_PTR)&MmSystemCacheWs.WorkingSetMutex);

        /* Set an initial commit limit; it is recomputed further below once
           MmAvailablePages is known */
        MmTotalCommitLimit = 2 * _1GB;
        MmTotalCommitLimitMaximum = MmTotalCommitLimit;

        /* Has the allocation fragment been setup (via the registry)? */
        if (!MmAllocationFragment)
        {
            /* Use the default value */
            MmAllocationFragment = MI_ALLOCATION_FRAGMENT;
            if (PageCount < ((256 * _1MB) / PAGE_SIZE))
            {
                /* On memory systems with less than 256MB, divide by 4 */
                MmAllocationFragment = MI_ALLOCATION_FRAGMENT / 4;
            }
            else if (PageCount < (_1GB / PAGE_SIZE))
            {
                /* On systems with less than 1GB, divide by 2 */
                MmAllocationFragment = MI_ALLOCATION_FRAGMENT / 2;
            }
        }
        else
        {
            /* The registry value is expressed in KB units; convert it to
               bytes and round up to a whole number of pages */
            MmAllocationFragment *= _1KB;
            MmAllocationFragment = ROUND_TO_PAGES(MmAllocationFragment);

            /* Don't let it past the maximum */
            MmAllocationFragment = min(MmAllocationFragment,
                                       MI_MAX_ALLOCATION_FRAGMENT);

            /* Don't let it get too small either */
            MmAllocationFragment = max(MmAllocationFragment,
                                       MI_MIN_ALLOCATION_FRAGMENT);
        }

        /* Initialize the platform-specific parts */
        MiInitMachineDependent(LoaderBlock);

        //
        // Build the physical memory block from the loader's descriptors
        //
        MmPhysicalMemoryBlock = MmInitializeMemoryLimits(LoaderBlock,
                                                         IncludeType);

        //
        // Allocate enough buffer for the PFN bitmap: one bit per PFN up to
        // and including the highest physical page, rounded up to a 32-bit
        // boundary
        //
        Bitmap = ExAllocatePoolWithTag(NonPagedPool,
                                       (((MmHighestPhysicalPage + 1) + 31) / 32) * 4,
                                       ' mM');
        if (!Bitmap)
        {
            //
            // This is critical -- without the bitmap the memory manager
            // cannot continue, so bugcheck instead of limping along
            //
            KeBugCheckEx(INSTALL_MORE_MEMORY,
                         MmNumberOfPhysicalPages,
                         MmLowestPhysicalPage,
                         MmHighestPhysicalPage,
                         0x101);
        }

        //
        // Initialize it and clear all the bits to begin with
        //
        RtlInitializeBitMap(&MiPfnBitMap,
                            Bitmap,
                            MmHighestPhysicalPage + 1);
        RtlClearAllBits(&MiPfnBitMap);

        //
        // Loop physical memory runs
        //
        for (i = 0; i < MmPhysicalMemoryBlock->NumberOfRuns; i++)
        {
            //
            // Get the run
            //
            Run = &MmPhysicalMemoryBlock->Run[i];
            //
            // NOTE(review): BasePage << PAGE_SHIFT is computed in the width
            // of PFN_NUMBER and may truncate for pages above 4GB on 32-bit
            // builds; debug output only, but worth confirming
            //
            DPRINT("PHYSICAL RAM [0x%08p to 0x%08p]\n",
                   Run->BasePage << PAGE_SHIFT,
                   (Run->BasePage + Run->PageCount) << PAGE_SHIFT);

            //
            // Make sure it has pages inside it
            //
            if (Run->PageCount)
            {
                //
                // Mark every page of this run as present in the PFN bitmap
                //
                RtlSetBits(&MiPfnBitMap, Run->BasePage, Run->PageCount);
            }
        }

        /* Look for large page cache entries that need caching */
        MiSyncCachedRanges();

        /* Loop for HAL Heap I/O device mappings that need coherency tracking */
        MiAddHalIoMappings();

        /* Set the initial resident page count, keeping 32 pages in reserve */
        MmResidentAvailablePages = MmAvailablePages - 32;

        /* Initialize large page structures on PAE/x64, and MmProcessList on x86 */
        MiInitializeLargePageSupport();

        /* Check if the registry says any drivers should be loaded with large pages */
        MiInitializeDriverLargePageList();

        /* Relocate the boot drivers into system PTE space and fixup their PFNs */
        MiReloadBootLoadedDrivers(LoaderBlock);

        /* FIXME: Call out into Driver Verifier for initialization */

        /* Check how many pages the system has to classify the machine size */
        if (MmNumberOfPhysicalPages <= ((13 * _1MB) / PAGE_SIZE))
        {
            /* Set small system */
            MmSystemSize = MmSmallSystem;
        }
        else if (MmNumberOfPhysicalPages <= ((19 * _1MB) / PAGE_SIZE))
        {
            /* Set small system and add 100 pages for the cache */
            MmSystemSize = MmSmallSystem;
            MmSystemCacheWsMinimum += 100;
        }
        else
        {
            /* Set medium system and add 400 pages for the cache */
            MmSystemSize = MmMediumSystem;
            MmSystemCacheWsMinimum += 400;
        }

        /* Check for less than 24MB */
        if (MmNumberOfPhysicalPages < ((24 * _1MB) / PAGE_SIZE))
        {
            /* No more than 32 pages */
            MmSystemCacheWsMinimum = 32;
        }

        /* Check for more than 32MB */
        if (MmNumberOfPhysicalPages >= ((32 * _1MB) / PAGE_SIZE))
        {
            /* Check for product type being "Wi" for WinNT; the multichar
               constant matches the first two UTF-16 characters of the
               registry's ProductType value */
            if (MmProductType == '\0i\0W')
            {
                /* Then this is a large system */
                MmSystemSize = MmLargeSystem;
            }
            else
            {
                /* For servers, we need 64MB to consider this as being large */
                if (MmNumberOfPhysicalPages >= ((64 * _1MB) / PAGE_SIZE))
                {
                    /* Set it as large */
                    MmSystemSize = MmLargeSystem;
                }
            }
        }

        /* Check for more than 33 MB */
        if (MmNumberOfPhysicalPages > ((33 * _1MB) / PAGE_SIZE))
        {
            /* Add another 500 pages to the cache */
            MmSystemCacheWsMinimum += 500;
        }

        /* Now setup the shared user data fields */
        ASSERT(SharedUserData->NumberOfPhysicalPages == 0);
        SharedUserData->NumberOfPhysicalPages = MmNumberOfPhysicalPages;
        SharedUserData->LargePageMinimum = 0;

        /* Check for workstation (Wi for WinNT) */
        if (MmProductType == '\0i\0W')
        {
            /* Set Windows NT Workstation product type */
            SharedUserData->NtProductType = NtProductWinNt;
            MmProductType = 0;
        }
        else
        {
            /* Check for LanMan server (La for LanmanNT) */
            if (MmProductType == '\0a\0L')
            {
                /* This is a domain controller */
                SharedUserData->NtProductType = NtProductLanManNt;
            }
            else
            {
                /* Otherwise it must be a normal server */
                SharedUserData->NtProductType = NtProductServer;
            }

            /* Set the product type, and make the system more aggressive with low memory */
            MmProductType = 1;
            MmMinimumFreePages = 81;
        }

        /* Update working set tuning parameters (TRUE for workstations) */
        MiAdjustWorkingSetManagerParameters(!MmProductType);

        /* Finetune the page count by removing working set and NP expansion */
        MmResidentAvailablePages -= MiExpansionPoolPagesInitialCharge;
        MmResidentAvailablePages -= MmSystemCacheWsMinimum;
        MmResidentAvailableAtInit = MmResidentAvailablePages;
        /* NOTE(review): if MmResidentAvailablePages is an unsigned type,
           the subtractions above wrap rather than going negative and this
           check can never fire -- confirm against its declaration */
        if (MmResidentAvailablePages <= 0)
        {
            /* This should not happen */
            DPRINT1("System cache working set too big\n");
            return FALSE;
        }

        /* Initialize the system cache */
        //MiInitializeSystemCache(MmSystemCacheWsMinimum, MmAvailablePages);

        /* Update the commit limit, keeping 1024 pages in reserve.
           NOTE(review): the initial value above (2 * _1GB) looks
           byte-denominated while this one is page-denominated -- confirm
           the intended unit */
        MmTotalCommitLimit = MmAvailablePages;
        if (MmTotalCommitLimit > 1024) MmTotalCommitLimit -= 1024;
        MmTotalCommitLimitMaximum = MmTotalCommitLimit;

        /* Size up paged pool and build the shadow system page directory */
        MiBuildPagedPool();

        /* Debugger physical memory support is now ready to be used */
        MmDebugPte = MiAddressToPte(MiDebugMapping);

        /* Initialize the loaded module list */
        MiInitializeLoadedModuleList(LoaderBlock);
    }

    //
    // Always return success for now
    //
    return TRUE;
}
2235
2236 /* EOF */