[NTOS]: Add page-tracking macro calls wherever needed. MI_TRACE_PFNS is still off...
[reactos.git] / reactos / ntoskrnl / mm / ARM3 / mminit.c
1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: BSD - See COPYING.ARM in the top level directory
4 * FILE: ntoskrnl/mm/ARM3/mminit.c
5 * PURPOSE: ARM Memory Manager Initialization
6 * PROGRAMMERS: ReactOS Portable Systems Group
7 */
8
9 /* INCLUDES *******************************************************************/
10
11 #include <ntoskrnl.h>
12 #define NDEBUG
13 #include <debug.h>
14
15 #line 15 "ARM³::INIT"
16 #define MODULE_INVOLVED_IN_ARM3
17 #include "miarm.h"
18
/* GLOBALS ********************************************************************/

//
// These are all registry-configurable, but by default, the memory manager will
// figure out the most appropriate values.
//
ULONG MmMaximumNonPagedPoolPercent;
SIZE_T MmSizeOfNonPagedPoolInBytes;
SIZE_T MmMaximumNonPagedPoolInBytes;

/* Some of the same values, in pages */
PFN_NUMBER MmMaximumNonPagedPoolInPages;

//
// These numbers describe the discrete equation components of the nonpaged
// pool sizing algorithm.
//
// They are described on http://support.microsoft.com/default.aspx/kb/126402/ja
// along with the algorithm that uses them, which is implemented later below.
//
SIZE_T MmMinimumNonPagedPoolSize = 256 * 1024;
ULONG MmMinAdditionNonPagedPoolPerMb = 32 * 1024;
SIZE_T MmDefaultMaximumNonPagedPool = 1024 * 1024;
ULONG MmMaxAdditionNonPagedPoolPerMb = 400 * 1024;

//
// The memory layout (and especially variable names) of the NT kernel mode
// components can be a bit hard to twig, especially when it comes to the non
// paged area.
//
// There are really two components to the non-paged pool:
//
// - The initial nonpaged pool, sized dynamically up to a maximum.
// - The expansion nonpaged pool, sized dynamically up to a maximum.
//
// The initial nonpaged pool is physically continuous for performance, and
// immediately follows the PFN database, typically sharing the same PDE. It is
// a very small resource (32MB on a 1GB system), and capped at 128MB.
//
// Right now we call this the "ARM³ Nonpaged Pool" and it begins somewhere after
// the PFN database (which starts at 0xB0000000).
//
// The expansion nonpaged pool, on the other hand, can grow much bigger (400MB
// for a 1GB system). On ARM³ however, it is currently capped at 128MB.
//
// The address where the initial nonpaged pool starts is aptly named
// MmNonPagedPoolStart, and it describes a range of MmSizeOfNonPagedPoolInBytes
// bytes.
//
// Expansion nonpaged pool starts at an address described by the variable called
// MmNonPagedPoolExpansionStart, and it goes on for MmMaximumNonPagedPoolInBytes
// minus MmSizeOfNonPagedPoolInBytes bytes, always reaching MmNonPagedPoolEnd
// (because of the way it's calculated) at 0xFFBE0000.
//
// Initial nonpaged pool is allocated and mapped early-on during boot, but what
// about the expansion nonpaged pool? It is instead composed of special pages
// which belong to what are called System PTEs. These PTEs are the matter of a
// later discussion, but they are also considered part of the "nonpaged" OS, due
// to the fact that they are never paged out -- once an address is described by
// a System PTE, it is always valid, until the System PTE is torn down.
//
// System PTEs are actually composed of two "spaces", the system space proper,
// and the nonpaged pool expansion space. The latter, as we've already seen,
// begins at MmNonPagedPoolExpansionStart. Based on the number of System PTEs
// that the system will support, the remaining address space below this address
// is used to hold the system space PTEs. This address, in turn, is held in the
// variable named MmNonPagedSystemStart, which itself is never allowed to go
// below 0xEB000000 (thus creating an upper bound on the number of System PTEs).
//
// This means that 330MB are reserved for total nonpaged system VA, on top of
// whatever the initial nonpaged pool allocation is.
//
// The following URLs, valid as of April 23rd, 2008, support this evidence:
//
// http://www.cs.miami.edu/~burt/journal/NT/memory.html
// http://www.ditii.com/2007/09/28/windows-memory-management-x86-virtual-address-space/
//
PVOID MmNonPagedSystemStart;
PVOID MmNonPagedPoolStart;
PVOID MmNonPagedPoolExpansionStart;
PVOID MmNonPagedPoolEnd = MI_NONPAGED_POOL_END;

//
// This is where paged pool starts by default
//
PVOID MmPagedPoolStart = MI_PAGED_POOL_START;
PVOID MmPagedPoolEnd;

//
// And this is its default size
//
SIZE_T MmSizeOfPagedPoolInBytes = MI_MIN_INIT_PAGED_POOLSIZE;
PFN_NUMBER MmSizeOfPagedPoolInPages = MI_MIN_INIT_PAGED_POOLSIZE / PAGE_SIZE;

//
// Session space starts at 0xBFFFFFFF and grows downwards
// By default, it includes an 8MB image area where we map win32k and video card
// drivers, followed by a 4MB area containing the session's working set. This is
// then followed by a 20MB mapped view area and finally by the session's paged
// pool, by default 16MB.
//
// On a normal system, this results in session space occupying the region from
// 0xBD000000 to 0xC0000000
//
// See miarm.h for the defines that determine the sizing of this region. On an
// NT system, some of these can be configured through the registry, but we don't
// support that yet.
//
PVOID MiSessionSpaceEnd;    // 0xC0000000
PVOID MiSessionImageEnd;    // 0xC0000000
PVOID MiSessionImageStart;  // 0xBF800000
PVOID MiSessionViewStart;   // 0xBE000000
PVOID MiSessionPoolEnd;     // 0xBE000000
PVOID MiSessionPoolStart;   // 0xBD000000
PVOID MmSessionBase;        // 0xBD000000
SIZE_T MmSessionSize;
SIZE_T MmSessionViewSize;
SIZE_T MmSessionPoolSize;
SIZE_T MmSessionImageSize;

/*
 * These are the PTE addresses of the boundaries carved out above
 */
PMMPTE MiSessionImagePteStart;
PMMPTE MiSessionImagePteEnd;
PMMPTE MiSessionBasePte;
PMMPTE MiSessionLastPte;

//
// The system view space, on the other hand, is where sections that are memory
// mapped into "system space" end up.
//
// By default, it is a 16MB region.
//
PVOID MiSystemViewStart;
SIZE_T MmSystemViewSize;

#if (_MI_PAGING_LEVELS == 2)
//
// A copy of the system page directory (the page directory associated with the
// System process) is kept (double-mapped) by the manager in order to lazily
// map paged pool PDEs into external processes when they fault on a paged pool
// address.
//
PFN_NUMBER MmSystemPageDirectory[PD_COUNT];
PMMPTE MmSystemPagePtes;
#endif

//
// The system cache starts right after hyperspace. The first few pages are for
// keeping track of the system working set list.
//
// This should be 0xC0C00000 -- the cache itself starts at 0xC1000000
//
PMMWSL MmSystemCacheWorkingSetList = MI_SYSTEM_CACHE_WS_START;

//
// Windows NT seems to choose between 7000, 11000 and 50000
// On systems with more than 32MB, this number is then doubled, and further
// aligned up to a PDE boundary (4MB).
//
ULONG_PTR MmNumberOfSystemPtes;

//
// This is how many pages the PFN database will take up
// In Windows, this includes the Quark Color Table, but not in ARM³
//
PFN_NUMBER MxPfnAllocation;

//
// Unlike the old ReactOS Memory Manager, ARM³ (and Windows) does not keep track
// of pages that are not actually valid physical memory, such as ACPI reserved
// regions, BIOS address ranges, or holes in physical memory address space which
// could indicate device-mapped I/O memory.
//
// In fact, the lack of a PFN entry for a page usually indicates that this is
// I/O space instead.
//
// A bitmap, called the PFN bitmap, keeps track of all page frames by assigning
// a bit to each. If the bit is set, then the page is valid physical RAM.
//
RTL_BITMAP MiPfnBitMap;

//
// This structure describes the different pieces of RAM-backed address space
//
PPHYSICAL_MEMORY_DESCRIPTOR MmPhysicalMemoryBlock;

//
// This is where we keep track of the most basic physical layout markers
//
// Note: MmLowestPhysicalPage starts out as -1 (the maximum PFN_NUMBER value)
// so that the first real page discovered during init can only lower it.
//
PFN_NUMBER MmNumberOfPhysicalPages, MmHighestPhysicalPage, MmLowestPhysicalPage = -1;

//
// The total number of pages mapped by the boot loader, which include the kernel
// HAL, boot drivers, registry, NLS files and other loader data structures is
// kept track of here. This depends on "LoaderPagesSpanned" being correct when
// coming from the loader.
//
// This number is later aligned up to a PDE boundary.
//
SIZE_T MmBootImageSize;

//
// These three variables keep track of the core separation of address space that
// exists between kernel mode and user mode.
//
ULONG_PTR MmUserProbeAddress;
PVOID MmHighestUserAddress;
PVOID MmSystemRangeStart;

/* And these store the respective highest PTE/PDE address */
PMMPTE MiHighestUserPte;
PMMPDE MiHighestUserPde;
#if (_MI_PAGING_LEVELS >= 3)
/* We need the highest PPE and PXE addresses */
#endif

/* These variables define the system cache address space */
PVOID MmSystemCacheStart;
PVOID MmSystemCacheEnd;
MMSUPPORT MmSystemCacheWs;

//
// This is where hyperspace ends (followed by the system cache working set)
//
PVOID MmHyperSpaceEnd;

//
// Page coloring algorithm data
//
ULONG MmSecondaryColors;
ULONG MmSecondaryColorMask;

//
// Actual (registry-configurable) size of a GUI thread's stack
//
ULONG MmLargeStackSize = KERNEL_LARGE_STACK_SIZE;

//
// Before we have a PFN database, memory comes straight from our physical memory
// blocks, which is nice because it's guaranteed contiguous and also because once
// we take a page from here, the system doesn't see it anymore.
// However, once the fun is over, those pages must be re-integrated back into
// PFN society life, and that requires us keeping a copy of the original layout
// so that we can parse it later.
//
PMEMORY_ALLOCATION_DESCRIPTOR MxFreeDescriptor;
MEMORY_ALLOCATION_DESCRIPTOR MxOldFreeDescriptor;

/*
 * For each page's worth bytes of L2 cache in a given set/way line, the zero and
 * free lists are organized in what is called a "color".
 *
 * This array points to the two lists, so it can be thought of as a multi-dimensional
 * array of MmFreePagesByColor[2][MmSecondaryColors]. Since the number is dynamic,
 * we describe the array in pointer form instead.
 *
 * On a final note, the color tables themselves are right after the PFN database.
 */
C_ASSERT(FreePageList == 1);
PMMCOLOR_TABLES MmFreePagesByColor[FreePageList + 1];

/* An event used in Phase 0 before the rest of the system is ready to go */
KEVENT MiTempEvent;

/* All the events used for memory threshold notifications */
PKEVENT MiLowMemoryEvent;
PKEVENT MiHighMemoryEvent;
PKEVENT MiLowPagedPoolEvent;
PKEVENT MiHighPagedPoolEvent;
PKEVENT MiLowNonPagedPoolEvent;
PKEVENT MiHighNonPagedPoolEvent;

/* The actual thresholds themselves, in page numbers */
PFN_NUMBER MmLowMemoryThreshold;
PFN_NUMBER MmHighMemoryThreshold;
PFN_NUMBER MiLowPagedPoolThreshold;
PFN_NUMBER MiHighPagedPoolThreshold;
PFN_NUMBER MiLowNonPagedPoolThreshold;
PFN_NUMBER MiHighNonPagedPoolThreshold;

/*
 * This number determines how many free pages must exist, at minimum, until we
 * start trimming working sets and flushing modified pages to obtain more free
 * pages.
 *
 * This number changes if the system detects that this is a server product
 */
PFN_NUMBER MmMinimumFreePages = 26;

/*
 * This number indicates how many pages we consider to be a low limit of having
 * "plenty" of free memory.
 *
 * It is doubled on systems that have more than 63MB of memory
 */
PFN_NUMBER MmPlentyFreePages = 400;

/* These values store the type of system this is (small, med, large) and if server */
ULONG MmProductType;
MM_SYSTEMSIZE MmSystemSize;

/*
 * These values store the cache working set minimums and maximums, in pages
 *
 * The minimum value is boosted on systems with more than 24MB of RAM, and cut
 * down to only 32 pages on embedded (<24MB RAM) systems.
 *
 * An extra boost of 2MB is given on systems with more than 33MB of RAM.
 */
PFN_NUMBER MmSystemCacheWsMinimum = 288;
PFN_NUMBER MmSystemCacheWsMaximum = 350;

/* FIXME: Move to cache/working set code later */
BOOLEAN MmLargeSystemCache;

/*
 * This value determines in how many fragments/chunks the subsection prototype
 * PTEs should be allocated when mapping a section object. It is configurable in
 * the registry through the MapAllocationFragment parameter.
 *
 * The default is 64KB on systems with more than 1GB of RAM, 32KB on systems with
 * more than 256MB of RAM, and 16KB on systems with less than 256MB of RAM.
 *
 * The maximum it can be set to is 2MB, and the minimum is 4KB.
 */
SIZE_T MmAllocationFragment;

/*
 * These two values track how much virtual memory can be committed, and when
 * expansion should happen.
 */
// FIXME: They should be moved elsewhere since it's not an "init" setting?
SIZE_T MmTotalCommitLimit;
SIZE_T MmTotalCommitLimitMaximum;

/* Internal setting used for debugging memory descriptors */
BOOLEAN MiDbgEnableMdDump =
#ifdef _ARM_
TRUE;
#else
FALSE;
#endif
363
364 /* PRIVATE FUNCTIONS **********************************************************/
365
366 PFN_NUMBER
367 NTAPI
368 MxGetNextPage(IN PFN_NUMBER PageCount)
369 {
370 PFN_NUMBER Pfn;
371
372 /* Make sure we have enough pages */
373 if (PageCount > MxFreeDescriptor->PageCount)
374 {
375 /* Crash the system */
376 KeBugCheckEx(INSTALL_MORE_MEMORY,
377 MmNumberOfPhysicalPages,
378 MxFreeDescriptor->PageCount,
379 MxOldFreeDescriptor.PageCount,
380 PageCount);
381 }
382
383 /* Use our lowest usable free pages */
384 Pfn = MxFreeDescriptor->BasePage;
385 MxFreeDescriptor->BasePage += PageCount;
386 MxFreeDescriptor->PageCount -= PageCount;
387 return Pfn;
388 }
389
390 VOID
391 NTAPI
392 MiComputeColorInformation(VOID)
393 {
394 ULONG L2Associativity;
395
396 /* Check if no setting was provided already */
397 if (!MmSecondaryColors)
398 {
399 /* Get L2 cache information */
400 L2Associativity = KeGetPcr()->SecondLevelCacheAssociativity;
401
402 /* The number of colors is the number of cache bytes by set/way */
403 MmSecondaryColors = KeGetPcr()->SecondLevelCacheSize;
404 if (L2Associativity) MmSecondaryColors /= L2Associativity;
405 }
406
407 /* Now convert cache bytes into pages */
408 MmSecondaryColors >>= PAGE_SHIFT;
409 if (!MmSecondaryColors)
410 {
411 /* If there was no cache data from the KPCR, use the default colors */
412 MmSecondaryColors = MI_SECONDARY_COLORS;
413 }
414 else
415 {
416 /* Otherwise, make sure there aren't too many colors */
417 if (MmSecondaryColors > MI_MAX_SECONDARY_COLORS)
418 {
419 /* Set the maximum */
420 MmSecondaryColors = MI_MAX_SECONDARY_COLORS;
421 }
422
423 /* Make sure there aren't too little colors */
424 if (MmSecondaryColors < MI_MIN_SECONDARY_COLORS)
425 {
426 /* Set the default */
427 MmSecondaryColors = MI_SECONDARY_COLORS;
428 }
429
430 /* Finally make sure the colors are a power of two */
431 if (MmSecondaryColors & (MmSecondaryColors - 1))
432 {
433 /* Set the default */
434 MmSecondaryColors = MI_SECONDARY_COLORS;
435 }
436 }
437
438 /* Compute the mask and store it */
439 MmSecondaryColorMask = MmSecondaryColors - 1;
440 KeGetCurrentPrcb()->SecondaryColorMask = MmSecondaryColorMask;
441 }
442
/**
 * Places the page color tables immediately after the PFN database, maps
 * physical pages for them (via MxGetNextPage) where PTEs are not yet valid,
 * and initializes every per-color zeroed/free list to empty.
 */
VOID
NTAPI
MiInitializeColorTables(VOID)
{
    ULONG i;
    PMMPTE PointerPte, LastPte;
    MMPTE TempPte = ValidKernelPte;

    /* The color table starts after the ARM3 PFN database */
    MmFreePagesByColor[0] = (PMMCOLOR_TABLES)&MmPfnDatabase[MmHighestPhysicalPage + 1];

    /* Loop the PTEs. We have two color tables for each secondary color
       (one for the zeroed list, one for the free list) */
    PointerPte = MiAddressToPte(&MmFreePagesByColor[0][0]);
    LastPte = MiAddressToPte((ULONG_PTR)MmFreePagesByColor[0] +
                             (2 * MmSecondaryColors * sizeof(MMCOLOR_TABLES))
                             - 1);
    while (PointerPte <= LastPte)
    {
        /* Check for valid PTE -- the tail of the PFN database mapping may
           already cover the start of this range */
        if (PointerPte->u.Hard.Valid == 0)
        {
            /* Get a page from the boot free descriptor and map it */
            TempPte.u.Hard.PageFrameNumber = MxGetNextPage(1);
            MI_WRITE_VALID_PTE(PointerPte, TempPte);

            /* Zero out the page */
            RtlZeroMemory(MiPteToAddress(PointerPte), PAGE_SIZE);
        }

        /* Next */
        PointerPte++;
    }

    /* Now set the address of the next list, right after this one */
    MmFreePagesByColor[1] = &MmFreePagesByColor[0][MmSecondaryColors];

    /* Now loop the lists to set them up */
    for (i = 0; i < MmSecondaryColors; i++)
    {
        /* Set both free and zero lists for each color; 0xFFFFFFFF appears
           to act as the "no page" sentinel marking an empty list here */
        MmFreePagesByColor[ZeroedPageList][i].Flink = 0xFFFFFFFF;
        MmFreePagesByColor[ZeroedPageList][i].Blink = (PVOID)0xFFFFFFFF;
        MmFreePagesByColor[ZeroedPageList][i].Count = 0;
        MmFreePagesByColor[FreePageList][i].Flink = 0xFFFFFFFF;
        MmFreePagesByColor[FreePageList][i].Blink = (PVOID)0xFFFFFFFF;
        MmFreePagesByColor[FreePageList][i].Count = 0;
    }
}
491
492 BOOLEAN
493 NTAPI
494 MiIsRegularMemory(IN PLOADER_PARAMETER_BLOCK LoaderBlock,
495 IN PFN_NUMBER Pfn)
496 {
497 PLIST_ENTRY NextEntry;
498 PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
499
500 /* Loop the memory descriptors */
501 NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
502 while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
503 {
504 /* Get the memory descriptor */
505 MdBlock = CONTAINING_RECORD(NextEntry,
506 MEMORY_ALLOCATION_DESCRIPTOR,
507 ListEntry);
508
509 /* Check if this PFN could be part of the block */
510 if (Pfn >= (MdBlock->BasePage))
511 {
512 /* Check if it really is part of the block */
513 if (Pfn < (MdBlock->BasePage + MdBlock->PageCount))
514 {
515 /* Check if the block is actually memory we don't map */
516 if ((MdBlock->MemoryType == LoaderFirmwarePermanent) ||
517 (MdBlock->MemoryType == LoaderBBTMemory) ||
518 (MdBlock->MemoryType == LoaderSpecialMemory))
519 {
520 /* We don't need PFN database entries for this memory */
521 break;
522 }
523
524 /* This is memory we want to map */
525 return TRUE;
526 }
527 }
528 else
529 {
530 /* Blocks are ordered, so if it's not here, it doesn't exist */
531 break;
532 }
533
534 /* Get to the next descriptor */
535 NextEntry = MdBlock->ListEntry.Flink;
536 }
537
538 /* Check if this PFN is actually from our free memory descriptor */
539 if ((Pfn >= MxOldFreeDescriptor.BasePage) &&
540 (Pfn < MxOldFreeDescriptor.BasePage + MxOldFreeDescriptor.PageCount))
541 {
542 /* We use these pages for initial mappings, so we do want to count them */
543 return TRUE;
544 }
545
546 /* Otherwise this isn't memory that we describe or care about */
547 return FALSE;
548 }
549
/**
 * Maps the portions of the PFN database that describe actual memory.
 * For every loader memory descriptor that represents mappable RAM, the PTEs
 * covering the corresponding PFN database slice are made valid, backed by
 * pages taken directly from the boot free descriptor. The free descriptor
 * is updated at the end to account for the pages consumed.
 */
VOID
NTAPI
MiMapPfnDatabase(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
    ULONG FreePage, FreePageCount, PagesLeft, BasePage, PageCount;
    PLIST_ENTRY NextEntry;
    PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
    PMMPTE PointerPte, LastPte;
    MMPTE TempPte = ValidKernelPte;

    /* Get current page data, since we won't be using MxGetNextPage as it would corrupt our state */
    FreePage = MxFreeDescriptor->BasePage;
    FreePageCount = MxFreeDescriptor->PageCount;
    /* NOTE(review): PagesLeft is only incremented below and never read --
       it looks vestigial */
    PagesLeft = 0;

    /* Loop the memory descriptors */
    NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
    while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
    {
        /* Get the descriptor */
        MdBlock = CONTAINING_RECORD(NextEntry,
                                    MEMORY_ALLOCATION_DESCRIPTOR,
                                    ListEntry);
        if ((MdBlock->MemoryType == LoaderFirmwarePermanent) ||
            (MdBlock->MemoryType == LoaderBBTMemory) ||
            (MdBlock->MemoryType == LoaderSpecialMemory))
        {
            /* These pages are not part of the PFN database */
            NextEntry = MdBlock->ListEntry.Flink;
            continue;
        }

        /* Next, check if this is our special free descriptor we've found */
        if (MdBlock == MxFreeDescriptor)
        {
            /* Use the real numbers instead -- its Base/Count have already
               been advanced by earlier boot allocations */
            BasePage = MxOldFreeDescriptor.BasePage;
            PageCount = MxOldFreeDescriptor.PageCount;
        }
        else
        {
            /* Use the descriptor's numbers */
            BasePage = MdBlock->BasePage;
            PageCount = MdBlock->PageCount;
        }

        /* Get the PTEs for this range of the PFN database */
        PointerPte = MiAddressToPte(&MmPfnDatabase[BasePage]);
        LastPte = MiAddressToPte(((ULONG_PTR)&MmPfnDatabase[BasePage + PageCount]) - 1);
        DPRINT("MD Type: %lx Base: %lx Count: %lx\n", MdBlock->MemoryType, BasePage, PageCount);

        /* Loop them */
        while (PointerPte <= LastPte)
        {
            /* We'll only touch PTEs that aren't already valid
               (adjacent descriptors may share a database page) */
            if (PointerPte->u.Hard.Valid == 0)
            {
                /* Use the next free page */
                TempPte.u.Hard.PageFrameNumber = FreePage;
                ASSERT(FreePageCount != 0);

                /* Consume free pages */
                FreePage++;
                FreePageCount--;
                if (!FreePageCount)
                {
                    /* Out of memory */
                    KeBugCheckEx(INSTALL_MORE_MEMORY,
                                 MmNumberOfPhysicalPages,
                                 FreePageCount,
                                 MxOldFreeDescriptor.PageCount,
                                 1);
                }

                /* Write out this PTE */
                PagesLeft++;
                MI_WRITE_VALID_PTE(PointerPte, TempPte);

                /* Zero this page */
                RtlZeroMemory(MiPteToAddress(PointerPte), PAGE_SIZE);
            }

            /* Next! */
            PointerPte++;
        }

        /* Do the next address range */
        NextEntry = MdBlock->ListEntry.Flink;
    }

    /* Now update the free descriptors to consume the pages we used up during the PFN allocation loop */
    MxFreeDescriptor->BasePage = FreePage;
    MxFreeDescriptor->PageCount = FreePageCount;
}
644
645 VOID
646 NTAPI
647 MiBuildPfnDatabaseFromPages(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
648 {
649 PMMPDE PointerPde;
650 PMMPTE PointerPte;
651 ULONG i, Count, j;
652 PFN_NUMBER PageFrameIndex, StartupPdIndex, PtePageIndex;
653 PMMPFN Pfn1, Pfn2;
654 ULONG_PTR BaseAddress = 0;
655
656 /* PFN of the startup page directory */
657 StartupPdIndex = PFN_FROM_PTE(MiAddressToPde(PDE_BASE));
658
659 /* Start with the first PDE and scan them all */
660 PointerPde = MiAddressToPde(NULL);
661 Count = PD_COUNT * PDE_COUNT;
662 for (i = 0; i < Count; i++)
663 {
664 /* Check for valid PDE */
665 if (PointerPde->u.Hard.Valid == 1)
666 {
667 /* Get the PFN from it */
668 PageFrameIndex = PFN_FROM_PTE(PointerPde);
669
670 /* Do we want a PFN entry for this page? */
671 if (MiIsRegularMemory(LoaderBlock, PageFrameIndex))
672 {
673 /* Yes we do, set it up */
674 Pfn1 = MiGetPfnEntry(PageFrameIndex);
675 Pfn1->u4.PteFrame = StartupPdIndex;
676 Pfn1->PteAddress = PointerPde;
677 Pfn1->u2.ShareCount++;
678 Pfn1->u3.e2.ReferenceCount = 1;
679 Pfn1->u3.e1.PageLocation = ActiveAndValid;
680 Pfn1->u3.e1.CacheAttribute = MiNonCached;
681 #if MI_TRACE_PFNS
682 Pfn1->PfnUsage = MI_USAGE_INIT_MEMORY;
683 memcpy(Pfn1->ProcessName, "Initial PDE", 16);
684 #endif
685 }
686 else
687 {
688 /* No PFN entry */
689 Pfn1 = NULL;
690 }
691
692 /* Now get the PTE and scan the pages */
693 PointerPte = MiAddressToPte(BaseAddress);
694 for (j = 0; j < PTE_COUNT; j++)
695 {
696 /* Check for a valid PTE */
697 if (PointerPte->u.Hard.Valid == 1)
698 {
699 /* Increase the shared count of the PFN entry for the PDE */
700 ASSERT(Pfn1 != NULL);
701 Pfn1->u2.ShareCount++;
702
703 /* Now check if the PTE is valid memory too */
704 PtePageIndex = PFN_FROM_PTE(PointerPte);
705 if (MiIsRegularMemory(LoaderBlock, PtePageIndex))
706 {
707 /*
708 * Only add pages above the end of system code or pages
709 * that are part of nonpaged pool
710 */
711 if ((BaseAddress >= 0xA0000000) ||
712 ((BaseAddress >= (ULONG_PTR)MmNonPagedPoolStart) &&
713 (BaseAddress < (ULONG_PTR)MmNonPagedPoolStart +
714 MmSizeOfNonPagedPoolInBytes)))
715 {
716 /* Get the PFN entry and make sure it too is valid */
717 Pfn2 = MiGetPfnEntry(PtePageIndex);
718 if ((MmIsAddressValid(Pfn2)) &&
719 (MmIsAddressValid(Pfn2 + 1)))
720 {
721 /* Setup the PFN entry */
722 Pfn2->u4.PteFrame = PageFrameIndex;
723 Pfn2->PteAddress = PointerPte;
724 Pfn2->u2.ShareCount++;
725 Pfn2->u3.e2.ReferenceCount = 1;
726 Pfn2->u3.e1.PageLocation = ActiveAndValid;
727 Pfn2->u3.e1.CacheAttribute = MiNonCached;
728 #if MI_TRACE_PFNS
729 Pfn2->PfnUsage = MI_USAGE_INIT_MEMORY;
730 memcpy(Pfn1->ProcessName, "Initial PTE", 16);
731 #endif
732 }
733 }
734 }
735 }
736
737 /* Next PTE */
738 PointerPte++;
739 BaseAddress += PAGE_SIZE;
740 }
741 }
742 else
743 {
744 /* Next PDE mapped address */
745 BaseAddress += PDE_MAPPED_VA;
746 }
747
748 /* Next PTE */
749 PointerPde++;
750 }
751 }
752
753 VOID
754 NTAPI
755 MiBuildPfnDatabaseZeroPage(VOID)
756 {
757 PMMPFN Pfn1;
758 PMMPDE PointerPde;
759
760 /* Grab the lowest page and check if it has no real references */
761 Pfn1 = MiGetPfnEntry(MmLowestPhysicalPage);
762 if (!(MmLowestPhysicalPage) && !(Pfn1->u3.e2.ReferenceCount))
763 {
764 /* Make it a bogus page to catch errors */
765 PointerPde = MiAddressToPde(0xFFFFFFFF);
766 Pfn1->u4.PteFrame = PFN_FROM_PTE(PointerPde);
767 Pfn1->PteAddress = PointerPde;
768 Pfn1->u2.ShareCount++;
769 Pfn1->u3.e2.ReferenceCount = 0xFFF0;
770 Pfn1->u3.e1.PageLocation = ActiveAndValid;
771 Pfn1->u3.e1.CacheAttribute = MiNonCached;
772 }
773 }
774
/**
 * Third pass of PFN database construction: walks the loader's memory
 * descriptor list and initializes PFN entries per memory type -- free
 * ranges are put on the free list, firmware-invisible ranges are skipped,
 * and everything else is marked in-use through its KSEG0 mapping.
 * Descriptors reaching past the highest mapped page are trimmed.
 */
VOID
NTAPI
MiBuildPfnDatabaseFromLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
    PLIST_ENTRY NextEntry;
    PFN_NUMBER PageCount = 0;
    PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
    PFN_NUMBER PageFrameIndex;
    PMMPFN Pfn1;
    PMMPTE PointerPte;
    PMMPDE PointerPde;
    KIRQL OldIrql;

    /* Now loop through the descriptors */
    NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
    while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
    {
        /* Get the current descriptor */
        MdBlock = CONTAINING_RECORD(NextEntry,
                                    MEMORY_ALLOCATION_DESCRIPTOR,
                                    ListEntry);

        /* Read its data */
        PageCount = MdBlock->PageCount;
        PageFrameIndex = MdBlock->BasePage;

        /* Don't allow memory above what the PFN database is mapping */
        if (PageFrameIndex > MmHighestPhysicalPage)
        {
            /* Since they are ordered, everything past here will be larger */
            break;
        }

        /* On the other hand, the end page might be higher up... */
        if ((PageFrameIndex + PageCount) > (MmHighestPhysicalPage + 1))
        {
            /* In which case we'll trim the descriptor to go as high as we can */
            PageCount = MmHighestPhysicalPage + 1 - PageFrameIndex;
            MdBlock->PageCount = PageCount;

            /* But if there's nothing left to trim, we got too high, so quit */
            if (!PageCount) break;
        }

        /* Now check the descriptor type */
        switch (MdBlock->MemoryType)
        {
            /* Check for bad RAM */
            case LoaderBad:

                DPRINT1("You either have specified /BURNMEMORY or damaged RAM modules.\n");
                break;

            /* Check for free RAM */
            case LoaderFree:
            case LoaderLoadedProgram:
            case LoaderFirmwareTemporary:
            case LoaderOsloaderStack:

                /* Get the last page of this descriptor. Note we loop backwards */
                PageFrameIndex += PageCount - 1;
                Pfn1 = MiGetPfnEntry(PageFrameIndex);

                /* Lock the PFN Database */
                OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
                while (PageCount--)
                {
                    /* If the page really has no references, mark it as free.
                       Pages already referenced by the page-table scan stay
                       in-use. */
                    if (!Pfn1->u3.e2.ReferenceCount)
                    {
                        /* Add it to the free list */
                        Pfn1->u3.e1.CacheAttribute = MiNonCached;
                        MiInsertPageInFreeList(PageFrameIndex);
                    }

                    /* Go to the next page */
                    Pfn1--;
                    PageFrameIndex--;
                }

                /* Release PFN database */
                KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

                /* Done with this block */
                break;

            /* Check for pages that are invisible to us */
            case LoaderFirmwarePermanent:
            case LoaderSpecialMemory:
            case LoaderBBTMemory:

                /* And skip them */
                break;

            default:

                /* Map these pages with the KSEG0 mapping that adds 0x80000000 */
                PointerPte = MiAddressToPte(KSEG0_BASE + (PageFrameIndex << PAGE_SHIFT));
                Pfn1 = MiGetPfnEntry(PageFrameIndex);
                while (PageCount--)
                {
                    /* Check if the page is really unused */
                    PointerPde = MiAddressToPde(KSEG0_BASE + (PageFrameIndex << PAGE_SHIFT));
                    if (!Pfn1->u3.e2.ReferenceCount)
                    {
                        /* Mark it as being in-use */
                        Pfn1->u4.PteFrame = PFN_FROM_PTE(PointerPde);
                        Pfn1->PteAddress = PointerPte;
                        Pfn1->u2.ShareCount++;
                        Pfn1->u3.e2.ReferenceCount = 1;
                        Pfn1->u3.e1.PageLocation = ActiveAndValid;
                        Pfn1->u3.e1.CacheAttribute = MiNonCached;
#if MI_TRACE_PFNS
                        Pfn1->PfnUsage = MI_USAGE_BOOT_DRIVER;
#endif

                        /* Check for RAM disk page */
                        if (MdBlock->MemoryType == LoaderXIPRom)
                        {
                            /* Make it a pseudo-I/O ROM mapping */
                            Pfn1->u1.Flink = 0;
                            Pfn1->u2.ShareCount = 0;
                            Pfn1->u3.e2.ReferenceCount = 0;
                            Pfn1->u3.e1.PageLocation = 0;
                            Pfn1->u3.e1.Rom = 1;
                            Pfn1->u4.InPageError = 0;
                            Pfn1->u3.e1.PrototypePte = 1;
                        }
                    }

                    /* Advance page structures */
                    Pfn1++;
                    PageFrameIndex++;
                    PointerPte++;
                }
                break;
        }

        /* Next descriptor entry */
        NextEntry = MdBlock->ListEntry.Flink;
    }
}
917
918 VOID
919 NTAPI
920 MiBuildPfnDatabaseSelf(VOID)
921 {
922 PMMPTE PointerPte, LastPte;
923 PMMPFN Pfn1;
924
925 /* Loop the PFN database page */
926 PointerPte = MiAddressToPte(MiGetPfnEntry(MmLowestPhysicalPage));
927 LastPte = MiAddressToPte(MiGetPfnEntry(MmHighestPhysicalPage));
928 while (PointerPte <= LastPte)
929 {
930 /* Make sure the page is valid */
931 if (PointerPte->u.Hard.Valid == 1)
932 {
933 /* Get the PFN entry and just mark it referenced */
934 Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
935 Pfn1->u2.ShareCount = 1;
936 Pfn1->u3.e2.ReferenceCount = 1;
937 #if MI_TRACE_PFNS
938 Pfn1->PfnUsage = MI_USAGE_PFN_DATABASE;
939 #endif
940 }
941
942 /* Next */
943 PointerPte++;
944 }
945 }
946
947 VOID
948 NTAPI
949 MiInitializePfnDatabase(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
950 {
951 /* Scan memory and start setting up PFN entries */
952 MiBuildPfnDatabaseFromPages(LoaderBlock);
953
954 /* Add the zero page */
955 MiBuildPfnDatabaseZeroPage();
956
957 /* Scan the loader block and build the rest of the PFN database */
958 MiBuildPfnDatabaseFromLoaderBlock(LoaderBlock);
959
960 /* Finally add the pages for the PFN database itself */
961 MiBuildPfnDatabaseSelf();
962 }
963
964 VOID
965 NTAPI
966 MiAdjustWorkingSetManagerParameters(IN BOOLEAN Client)
967 {
968 /* This function needs to do more work, for now, we tune page minimums */
969
970 /* Check for a system with around 64MB RAM or more */
971 if (MmNumberOfPhysicalPages >= (63 * _1MB) / PAGE_SIZE)
972 {
973 /* Double the minimum amount of pages we consider for a "plenty free" scenario */
974 MmPlentyFreePages *= 2;
975 }
976 }
977
978 VOID
979 NTAPI
980 MiNotifyMemoryEvents(VOID)
981 {
982 /* Are we in a low-memory situation? */
983 if (MmAvailablePages < MmLowMemoryThreshold)
984 {
985 /* Clear high, set low */
986 if (KeReadStateEvent(MiHighMemoryEvent)) KeClearEvent(MiHighMemoryEvent);
987 if (!KeReadStateEvent(MiLowMemoryEvent)) KeSetEvent(MiLowMemoryEvent, 0, FALSE);
988 }
989 else if (MmAvailablePages < MmHighMemoryThreshold)
990 {
991 /* We are in between, clear both */
992 if (KeReadStateEvent(MiHighMemoryEvent)) KeClearEvent(MiHighMemoryEvent);
993 if (KeReadStateEvent(MiLowMemoryEvent)) KeClearEvent(MiLowMemoryEvent);
994 }
995 else
996 {
997 /* Clear low, set high */
998 if (KeReadStateEvent(MiLowMemoryEvent)) KeClearEvent(MiLowMemoryEvent);
999 if (!KeReadStateEvent(MiHighMemoryEvent)) KeSetEvent(MiHighMemoryEvent, 0, FALSE);
1000 }
1001 }
1002
1003 NTSTATUS
1004 NTAPI
1005 MiCreateMemoryEvent(IN PUNICODE_STRING Name,
1006 OUT PKEVENT *Event)
1007 {
1008 PACL Dacl;
1009 HANDLE EventHandle;
1010 ULONG DaclLength;
1011 NTSTATUS Status;
1012 OBJECT_ATTRIBUTES ObjectAttributes;
1013 SECURITY_DESCRIPTOR SecurityDescriptor;
1014
1015 /* Create the SD */
1016 Status = RtlCreateSecurityDescriptor(&SecurityDescriptor,
1017 SECURITY_DESCRIPTOR_REVISION);
1018 if (!NT_SUCCESS(Status)) return Status;
1019
1020 /* One ACL with 3 ACEs, containing each one SID */
1021 DaclLength = sizeof(ACL) +
1022 3 * sizeof(ACCESS_ALLOWED_ACE) +
1023 RtlLengthSid(SeLocalSystemSid) +
1024 RtlLengthSid(SeAliasAdminsSid) +
1025 RtlLengthSid(SeWorldSid);
1026
1027 /* Allocate space for the DACL */
1028 Dacl = ExAllocatePoolWithTag(PagedPool, DaclLength, 'lcaD');
1029 if (!Dacl) return STATUS_INSUFFICIENT_RESOURCES;
1030
1031 /* Setup the ACL inside it */
1032 Status = RtlCreateAcl(Dacl, DaclLength, ACL_REVISION);
1033 if (!NT_SUCCESS(Status)) goto CleanUp;
1034
1035 /* Add query rights for everyone */
1036 Status = RtlAddAccessAllowedAce(Dacl,
1037 ACL_REVISION,
1038 SYNCHRONIZE | EVENT_QUERY_STATE | READ_CONTROL,
1039 SeWorldSid);
1040 if (!NT_SUCCESS(Status)) goto CleanUp;
1041
1042 /* Full rights for the admin */
1043 Status = RtlAddAccessAllowedAce(Dacl,
1044 ACL_REVISION,
1045 EVENT_ALL_ACCESS,
1046 SeAliasAdminsSid);
1047 if (!NT_SUCCESS(Status)) goto CleanUp;
1048
1049 /* As well as full rights for the system */
1050 Status = RtlAddAccessAllowedAce(Dacl,
1051 ACL_REVISION,
1052 EVENT_ALL_ACCESS,
1053 SeLocalSystemSid);
1054 if (!NT_SUCCESS(Status)) goto CleanUp;
1055
1056 /* Set this DACL inside the SD */
1057 Status = RtlSetDaclSecurityDescriptor(&SecurityDescriptor,
1058 TRUE,
1059 Dacl,
1060 FALSE);
1061 if (!NT_SUCCESS(Status)) goto CleanUp;
1062
1063 /* Setup the event attributes, making sure it's a permanent one */
1064 InitializeObjectAttributes(&ObjectAttributes,
1065 Name,
1066 OBJ_KERNEL_HANDLE | OBJ_PERMANENT,
1067 NULL,
1068 &SecurityDescriptor);
1069
1070 /* Create the event */
1071 Status = ZwCreateEvent(&EventHandle,
1072 EVENT_ALL_ACCESS,
1073 &ObjectAttributes,
1074 NotificationEvent,
1075 FALSE);
1076 CleanUp:
1077 /* Free the DACL */
1078 ExFreePool(Dacl);
1079
1080 /* Check if this is the success path */
1081 if (NT_SUCCESS(Status))
1082 {
1083 /* Add a reference to the object, then close the handle we had */
1084 Status = ObReferenceObjectByHandle(EventHandle,
1085 EVENT_MODIFY_STATE,
1086 ExEventObjectType,
1087 KernelMode,
1088 (PVOID*)Event,
1089 NULL);
1090 ZwClose (EventHandle);
1091 }
1092
1093 /* Return status */
1094 return Status;
1095 }
1096
/* Computes the low/high available-memory thresholds -- from registry
   overrides (expressed in MB) when present, otherwise from heuristics
   based on the physical page count -- then creates the six permanent
   \KernelObjects memory/pool condition events, initializes the pool event
   thresholds, and publishes the initial event state.
   Returns FALSE if any of the events could not be created. */
BOOLEAN
NTAPI
MiInitializeMemoryEvents(VOID)
{
    UNICODE_STRING LowString = RTL_CONSTANT_STRING(L"\\KernelObjects\\LowMemoryCondition");
    UNICODE_STRING HighString = RTL_CONSTANT_STRING(L"\\KernelObjects\\HighMemoryCondition");
    UNICODE_STRING LowPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\LowPagedPoolCondition");
    UNICODE_STRING HighPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\HighPagedPoolCondition");
    UNICODE_STRING LowNonPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\LowNonPagedPoolCondition");
    UNICODE_STRING HighNonPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\HighNonPagedPoolCondition");
    NTSTATUS Status;

    /* Check if we have a registry setting (a nonzero value in MB) */
    if (MmLowMemoryThreshold)
    {
        /* Convert it to pages */
        MmLowMemoryThreshold *= (_1MB / PAGE_SIZE);
    }
    else
    {
        /* The low memory threshold is hit when we don't consider that we have "plenty" of free pages anymore */
        MmLowMemoryThreshold = MmPlentyFreePages;

        /* More than one GB of memory? (0x40000 pages == 1GB at 4KB pages) */
        if (MmNumberOfPhysicalPages > 0x40000)
        {
            /* Start at 32MB, and add another 16MB for each GB */
            MmLowMemoryThreshold = (32 * _1MB) / PAGE_SIZE;
            MmLowMemoryThreshold += ((MmNumberOfPhysicalPages - 0x40000) >> 7);
        }
        else if (MmNumberOfPhysicalPages > 0x8000)
        {
            /* For systems with > 128MB RAM, add another 4MB for each 128MB */
            MmLowMemoryThreshold += ((MmNumberOfPhysicalPages - 0x8000) >> 5);
        }

        /* Don't let the minimum threshold go past 64MB */
        MmLowMemoryThreshold = min(MmLowMemoryThreshold, (64 * _1MB) / PAGE_SIZE);
    }

    /* Check if we have a registry setting (a nonzero value in MB) */
    if (MmHighMemoryThreshold)
    {
        /* Convert it into pages */
        MmHighMemoryThreshold *= (_1MB / PAGE_SIZE);
    }
    else
    {
        /* Otherwise, the default is three times the low memory threshold */
        MmHighMemoryThreshold = 3 * MmLowMemoryThreshold;
        ASSERT(MmHighMemoryThreshold > MmLowMemoryThreshold);
    }

    /* Make sure high threshold is actually higher than the low */
    MmHighMemoryThreshold = max(MmHighMemoryThreshold, MmLowMemoryThreshold);

    /* Create the memory events for all the thresholds; bail on first failure */
    Status = MiCreateMemoryEvent(&LowString, &MiLowMemoryEvent);
    if (!NT_SUCCESS(Status)) return FALSE;
    Status = MiCreateMemoryEvent(&HighString, &MiHighMemoryEvent);
    if (!NT_SUCCESS(Status)) return FALSE;
    Status = MiCreateMemoryEvent(&LowPagedPoolString, &MiLowPagedPoolEvent);
    if (!NT_SUCCESS(Status)) return FALSE;
    Status = MiCreateMemoryEvent(&HighPagedPoolString, &MiHighPagedPoolEvent);
    if (!NT_SUCCESS(Status)) return FALSE;
    Status = MiCreateMemoryEvent(&LowNonPagedPoolString, &MiLowNonPagedPoolEvent);
    if (!NT_SUCCESS(Status)) return FALSE;
    Status = MiCreateMemoryEvent(&HighNonPagedPoolString, &MiHighNonPagedPoolEvent);
    if (!NT_SUCCESS(Status)) return FALSE;

    /* Now setup the pool events */
    MiInitializePoolEvents();

    /* Set the initial event state */
    MiNotifyMemoryEvents();
    return TRUE;
}
1174
/* Walks the HAL heap virtual range (hard-coded x86 base 0xFFC00000 up to
   the top of the PDE space) looking for valid small-page mappings whose
   physical page has no PFN database entry -- i.e. device/IO memory the HAL
   mapped itself. Such mappings would need cache-attribute tracking for PAT
   coherency; for now they are only reported via DPRINT1 (see FIXME). */
VOID
NTAPI
MiAddHalIoMappings(VOID)
{
    PVOID BaseAddress;
    PMMPTE PointerPde;
    PMMPTE PointerPte;
    ULONG i, j, PdeCount;
    PFN_NUMBER PageFrameIndex;

    /* HAL Heap address -- should be on a PDE boundary */
    BaseAddress = (PVOID)0xFFC00000;
    ASSERT(MiAddressToPteOffset(BaseAddress) == 0);

    /* Check how many PDEs the heap has */
    PointerPde = MiAddressToPde(BaseAddress);
    PdeCount = PDE_COUNT - ADDR_TO_PDE_OFFSET(BaseAddress);
    for (i = 0; i < PdeCount; i++)
    {
        /* Does the HAL own this mapping? (valid and not a large page) */
        if ((PointerPde->u.Hard.Valid == 1) &&
            (PointerPde->u.Hard.LargePage == 0))
        {
            /* Get the PTE for it and scan each page */
            PointerPte = MiAddressToPte(BaseAddress);
            for (j = 0 ; j < PTE_COUNT; j++)
            {
                /* Does the HAL own this page? */
                if (PointerPte->u.Hard.Valid == 1)
                {
                    /* Is the HAL using it for device or I/O mapped memory?
                       A missing PFN entry means the frame is outside RAM */
                    PageFrameIndex = PFN_FROM_PTE(PointerPte);
                    if (!MiGetPfnEntry(PageFrameIndex))
                    {
                        /* FIXME: For PAT, we need to track I/O cache attributes for coherency */
                        DPRINT1("HAL I/O Mapping at %p is unsafe\n", BaseAddress);
                    }
                }

                /* Move to the next page */
                BaseAddress = (PVOID)((ULONG_PTR)BaseAddress + PAGE_SIZE);
                PointerPte++;
            }
        }
        else
        {
            /* Move to the next address (skip the whole PDE's VA span) */
            BaseAddress = (PVOID)((ULONG_PTR)BaseAddress + PDE_MAPPED_VA);
        }

        /* Move to the next PDE */
        PointerPde++;
    }
}
1229
/* Debug dump of the ARM3 PFN database. Raises to HIGH_LEVEL to freeze the
   local processor while scanning every PFN entry. When StatusOnly is FALSE,
   each page is printed individually; in both modes, summary counters (and,
   when MI_TRACE_PFNS is enabled, per-usage bucket totals) are printed at
   the end. NOTE: the #if MI_TRACE_PFNS block inside the DbgPrint argument
   list below selects the final two arguments at compile time -- do not
   reformat that call. */
VOID
NTAPI
MmDumpArmPfnDatabase(IN BOOLEAN StatusOnly)
{
    ULONG i;
    PMMPFN Pfn1;
    PCHAR Consumer = "Unknown";
    KIRQL OldIrql;
    ULONG ActivePages = 0, FreePages = 0, OtherPages = 0;
#if MI_TRACE_PFNS
    /* One bucket per MI_USAGE_* value; text table must stay in enum order */
    ULONG UsageBucket[MI_USAGE_FREE_PAGE + 1] = {0};
    PCHAR MI_USAGE_TEXT[MI_USAGE_FREE_PAGE + 1] =
    {
        "Not set",
        "Paged Pool",
        "Nonpaged Pool",
        "Nonpaged Pool Ex",
        "Kernel Stack",
        "Kernel Stack Ex",
        "System PTE",
        "VAD",
        "PEB/TEB",
        "Section",
        "Page Table",
        "Page Directory",
        "Old Page Table",
        "Driver Page",
        "Contiguous Alloc",
        "MDL",
        "Demand Zero",
        "Zero Loop",
        "Cache",
        "PFN Database",
        "Boot Driver",
        "Initial Memory",
        "Free Page"
    };
#endif
    //
    // Loop the PFN database
    //
    KeRaiseIrql(HIGH_LEVEL, &OldIrql);
    for (i = 0; i <= MmHighestPhysicalPage; i++)
    {
        Pfn1 = MiGetPfnEntry(i);
        if (!Pfn1) continue;
#if MI_TRACE_PFNS
        ASSERT(Pfn1->PfnUsage <= MI_USAGE_FREE_PAGE);
#endif
        //
        // Get the page location
        //
        switch (Pfn1->u3.e1.PageLocation)
        {
            case ActiveAndValid:

                Consumer = "Active and Valid";
                ActivePages++;
                break;

            case ZeroedPageList:

                Consumer = "Zero Page List";
                FreePages++;
                break;//continue;

            case FreePageList:

                Consumer = "Free Page List";
                FreePages++;
                break;//continue;

            default:

                Consumer = "Other (ASSERT!)";
                OtherPages++;
                break;
        }

#if MI_TRACE_PFNS
        /* Add into bucket */
        UsageBucket[Pfn1->PfnUsage]++;
#endif

        //
        // Pretty-print the page
        //
        if (!StatusOnly)
        DbgPrint("0x%08p:\t%20s\t(%04d.%04d)\t[%16s - %16s])\n",
                 i << PAGE_SHIFT,
                 Consumer,
                 Pfn1->u3.e2.ReferenceCount,
                 Pfn1->u2.ShareCount == LIST_HEAD ? 0xFFFF : Pfn1->u2.ShareCount,
#if MI_TRACE_PFNS
                 MI_USAGE_TEXT[Pfn1->PfnUsage],
                 Pfn1->ProcessName);
#else
                 "Page tracking",
                 "is disabled");
#endif
    }

    DbgPrint("Active:               %5d pages\t[%6d KB]\n", ActivePages,  (ActivePages    << PAGE_SHIFT) / 1024);
    DbgPrint("Free:                 %5d pages\t[%6d KB]\n", FreePages,    (FreePages      << PAGE_SHIFT) / 1024);
    DbgPrint("-----------------------------------------\n");
#if MI_TRACE_PFNS
    /* Per-consumer breakdown, derived from the usage buckets above */
    OtherPages = UsageBucket[MI_USAGE_BOOT_DRIVER];
    DbgPrint("Boot Images:          %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
    OtherPages = UsageBucket[MI_USAGE_DRIVER_PAGE];
    DbgPrint("System Drivers:       %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
    OtherPages = UsageBucket[MI_USAGE_PFN_DATABASE];
    DbgPrint("PFN Database:         %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
    OtherPages = UsageBucket[MI_USAGE_PAGE_TABLE] + UsageBucket[MI_USAGE_LEGACY_PAGE_DIRECTORY];
    DbgPrint("Page Tables:          %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
    OtherPages = UsageBucket[MI_USAGE_NONPAGED_POOL] + UsageBucket[MI_USAGE_NONPAGED_POOL_EXPANSION];
    DbgPrint("NonPaged Pool:        %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
    OtherPages = UsageBucket[MI_USAGE_PAGED_POOL];
    DbgPrint("Paged Pool:           %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
    OtherPages = UsageBucket[MI_USAGE_KERNEL_STACK] + UsageBucket[MI_USAGE_KERNEL_STACK_EXPANSION];
    DbgPrint("Kernel Stack:         %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
    OtherPages = UsageBucket[MI_USAGE_INIT_MEMORY];
    DbgPrint("Init Memory:          %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
    OtherPages = UsageBucket[MI_USAGE_SECTION];
    DbgPrint("Sections:             %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
    OtherPages = UsageBucket[MI_USAGE_CACHE];
    DbgPrint("Cache:                %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
#endif
    KeLowerIrql(OldIrql);
}
1359
1360 PFN_NUMBER
1361 NTAPI
1362 MiPagesInLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock,
1363 IN PBOOLEAN IncludeType)
1364 {
1365 PLIST_ENTRY NextEntry;
1366 PFN_NUMBER PageCount = 0;
1367 PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
1368
1369 //
1370 // Now loop through the descriptors
1371 //
1372 NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
1373 while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
1374 {
1375 //
1376 // Grab each one, and check if it's one we should include
1377 //
1378 MdBlock = CONTAINING_RECORD(NextEntry,
1379 MEMORY_ALLOCATION_DESCRIPTOR,
1380 ListEntry);
1381 if ((MdBlock->MemoryType < LoaderMaximum) &&
1382 (IncludeType[MdBlock->MemoryType]))
1383 {
1384 //
1385 // Add this to our running total
1386 //
1387 PageCount += MdBlock->PageCount;
1388 }
1389
1390 //
1391 // Try the next descriptor
1392 //
1393 NextEntry = MdBlock->ListEntry.Flink;
1394 }
1395
1396 //
1397 // Return the total
1398 //
1399 return PageCount;
1400 }
1401
1402 PPHYSICAL_MEMORY_DESCRIPTOR
1403 NTAPI
1404 MmInitializeMemoryLimits(IN PLOADER_PARAMETER_BLOCK LoaderBlock,
1405 IN PBOOLEAN IncludeType)
1406 {
1407 PLIST_ENTRY NextEntry;
1408 ULONG Run = 0, InitialRuns = 0;
1409 PFN_NUMBER NextPage = -1, PageCount = 0;
1410 PPHYSICAL_MEMORY_DESCRIPTOR Buffer, NewBuffer;
1411 PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
1412
1413 //
1414 // Scan the memory descriptors
1415 //
1416 NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
1417 while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
1418 {
1419 //
1420 // For each one, increase the memory allocation estimate
1421 //
1422 InitialRuns++;
1423 NextEntry = NextEntry->Flink;
1424 }
1425
1426 //
1427 // Allocate the maximum we'll ever need
1428 //
1429 Buffer = ExAllocatePoolWithTag(NonPagedPool,
1430 sizeof(PHYSICAL_MEMORY_DESCRIPTOR) +
1431 sizeof(PHYSICAL_MEMORY_RUN) *
1432 (InitialRuns - 1),
1433 'lMmM');
1434 if (!Buffer) return NULL;
1435
1436 //
1437 // For now that's how many runs we have
1438 //
1439 Buffer->NumberOfRuns = InitialRuns;
1440
1441 //
1442 // Now loop through the descriptors again
1443 //
1444 NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
1445 while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
1446 {
1447 //
1448 // Grab each one, and check if it's one we should include
1449 //
1450 MdBlock = CONTAINING_RECORD(NextEntry,
1451 MEMORY_ALLOCATION_DESCRIPTOR,
1452 ListEntry);
1453 if ((MdBlock->MemoryType < LoaderMaximum) &&
1454 (IncludeType[MdBlock->MemoryType]))
1455 {
1456 //
1457 // Add this to our running total
1458 //
1459 PageCount += MdBlock->PageCount;
1460
1461 //
1462 // Check if the next page is described by the next descriptor
1463 //
1464 if (MdBlock->BasePage == NextPage)
1465 {
1466 //
1467 // Combine it into the same physical run
1468 //
1469 ASSERT(MdBlock->PageCount != 0);
1470 Buffer->Run[Run - 1].PageCount += MdBlock->PageCount;
1471 NextPage += MdBlock->PageCount;
1472 }
1473 else
1474 {
1475 //
1476 // Otherwise just duplicate the descriptor's contents
1477 //
1478 Buffer->Run[Run].BasePage = MdBlock->BasePage;
1479 Buffer->Run[Run].PageCount = MdBlock->PageCount;
1480 NextPage = Buffer->Run[Run].BasePage + Buffer->Run[Run].PageCount;
1481
1482 //
1483 // And in this case, increase the number of runs
1484 //
1485 Run++;
1486 }
1487 }
1488
1489 //
1490 // Try the next descriptor
1491 //
1492 NextEntry = MdBlock->ListEntry.Flink;
1493 }
1494
1495 //
1496 // We should not have been able to go past our initial estimate
1497 //
1498 ASSERT(Run <= Buffer->NumberOfRuns);
1499
1500 //
1501 // Our guess was probably exaggerated...
1502 //
1503 if (InitialRuns > Run)
1504 {
1505 //
1506 // Allocate a more accurately sized buffer
1507 //
1508 NewBuffer = ExAllocatePoolWithTag(NonPagedPool,
1509 sizeof(PHYSICAL_MEMORY_DESCRIPTOR) +
1510 sizeof(PHYSICAL_MEMORY_RUN) *
1511 (Run - 1),
1512 'lMmM');
1513 if (NewBuffer)
1514 {
1515 //
1516 // Copy the old buffer into the new, then free it
1517 //
1518 RtlCopyMemory(NewBuffer->Run,
1519 Buffer->Run,
1520 sizeof(PHYSICAL_MEMORY_RUN) * Run);
1521 ExFreePool(Buffer);
1522
1523 //
1524 // Now use the new buffer
1525 //
1526 Buffer = NewBuffer;
1527 }
1528 }
1529
1530 //
1531 // Write the final numbers, and return it
1532 //
1533 Buffer->NumberOfRuns = Run;
1534 Buffer->NumberOfPages = PageCount;
1535 return Buffer;
1536 }
1537
/* Sizes and creates the initial paged pool. On 2-level (x86 non-PAE)
   systems this also double-maps the system page directory through a system
   PTE for later lazy PDE evaluation. Paged pool defaults to twice the
   maximum nonpaged pool size, clamped so it cannot overflow into nonpaged
   system VA, then rounded so a whole number of PDEs covers it. Only the
   first paged pool PDE is mapped here; further PDEs are materialized at
   fault time. Finally the allocation/end bitmaps are created, the pool
   allocator is initialized, and default paged pool thresholds are set. */
VOID
NTAPI
MiBuildPagedPool(VOID)
{
    PMMPTE PointerPte, PointerPde;
    MMPTE TempPte = ValidKernelPte;
    PFN_NUMBER PageFrameIndex;
    KIRQL OldIrql;
    ULONG Size, BitMapSize;
#if (_MI_PAGING_LEVELS == 2)
    //
    // Get the page frame number for the system page directory
    //
    PointerPte = MiAddressToPte(PDE_BASE);
    ASSERT(PD_COUNT == 1);
    MmSystemPageDirectory[0] = PFN_FROM_PTE(PointerPte);

    //
    // Allocate a system PTE which will hold a copy of the page directory
    //
    PointerPte = MiReserveSystemPtes(1, SystemPteSpace);
    ASSERT(PointerPte);
    MmSystemPagePtes = MiPteToAddress(PointerPte);

    //
    // Make this system PTE point to the system page directory.
    // It is now essentially double-mapped. This will be used later for lazy
    // evaluation of PDEs accross process switches, similarly to how the Global
    // page directory array in the old ReactOS Mm is used (but in a less hacky
    // way).
    //
    TempPte = ValidKernelPte;
    ASSERT(PD_COUNT == 1);
    TempPte.u.Hard.PageFrameNumber = MmSystemPageDirectory[0];
    MI_WRITE_VALID_PTE(PointerPte, TempPte);
#endif
    //
    // Let's get back to paged pool work: size it up.
    // By default, it should be twice as big as nonpaged pool.
    //
    MmSizeOfPagedPoolInBytes = 2 * MmMaximumNonPagedPoolInBytes;
    if (MmSizeOfPagedPoolInBytes > ((ULONG_PTR)MmNonPagedSystemStart -
                                    (ULONG_PTR)MmPagedPoolStart))
    {
        //
        // On the other hand, we have limited VA space, so make sure that the VA
        // for paged pool doesn't overflow into nonpaged pool VA. Otherwise, set
        // whatever maximum is possible.
        //
        MmSizeOfPagedPoolInBytes = (ULONG_PTR)MmNonPagedSystemStart -
                                   (ULONG_PTR)MmPagedPoolStart;
    }

    //
    // Get the size in pages and make sure paged pool is at least 32MB.
    //
    Size = MmSizeOfPagedPoolInBytes;
    if (Size < MI_MIN_INIT_PAGED_POOLSIZE) Size = MI_MIN_INIT_PAGED_POOLSIZE;
    Size = BYTES_TO_PAGES(Size);

    //
    // Now check how many PTEs will be required for these many pages.
    // (1024 PTEs fit in one page table page, so this is also the PDE count.)
    //
    Size = (Size + (1024 - 1)) / 1024;

    //
    // Recompute the page-aligned size of the paged pool, in bytes and pages.
    //
    MmSizeOfPagedPoolInBytes = Size * PAGE_SIZE * 1024;
    MmSizeOfPagedPoolInPages = MmSizeOfPagedPoolInBytes >> PAGE_SHIFT;

    //
    // Let's be really sure this doesn't overflow into nonpaged system VA
    //
    ASSERT((MmSizeOfPagedPoolInBytes + (ULONG_PTR)MmPagedPoolStart) <=
           (ULONG_PTR)MmNonPagedSystemStart);

    //
    // This is where paged pool ends
    //
    MmPagedPoolEnd = (PVOID)(((ULONG_PTR)MmPagedPoolStart +
                              MmSizeOfPagedPoolInBytes) - 1);

    //
    // So now get the PDE for paged pool and zero it out
    //
    PointerPde = MiAddressToPde(MmPagedPoolStart);

#if (_MI_PAGING_LEVELS >= 3)
    /* On these systems, there's no double-mapping, so instead, the PPE and PXEs
     * are setup to span the entire paged pool area, so there's no need for the
     * system PD */
    ASSERT(FALSE);
#endif

    RtlZeroMemory(PointerPde,
                  (1 + MiAddressToPde(MmPagedPoolEnd) - PointerPde) * sizeof(MMPTE));

    //
    // Next, get the first and last PTE
    //
    PointerPte = MiAddressToPte(MmPagedPoolStart);
    MmPagedPoolInfo.FirstPteForPagedPool = PointerPte;
    MmPagedPoolInfo.LastPteForPagedPool = MiAddressToPte(MmPagedPoolEnd);

    //
    // Lock the PFN database
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    /* Allocate a page and map the first paged pool PDE; tag the page for
       the PFN tracer before taking it off the zero list */
    MI_SET_USAGE(MI_USAGE_PAGED_POOL);
    MI_SET_PROCESS2("Kernel");
    PageFrameIndex = MiRemoveZeroPage(0);
    TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
    MI_WRITE_VALID_PTE(PointerPde, TempPte);
#if (_MI_PAGING_LEVELS >= 3)
    /* Use the PPE of MmPagedPoolStart that was setup above */
    // Bla = PFN_FROM_PTE(PpeAddress(MmPagedPool...));
    ASSERT(FALSE);
#else
    /* Do it this way */
    // Bla = MmSystemPageDirectory[(PointerPde - (PMMPTE)PDE_BASE) / PDE_COUNT]

    /* Initialize the PFN entry for it */
    MiInitializePfnForOtherProcess(PageFrameIndex,
                                   PointerPde,
                                   MmSystemPageDirectory[(PointerPde - (PMMPTE)PDE_BASE) / PDE_COUNT]);
#endif

    //
    // Release the PFN database lock
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    //
    // We only have one PDE mapped for now... at fault time, additional PDEs
    // will be allocated to handle paged pool growth. This is where they'll have
    // to start.
    //
    MmPagedPoolInfo.NextPdeForPagedPoolExpansion = PointerPde + 1;

    //
    // We keep track of each page via a bit, so check how big the bitmap will
    // have to be (make sure to align our page count such that it fits nicely
    // into a 4-byte aligned bitmap.
    //
    // We'll also allocate the bitmap header itself part of the same buffer.
    //
    Size = Size * 1024;
    ASSERT(Size == MmSizeOfPagedPoolInPages);
    BitMapSize = Size;
    Size = sizeof(RTL_BITMAP) + (((Size + 31) / 32) * sizeof(ULONG));

    //
    // Allocate the allocation bitmap, which tells us which regions have not yet
    // been mapped into memory
    //
    MmPagedPoolInfo.PagedPoolAllocationMap = ExAllocatePoolWithTag(NonPagedPool,
                                                                   Size,
                                                                   '  mM');
    ASSERT(MmPagedPoolInfo.PagedPoolAllocationMap);

    //
    // Initialize it such that at first, only the first page's worth of PTEs is
    // marked as allocated (incidentially, the first PDE we allocated earlier).
    //
    RtlInitializeBitMap(MmPagedPoolInfo.PagedPoolAllocationMap,
                        (PULONG)(MmPagedPoolInfo.PagedPoolAllocationMap + 1),
                        BitMapSize);
    RtlSetAllBits(MmPagedPoolInfo.PagedPoolAllocationMap);
    RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap, 0, 1024);

    //
    // We have a second bitmap, which keeps track of where allocations end.
    // Given the allocation bitmap and a base address, we can therefore figure
    // out which page is the last page of that allocation, and thus how big the
    // entire allocation is.
    //
    MmPagedPoolInfo.EndOfPagedPoolBitmap = ExAllocatePoolWithTag(NonPagedPool,
                                                                 Size,
                                                                 '  mM');
    ASSERT(MmPagedPoolInfo.EndOfPagedPoolBitmap);
    RtlInitializeBitMap(MmPagedPoolInfo.EndOfPagedPoolBitmap,
                        (PULONG)(MmPagedPoolInfo.EndOfPagedPoolBitmap + 1),
                        BitMapSize);

    //
    // Since no allocations have been made yet, there are no bits set as the end
    //
    RtlClearAllBits(MmPagedPoolInfo.EndOfPagedPoolBitmap);

    //
    // Initialize paged pool.
    //
    InitializePool(PagedPool, 0);

    /* Default low threshold of 30MB or one fifth of paged pool */
    MiLowPagedPoolThreshold = (30 * _1MB) >> PAGE_SHIFT;
    MiLowPagedPoolThreshold = min(MiLowPagedPoolThreshold, Size / 5);

    /* Default high threshold of 60MB or 25% */
    MiHighPagedPoolThreshold = (60 * _1MB) >> PAGE_SHIFT;
    MiHighPagedPoolThreshold = min(MiHighPagedPoolThreshold, (Size * 2) / 5);
    ASSERT(MiLowPagedPoolThreshold < MiHighPagedPoolThreshold);

    /* Setup the global session space */
    MiInitializeSystemSpaceMap(NULL);
}
1747
1748 VOID
1749 NTAPI
1750 MiDbgDumpMemoryDescriptors(VOID)
1751 {
1752 PLIST_ENTRY NextEntry;
1753 PMEMORY_ALLOCATION_DESCRIPTOR Md;
1754 ULONG TotalPages = 0;
1755 PCHAR
1756 MemType[] =
1757 {
1758 "ExceptionBlock ",
1759 "SystemBlock ",
1760 "Free ",
1761 "Bad ",
1762 "LoadedProgram ",
1763 "FirmwareTemporary ",
1764 "FirmwarePermanent ",
1765 "OsloaderHeap ",
1766 "OsloaderStack ",
1767 "SystemCode ",
1768 "HalCode ",
1769 "BootDriver ",
1770 "ConsoleInDriver ",
1771 "ConsoleOutDriver ",
1772 "StartupDpcStack ",
1773 "StartupKernelStack",
1774 "StartupPanicStack ",
1775 "StartupPcrPage ",
1776 "StartupPdrPage ",
1777 "RegistryData ",
1778 "MemoryData ",
1779 "NlsData ",
1780 "SpecialMemory ",
1781 "BBTMemory ",
1782 "LoaderReserve ",
1783 "LoaderXIPRom "
1784 };
1785
1786 DPRINT1("Base\t\tLength\t\tType\n");
1787 for (NextEntry = KeLoaderBlock->MemoryDescriptorListHead.Flink;
1788 NextEntry != &KeLoaderBlock->MemoryDescriptorListHead;
1789 NextEntry = NextEntry->Flink)
1790 {
1791 Md = CONTAINING_RECORD(NextEntry, MEMORY_ALLOCATION_DESCRIPTOR, ListEntry);
1792 DPRINT1("%08lX\t%08lX\t%s\n", Md->BasePage, Md->PageCount, MemType[Md->MemoryType]);
1793 TotalPages += Md->PageCount;
1794 }
1795
1796 DPRINT1("Total: %08lX (%d MB)\n", TotalPages, (TotalPages * PAGE_SIZE) / 1024 / 1024);
1797 }
1798
1799 BOOLEAN
1800 NTAPI
1801 MmArmInitSystem(IN ULONG Phase,
1802 IN PLOADER_PARAMETER_BLOCK LoaderBlock)
1803 {
1804 ULONG i;
1805 BOOLEAN IncludeType[LoaderMaximum];
1806 PVOID Bitmap;
1807 PPHYSICAL_MEMORY_RUN Run;
1808 PFN_NUMBER PageCount;
1809
1810 /* Dump memory descriptors */
1811 if (MiDbgEnableMdDump) MiDbgDumpMemoryDescriptors();
1812
1813 //
1814 // Instantiate memory that we don't consider RAM/usable
1815 // We use the same exclusions that Windows does, in order to try to be
1816 // compatible with WinLDR-style booting
1817 //
1818 for (i = 0; i < LoaderMaximum; i++) IncludeType[i] = TRUE;
1819 IncludeType[LoaderBad] = FALSE;
1820 IncludeType[LoaderFirmwarePermanent] = FALSE;
1821 IncludeType[LoaderSpecialMemory] = FALSE;
1822 IncludeType[LoaderBBTMemory] = FALSE;
1823 if (Phase == 0)
1824 {
1825 /* Initialize the phase 0 temporary event */
1826 KeInitializeEvent(&MiTempEvent, NotificationEvent, FALSE);
1827
1828 /* Set all the events to use the temporary event for now */
1829 MiLowMemoryEvent = &MiTempEvent;
1830 MiHighMemoryEvent = &MiTempEvent;
1831 MiLowPagedPoolEvent = &MiTempEvent;
1832 MiHighPagedPoolEvent = &MiTempEvent;
1833 MiLowNonPagedPoolEvent = &MiTempEvent;
1834 MiHighNonPagedPoolEvent = &MiTempEvent;
1835
1836 //
1837 // Define the basic user vs. kernel address space separation
1838 //
1839 MmSystemRangeStart = (PVOID)KSEG0_BASE;
1840 MmUserProbeAddress = (ULONG_PTR)MmSystemRangeStart - 0x10000;
1841 MmHighestUserAddress = (PVOID)(MmUserProbeAddress - 1);
1842
1843 /* Highest PTE and PDE based on the addresses above */
1844 MiHighestUserPte = MiAddressToPte(MmHighestUserAddress);
1845 MiHighestUserPde = MiAddressToPde(MmHighestUserAddress);
1846 #if (_MI_PAGING_LEVELS >= 3)
1847 /* We need the highest PPE and PXE addresses */
1848 ASSERT(FALSE);
1849 #endif
1850 //
1851 // Get the size of the boot loader's image allocations and then round
1852 // that region up to a PDE size, so that any PDEs we might create for
1853 // whatever follows are separate from the PDEs that boot loader might've
1854 // already created (and later, we can blow all that away if we want to).
1855 //
1856 MmBootImageSize = KeLoaderBlock->Extension->LoaderPagesSpanned;
1857 MmBootImageSize *= PAGE_SIZE;
1858 MmBootImageSize = (MmBootImageSize + PDE_MAPPED_VA - 1) & ~(PDE_MAPPED_VA - 1);
1859 ASSERT((MmBootImageSize % PDE_MAPPED_VA) == 0);
1860
1861 //
1862 // Set the size of session view, pool, and image
1863 //
1864 MmSessionSize = MI_SESSION_SIZE;
1865 MmSessionViewSize = MI_SESSION_VIEW_SIZE;
1866 MmSessionPoolSize = MI_SESSION_POOL_SIZE;
1867 MmSessionImageSize = MI_SESSION_IMAGE_SIZE;
1868
1869 //
1870 // Set the size of system view
1871 //
1872 MmSystemViewSize = MI_SYSTEM_VIEW_SIZE;
1873
1874 //
1875 // This is where it all ends
1876 //
1877 MiSessionImageEnd = (PVOID)PTE_BASE;
1878
1879 //
1880 // This is where we will load Win32k.sys and the video driver
1881 //
1882 MiSessionImageStart = (PVOID)((ULONG_PTR)MiSessionImageEnd -
1883 MmSessionImageSize);
1884
1885 //
1886 // So the view starts right below the session working set (itself below
1887 // the image area)
1888 //
1889 MiSessionViewStart = (PVOID)((ULONG_PTR)MiSessionImageEnd -
1890 MmSessionImageSize -
1891 MI_SESSION_WORKING_SET_SIZE -
1892 MmSessionViewSize);
1893
1894 //
1895 // Session pool follows
1896 //
1897 MiSessionPoolEnd = MiSessionViewStart;
1898 MiSessionPoolStart = (PVOID)((ULONG_PTR)MiSessionPoolEnd -
1899 MmSessionPoolSize);
1900
1901 //
1902 // And it all begins here
1903 //
1904 MmSessionBase = MiSessionPoolStart;
1905
1906 //
1907 // Sanity check that our math is correct
1908 //
1909 ASSERT((ULONG_PTR)MmSessionBase + MmSessionSize == PTE_BASE);
1910
1911 //
1912 // Session space ends wherever image session space ends
1913 //
1914 MiSessionSpaceEnd = MiSessionImageEnd;
1915
1916 //
1917 // System view space ends at session space, so now that we know where
1918 // this is, we can compute the base address of system view space itself.
1919 //
1920 MiSystemViewStart = (PVOID)((ULONG_PTR)MmSessionBase -
1921 MmSystemViewSize);
1922
1923 /* Compute the PTE addresses for all the addresses we carved out */
1924 MiSessionImagePteStart = MiAddressToPte(MiSessionImageStart);
1925 MiSessionImagePteEnd = MiAddressToPte(MiSessionImageEnd);
1926 MiSessionBasePte = MiAddressToPte(MmSessionBase);
1927 MiSessionLastPte = MiAddressToPte(MiSessionSpaceEnd);
1928
1929 /* Initialize the user mode image list */
1930 InitializeListHead(&MmLoadedUserImageList);
1931
1932 /* Initialize the paged pool mutex */
1933 KeInitializeGuardedMutex(&MmPagedPoolMutex);
1934
1935 /* Initialize the Loader Lock */
1936 KeInitializeMutant(&MmSystemLoadLock, FALSE);
1937
1938 /* Set the zero page event */
1939 KeInitializeEvent(&MmZeroingPageEvent, SynchronizationEvent, FALSE);
1940 MmZeroingPageThreadActive = FALSE;
1941
1942 //
1943 // Count physical pages on the system
1944 //
1945 PageCount = MiPagesInLoaderBlock(LoaderBlock, IncludeType);
1946
1947 //
1948 // Check if this is a machine with less than 19MB of RAM
1949 //
1950 if (PageCount < MI_MIN_PAGES_FOR_SYSPTE_TUNING)
1951 {
1952 //
1953 // Use the very minimum of system PTEs
1954 //
1955 MmNumberOfSystemPtes = 7000;
1956 }
1957 else
1958 {
1959 //
1960 // Use the default, but check if we have more than 32MB of RAM
1961 //
1962 MmNumberOfSystemPtes = 11000;
1963 if (PageCount > MI_MIN_PAGES_FOR_SYSPTE_BOOST)
1964 {
1965 //
1966 // Double the amount of system PTEs
1967 //
1968 MmNumberOfSystemPtes <<= 1;
1969 }
1970 }
1971
1972 DPRINT("System PTE count has been tuned to %d (%d bytes)\n",
1973 MmNumberOfSystemPtes, MmNumberOfSystemPtes * PAGE_SIZE);
1974
1975 /* Initialize the working set lock */
1976 ExInitializePushLock((PULONG_PTR)&MmSystemCacheWs.WorkingSetMutex);
1977
1978 /* Set commit limit */
1979 MmTotalCommitLimit = 2 * _1GB;
1980 MmTotalCommitLimitMaximum = MmTotalCommitLimit;
1981
1982 /* Has the allocation fragment been setup? */
1983 if (!MmAllocationFragment)
1984 {
1985 /* Use the default value */
1986 MmAllocationFragment = MI_ALLOCATION_FRAGMENT;
1987 if (PageCount < ((256 * _1MB) / PAGE_SIZE))
1988 {
1989 /* On memory systems with less than 256MB, divide by 4 */
1990 MmAllocationFragment = MI_ALLOCATION_FRAGMENT / 4;
1991 }
1992 else if (PageCount < (_1GB / PAGE_SIZE))
1993 {
1994 /* On systems with less than 1GB, divide by 2 */
1995 MmAllocationFragment = MI_ALLOCATION_FRAGMENT / 2;
1996 }
1997 }
1998 else
1999 {
2000 /* Convert from 1KB fragments to pages */
2001 MmAllocationFragment *= _1KB;
2002 MmAllocationFragment = ROUND_TO_PAGES(MmAllocationFragment);
2003
2004 /* Don't let it past the maximum */
2005 MmAllocationFragment = min(MmAllocationFragment,
2006 MI_MAX_ALLOCATION_FRAGMENT);
2007
2008 /* Don't let it too small either */
2009 MmAllocationFragment = max(MmAllocationFragment,
2010 MI_MIN_ALLOCATION_FRAGMENT);
2011 }
2012
2013 /* Initialize the platform-specific parts */
2014 MiInitMachineDependent(LoaderBlock);
2015
2016 //
2017 // Build the physical memory block
2018 //
2019 MmPhysicalMemoryBlock = MmInitializeMemoryLimits(LoaderBlock,
2020 IncludeType);
2021
2022 //
2023 // Allocate enough buffer for the PFN bitmap
2024 // Align it up to a 32-bit boundary
2025 //
2026 Bitmap = ExAllocatePoolWithTag(NonPagedPool,
2027 (((MmHighestPhysicalPage + 1) + 31) / 32) * 4,
2028 ' mM');
2029 if (!Bitmap)
2030 {
2031 //
2032 // This is critical
2033 //
2034 KeBugCheckEx(INSTALL_MORE_MEMORY,
2035 MmNumberOfPhysicalPages,
2036 MmLowestPhysicalPage,
2037 MmHighestPhysicalPage,
2038 0x101);
2039 }
2040
2041 //
2042 // Initialize it and clear all the bits to begin with
2043 //
2044 RtlInitializeBitMap(&MiPfnBitMap,
2045 Bitmap,
2046 MmHighestPhysicalPage + 1);
2047 RtlClearAllBits(&MiPfnBitMap);
2048
2049 //
2050 // Loop physical memory runs
2051 //
2052 for (i = 0; i < MmPhysicalMemoryBlock->NumberOfRuns; i++)
2053 {
2054 //
2055 // Get the run
2056 //
2057 Run = &MmPhysicalMemoryBlock->Run[i];
2058 DPRINT("PHYSICAL RAM [0x%08p to 0x%08p]\n",
2059 Run->BasePage << PAGE_SHIFT,
2060 (Run->BasePage + Run->PageCount) << PAGE_SHIFT);
2061
2062 //
2063 // Make sure it has pages inside it
2064 //
2065 if (Run->PageCount)
2066 {
2067 //
2068 // Set the bits in the PFN bitmap
2069 //
2070 RtlSetBits(&MiPfnBitMap, Run->BasePage, Run->PageCount);
2071 }
2072 }
2073
2074 /* Look for large page cache entries that need caching */
2075 MiSyncCachedRanges();
2076
2077 /* Loop for HAL Heap I/O device mappings that need coherency tracking */
2078 MiAddHalIoMappings();
2079
2080 /* Set the initial resident page count */
2081 MmResidentAvailablePages = MmAvailablePages - 32;
2082
2083 /* Initialize large page structures on PAE/x64, and MmProcessList on x86 */
2084 MiInitializeLargePageSupport();
2085
2086 /* Check if the registry says any drivers should be loaded with large pages */
2087 MiInitializeDriverLargePageList();
2088
2089 /* Relocate the boot drivers into system PTE space and fixup their PFNs */
2090 MiReloadBootLoadedDrivers(LoaderBlock);
2091
2092 /* FIXME: Call out into Driver Verifier for initialization */
2093
2094 /* Check how many pages the system has */
2095 if (MmNumberOfPhysicalPages <= ((13 * _1MB) / PAGE_SIZE))
2096 {
2097 /* Set small system */
2098 MmSystemSize = MmSmallSystem;
2099 }
2100 else if (MmNumberOfPhysicalPages <= ((19 * _1MB) / PAGE_SIZE))
2101 {
2102 /* Set small system and add 100 pages for the cache */
2103 MmSystemSize = MmSmallSystem;
2104 MmSystemCacheWsMinimum += 100;
2105 }
2106 else
2107 {
2108 /* Set medium system and add 400 pages for the cache */
2109 MmSystemSize = MmMediumSystem;
2110 MmSystemCacheWsMinimum += 400;
2111 }
2112
2113 /* Check for less than 24MB */
2114 if (MmNumberOfPhysicalPages < ((24 * _1MB) / PAGE_SIZE))
2115 {
2116 /* No more than 32 pages */
2117 MmSystemCacheWsMinimum = 32;
2118 }
2119
2120 /* Check for more than 32MB */
2121 if (MmNumberOfPhysicalPages >= ((32 * _1MB) / PAGE_SIZE))
2122 {
2123 /* Check for product type being "Wi" for WinNT */
2124 if (MmProductType == '\0i\0W')
2125 {
2126 /* Then this is a large system */
2127 MmSystemSize = MmLargeSystem;
2128 }
2129 else
2130 {
2131 /* For servers, we need 64MB to consider this as being large */
2132 if (MmNumberOfPhysicalPages >= ((64 * _1MB) / PAGE_SIZE))
2133 {
2134 /* Set it as large */
2135 MmSystemSize = MmLargeSystem;
2136 }
2137 }
2138 }
2139
2140 /* Check for more than 33 MB */
2141 if (MmNumberOfPhysicalPages > ((33 * _1MB) / PAGE_SIZE))
2142 {
2143 /* Add another 500 pages to the cache */
2144 MmSystemCacheWsMinimum += 500;
2145 }
2146
2147 /* Now setup the shared user data fields */
2148 ASSERT(SharedUserData->NumberOfPhysicalPages == 0);
2149 SharedUserData->NumberOfPhysicalPages = MmNumberOfPhysicalPages;
2150 SharedUserData->LargePageMinimum = 0;
2151
2152 /* Check for workstation (Wi for WinNT) */
2153 if (MmProductType == '\0i\0W')
2154 {
2155 /* Set Windows NT Workstation product type */
2156 SharedUserData->NtProductType = NtProductWinNt;
2157 MmProductType = 0;
2158 }
2159 else
2160 {
2161 /* Check for LanMan server */
2162 if (MmProductType == '\0a\0L')
2163 {
2164 /* This is a domain controller */
2165 SharedUserData->NtProductType = NtProductLanManNt;
2166 }
2167 else
2168 {
2169 /* Otherwise it must be a normal server */
2170 SharedUserData->NtProductType = NtProductServer;
2171 }
2172
2173 /* Set the product type, and make the system more aggressive with low memory */
2174 MmProductType = 1;
2175 MmMinimumFreePages = 81;
2176 }
2177
2178 /* Update working set tuning parameters */
2179 MiAdjustWorkingSetManagerParameters(!MmProductType);
2180
2181 /* Finetune the page count by removing working set and NP expansion */
2182 MmResidentAvailablePages -= MiExpansionPoolPagesInitialCharge;
2183 MmResidentAvailablePages -= MmSystemCacheWsMinimum;
2184 MmResidentAvailableAtInit = MmResidentAvailablePages;
2185 if (MmResidentAvailablePages <= 0)
2186 {
2187 /* This should not happen */
2188 DPRINT1("System cache working set too big\n");
2189 return FALSE;
2190 }
2191
2192 /* Initialize the system cache */
2193 //MiInitializeSystemCache(MmSystemCacheWsMinimum, MmAvailablePages);
2194
2195 /* Update the commit limit */
2196 MmTotalCommitLimit = MmAvailablePages;
2197 if (MmTotalCommitLimit > 1024) MmTotalCommitLimit -= 1024;
2198 MmTotalCommitLimitMaximum = MmTotalCommitLimit;
2199
2200 /* Size up paged pool and build the shadow system page directory */
2201 MiBuildPagedPool();
2202
2203 /* Debugger physical memory support is now ready to be used */
2204 MmDebugPte = MiAddressToPte(MiDebugMapping);
2205
2206 /* Initialize the loaded module list */
2207 MiInitializeLoadedModuleList(LoaderBlock);
2208 }
2209
2210 //
2211 // Always return success for now
2212 //
2213 return TRUE;
2214 }
2215
2216 /* EOF */