sync with trunk r49322
[reactos.git] / ntoskrnl / mm / ARM3 / mminit.c
1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: BSD - See COPYING.ARM in the top level directory
4 * FILE: ntoskrnl/mm/ARM3/mminit.c
5 * PURPOSE: ARM Memory Manager Initialization
6 * PROGRAMMERS: ReactOS Portable Systems Group
7 */
8
9 /* INCLUDES *******************************************************************/
10
11 #include <ntoskrnl.h>
12 #define NDEBUG
13 #include <debug.h>
14
15 #line 15 "ARM³::INIT"
16 #define MODULE_INVOLVED_IN_ARM3
17 #include "miarm.h"
18
19 /* GLOBALS ********************************************************************/
20
21 //
22 // These are all registry-configurable, but by default, the memory manager will
23 // figure out the most appropriate values.
24 //
25 ULONG MmMaximumNonPagedPoolPercent;
26 SIZE_T MmSizeOfNonPagedPoolInBytes;
27 SIZE_T MmMaximumNonPagedPoolInBytes;
28
29 /* Some of the same values, in pages */
30 PFN_NUMBER MmMaximumNonPagedPoolInPages;
31
32 //
33 // These numbers describe the discrete equation components of the nonpaged
34 // pool sizing algorithm.
35 //
36 // They are described on http://support.microsoft.com/default.aspx/kb/126402/ja
37 // along with the algorithm that uses them, which is implemented later below.
38 //
39 SIZE_T MmMinimumNonPagedPoolSize = 256 * 1024;
40 ULONG MmMinAdditionNonPagedPoolPerMb = 32 * 1024;
41 SIZE_T MmDefaultMaximumNonPagedPool = 1024 * 1024;
42 ULONG MmMaxAdditionNonPagedPoolPerMb = 400 * 1024;
43
44 //
45 // The memory layout (and especially variable names) of the NT kernel mode
46 // components can be a bit hard to twig, especially when it comes to the non
47 // paged area.
48 //
49 // There are really two components to the non-paged pool:
50 //
51 // - The initial nonpaged pool, sized dynamically up to a maximum.
52 // - The expansion nonpaged pool, sized dynamically up to a maximum.
53 //
54 // The initial nonpaged pool is physically continuous for performance, and
55 // immediately follows the PFN database, typically sharing the same PDE. It is
56 // a very small resource (32MB on a 1GB system), and capped at 128MB.
57 //
58 // Right now we call this the "ARM³ Nonpaged Pool" and it begins somewhere after
59 // the PFN database (which starts at 0xB0000000).
60 //
61 // The expansion nonpaged pool, on the other hand, can grow much bigger (400MB
62 // for a 1GB system). On ARM³ however, it is currently capped at 128MB.
63 //
64 // The address where the initial nonpaged pool starts is aptly named
65 // MmNonPagedPoolStart, and it describes a range of MmSizeOfNonPagedPoolInBytes
66 // bytes.
67 //
68 // Expansion nonpaged pool starts at an address described by the variable called
69 // MmNonPagedPoolExpansionStart, and it goes on for MmMaximumNonPagedPoolInBytes
70 // minus MmSizeOfNonPagedPoolInBytes bytes, always reaching MmNonPagedPoolEnd
71 // (because of the way it's calculated) at 0xFFBE0000.
72 //
73 // Initial nonpaged pool is allocated and mapped early-on during boot, but what
74 // about the expansion nonpaged pool? It is instead composed of special pages
75 // which belong to what are called System PTEs. These PTEs are the matter of a
76 // later discussion, but they are also considered part of the "nonpaged" OS, due
77 // to the fact that they are never paged out -- once an address is described by
78 // a System PTE, it is always valid, until the System PTE is torn down.
79 //
80 // System PTEs are actually composed of two "spaces", the system space proper,
81 // and the nonpaged pool expansion space. The latter, as we've already seen,
82 // begins at MmNonPagedPoolExpansionStart. Based on the number of System PTEs
83 // that the system will support, the remaining address space below this address
84 // is used to hold the system space PTEs. This address, in turn, is held in the
85 // variable named MmNonPagedSystemStart, which itself is never allowed to go
86 // below 0xEB000000 (thus creating an upper bound on the number of System PTEs).
87 //
88 // This means that 330MB are reserved for total nonpaged system VA, on top of
89 // whatever the initial nonpaged pool allocation is.
90 //
91 // The following URLs, valid as of April 23rd, 2008, support this evidence:
92 //
93 // http://www.cs.miami.edu/~burt/journal/NT/memory.html
94 // http://www.ditii.com/2007/09/28/windows-memory-management-x86-virtual-address-space/
95 //
96 PVOID MmNonPagedSystemStart;
97 PVOID MmNonPagedPoolStart;
98 PVOID MmNonPagedPoolExpansionStart;
99 PVOID MmNonPagedPoolEnd = MI_NONPAGED_POOL_END;
100
101 //
102 // This is where paged pool starts by default
103 //
104 PVOID MmPagedPoolStart = MI_PAGED_POOL_START;
105 PVOID MmPagedPoolEnd;
106
107 //
108 // And this is its default size
109 //
110 SIZE_T MmSizeOfPagedPoolInBytes = MI_MIN_INIT_PAGED_POOLSIZE;
111 PFN_NUMBER MmSizeOfPagedPoolInPages = MI_MIN_INIT_PAGED_POOLSIZE / PAGE_SIZE;
112
113 //
114 // Session space starts at 0xBFFFFFFF and grows downwards
115 // By default, it includes an 8MB image area where we map win32k and video card
116 // drivers, followed by a 4MB area containing the session's working set. This is
117 // then followed by a 20MB mapped view area and finally by the session's paged
118 // pool, by default 16MB.
119 //
120 // On a normal system, this results in session space occupying the region from
121 // 0xBD000000 to 0xC0000000
122 //
123 // See miarm.h for the defines that determine the sizing of this region. On an
124 // NT system, some of these can be configured through the registry, but we don't
125 // support that yet.
126 //
127 PVOID MiSessionSpaceEnd; // 0xC0000000
128 PVOID MiSessionImageEnd; // 0xC0000000
129 PVOID MiSessionImageStart; // 0xBF800000
130 PVOID MiSessionViewStart; // 0xBE000000
131 PVOID MiSessionPoolEnd; // 0xBE000000
132 PVOID MiSessionPoolStart; // 0xBD000000
133 PVOID MmSessionBase; // 0xBD000000
134 SIZE_T MmSessionSize;
135 SIZE_T MmSessionViewSize;
136 SIZE_T MmSessionPoolSize;
137 SIZE_T MmSessionImageSize;
138
139 /*
140 * These are the PTE addresses of the boundaries carved out above
141 */
142 PMMPTE MiSessionImagePteStart;
143 PMMPTE MiSessionImagePteEnd;
144 PMMPTE MiSessionBasePte;
145 PMMPTE MiSessionLastPte;
146
147 //
148 // The system view space, on the other hand, is where sections that are memory
149 // mapped into "system space" end up.
150 //
151 // By default, it is a 16MB region.
152 //
153 PVOID MiSystemViewStart;
154 SIZE_T MmSystemViewSize;
155
156 #if (_MI_PAGING_LEVELS == 2)
157 //
158 // A copy of the system page directory (the page directory associated with the
159 // System process) is kept (double-mapped) by the manager in order to lazily
160 // map paged pool PDEs into external processes when they fault on a paged pool
161 // address.
162 //
163 PFN_NUMBER MmSystemPageDirectory[PD_COUNT];
164 PMMPTE MmSystemPagePtes;
165 #endif
166
167 //
168 // The system cache starts right after hyperspace. The first few pages are for
169 // keeping track of the system working set list.
170 //
171 // This should be 0xC0C00000 -- the cache itself starts at 0xC1000000
172 //
173 PMMWSL MmSystemCacheWorkingSetList = MI_SYSTEM_CACHE_WS_START;
174
175 //
176 // Windows NT seems to choose between 7000, 11000 and 50000
177 // On systems with more than 32MB, this number is then doubled, and further
178 // aligned up to a PDE boundary (4MB).
179 //
180 ULONG_PTR MmNumberOfSystemPtes;
181
182 //
183 // This is how many pages the PFN database will take up
184 // In Windows, this includes the Quark Color Table, but not in ARM³
185 //
186 PFN_NUMBER MxPfnAllocation;
187
188 //
189 // Unlike the old ReactOS Memory Manager, ARM³ (and Windows) does not keep track
190 // of pages that are not actually valid physical memory, such as ACPI reserved
191 // regions, BIOS address ranges, or holes in physical memory address space which
192 // could indicate device-mapped I/O memory.
193 //
194 // In fact, the lack of a PFN entry for a page usually indicates that this is
195 // I/O space instead.
196 //
197 // A bitmap, called the PFN bitmap, keeps track of all page frames by assigning
198 // a bit to each. If the bit is set, then the page is valid physical RAM.
199 //
200 RTL_BITMAP MiPfnBitMap;
201
202 //
203 // This structure describes the different pieces of RAM-backed address space
204 //
205 PPHYSICAL_MEMORY_DESCRIPTOR MmPhysicalMemoryBlock;
206
207 //
208 // This is where we keep track of the most basic physical layout markers
209 //
210 PFN_NUMBER MmNumberOfPhysicalPages, MmHighestPhysicalPage, MmLowestPhysicalPage = -1;
211
212 //
213 // The total number of pages mapped by the boot loader, which include the kernel
214 // HAL, boot drivers, registry, NLS files and other loader data structures is
215 // kept track of here. This depends on "LoaderPagesSpanned" being correct when
216 // coming from the loader.
217 //
218 // This number is later aligned up to a PDE boundary.
219 //
220 SIZE_T MmBootImageSize;
221
222 //
223 // These three variables keep track of the core separation of address space that
224 // exists between kernel mode and user mode.
225 //
226 ULONG_PTR MmUserProbeAddress;
227 PVOID MmHighestUserAddress;
228 PVOID MmSystemRangeStart;
229
230 /* And these store the respective highest PTE/PDE address */
231 PMMPTE MiHighestUserPte;
232 PMMPDE MiHighestUserPde;
233 #if (_MI_PAGING_LEVELS >= 3)
234 /* We need the highest PPE and PXE addresses */
235 #endif
236
237 /* These variables define the system cache address space */
238 PVOID MmSystemCacheStart;
239 PVOID MmSystemCacheEnd;
240 MMSUPPORT MmSystemCacheWs;
241
242 //
243 // This is where hyperspace ends (followed by the system cache working set)
244 //
245 PVOID MmHyperSpaceEnd;
246
247 //
248 // Page coloring algorithm data
249 //
250 ULONG MmSecondaryColors;
251 ULONG MmSecondaryColorMask;
252
253 //
254 // Actual (registry-configurable) size of a GUI thread's stack
255 //
256 ULONG MmLargeStackSize = KERNEL_LARGE_STACK_SIZE;
257
258 //
259 // Before we have a PFN database, memory comes straight from our physical memory
260 // blocks, which is nice because it's guaranteed contiguous and also because once
261 // we take a page from here, the system doesn't see it anymore.
262 // However, once the fun is over, those pages must be re-integrated back into
263 // PFN society life, and that requires us keeping a copy of the original layout
264 // so that we can parse it later.
265 //
266 PMEMORY_ALLOCATION_DESCRIPTOR MxFreeDescriptor;
267 MEMORY_ALLOCATION_DESCRIPTOR MxOldFreeDescriptor;
268
269 /*
270 * For each page's worth bytes of L2 cache in a given set/way line, the zero and
271 * free lists are organized in what is called a "color".
272 *
273 * This array points to the two lists, so it can be thought of as a multi-dimensional
274 * array of MmFreePagesByColor[2][MmSecondaryColors]. Since the number is dynamic,
275 * we describe the array in pointer form instead.
276 *
277 * On a final note, the color tables themselves are right after the PFN database.
278 */
279 C_ASSERT(FreePageList == 1);
280 PMMCOLOR_TABLES MmFreePagesByColor[FreePageList + 1];
281
282 /* An event used in Phase 0 before the rest of the system is ready to go */
283 KEVENT MiTempEvent;
284
285 /* All the events used for memory threshold notifications */
286 PKEVENT MiLowMemoryEvent;
287 PKEVENT MiHighMemoryEvent;
288 PKEVENT MiLowPagedPoolEvent;
289 PKEVENT MiHighPagedPoolEvent;
290 PKEVENT MiLowNonPagedPoolEvent;
291 PKEVENT MiHighNonPagedPoolEvent;
292
293 /* The actual thresholds themselves, in page numbers */
294 PFN_NUMBER MmLowMemoryThreshold;
295 PFN_NUMBER MmHighMemoryThreshold;
296 PFN_NUMBER MiLowPagedPoolThreshold;
297 PFN_NUMBER MiHighPagedPoolThreshold;
298 PFN_NUMBER MiLowNonPagedPoolThreshold;
299 PFN_NUMBER MiHighNonPagedPoolThreshold;
300
301 /*
302 * This number determines how many free pages must exist, at minimum, until we
303 * start trimming working sets and flushing modified pages to obtain more free
304 * pages.
305 *
306 * This number changes if the system detects that this is a server product
307 */
308 PFN_NUMBER MmMinimumFreePages = 26;
309
310 /*
311 * This number indicates how many pages we consider to be a low limit of having
312 * "plenty" of free memory.
313 *
314 * It is doubled on systems that have more than 63MB of memory
315 */
316 PFN_NUMBER MmPlentyFreePages = 400;
317
318 /* These values store the type of system this is (small, med, large) and if server */
319 ULONG MmProductType;
320 MM_SYSTEMSIZE MmSystemSize;
321
322 /*
323 * These values store the cache working set minimums and maximums, in pages
324 *
325 * The minimum value is boosted on systems with more than 24MB of RAM, and cut
326 * down to only 32 pages on embedded (<24MB RAM) systems.
327 *
328 * An extra boost of 2MB is given on systems with more than 33MB of RAM.
329 */
330 PFN_NUMBER MmSystemCacheWsMinimum = 288;
331 PFN_NUMBER MmSystemCacheWsMaximum = 350;
332
333 /* FIXME: Move to cache/working set code later */
334 BOOLEAN MmLargeSystemCache;
335
336 /*
337 * This value determines in how many fragments/chunks the subsection prototype
338 * PTEs should be allocated when mapping a section object. It is configurable in
339 * the registry through the MapAllocationFragment parameter.
340 *
341 * The default is 64KB on systems with more than 1GB of RAM, 32KB on systems with
342 * more than 256MB of RAM, and 16KB on systems with less than 256MB of RAM.
343 *
344 * The maximum it can be set to is 2MB, and the minimum is 4KB.
345 */
346 SIZE_T MmAllocationFragment;
347
348 /*
349 * These two values track how much virtual memory can be committed, and when
350 * expansion should happen.
351 */
352 // FIXME: They should be moved elsewhere since it's not an "init" setting?
353 SIZE_T MmTotalCommitLimit;
354 SIZE_T MmTotalCommitLimitMaximum;
355
356 /* Internal setting used for debugging memory descriptors */
357 BOOLEAN MiDbgEnableMdDump =
358 #ifdef _ARM_
359 TRUE;
360 #else
361 FALSE;
362 #endif
363
364 /* PRIVATE FUNCTIONS **********************************************************/
365
366 PFN_NUMBER
367 NTAPI
368 MxGetNextPage(IN PFN_NUMBER PageCount)
369 {
370 PFN_NUMBER Pfn;
371
372 /* Make sure we have enough pages */
373 if (PageCount > MxFreeDescriptor->PageCount)
374 {
375 /* Crash the system */
376 KeBugCheckEx(INSTALL_MORE_MEMORY,
377 MmNumberOfPhysicalPages,
378 MxFreeDescriptor->PageCount,
379 MxOldFreeDescriptor.PageCount,
380 PageCount);
381 }
382
383 /* Use our lowest usable free pages */
384 Pfn = MxFreeDescriptor->BasePage;
385 MxFreeDescriptor->BasePage += PageCount;
386 MxFreeDescriptor->PageCount -= PageCount;
387 return Pfn;
388 }
389
390 VOID
391 NTAPI
392 MiComputeColorInformation(VOID)
393 {
394 ULONG L2Associativity;
395
396 /* Check if no setting was provided already */
397 if (!MmSecondaryColors)
398 {
399 /* Get L2 cache information */
400 L2Associativity = KeGetPcr()->SecondLevelCacheAssociativity;
401
402 /* The number of colors is the number of cache bytes by set/way */
403 MmSecondaryColors = KeGetPcr()->SecondLevelCacheSize;
404 if (L2Associativity) MmSecondaryColors /= L2Associativity;
405 }
406
407 /* Now convert cache bytes into pages */
408 MmSecondaryColors >>= PAGE_SHIFT;
409 if (!MmSecondaryColors)
410 {
411 /* If there was no cache data from the KPCR, use the default colors */
412 MmSecondaryColors = MI_SECONDARY_COLORS;
413 }
414 else
415 {
416 /* Otherwise, make sure there aren't too many colors */
417 if (MmSecondaryColors > MI_MAX_SECONDARY_COLORS)
418 {
419 /* Set the maximum */
420 MmSecondaryColors = MI_MAX_SECONDARY_COLORS;
421 }
422
423 /* Make sure there aren't too little colors */
424 if (MmSecondaryColors < MI_MIN_SECONDARY_COLORS)
425 {
426 /* Set the default */
427 MmSecondaryColors = MI_SECONDARY_COLORS;
428 }
429
430 /* Finally make sure the colors are a power of two */
431 if (MmSecondaryColors & (MmSecondaryColors - 1))
432 {
433 /* Set the default */
434 MmSecondaryColors = MI_SECONDARY_COLORS;
435 }
436 }
437
438 /* Compute the mask and store it */
439 MmSecondaryColorMask = MmSecondaryColors - 1;
440 KeGetCurrentPrcb()->SecondaryColorMask = MmSecondaryColorMask;
441 }
442
VOID
NTAPI
MiInitializeColorTables(VOID)
{
    ULONG i;
    PMMPTE PointerPte, LastPte;
    MMPTE TempPte = ValidKernelPte;

    /*
     * Allocate, map and initialize the free/zeroed page color tables.
     * The color table region starts immediately after the ARM3 PFN database
     * (one PMMPFN slot per physical page, hence the +1 past the highest PFN).
     */
    MmFreePagesByColor[0] = (PMMCOLOR_TABLES)&MmPfnDatabase[MmHighestPhysicalPage + 1];

    /*
     * Walk every PTE backing the table region. There are two tables
     * (zeroed list and free list), each with MmSecondaryColors entries,
     * so the span is 2 * MmSecondaryColors * sizeof(MMCOLOR_TABLES) bytes.
     */
    PointerPte = MiAddressToPte(&MmFreePagesByColor[0][0]);
    LastPte = MiAddressToPte((ULONG_PTR)MmFreePagesByColor[0] +
                             (2 * MmSecondaryColors * sizeof(MMCOLOR_TABLES))
                             - 1);
    while (PointerPte <= LastPte)
    {
        /* Only map PTEs that aren't already valid (region may share pages
           with the end of the PFN database mapping) */
        if (PointerPte->u.Hard.Valid == 0)
        {
            /* Grab a boot page and make the PTE valid */
            TempPte.u.Hard.PageFrameNumber = MxGetNextPage(1);
            MI_WRITE_VALID_PTE(PointerPte, TempPte);

            /* Zero out the freshly mapped page */
            RtlZeroMemory(MiPteToAddress(PointerPte), PAGE_SIZE);
        }

        /* Next PTE */
        PointerPte++;
    }

    /* The second (free-list) table immediately follows the first */
    MmFreePagesByColor[1] = &MmFreePagesByColor[0][MmSecondaryColors];

    /* Initialize every color of both tables to an empty list */
    for (i = 0; i < MmSecondaryColors; i++)
    {
        /* 0xFFFFFFFF appears to serve as the end-of-list sentinel here
           (NOTE(review): matches LIST_HEAD usage elsewhere in ARM3 — confirm) */
        MmFreePagesByColor[ZeroedPageList][i].Flink = 0xFFFFFFFF;
        MmFreePagesByColor[ZeroedPageList][i].Blink = (PVOID)0xFFFFFFFF;
        MmFreePagesByColor[ZeroedPageList][i].Count = 0;
        MmFreePagesByColor[FreePageList][i].Flink = 0xFFFFFFFF;
        MmFreePagesByColor[FreePageList][i].Blink = (PVOID)0xFFFFFFFF;
        MmFreePagesByColor[FreePageList][i].Count = 0;
    }
}
491
492 BOOLEAN
493 NTAPI
494 MiIsRegularMemory(IN PLOADER_PARAMETER_BLOCK LoaderBlock,
495 IN PFN_NUMBER Pfn)
496 {
497 PLIST_ENTRY NextEntry;
498 PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
499
500 /* Loop the memory descriptors */
501 NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
502 while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
503 {
504 /* Get the memory descriptor */
505 MdBlock = CONTAINING_RECORD(NextEntry,
506 MEMORY_ALLOCATION_DESCRIPTOR,
507 ListEntry);
508
509 /* Check if this PFN could be part of the block */
510 if (Pfn >= (MdBlock->BasePage))
511 {
512 /* Check if it really is part of the block */
513 if (Pfn < (MdBlock->BasePage + MdBlock->PageCount))
514 {
515 /* Check if the block is actually memory we don't map */
516 if ((MdBlock->MemoryType == LoaderFirmwarePermanent) ||
517 (MdBlock->MemoryType == LoaderBBTMemory) ||
518 (MdBlock->MemoryType == LoaderSpecialMemory))
519 {
520 /* We don't need PFN database entries for this memory */
521 break;
522 }
523
524 /* This is memory we want to map */
525 return TRUE;
526 }
527 }
528 else
529 {
530 /* Blocks are ordered, so if it's not here, it doesn't exist */
531 break;
532 }
533
534 /* Get to the next descriptor */
535 NextEntry = MdBlock->ListEntry.Flink;
536 }
537
538 /* Check if this PFN is actually from our free memory descriptor */
539 if ((Pfn >= MxOldFreeDescriptor.BasePage) &&
540 (Pfn < MxOldFreeDescriptor.BasePage + MxOldFreeDescriptor.PageCount))
541 {
542 /* We use these pages for initial mappings, so we do want to count them */
543 return TRUE;
544 }
545
546 /* Otherwise this isn't memory that we describe or care about */
547 return FALSE;
548 }
549
VOID
NTAPI
MiMapPfnDatabase(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
    ULONG FreePage, FreePageCount, PagesLeft, BasePage, PageCount;
    PLIST_ENTRY NextEntry;
    PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
    PMMPTE PointerPte, LastPte;
    MMPTE TempPte = ValidKernelPte;

    /*
     * Map physical pages behind the portion of the PFN database that covers
     * each loader memory descriptor. Pages are consumed directly from the
     * boot free descriptor; we track them in locals rather than through
     * MxGetNextPage, because MxFreeDescriptor itself is one of the
     * descriptors being walked and mutating it mid-loop would corrupt
     * the iteration state.
     */
    FreePage = MxFreeDescriptor->BasePage;
    FreePageCount = MxFreeDescriptor->PageCount;
    PagesLeft = 0;

    /* Loop the memory descriptors */
    NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
    while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
    {
        /* Get the descriptor */
        MdBlock = CONTAINING_RECORD(NextEntry,
                                    MEMORY_ALLOCATION_DESCRIPTOR,
                                    ListEntry);
        if ((MdBlock->MemoryType == LoaderFirmwarePermanent) ||
            (MdBlock->MemoryType == LoaderBBTMemory) ||
            (MdBlock->MemoryType == LoaderSpecialMemory))
        {
            /* These ranges get no PFN database coverage; skip them */
            NextEntry = MdBlock->ListEntry.Flink;
            continue;
        }

        /* The free descriptor has already been partially consumed during
           boot, so use the saved original extent instead of its live one */
        if (MdBlock == MxFreeDescriptor)
        {
            BasePage = MxOldFreeDescriptor.BasePage;
            PageCount = MxOldFreeDescriptor.PageCount;
        }
        else
        {
            /* Any other descriptor is used as-is */
            BasePage = MdBlock->BasePage;
            PageCount = MdBlock->PageCount;
        }

        /* Compute the PTE range covering this slice of the PFN database */
        PointerPte = MiAddressToPte(&MmPfnDatabase[BasePage]);
        LastPte = MiAddressToPte(((ULONG_PTR)&MmPfnDatabase[BasePage + PageCount]) - 1);
        DPRINT("MD Type: %lx Base: %lx Count: %lx\n", MdBlock->MemoryType, BasePage, PageCount);

        /* Loop them */
        while (PointerPte <= LastPte)
        {
            /* We'll only touch PTEs that aren't already valid (adjacent
               descriptors can share a page of PFN entries) */
            if (PointerPte->u.Hard.Valid == 0)
            {
                /* Back the PTE with the next free physical page */
                TempPte.u.Hard.PageFrameNumber = FreePage;
                ASSERT(FreePageCount != 0);

                /* Consume free pages */
                FreePage++;
                FreePageCount--;
                if (!FreePageCount)
                {
                    /* Out of memory: unrecoverable this early in boot */
                    KeBugCheckEx(INSTALL_MORE_MEMORY,
                                 MmNumberOfPhysicalPages,
                                 FreePageCount,
                                 MxOldFreeDescriptor.PageCount,
                                 1);
                }

                /* Write out this PTE */
                PagesLeft++;
                MI_WRITE_VALID_PTE(PointerPte, TempPte);

                /* Zero the new PFN database page */
                RtlZeroMemory(MiPteToAddress(PointerPte), PAGE_SIZE);
            }

            /* Next PTE */
            PointerPte++;
        }

        /* Do the next address range */
        NextEntry = MdBlock->ListEntry.Flink;
    }

    /* Write the locally tracked consumption back into the free descriptor */
    MxFreeDescriptor->BasePage = FreePage;
    MxFreeDescriptor->PageCount = FreePageCount;
}
644
VOID
NTAPI
MiBuildPfnDatabaseFromPages(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
    PMMPDE PointerPde;
    PMMPTE PointerPte;
    ULONG i, Count, j;
    PFN_NUMBER PageFrameIndex, StartupPdIndex, PtePageIndex;
    PMMPFN Pfn1, Pfn2;
    ULONG_PTR BaseAddress = 0;

    /*
     * Populate PFN entries for every page currently mapped by the startup
     * page tables: each valid PDE gets an entry (owned by the startup page
     * directory), and each valid PTE under it gets an entry owned by that
     * PDE's page.
     */

    /* PFN of the startup page directory itself */
    StartupPdIndex = PFN_FROM_PTE(MiAddressToPde(PDE_BASE));

    /* Start with the first PDE and scan the entire directory */
    PointerPde = MiAddressToPde(NULL);
    Count = PD_COUNT * PDE_COUNT;
    for (i = 0; i < Count; i++)
    {
        /* Check for valid PDE */
        if (PointerPde->u.Hard.Valid == 1)
        {
            /* Get the PFN of the page table it points to */
            PageFrameIndex = PFN_FROM_PTE(PointerPde);

            /* Do we want a PFN entry for this page? */
            if (MiIsRegularMemory(LoaderBlock, PageFrameIndex))
            {
                /* Yes: mark the page-table page active, referenced by the
                   startup page directory through this PDE */
                Pfn1 = MiGetPfnEntry(PageFrameIndex);
                Pfn1->u4.PteFrame = StartupPdIndex;
                Pfn1->PteAddress = PointerPde;
                Pfn1->u2.ShareCount++;
                Pfn1->u3.e2.ReferenceCount = 1;
                Pfn1->u3.e1.PageLocation = ActiveAndValid;
                Pfn1->u3.e1.CacheAttribute = MiNonCached;
            }
            else
            {
                /* No PFN entry for this page-table page */
                Pfn1 = NULL;
            }

            /* Now walk every PTE in the page table this PDE maps */
            PointerPte = MiAddressToPte(BaseAddress);
            for (j = 0; j < PTE_COUNT; j++)
            {
                /* Check for a valid PTE */
                if (PointerPte->u.Hard.Valid == 1)
                {
                    /* Each valid PTE adds a share on the page-table page */
                    ASSERT(Pfn1 != NULL);
                    Pfn1->u2.ShareCount++;

                    /* Now check if the PTE points at regular memory too */
                    PtePageIndex = PFN_FROM_PTE(PointerPte);
                    if (MiIsRegularMemory(LoaderBlock, PtePageIndex))
                    {
                        /*
                         * Only add pages above the end of system code or pages
                         * that are part of nonpaged pool
                         */
                        if ((BaseAddress >= 0xA0000000) ||
                            ((BaseAddress >= (ULONG_PTR)MmNonPagedPoolStart) &&
                             (BaseAddress < (ULONG_PTR)MmNonPagedPoolStart +
                                            MmSizeOfNonPagedPoolInBytes)))
                        {
                            /* The PFN entry's slice of the database may not
                               be mapped; verify both ends before writing */
                            Pfn2 = MiGetPfnEntry(PtePageIndex);
                            if ((MmIsAddressValid(Pfn2)) &&
                                (MmIsAddressValid(Pfn2 + 1)))
                            {
                                /* Mark the page active, owned by this PTE's
                                   page-table page */
                                Pfn2->u4.PteFrame = PageFrameIndex;
                                Pfn2->PteAddress = PointerPte;
                                Pfn2->u2.ShareCount++;
                                Pfn2->u3.e2.ReferenceCount = 1;
                                Pfn2->u3.e1.PageLocation = ActiveAndValid;
                                Pfn2->u3.e1.CacheAttribute = MiNonCached;
                            }
                        }
                    }
                }

                /* Next PTE */
                PointerPte++;
                BaseAddress += PAGE_SIZE;
            }
        }
        else
        {
            /* PDE not valid: skip the whole VA range it would map */
            BaseAddress += PDE_MAPPED_VA;
        }

        /* Next PDE */
        PointerPde++;
    }
}
744
745 VOID
746 NTAPI
747 MiBuildPfnDatabaseZeroPage(VOID)
748 {
749 PMMPFN Pfn1;
750 PMMPDE PointerPde;
751
752 /* Grab the lowest page and check if it has no real references */
753 Pfn1 = MiGetPfnEntry(MmLowestPhysicalPage);
754 if (!(MmLowestPhysicalPage) && !(Pfn1->u3.e2.ReferenceCount))
755 {
756 /* Make it a bogus page to catch errors */
757 PointerPde = MiAddressToPde(0xFFFFFFFF);
758 Pfn1->u4.PteFrame = PFN_FROM_PTE(PointerPde);
759 Pfn1->PteAddress = PointerPde;
760 Pfn1->u2.ShareCount++;
761 Pfn1->u3.e2.ReferenceCount = 0xFFF0;
762 Pfn1->u3.e1.PageLocation = ActiveAndValid;
763 Pfn1->u3.e1.CacheAttribute = MiNonCached;
764 }
765 }
766
VOID
NTAPI
MiBuildPfnDatabaseFromLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
    PLIST_ENTRY NextEntry;
    PFN_NUMBER PageCount = 0;
    PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
    PFN_NUMBER PageFrameIndex;
    PMMPFN Pfn1;
    PMMPTE PointerPte;
    PMMPDE PointerPde;
    KIRQL OldIrql;

    /*
     * Classify every loader memory descriptor and fill in the PFN database
     * accordingly: free RAM goes on the free list, invisible firmware ranges
     * are skipped, and everything else is marked in-use via its KSEG0 mapping.
     */
    NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
    while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
    {
        /* Get the current descriptor */
        MdBlock = CONTAINING_RECORD(NextEntry,
                                    MEMORY_ALLOCATION_DESCRIPTOR,
                                    ListEntry);

        /* Read its data */
        PageCount = MdBlock->PageCount;
        PageFrameIndex = MdBlock->BasePage;

        /* Don't allow memory above what the PFN database is mapping */
        if (PageFrameIndex > MmHighestPhysicalPage)
        {
            /* Since descriptors are ordered, everything past here is larger */
            break;
        }

        /* On the other hand, the end page might be higher up... */
        if ((PageFrameIndex + PageCount) > (MmHighestPhysicalPage + 1))
        {
            /* In which case we'll trim the descriptor to go as high as we can */
            PageCount = MmHighestPhysicalPage + 1 - PageFrameIndex;
            MdBlock->PageCount = PageCount;

            /* But if there's nothing left to trim, we got too high, so quit */
            if (!PageCount) break;
        }

        /* Now check the descriptor type */
        switch (MdBlock->MemoryType)
        {
            /* Check for bad RAM */
            case LoaderBad:

                DPRINT1("You either have specified /BURNMEMORY or damaged RAM modules.\n");
                break;

            /* Check for free RAM */
            case LoaderFree:
            case LoaderLoadedProgram:
            case LoaderFirmwareTemporary:
            case LoaderOsloaderStack:

                /* Get the last page of this descriptor. Note we loop backwards */
                PageFrameIndex += PageCount - 1;
                Pfn1 = MiGetPfnEntry(PageFrameIndex);

                /* Free-list insertion requires the PFN lock */
                OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
                while (PageCount--)
                {
                    /* If the page really has no references, mark it as free */
                    if (!Pfn1->u3.e2.ReferenceCount)
                    {
                        /* Add it to the free list */
                        Pfn1->u3.e1.CacheAttribute = MiNonCached;
                        MiInsertPageInFreeList(PageFrameIndex);
                    }

                    /* Go to the next page (walking down in memory) */
                    Pfn1--;
                    PageFrameIndex--;
                }

                /* Release PFN database lock */
                KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

                /* Done with this block */
                break;

            /* Check for pages that are invisible to us */
            case LoaderFirmwarePermanent:
            case LoaderSpecialMemory:
            case LoaderBBTMemory:

                /* And skip them */
                break;

            default:

                /* Every other type is in-use memory; it is mapped through
                   the KSEG0 identity mapping that adds 0x80000000 */
                PointerPte = MiAddressToPte(KSEG0_BASE + (PageFrameIndex << PAGE_SHIFT));
                Pfn1 = MiGetPfnEntry(PageFrameIndex);
                while (PageCount--)
                {
                    /* Check if the page is really unused */
                    PointerPde = MiAddressToPde(KSEG0_BASE + (PageFrameIndex << PAGE_SHIFT));
                    if (!Pfn1->u3.e2.ReferenceCount)
                    {
                        /* Mark it as being in-use */
                        Pfn1->u4.PteFrame = PFN_FROM_PTE(PointerPde);
                        Pfn1->PteAddress = PointerPte;
                        Pfn1->u2.ShareCount++;
                        Pfn1->u3.e2.ReferenceCount = 1;
                        Pfn1->u3.e1.PageLocation = ActiveAndValid;
                        Pfn1->u3.e1.CacheAttribute = MiNonCached;

                        /* Check for RAM disk page */
                        if (MdBlock->MemoryType == LoaderXIPRom)
                        {
                            /* Make it a pseudo-I/O ROM mapping */
                            Pfn1->u1.Flink = 0;
                            Pfn1->u2.ShareCount = 0;
                            Pfn1->u3.e2.ReferenceCount = 0;
                            Pfn1->u3.e1.PageLocation = 0;
                            Pfn1->u3.e1.Rom = 1;
                            Pfn1->u4.InPageError = 0;
                            Pfn1->u3.e1.PrototypePte = 1;
                        }
                    }

                    /* Advance page structures */
                    Pfn1++;
                    PageFrameIndex++;
                    PointerPte++;
                }
                break;
        }

        /* Next descriptor entry */
        NextEntry = MdBlock->ListEntry.Flink;
    }
}
906
907 VOID
908 NTAPI
909 MiBuildPfnDatabaseSelf(VOID)
910 {
911 PMMPTE PointerPte, LastPte;
912 PMMPFN Pfn1;
913
914 /* Loop the PFN database page */
915 PointerPte = MiAddressToPte(MiGetPfnEntry(MmLowestPhysicalPage));
916 LastPte = MiAddressToPte(MiGetPfnEntry(MmHighestPhysicalPage));
917 while (PointerPte <= LastPte)
918 {
919 /* Make sure the page is valid */
920 if (PointerPte->u.Hard.Valid == 1)
921 {
922 /* Get the PFN entry and just mark it referenced */
923 Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
924 Pfn1->u2.ShareCount = 1;
925 Pfn1->u3.e2.ReferenceCount = 1;
926 }
927
928 /* Next */
929 PointerPte++;
930 }
931 }
932
VOID
NTAPI
MiInitializePfnDatabase(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
    /*
     * Build the complete PFN database in four ordered passes. The order
     * matters: entries for currently mapped pages must exist before the
     * loader-block pass decides which pages are free.
     */

    /* Pass 1: scan the startup page tables and set up PFN entries */
    MiBuildPfnDatabaseFromPages(LoaderBlock);

    /* Pass 2: poison the zero page so stray PFN-0 uses are caught */
    MiBuildPfnDatabaseZeroPage();

    /* Pass 3: classify loader descriptors (free lists, in-use, skipped) */
    MiBuildPfnDatabaseFromLoaderBlock(LoaderBlock);

    /* Pass 4: finally account for the pages of the PFN database itself */
    MiBuildPfnDatabaseSelf();
}
949
950 VOID
951 NTAPI
952 MiAdjustWorkingSetManagerParameters(IN BOOLEAN Client)
953 {
954 /* This function needs to do more work, for now, we tune page minimums */
955
956 /* Check for a system with around 64MB RAM or more */
957 if (MmNumberOfPhysicalPages >= (63 * _1MB) / PAGE_SIZE)
958 {
959 /* Double the minimum amount of pages we consider for a "plenty free" scenario */
960 MmPlentyFreePages *= 2;
961 }
962 }
963
964 VOID
965 NTAPI
966 MiNotifyMemoryEvents(VOID)
967 {
968 /* Are we in a low-memory situation? */
969 if (MmAvailablePages < MmLowMemoryThreshold)
970 {
971 /* Clear high, set low */
972 if (KeReadStateEvent(MiHighMemoryEvent)) KeClearEvent(MiHighMemoryEvent);
973 if (!KeReadStateEvent(MiLowMemoryEvent)) KeSetEvent(MiLowMemoryEvent, 0, FALSE);
974 }
975 else if (MmAvailablePages < MmHighMemoryThreshold)
976 {
977 /* We are in between, clear both */
978 if (KeReadStateEvent(MiHighMemoryEvent)) KeClearEvent(MiHighMemoryEvent);
979 if (KeReadStateEvent(MiLowMemoryEvent)) KeClearEvent(MiLowMemoryEvent);
980 }
981 else
982 {
983 /* Clear low, set high */
984 if (KeReadStateEvent(MiLowMemoryEvent)) KeClearEvent(MiLowMemoryEvent);
985 if (!KeReadStateEvent(MiHighMemoryEvent)) KeSetEvent(MiHighMemoryEvent, 0, FALSE);
986 }
987 }
988
989 NTSTATUS
990 NTAPI
991 MiCreateMemoryEvent(IN PUNICODE_STRING Name,
992 OUT PKEVENT *Event)
993 {
994 PACL Dacl;
995 HANDLE EventHandle;
996 ULONG DaclLength;
997 NTSTATUS Status;
998 OBJECT_ATTRIBUTES ObjectAttributes;
999 SECURITY_DESCRIPTOR SecurityDescriptor;
1000
1001 /* Create the SD */
1002 Status = RtlCreateSecurityDescriptor(&SecurityDescriptor,
1003 SECURITY_DESCRIPTOR_REVISION);
1004 if (!NT_SUCCESS(Status)) return Status;
1005
1006 /* One ACL with 3 ACEs, containing each one SID */
1007 DaclLength = sizeof(ACL) +
1008 3 * sizeof(ACCESS_ALLOWED_ACE) +
1009 RtlLengthSid(SeLocalSystemSid) +
1010 RtlLengthSid(SeAliasAdminsSid) +
1011 RtlLengthSid(SeWorldSid);
1012
1013 /* Allocate space for the DACL */
1014 Dacl = ExAllocatePoolWithTag(PagedPool, DaclLength, 'lcaD');
1015 if (!Dacl) return STATUS_INSUFFICIENT_RESOURCES;
1016
1017 /* Setup the ACL inside it */
1018 Status = RtlCreateAcl(Dacl, DaclLength, ACL_REVISION);
1019 if (!NT_SUCCESS(Status)) goto CleanUp;
1020
1021 /* Add query rights for everyone */
1022 Status = RtlAddAccessAllowedAce(Dacl,
1023 ACL_REVISION,
1024 SYNCHRONIZE | EVENT_QUERY_STATE | READ_CONTROL,
1025 SeWorldSid);
1026 if (!NT_SUCCESS(Status)) goto CleanUp;
1027
1028 /* Full rights for the admin */
1029 Status = RtlAddAccessAllowedAce(Dacl,
1030 ACL_REVISION,
1031 EVENT_ALL_ACCESS,
1032 SeAliasAdminsSid);
1033 if (!NT_SUCCESS(Status)) goto CleanUp;
1034
1035 /* As well as full rights for the system */
1036 Status = RtlAddAccessAllowedAce(Dacl,
1037 ACL_REVISION,
1038 EVENT_ALL_ACCESS,
1039 SeLocalSystemSid);
1040 if (!NT_SUCCESS(Status)) goto CleanUp;
1041
1042 /* Set this DACL inside the SD */
1043 Status = RtlSetDaclSecurityDescriptor(&SecurityDescriptor,
1044 TRUE,
1045 Dacl,
1046 FALSE);
1047 if (!NT_SUCCESS(Status)) goto CleanUp;
1048
1049 /* Setup the event attributes, making sure it's a permanent one */
1050 InitializeObjectAttributes(&ObjectAttributes,
1051 Name,
1052 OBJ_KERNEL_HANDLE | OBJ_PERMANENT,
1053 NULL,
1054 &SecurityDescriptor);
1055
1056 /* Create the event */
1057 Status = ZwCreateEvent(&EventHandle,
1058 EVENT_ALL_ACCESS,
1059 &ObjectAttributes,
1060 NotificationEvent,
1061 FALSE);
1062 CleanUp:
1063 /* Free the DACL */
1064 ExFreePool(Dacl);
1065
1066 /* Check if this is the success path */
1067 if (NT_SUCCESS(Status))
1068 {
1069 /* Add a reference to the object, then close the handle we had */
1070 Status = ObReferenceObjectByHandle(EventHandle,
1071 EVENT_MODIFY_STATE,
1072 ExEventObjectType,
1073 KernelMode,
1074 (PVOID*)Event,
1075 NULL);
1076 ZwClose (EventHandle);
1077 }
1078
1079 /* Return status */
1080 return Status;
1081 }
1082
1083 BOOLEAN
1084 NTAPI
1085 MiInitializeMemoryEvents(VOID)
1086 {
1087 UNICODE_STRING LowString = RTL_CONSTANT_STRING(L"\\KernelObjects\\LowMemoryCondition");
1088 UNICODE_STRING HighString = RTL_CONSTANT_STRING(L"\\KernelObjects\\HighMemoryCondition");
1089 UNICODE_STRING LowPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\LowPagedPoolCondition");
1090 UNICODE_STRING HighPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\HighPagedPoolCondition");
1091 UNICODE_STRING LowNonPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\LowNonPagedPoolCondition");
1092 UNICODE_STRING HighNonPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\HighNonPagedPoolCondition");
1093 NTSTATUS Status;
1094
1095 /* Check if we have a registry setting */
1096 if (MmLowMemoryThreshold)
1097 {
1098 /* Convert it to pages */
1099 MmLowMemoryThreshold *= (_1MB / PAGE_SIZE);
1100 }
1101 else
1102 {
1103 /* The low memory threshold is hit when we don't consider that we have "plenty" of free pages anymore */
1104 MmLowMemoryThreshold = MmPlentyFreePages;
1105
1106 /* More than one GB of memory? */
1107 if (MmNumberOfPhysicalPages > 0x40000)
1108 {
1109 /* Start at 32MB, and add another 16MB for each GB */
1110 MmLowMemoryThreshold = (32 * _1MB) / PAGE_SIZE;
1111 MmLowMemoryThreshold += ((MmNumberOfPhysicalPages - 0x40000) >> 7);
1112 }
1113 else if (MmNumberOfPhysicalPages > 0x8000)
1114 {
1115 /* For systems with > 128MB RAM, add another 4MB for each 128MB */
1116 MmLowMemoryThreshold += ((MmNumberOfPhysicalPages - 0x8000) >> 5);
1117 }
1118
1119 /* Don't let the minimum threshold go past 64MB */
1120 MmLowMemoryThreshold = min(MmLowMemoryThreshold, (64 * _1MB) / PAGE_SIZE);
1121 }
1122
1123 /* Check if we have a registry setting */
1124 if (MmHighMemoryThreshold)
1125 {
1126 /* Convert it into pages */
1127 MmHighMemoryThreshold *= (_1MB / PAGE_SIZE);
1128 }
1129 else
1130 {
1131 /* Otherwise, the default is three times the low memory threshold */
1132 MmHighMemoryThreshold = 3 * MmLowMemoryThreshold;
1133 ASSERT(MmHighMemoryThreshold > MmLowMemoryThreshold);
1134 }
1135
1136 /* Make sure high threshold is actually higher than the low */
1137 MmHighMemoryThreshold = max(MmHighMemoryThreshold, MmLowMemoryThreshold);
1138
1139 /* Create the memory events for all the thresholds */
1140 Status = MiCreateMemoryEvent(&LowString, &MiLowMemoryEvent);
1141 if (!NT_SUCCESS(Status)) return FALSE;
1142 Status = MiCreateMemoryEvent(&HighString, &MiHighMemoryEvent);
1143 if (!NT_SUCCESS(Status)) return FALSE;
1144 Status = MiCreateMemoryEvent(&LowPagedPoolString, &MiLowPagedPoolEvent);
1145 if (!NT_SUCCESS(Status)) return FALSE;
1146 Status = MiCreateMemoryEvent(&HighPagedPoolString, &MiHighPagedPoolEvent);
1147 if (!NT_SUCCESS(Status)) return FALSE;
1148 Status = MiCreateMemoryEvent(&LowNonPagedPoolString, &MiLowNonPagedPoolEvent);
1149 if (!NT_SUCCESS(Status)) return FALSE;
1150 Status = MiCreateMemoryEvent(&HighNonPagedPoolString, &MiHighNonPagedPoolEvent);
1151 if (!NT_SUCCESS(Status)) return FALSE;
1152
1153 /* Now setup the pool events */
1154 MiInitializePoolEvents();
1155
1156 /* Set the initial event state */
1157 MiNotifyMemoryEvents();
1158 return TRUE;
1159 }
1160
/*
 * Walks the HAL heap virtual range (hard-coded to 0xFFC00000 upward here,
 * i.e. the x86 layout) looking for valid small-page mappings whose page
 * frame has no PFN database entry -- that is, device/IO-mapped memory.
 * Such pages are only reported for now; cache-attribute tracking for PAT
 * coherency is still a FIXME.
 */
VOID
NTAPI
MiAddHalIoMappings(VOID)
{
    PVOID BaseAddress;
    PMMPTE PointerPde;
    PMMPTE PointerPte;
    ULONG i, j, PdeCount;
    PFN_NUMBER PageFrameIndex;

    /* HAL Heap address -- should be on a PDE boundary */
    BaseAddress = (PVOID)0xFFC00000;
    ASSERT(MiAddressToPteOffset(BaseAddress) == 0);

    /* Check how many PDEs the heap has: everything from the heap base to
       the top of the address space */
    PointerPde = MiAddressToPde(BaseAddress);
    PdeCount = PDE_COUNT - ADDR_TO_PDE_OFFSET(BaseAddress);
    for (i = 0; i < PdeCount; i++)
    {
        /* Does the HAL own this mapping? Only valid, small-page PDEs are
           scanned; large pages are skipped entirely */
        if ((PointerPde->u.Hard.Valid == 1) &&
            (PointerPde->u.Hard.LargePage == 0))
        {
            /* Get the PTE for it and scan each page */
            PointerPte = MiAddressToPte(BaseAddress);
            for (j = 0 ; j < PTE_COUNT; j++)
            {
                /* Does the HAL own this page? */
                if (PointerPte->u.Hard.Valid == 1)
                {
                    /* Is the HAL using it for device or I/O mapped memory?
                       A frame with no PFN entry is not RAM we manage */
                    PageFrameIndex = PFN_FROM_PTE(PointerPte);
                    if (!MiGetPfnEntry(PageFrameIndex))
                    {
                        /* FIXME: For PAT, we need to track I/O cache attributes for coherency */
                        DPRINT1("HAL I/O Mapping at %p is unsafe\n", BaseAddress);
                    }
                }

                /* Move to the next page */
                BaseAddress = (PVOID)((ULONG_PTR)BaseAddress + PAGE_SIZE);
                PointerPte++;
            }
        }
        else
        {
            /* Move to the next address: skip the whole PDE's VA range */
            BaseAddress = (PVOID)((ULONG_PTR)BaseAddress + PDE_MAPPED_VA);
        }

        /* Move to the next PDE */
        PointerPde++;
    }
}
1215
1216 VOID
1217 NTAPI
1218 MmDumpArmPfnDatabase(VOID)
1219 {
1220 ULONG i;
1221 PMMPFN Pfn1;
1222 PCHAR Consumer = "Unknown";
1223 KIRQL OldIrql;
1224 ULONG ActivePages = 0, FreePages = 0, OtherPages = 0;
1225
1226 KeRaiseIrql(HIGH_LEVEL, &OldIrql);
1227
1228 //
1229 // Loop the PFN database
1230 //
1231 for (i = 0; i <= MmHighestPhysicalPage; i++)
1232 {
1233 Pfn1 = MiGetPfnEntry(i);
1234 if (!Pfn1) continue;
1235
1236 //
1237 // Get the page location
1238 //
1239 switch (Pfn1->u3.e1.PageLocation)
1240 {
1241 case ActiveAndValid:
1242
1243 Consumer = "Active and Valid";
1244 ActivePages++;
1245 break;
1246
1247 case FreePageList:
1248
1249 Consumer = "Free Page List";
1250 FreePages++;
1251 break;
1252
1253 default:
1254
1255 Consumer = "Other (ASSERT!)";
1256 OtherPages++;
1257 break;
1258 }
1259
1260 //
1261 // Pretty-print the page
1262 //
1263 DbgPrint("0x%08p:\t%20s\t(%02d.%02d) [%08p-%08p])\n",
1264 i << PAGE_SHIFT,
1265 Consumer,
1266 Pfn1->u3.e2.ReferenceCount,
1267 Pfn1->u2.ShareCount,
1268 Pfn1->PteAddress,
1269 Pfn1->u4.PteFrame);
1270 }
1271
1272 DbgPrint("Active: %d pages\t[%d KB]\n", ActivePages, (ActivePages << PAGE_SHIFT) / 1024);
1273 DbgPrint("Free: %d pages\t[%d KB]\n", FreePages, (FreePages << PAGE_SHIFT) / 1024);
1274 DbgPrint("Other: %d pages\t[%d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
1275
1276 KeLowerIrql(OldIrql);
1277 }
1278
1279 PFN_NUMBER
1280 NTAPI
1281 MiPagesInLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock,
1282 IN PBOOLEAN IncludeType)
1283 {
1284 PLIST_ENTRY NextEntry;
1285 PFN_NUMBER PageCount = 0;
1286 PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
1287
1288 //
1289 // Now loop through the descriptors
1290 //
1291 NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
1292 while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
1293 {
1294 //
1295 // Grab each one, and check if it's one we should include
1296 //
1297 MdBlock = CONTAINING_RECORD(NextEntry,
1298 MEMORY_ALLOCATION_DESCRIPTOR,
1299 ListEntry);
1300 if ((MdBlock->MemoryType < LoaderMaximum) &&
1301 (IncludeType[MdBlock->MemoryType]))
1302 {
1303 //
1304 // Add this to our running total
1305 //
1306 PageCount += MdBlock->PageCount;
1307 }
1308
1309 //
1310 // Try the next descriptor
1311 //
1312 NextEntry = MdBlock->ListEntry.Flink;
1313 }
1314
1315 //
1316 // Return the total
1317 //
1318 return PageCount;
1319 }
1320
/*
 * Builds a PHYSICAL_MEMORY_DESCRIPTOR from the loader's memory descriptor
 * list, including only descriptors whose type is marked TRUE in the
 * IncludeType array. Physically adjacent descriptors are coalesced into a
 * single run. Returns NULL if the worst-case buffer cannot be allocated;
 * the caller owns (and must eventually free) the returned buffer.
 */
PPHYSICAL_MEMORY_DESCRIPTOR
NTAPI
MmInitializeMemoryLimits(IN PLOADER_PARAMETER_BLOCK LoaderBlock,
                         IN PBOOLEAN IncludeType)
{
    PLIST_ENTRY NextEntry;
    ULONG Run = 0, InitialRuns = 0;
    /* NextPage starts at -1 so the first included descriptor can never
       appear contiguous and always opens a new run */
    PFN_NUMBER NextPage = -1, PageCount = 0;
    PPHYSICAL_MEMORY_DESCRIPTOR Buffer, NewBuffer;
    PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;

    //
    // First pass: count the descriptors to get a worst-case run count
    //
    NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
    while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
    {
        //
        // For each one, increase the memory allocation estimate
        //
        InitialRuns++;
        NextEntry = NextEntry->Flink;
    }

    //
    // Allocate the maximum we'll ever need. The descriptor structure
    // already contains space for one run, hence the (InitialRuns - 1).
    //
    Buffer = ExAllocatePoolWithTag(NonPagedPool,
                                   sizeof(PHYSICAL_MEMORY_DESCRIPTOR) +
                                   sizeof(PHYSICAL_MEMORY_RUN) *
                                   (InitialRuns - 1),
                                   'lMmM');
    if (!Buffer) return NULL;

    //
    // For now that's how many runs we have
    //
    Buffer->NumberOfRuns = InitialRuns;

    //
    // Second pass: copy/coalesce the descriptors we were asked to include
    //
    NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
    while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
    {
        //
        // Grab each one, and check if it's one we should include
        //
        MdBlock = CONTAINING_RECORD(NextEntry,
                                    MEMORY_ALLOCATION_DESCRIPTOR,
                                    ListEntry);
        if ((MdBlock->MemoryType < LoaderMaximum) &&
            (IncludeType[MdBlock->MemoryType]))
        {
            //
            // Add this to our running total
            //
            PageCount += MdBlock->PageCount;

            //
            // Check if this descriptor starts exactly where the previous
            // run ended
            //
            if (MdBlock->BasePage == NextPage)
            {
                //
                // Combine it into the same physical run. Run is >= 1 here,
                // since a merge can only follow an earlier-opened run.
                //
                ASSERT(MdBlock->PageCount != 0);
                Buffer->Run[Run - 1].PageCount += MdBlock->PageCount;
                NextPage += MdBlock->PageCount;
            }
            else
            {
                //
                // Otherwise just duplicate the descriptor's contents
                //
                Buffer->Run[Run].BasePage = MdBlock->BasePage;
                Buffer->Run[Run].PageCount = MdBlock->PageCount;
                NextPage = Buffer->Run[Run].BasePage + Buffer->Run[Run].PageCount;

                //
                // And in this case, increase the number of runs
                //
                Run++;
            }
        }

        //
        // Try the next descriptor
        //
        NextEntry = MdBlock->ListEntry.Flink;
    }

    //
    // We should not have been able to go past our initial estimate
    //
    ASSERT(Run <= Buffer->NumberOfRuns);

    //
    // Our guess was probably exaggerated, so shrink the buffer to fit.
    // NOTE(review): if no descriptor matched, Run is 0 and (Run - 1)
    // underflows below -- presumably at least one included descriptor
    // always exists at boot; verify against callers.
    //
    if (InitialRuns > Run)
    {
        //
        // Allocate a more accurately sized buffer
        //
        NewBuffer = ExAllocatePoolWithTag(NonPagedPool,
                                          sizeof(PHYSICAL_MEMORY_DESCRIPTOR) +
                                          sizeof(PHYSICAL_MEMORY_RUN) *
                                          (Run - 1),
                                          'lMmM');
        if (NewBuffer)
        {
            //
            // Copy the old buffer into the new, then free it. On allocation
            // failure we simply keep the oversized buffer -- not an error.
            //
            RtlCopyMemory(NewBuffer->Run,
                          Buffer->Run,
                          sizeof(PHYSICAL_MEMORY_RUN) * Run);
            ExFreePool(Buffer);

            //
            // Now use the new buffer
            //
            Buffer = NewBuffer;
        }
    }

    //
    // Write the final numbers, and return it
    //
    Buffer->NumberOfRuns = Run;
    Buffer->NumberOfPages = PageCount;
    return Buffer;
}
1456
/*
 * Sizes and maps the initial paged pool: computes the pool VA extent
 * (defaulting to twice the nonpaged pool maximum, clamped to the VA space
 * between MmPagedPoolStart and MmNonPagedSystemStart), maps the first pool
 * PDE, builds the allocation/end-of-allocation bitmaps, and initializes the
 * pool itself along with its low/high threshold values. On 2-level paging
 * it also sets up the double-mapped system page directory used for lazy PDE
 * evaluation. 3+-level paging paths are unimplemented (ASSERTs).
 */
VOID
NTAPI
MiBuildPagedPool(VOID)
{
    PMMPTE PointerPte, PointerPde;
    MMPTE TempPte = ValidKernelPte;
    PFN_NUMBER PageFrameIndex;
    KIRQL OldIrql;
    ULONG Size, BitMapSize;
#if (_MI_PAGING_LEVELS == 2)
    //
    // Get the page frame number for the system page directory
    //
    PointerPte = MiAddressToPte(PDE_BASE);
    ASSERT(PD_COUNT == 1);
    MmSystemPageDirectory[0] = PFN_FROM_PTE(PointerPte);

    //
    // Allocate a system PTE which will hold a copy of the page directory
    //
    PointerPte = MiReserveSystemPtes(1, SystemPteSpace);
    ASSERT(PointerPte);
    MmSystemPagePtes = MiPteToAddress(PointerPte);

    //
    // Make this system PTE point to the system page directory.
    // It is now essentially double-mapped. This will be used later for lazy
    // evaluation of PDEs accross process switches, similarly to how the Global
    // page directory array in the old ReactOS Mm is used (but in a less hacky
    // way).
    //
    TempPte = ValidKernelPte;
    ASSERT(PD_COUNT == 1);
    TempPte.u.Hard.PageFrameNumber = MmSystemPageDirectory[0];
    MI_WRITE_VALID_PTE(PointerPte, TempPte);
#endif
    //
    // Let's get back to paged pool work: size it up.
    // By default, it should be twice as big as nonpaged pool.
    //
    MmSizeOfPagedPoolInBytes = 2 * MmMaximumNonPagedPoolInBytes;
    if (MmSizeOfPagedPoolInBytes > ((ULONG_PTR)MmNonPagedSystemStart -
                                    (ULONG_PTR)MmPagedPoolStart))
    {
        //
        // On the other hand, we have limited VA space, so make sure that the VA
        // for paged pool doesn't overflow into nonpaged pool VA. Otherwise, set
        // whatever maximum is possible.
        //
        MmSizeOfPagedPoolInBytes = (ULONG_PTR)MmNonPagedSystemStart -
                                   (ULONG_PTR)MmPagedPoolStart;
    }

    //
    // Get the size in pages and make sure paged pool is at least 32MB.
    //
    Size = MmSizeOfPagedPoolInBytes;
    if (Size < MI_MIN_INIT_PAGED_POOLSIZE) Size = MI_MIN_INIT_PAGED_POOLSIZE;
    Size = BYTES_TO_PAGES(Size);

    //
    // Now check how many PTEs will be required for these many pages.
    // (1024 PTEs map one page table's worth of pages on x86.)
    //
    Size = (Size + (1024 - 1)) / 1024;

    //
    // Recompute the page-aligned size of the paged pool, in bytes and pages.
    //
    MmSizeOfPagedPoolInBytes = Size * PAGE_SIZE * 1024;
    MmSizeOfPagedPoolInPages = MmSizeOfPagedPoolInBytes >> PAGE_SHIFT;

    //
    // Let's be really sure this doesn't overflow into nonpaged system VA
    //
    ASSERT((MmSizeOfPagedPoolInBytes + (ULONG_PTR)MmPagedPoolStart) <=
           (ULONG_PTR)MmNonPagedSystemStart);

    //
    // This is where paged pool ends
    //
    MmPagedPoolEnd = (PVOID)(((ULONG_PTR)MmPagedPoolStart +
                              MmSizeOfPagedPoolInBytes) - 1);

    //
    // So now get the PDE for paged pool and zero it out
    //
    PointerPde = MiAddressToPde(MmPagedPoolStart);

#if (_MI_PAGING_LEVELS >= 3)
    /* On these systems, there's no double-mapping, so instead, the PPE and PXEs
     * are setup to span the entire paged pool area, so there's no need for the
     * system PD */
    ASSERT(FALSE);
#endif

    RtlZeroMemory(PointerPde,
                  (1 + MiAddressToPde(MmPagedPoolEnd) - PointerPde) * sizeof(MMPTE));

    //
    // Next, get the first and last PTE
    //
    PointerPte = MiAddressToPte(MmPagedPoolStart);
    MmPagedPoolInfo.FirstPteForPagedPool = PointerPte;
    MmPagedPoolInfo.LastPteForPagedPool = MiAddressToPte(MmPagedPoolEnd);

    //
    // Lock the PFN database
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    /* Allocate a page and map the first paged pool PDE */
    PageFrameIndex = MiRemoveZeroPage(0);
    TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
    MI_WRITE_VALID_PTE(PointerPde, TempPte);
#if (_MI_PAGING_LEVELS >= 3)
    /* Use the PPE of MmPagedPoolStart that was setup above */
    // Bla = PFN_FROM_PTE(PpeAddress(MmPagedPool...));
    ASSERT(FALSE);
#else
    /* Do it this way */
    // Bla = MmSystemPageDirectory[(PointerPde - (PMMPTE)PDE_BASE) / PDE_COUNT]

    /* Initialize the PFN entry for it, with the system page directory frame
       as the containing (PTE) frame */
    MiInitializePfnForOtherProcess(PageFrameIndex,
                                   PointerPde,
                                   MmSystemPageDirectory[(PointerPde - (PMMPTE)PDE_BASE) / PDE_COUNT]);
#endif

    //
    // Release the PFN database lock
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    //
    // We only have one PDE mapped for now... at fault time, additional PDEs
    // will be allocated to handle paged pool growth. This is where they'll have
    // to start.
    //
    MmPagedPoolInfo.NextPdeForPagedPoolExpansion = PointerPde + 1;

    //
    // We keep track of each page via a bit, so check how big the bitmap will
    // have to be (make sure to align our page count such that it fits nicely
    // into a 4-byte aligned bitmap.
    //
    // We'll also allocate the bitmap header itself part of the same buffer.
    //
    Size = Size * 1024;
    ASSERT(Size == MmSizeOfPagedPoolInPages);
    BitMapSize = Size;
    Size = sizeof(RTL_BITMAP) + (((Size + 31) / 32) * sizeof(ULONG));

    //
    // Allocate the allocation bitmap, which tells us which regions have not yet
    // been mapped into memory
    //
    MmPagedPoolInfo.PagedPoolAllocationMap = ExAllocatePoolWithTag(NonPagedPool,
                                                                   Size,
                                                                   ' mM');
    ASSERT(MmPagedPoolInfo.PagedPoolAllocationMap);

    //
    // Initialize it such that at first, only the first page's worth of PTEs is
    // marked as allocated (incidentially, the first PDE we allocated earlier).
    //
    RtlInitializeBitMap(MmPagedPoolInfo.PagedPoolAllocationMap,
                        (PULONG)(MmPagedPoolInfo.PagedPoolAllocationMap + 1),
                        BitMapSize);
    RtlSetAllBits(MmPagedPoolInfo.PagedPoolAllocationMap);
    RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap, 0, 1024);

    //
    // We have a second bitmap, which keeps track of where allocations end.
    // Given the allocation bitmap and a base address, we can therefore figure
    // out which page is the last page of that allocation, and thus how big the
    // entire allocation is.
    //
    MmPagedPoolInfo.EndOfPagedPoolBitmap = ExAllocatePoolWithTag(NonPagedPool,
                                                                 Size,
                                                                 ' mM');
    ASSERT(MmPagedPoolInfo.EndOfPagedPoolBitmap);
    RtlInitializeBitMap(MmPagedPoolInfo.EndOfPagedPoolBitmap,
                        (PULONG)(MmPagedPoolInfo.EndOfPagedPoolBitmap + 1),
                        BitMapSize);

    //
    // Since no allocations have been made yet, there are no bits set as the end
    //
    RtlClearAllBits(MmPagedPoolInfo.EndOfPagedPoolBitmap);

    //
    // Initialize paged pool.
    //
    InitializePool(PagedPool, 0);

    /* Default low threshold of 30MB or one fifth of paged pool.
       NOTE(review): at this point 'Size' holds the bitmap buffer size in
       bytes (reassigned above), not the pool size in pages -- the fraction
       below looks unintended; verify against MmSizeOfPagedPoolInPages. */
    MiLowPagedPoolThreshold = (30 * _1MB) >> PAGE_SHIFT;
    MiLowPagedPoolThreshold = min(MiLowPagedPoolThreshold, Size / 5);

    /* Default high threshold of 60MB or 25% */
    MiHighPagedPoolThreshold = (60 * _1MB) >> PAGE_SHIFT;
    MiHighPagedPoolThreshold = min(MiHighPagedPoolThreshold, (Size * 2) / 5);
    ASSERT(MiLowPagedPoolThreshold < MiHighPagedPoolThreshold);

    /* Setup the global session space */
    MiInitializeSystemSpaceMap(NULL);
}
1664
1665 VOID
1666 NTAPI
1667 MiDbgDumpMemoryDescriptors(VOID)
1668 {
1669 PLIST_ENTRY NextEntry;
1670 PMEMORY_ALLOCATION_DESCRIPTOR Md;
1671 ULONG TotalPages = 0;
1672 PCHAR
1673 MemType[] =
1674 {
1675 "ExceptionBlock ",
1676 "SystemBlock ",
1677 "Free ",
1678 "Bad ",
1679 "LoadedProgram ",
1680 "FirmwareTemporary ",
1681 "FirmwarePermanent ",
1682 "OsloaderHeap ",
1683 "OsloaderStack ",
1684 "SystemCode ",
1685 "HalCode ",
1686 "BootDriver ",
1687 "ConsoleInDriver ",
1688 "ConsoleOutDriver ",
1689 "StartupDpcStack ",
1690 "StartupKernelStack",
1691 "StartupPanicStack ",
1692 "StartupPcrPage ",
1693 "StartupPdrPage ",
1694 "RegistryData ",
1695 "MemoryData ",
1696 "NlsData ",
1697 "SpecialMemory ",
1698 "BBTMemory ",
1699 "LoaderReserve ",
1700 "LoaderXIPRom "
1701 };
1702
1703 DPRINT1("Base\t\tLength\t\tType\n");
1704 for (NextEntry = KeLoaderBlock->MemoryDescriptorListHead.Flink;
1705 NextEntry != &KeLoaderBlock->MemoryDescriptorListHead;
1706 NextEntry = NextEntry->Flink)
1707 {
1708 Md = CONTAINING_RECORD(NextEntry, MEMORY_ALLOCATION_DESCRIPTOR, ListEntry);
1709 DPRINT1("%08lX\t%08lX\t%s\n", Md->BasePage, Md->PageCount, MemType[Md->MemoryType]);
1710 TotalPages += Md->PageCount;
1711 }
1712
1713 DPRINT1("Total: %08lX (%d MB)\n", TotalPages, (TotalPages * PAGE_SIZE) / 1024 / 1024);
1714 }
1715
1716 BOOLEAN
1717 NTAPI
1718 MmArmInitSystem(IN ULONG Phase,
1719 IN PLOADER_PARAMETER_BLOCK LoaderBlock)
1720 {
1721 ULONG i;
1722 BOOLEAN IncludeType[LoaderMaximum];
1723 PVOID Bitmap;
1724 PPHYSICAL_MEMORY_RUN Run;
1725 PFN_NUMBER PageCount;
1726
1727 /* Dump memory descriptors */
1728 if (MiDbgEnableMdDump) MiDbgDumpMemoryDescriptors();
1729
1730 //
1731 // Instantiate memory that we don't consider RAM/usable
1732 // We use the same exclusions that Windows does, in order to try to be
1733 // compatible with WinLDR-style booting
1734 //
1735 for (i = 0; i < LoaderMaximum; i++) IncludeType[i] = TRUE;
1736 IncludeType[LoaderBad] = FALSE;
1737 IncludeType[LoaderFirmwarePermanent] = FALSE;
1738 IncludeType[LoaderSpecialMemory] = FALSE;
1739 IncludeType[LoaderBBTMemory] = FALSE;
1740 if (Phase == 0)
1741 {
1742 /* Initialize the phase 0 temporary event */
1743 KeInitializeEvent(&MiTempEvent, NotificationEvent, FALSE);
1744
1745 /* Set all the events to use the temporary event for now */
1746 MiLowMemoryEvent = &MiTempEvent;
1747 MiHighMemoryEvent = &MiTempEvent;
1748 MiLowPagedPoolEvent = &MiTempEvent;
1749 MiHighPagedPoolEvent = &MiTempEvent;
1750 MiLowNonPagedPoolEvent = &MiTempEvent;
1751 MiHighNonPagedPoolEvent = &MiTempEvent;
1752
1753 //
1754 // Define the basic user vs. kernel address space separation
1755 //
1756 MmSystemRangeStart = (PVOID)KSEG0_BASE;
1757 MmUserProbeAddress = (ULONG_PTR)MmSystemRangeStart - 0x10000;
1758 MmHighestUserAddress = (PVOID)(MmUserProbeAddress - 1);
1759
1760 /* Highest PTE and PDE based on the addresses above */
1761 MiHighestUserPte = MiAddressToPte(MmHighestUserAddress);
1762 MiHighestUserPde = MiAddressToPde(MmHighestUserAddress);
1763 #if (_MI_PAGING_LEVELS >= 3)
1764 /* We need the highest PPE and PXE addresses */
1765 ASSERT(FALSE);
1766 #endif
1767 //
1768 // Get the size of the boot loader's image allocations and then round
1769 // that region up to a PDE size, so that any PDEs we might create for
1770 // whatever follows are separate from the PDEs that boot loader might've
1771 // already created (and later, we can blow all that away if we want to).
1772 //
1773 MmBootImageSize = KeLoaderBlock->Extension->LoaderPagesSpanned;
1774 MmBootImageSize *= PAGE_SIZE;
1775 MmBootImageSize = (MmBootImageSize + PDE_MAPPED_VA - 1) & ~(PDE_MAPPED_VA - 1);
1776 ASSERT((MmBootImageSize % PDE_MAPPED_VA) == 0);
1777
1778 //
1779 // Set the size of session view, pool, and image
1780 //
1781 MmSessionSize = MI_SESSION_SIZE;
1782 MmSessionViewSize = MI_SESSION_VIEW_SIZE;
1783 MmSessionPoolSize = MI_SESSION_POOL_SIZE;
1784 MmSessionImageSize = MI_SESSION_IMAGE_SIZE;
1785
1786 //
1787 // Set the size of system view
1788 //
1789 MmSystemViewSize = MI_SYSTEM_VIEW_SIZE;
1790
1791 //
1792 // This is where it all ends
1793 //
1794 MiSessionImageEnd = (PVOID)PTE_BASE;
1795
1796 //
1797 // This is where we will load Win32k.sys and the video driver
1798 //
1799 MiSessionImageStart = (PVOID)((ULONG_PTR)MiSessionImageEnd -
1800 MmSessionImageSize);
1801
1802 //
1803 // So the view starts right below the session working set (itself below
1804 // the image area)
1805 //
1806 MiSessionViewStart = (PVOID)((ULONG_PTR)MiSessionImageEnd -
1807 MmSessionImageSize -
1808 MI_SESSION_WORKING_SET_SIZE -
1809 MmSessionViewSize);
1810
1811 //
1812 // Session pool follows
1813 //
1814 MiSessionPoolEnd = MiSessionViewStart;
1815 MiSessionPoolStart = (PVOID)((ULONG_PTR)MiSessionPoolEnd -
1816 MmSessionPoolSize);
1817
1818 //
1819 // And it all begins here
1820 //
1821 MmSessionBase = MiSessionPoolStart;
1822
1823 //
1824 // Sanity check that our math is correct
1825 //
1826 ASSERT((ULONG_PTR)MmSessionBase + MmSessionSize == PTE_BASE);
1827
1828 //
1829 // Session space ends wherever image session space ends
1830 //
1831 MiSessionSpaceEnd = MiSessionImageEnd;
1832
1833 //
1834 // System view space ends at session space, so now that we know where
1835 // this is, we can compute the base address of system view space itself.
1836 //
1837 MiSystemViewStart = (PVOID)((ULONG_PTR)MmSessionBase -
1838 MmSystemViewSize);
1839
1840 /* Compute the PTE addresses for all the addresses we carved out */
1841 MiSessionImagePteStart = MiAddressToPte(MiSessionImageStart);
1842 MiSessionImagePteEnd = MiAddressToPte(MiSessionImageEnd);
1843 MiSessionBasePte = MiAddressToPte(MmSessionBase);
1844 MiSessionLastPte = MiAddressToPte(MiSessionSpaceEnd);
1845
1846 /* Initialize the user mode image list */
1847 InitializeListHead(&MmLoadedUserImageList);
1848
1849 /* Initialize the paged pool mutex */
1850 KeInitializeGuardedMutex(&MmPagedPoolMutex);
1851
1852 /* Initialize the Loader Lock */
1853 KeInitializeMutant(&MmSystemLoadLock, FALSE);
1854
1855 /* Set the zero page event */
1856 KeInitializeEvent(&MmZeroingPageEvent, SynchronizationEvent, FALSE);
1857 MmZeroingPageThreadActive = FALSE;
1858
1859 //
1860 // Count physical pages on the system
1861 //
1862 PageCount = MiPagesInLoaderBlock(LoaderBlock, IncludeType);
1863
1864 //
1865 // Check if this is a machine with less than 19MB of RAM
1866 //
1867 if (PageCount < MI_MIN_PAGES_FOR_SYSPTE_TUNING)
1868 {
1869 //
1870 // Use the very minimum of system PTEs
1871 //
1872 MmNumberOfSystemPtes = 7000;
1873 }
1874 else
1875 {
1876 //
1877 // Use the default, but check if we have more than 32MB of RAM
1878 //
1879 MmNumberOfSystemPtes = 11000;
1880 if (PageCount > MI_MIN_PAGES_FOR_SYSPTE_BOOST)
1881 {
1882 //
1883 // Double the amount of system PTEs
1884 //
1885 MmNumberOfSystemPtes <<= 1;
1886 }
1887 }
1888
1889 DPRINT("System PTE count has been tuned to %d (%d bytes)\n",
1890 MmNumberOfSystemPtes, MmNumberOfSystemPtes * PAGE_SIZE);
1891
1892 /* Initialize the working set lock */
1893 ExInitializePushLock((PULONG_PTR)&MmSystemCacheWs.WorkingSetMutex);
1894
1895 /* Set commit limit */
1896 MmTotalCommitLimit = 2 * _1GB;
1897 MmTotalCommitLimitMaximum = MmTotalCommitLimit;
1898
1899 /* Has the allocation fragment been setup? */
1900 if (!MmAllocationFragment)
1901 {
1902 /* Use the default value */
1903 MmAllocationFragment = MI_ALLOCATION_FRAGMENT;
1904 if (PageCount < ((256 * _1MB) / PAGE_SIZE))
1905 {
1906 /* On memory systems with less than 256MB, divide by 4 */
1907 MmAllocationFragment = MI_ALLOCATION_FRAGMENT / 4;
1908 }
1909 else if (PageCount < (_1GB / PAGE_SIZE))
1910 {
1911 /* On systems with less than 1GB, divide by 2 */
1912 MmAllocationFragment = MI_ALLOCATION_FRAGMENT / 2;
1913 }
1914 }
1915 else
1916 {
1917 /* Convert from 1KB fragments to pages */
1918 MmAllocationFragment *= _1KB;
1919 MmAllocationFragment = ROUND_TO_PAGES(MmAllocationFragment);
1920
1921 /* Don't let it past the maximum */
1922 MmAllocationFragment = min(MmAllocationFragment,
1923 MI_MAX_ALLOCATION_FRAGMENT);
1924
1925 /* Don't let it too small either */
1926 MmAllocationFragment = max(MmAllocationFragment,
1927 MI_MIN_ALLOCATION_FRAGMENT);
1928 }
1929
1930 /* Initialize the platform-specific parts */
1931 MiInitMachineDependent(LoaderBlock);
1932
1933 //
1934 // Build the physical memory block
1935 //
1936 MmPhysicalMemoryBlock = MmInitializeMemoryLimits(LoaderBlock,
1937 IncludeType);
1938
1939 //
1940 // Allocate enough buffer for the PFN bitmap
1941 // Align it up to a 32-bit boundary
1942 //
1943 Bitmap = ExAllocatePoolWithTag(NonPagedPool,
1944 (((MmHighestPhysicalPage + 1) + 31) / 32) * 4,
1945 ' mM');
1946 if (!Bitmap)
1947 {
1948 //
1949 // This is critical
1950 //
1951 KeBugCheckEx(INSTALL_MORE_MEMORY,
1952 MmNumberOfPhysicalPages,
1953 MmLowestPhysicalPage,
1954 MmHighestPhysicalPage,
1955 0x101);
1956 }
1957
1958 //
1959 // Initialize it and clear all the bits to begin with
1960 //
1961 RtlInitializeBitMap(&MiPfnBitMap,
1962 Bitmap,
1963 MmHighestPhysicalPage + 1);
1964 RtlClearAllBits(&MiPfnBitMap);
1965
1966 //
1967 // Loop physical memory runs
1968 //
1969 for (i = 0; i < MmPhysicalMemoryBlock->NumberOfRuns; i++)
1970 {
1971 //
1972 // Get the run
1973 //
1974 Run = &MmPhysicalMemoryBlock->Run[i];
1975 DPRINT("PHYSICAL RAM [0x%08p to 0x%08p]\n",
1976 Run->BasePage << PAGE_SHIFT,
1977 (Run->BasePage + Run->PageCount) << PAGE_SHIFT);
1978
1979 //
1980 // Make sure it has pages inside it
1981 //
1982 if (Run->PageCount)
1983 {
1984 //
1985 // Set the bits in the PFN bitmap
1986 //
1987 RtlSetBits(&MiPfnBitMap, Run->BasePage, Run->PageCount);
1988 }
1989 }
1990
1991 /* Look for large page cache entries that need caching */
1992 MiSyncCachedRanges();
1993
1994 /* Loop for HAL Heap I/O device mappings that need coherency tracking */
1995 MiAddHalIoMappings();
1996
1997 /* Set the initial resident page count */
1998 MmResidentAvailablePages = MmAvailablePages - 32;
1999
2000 /* Initialize large page structures on PAE/x64, and MmProcessList on x86 */
2001 MiInitializeLargePageSupport();
2002
2003 /* Check if the registry says any drivers should be loaded with large pages */
2004 MiInitializeDriverLargePageList();
2005
2006 /* Relocate the boot drivers into system PTE space and fixup their PFNs */
2007 MiReloadBootLoadedDrivers(LoaderBlock);
2008
2009 /* FIXME: Call out into Driver Verifier for initialization */
2010
2011 /* Check how many pages the system has */
2012 if (MmNumberOfPhysicalPages <= ((13 * _1MB) / PAGE_SIZE))
2013 {
2014 /* Set small system */
2015 MmSystemSize = MmSmallSystem;
2016 }
2017 else if (MmNumberOfPhysicalPages <= ((19 * _1MB) / PAGE_SIZE))
2018 {
2019 /* Set small system and add 100 pages for the cache */
2020 MmSystemSize = MmSmallSystem;
2021 MmSystemCacheWsMinimum += 100;
2022 }
2023 else
2024 {
2025 /* Set medium system and add 400 pages for the cache */
2026 MmSystemSize = MmMediumSystem;
2027 MmSystemCacheWsMinimum += 400;
2028 }
2029
2030 /* Check for less than 24MB */
2031 if (MmNumberOfPhysicalPages < ((24 * _1MB) / PAGE_SIZE))
2032 {
2033 /* No more than 32 pages */
2034 MmSystemCacheWsMinimum = 32;
2035 }
2036
2037 /* Check for more than 32MB */
2038 if (MmNumberOfPhysicalPages >= ((32 * _1MB) / PAGE_SIZE))
2039 {
2040 /* Check for product type being "Wi" for WinNT */
2041 if (MmProductType == '\0i\0W')
2042 {
2043 /* Then this is a large system */
2044 MmSystemSize = MmLargeSystem;
2045 }
2046 else
2047 {
2048 /* For servers, we need 64MB to consider this as being large */
2049 if (MmNumberOfPhysicalPages >= ((64 * _1MB) / PAGE_SIZE))
2050 {
2051 /* Set it as large */
2052 MmSystemSize = MmLargeSystem;
2053 }
2054 }
2055 }
2056
2057 /* Check for more than 33 MB */
2058 if (MmNumberOfPhysicalPages > ((33 * _1MB) / PAGE_SIZE))
2059 {
2060 /* Add another 500 pages to the cache */
2061 MmSystemCacheWsMinimum += 500;
2062 }
2063
2064 /* Now setup the shared user data fields */
2065 ASSERT(SharedUserData->NumberOfPhysicalPages == 0);
2066 SharedUserData->NumberOfPhysicalPages = MmNumberOfPhysicalPages;
2067 SharedUserData->LargePageMinimum = 0;
2068
2069 /* Check for workstation (Wi for WinNT) */
2070 if (MmProductType == '\0i\0W')
2071 {
2072 /* Set Windows NT Workstation product type */
2073 SharedUserData->NtProductType = NtProductWinNt;
2074 MmProductType = 0;
2075 }
2076 else
2077 {
2078 /* Check for LanMan server */
2079 if (MmProductType == '\0a\0L')
2080 {
2081 /* This is a domain controller */
2082 SharedUserData->NtProductType = NtProductLanManNt;
2083 }
2084 else
2085 {
2086 /* Otherwise it must be a normal server */
2087 SharedUserData->NtProductType = NtProductServer;
2088 }
2089
2090 /* Set the product type, and make the system more aggressive with low memory */
2091 MmProductType = 1;
2092 MmMinimumFreePages = 81;
2093 }
2094
2095 /* Update working set tuning parameters */
2096 MiAdjustWorkingSetManagerParameters(!MmProductType);
2097
2098 /* Finetune the page count by removing working set and NP expansion */
2099 MmResidentAvailablePages -= MiExpansionPoolPagesInitialCharge;
2100 MmResidentAvailablePages -= MmSystemCacheWsMinimum;
2101 MmResidentAvailableAtInit = MmResidentAvailablePages;
2102 if (MmResidentAvailablePages <= 0)
2103 {
2104 /* This should not happen */
2105 DPRINT1("System cache working set too big\n");
2106 return FALSE;
2107 }
2108
2109 /* Initialize the system cache */
2110 //MiInitializeSystemCache(MmSystemCacheWsMinimum, MmAvailablePages);
2111
2112 /* Update the commit limit */
2113 MmTotalCommitLimit = MmAvailablePages;
2114 if (MmTotalCommitLimit > 1024) MmTotalCommitLimit -= 1024;
2115 MmTotalCommitLimitMaximum = MmTotalCommitLimit;
2116
2117 /* Size up paged pool and build the shadow system page directory */
2118 MiBuildPagedPool();
2119
2120 /* Debugger physical memory support is now ready to be used */
2121 MmDebugPte = MiAddressToPte(MiDebugMapping);
2122
2123 /* Initialize the loaded module list */
2124 MiInitializeLoadedModuleList(LoaderBlock);
2125 }
2126
2127 //
2128 // Always return success for now
2129 //
2130 return TRUE;
2131 }
2132
2133 /* EOF */