[NTOS]: Compute MiHighestUserPte, MiHighestUserPde, MiSessionImagePteStart, MiSession...
[reactos.git] / reactos / ntoskrnl / mm / ARM3 / mminit.c
1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: BSD - See COPYING.ARM in the top level directory
4 * FILE: ntoskrnl/mm/ARM3/mminit.c
5 * PURPOSE: ARM Memory Manager Initialization
6 * PROGRAMMERS: ReactOS Portable Systems Group
7 */
8
9 /* INCLUDES *******************************************************************/
10
11 #include <ntoskrnl.h>
12 #define NDEBUG
13 #include <debug.h>
14
15 #line 15 "ARM³::INIT"
16 #define MODULE_INVOLVED_IN_ARM3
17 #include "miarm.h"
18
19 /* GLOBALS ********************************************************************/
20
21 //
22 // These are all registry-configurable, but by default, the memory manager will
23 // figure out the most appropriate values.
24 //
25 ULONG MmMaximumNonPagedPoolPercent;
26 ULONG MmSizeOfNonPagedPoolInBytes;
27 ULONG MmMaximumNonPagedPoolInBytes;
28
29 /* Some of the same values, in pages */
30 PFN_NUMBER MmMaximumNonPagedPoolInPages;
31
32 //
33 // These numbers describe the discrete equation components of the nonpaged
34 // pool sizing algorithm.
35 //
36 // They are described on http://support.microsoft.com/default.aspx/kb/126402/ja
37 // along with the algorithm that uses them, which is implemented later below.
38 //
39 ULONG MmMinimumNonPagedPoolSize = 256 * 1024;
40 ULONG MmMinAdditionNonPagedPoolPerMb = 32 * 1024;
41 ULONG MmDefaultMaximumNonPagedPool = 1024 * 1024;
42 ULONG MmMaxAdditionNonPagedPoolPerMb = 400 * 1024;
43
44 //
45 // The memory layout (and especially variable names) of the NT kernel mode
46 // components can be a bit hard to twig, especially when it comes to the non
47 // paged area.
48 //
49 // There are really two components to the non-paged pool:
50 //
51 // - The initial nonpaged pool, sized dynamically up to a maximum.
52 // - The expansion nonpaged pool, sized dynamically up to a maximum.
53 //
54 // The initial nonpaged pool is physically contiguous for performance, and
55 // immediately follows the PFN database, typically sharing the same PDE. It is
56 // a very small resource (32MB on a 1GB system), and capped at 128MB.
57 //
58 // Right now we call this the "ARM³ Nonpaged Pool" and it begins somewhere after
59 // the PFN database (which starts at 0xB0000000).
60 //
61 // The expansion nonpaged pool, on the other hand, can grow much bigger (400MB
62 // for a 1GB system). On ARM³ however, it is currently capped at 128MB.
63 //
64 // The address where the initial nonpaged pool starts is aptly named
65 // MmNonPagedPoolStart, and it describes a range of MmSizeOfNonPagedPoolInBytes
66 // bytes.
67 //
68 // Expansion nonpaged pool starts at an address described by the variable called
69 // MmNonPagedPoolExpansionStart, and it goes on for MmMaximumNonPagedPoolInBytes
70 // minus MmSizeOfNonPagedPoolInBytes bytes, always reaching MmNonPagedPoolEnd
71 // (because of the way it's calculated) at 0xFFBE0000.
72 //
73 // Initial nonpaged pool is allocated and mapped early-on during boot, but what
74 // about the expansion nonpaged pool? It is instead composed of special pages
75 // which belong to what are called System PTEs. These PTEs are the matter of a
76 // later discussion, but they are also considered part of the "nonpaged" OS, due
77 // to the fact that they are never paged out -- once an address is described by
78 // a System PTE, it is always valid, until the System PTE is torn down.
79 //
80 // System PTEs are actually composed of two "spaces", the system space proper,
81 // and the nonpaged pool expansion space. The latter, as we've already seen,
82 // begins at MmNonPagedPoolExpansionStart. Based on the number of System PTEs
83 // that the system will support, the remaining address space below this address
84 // is used to hold the system space PTEs. This address, in turn, is held in the
85 // variable named MmNonPagedSystemStart, which itself is never allowed to go
86 // below 0xEB000000 (thus creating an upper bound on the number of System PTEs).
87 //
88 // This means that 330MB are reserved for total nonpaged system VA, on top of
89 // whatever the initial nonpaged pool allocation is.
90 //
91 // The following URLs, valid as of April 23rd, 2008, support this evidence:
92 //
93 // http://www.cs.miami.edu/~burt/journal/NT/memory.html
94 // http://www.ditii.com/2007/09/28/windows-memory-management-x86-virtual-address-space/
95 //
96 PVOID MmNonPagedSystemStart;
97 PVOID MmNonPagedPoolStart;
98 PVOID MmNonPagedPoolExpansionStart;
99 PVOID MmNonPagedPoolEnd = MI_NONPAGED_POOL_END;
100
101 //
102 // This is where paged pool starts by default
103 //
104 PVOID MmPagedPoolStart = MI_PAGED_POOL_START;
105 PVOID MmPagedPoolEnd;
106
107 //
108 // And this is its default size
109 //
110 ULONG MmSizeOfPagedPoolInBytes = MI_MIN_INIT_PAGED_POOLSIZE;
111 PFN_NUMBER MmSizeOfPagedPoolInPages = MI_MIN_INIT_PAGED_POOLSIZE / PAGE_SIZE;
112
113 //
114 // Session space starts at 0xBFFFFFFF and grows downwards
115 // By default, it includes an 8MB image area where we map win32k and video card
116 // drivers, followed by a 4MB area containing the session's working set. This is
117 // then followed by a 20MB mapped view area and finally by the session's paged
118 // pool, by default 16MB.
119 //
120 // On a normal system, this results in session space occupying the region from
121 // 0xBD000000 to 0xC0000000
122 //
123 // See miarm.h for the defines that determine the sizing of this region. On an
124 // NT system, some of these can be configured through the registry, but we don't
125 // support that yet.
126 //
127 PVOID MiSessionSpaceEnd; // 0xC0000000
128 PVOID MiSessionImageEnd; // 0xC0000000
129 PVOID MiSessionImageStart; // 0xBF800000
130 PVOID MiSessionViewStart; // 0xBE000000
131 PVOID MiSessionPoolEnd; // 0xBE000000
132 PVOID MiSessionPoolStart; // 0xBD000000
133 PVOID MmSessionBase; // 0xBD000000
134 ULONG MmSessionSize;
135 ULONG MmSessionViewSize;
136 ULONG MmSessionPoolSize;
137 ULONG MmSessionImageSize;
138
139 /*
140 * These are the PTE addresses of the boundaries carved out above
141 */
142 PMMPTE MiSessionImagePteStart;
143 PMMPTE MiSessionImagePteEnd;
144 PMMPTE MiSessionBasePte;
145 PMMPTE MiSessionLastPte;
146
147 //
148 // The system view space, on the other hand, is where sections that are memory
149 // mapped into "system space" end up.
150 //
151 // By default, it is a 16MB region.
152 //
153 PVOID MiSystemViewStart;
154 ULONG MmSystemViewSize;
155
156 //
157 // A copy of the system page directory (the page directory associated with the
158 // System process) is kept (double-mapped) by the manager in order to lazily
159 // map paged pool PDEs into external processes when they fault on a paged pool
160 // address.
161 //
162 PFN_NUMBER MmSystemPageDirectory[PD_COUNT];
163 PMMPTE MmSystemPagePtes;
164
165 //
166 // The system cache starts right after hyperspace. The first few pages are for
167 // keeping track of the system working set list.
168 //
169 // This should be 0xC0C00000 -- the cache itself starts at 0xC1000000
170 //
171 PMMWSL MmSystemCacheWorkingSetList = MI_SYSTEM_CACHE_WS_START;
172
173 //
174 // Windows NT seems to choose between 7000, 11000 and 50000
175 // On systems with more than 32MB, this number is then doubled, and further
176 // aligned up to a PDE boundary (4MB).
177 //
178 ULONG MmNumberOfSystemPtes;
179
180 //
181 // This is how many pages the PFN database will take up
182 // In Windows, this includes the Quark Color Table, but not in ARM³
183 //
184 ULONG MxPfnAllocation;
185
186 //
187 // Unlike the old ReactOS Memory Manager, ARM³ (and Windows) does not keep track
188 // of pages that are not actually valid physical memory, such as ACPI reserved
189 // regions, BIOS address ranges, or holes in physical memory address space which
190 // could indicate device-mapped I/O memory.
191 //
192 // In fact, the lack of a PFN entry for a page usually indicates that this is
193 // I/O space instead.
194 //
195 // A bitmap, called the PFN bitmap, keeps track of all page frames by assigning
196 // a bit to each. If the bit is set, then the page is valid physical RAM.
197 //
198 RTL_BITMAP MiPfnBitMap;
199
200 //
201 // This structure describes the different pieces of RAM-backed address space
202 //
203 PPHYSICAL_MEMORY_DESCRIPTOR MmPhysicalMemoryBlock;
204
205 //
206 // This is where we keep track of the most basic physical layout markers
207 //
208 ULONG MmNumberOfPhysicalPages, MmHighestPhysicalPage, MmLowestPhysicalPage = -1;
209
210 //
211 // The total number of pages mapped by the boot loader, which include the kernel
212 // HAL, boot drivers, registry, NLS files and other loader data structures is
213 // kept track of here. This depends on "LoaderPagesSpanned" being correct when
214 // coming from the loader.
215 //
216 // This number is later aligned up to a PDE boundary.
217 //
218 ULONG MmBootImageSize;
219
220 //
221 // These three variables keep track of the core separation of address space that
222 // exists between kernel mode and user mode.
223 //
224 ULONG MmUserProbeAddress;
225 PVOID MmHighestUserAddress;
226 PVOID MmSystemRangeStart;
227
228 /* And these store the respective highest PTE/PDE address */
229 PMMPTE MiHighestUserPte;
230 PMMPDE MiHighestUserPde;
231
232 /* These variables define the system cache address space */
233 PVOID MmSystemCacheStart;
234 PVOID MmSystemCacheEnd;
235 MMSUPPORT MmSystemCacheWs;
236
237 //
238 // This is where hyperspace ends (followed by the system cache working set)
239 //
240 PVOID MmHyperSpaceEnd;
241
242 //
243 // Page coloring algorithm data
244 //
245 ULONG MmSecondaryColors;
246 ULONG MmSecondaryColorMask;
247
248 //
249 // Actual (registry-configurable) size of a GUI thread's stack
250 //
251 ULONG MmLargeStackSize = KERNEL_LARGE_STACK_SIZE;
252
253 //
254 // Before we have a PFN database, memory comes straight from our physical memory
255 // blocks, which is nice because it's guaranteed contiguous and also because once
256 // we take a page from here, the system doesn't see it anymore.
257 // However, once the fun is over, those pages must be re-integrated back into
258 // PFN society life, and that requires us keeping a copy of the original layout
259 // so that we can parse it later.
260 //
261 PMEMORY_ALLOCATION_DESCRIPTOR MxFreeDescriptor;
262 MEMORY_ALLOCATION_DESCRIPTOR MxOldFreeDescriptor;
263
264 /*
265 * For each page's worth bytes of L2 cache in a given set/way line, the zero and
266 * free lists are organized in what is called a "color".
267 *
268 * This array points to the two lists, so it can be thought of as a multi-dimensional
269 * array of MmFreePagesByColor[2][MmSecondaryColors]. Since the number is dynamic,
270 * we describe the array in pointer form instead.
271 *
272 * On a final note, the color tables themselves are right after the PFN database.
273 */
274 C_ASSERT(FreePageList == 1);
275 PMMCOLOR_TABLES MmFreePagesByColor[FreePageList + 1];
276
277 /* An event used in Phase 0 before the rest of the system is ready to go */
278 KEVENT MiTempEvent;
279
280 /* All the events used for memory threshold notifications */
281 PKEVENT MiLowMemoryEvent;
282 PKEVENT MiHighMemoryEvent;
283 PKEVENT MiLowPagedPoolEvent;
284 PKEVENT MiHighPagedPoolEvent;
285 PKEVENT MiLowNonPagedPoolEvent;
286 PKEVENT MiHighNonPagedPoolEvent;
287
288 /* The actual thresholds themselves, in page numbers */
289 PFN_NUMBER MmLowMemoryThreshold;
290 PFN_NUMBER MmHighMemoryThreshold;
291 PFN_NUMBER MiLowPagedPoolThreshold;
292 PFN_NUMBER MiHighPagedPoolThreshold;
293 PFN_NUMBER MiLowNonPagedPoolThreshold;
294 PFN_NUMBER MiHighNonPagedPoolThreshold;
295
296 /*
297 * This number determines how many free pages must exist, at minimum, until we
298 * start trimming working sets and flushing modified pages to obtain more free
299 * pages.
300 *
301 * This number changes if the system detects that this is a server product
302 */
303 PFN_NUMBER MmMinimumFreePages = 26;
304
305 /*
306 * This number indicates how many pages we consider to be a low limit of having
307 * "plenty" of free memory.
308 *
309 * It is doubled on systems that have more than 63MB of memory
310 */
311 PFN_NUMBER MmPlentyFreePages = 400;
312
313 /* These values store the type of system this is (small, med, large) and if server */
314 ULONG MmProductType;
315 MM_SYSTEMSIZE MmSystemSize;
316
317 /*
318 * These values store the cache working set minimums and maximums, in pages
319 *
320 * The minimum value is boosted on systems with more than 24MB of RAM, and cut
321 * down to only 32 pages on embedded (<24MB RAM) systems.
322 *
323 * An extra boost of 2MB is given on systems with more than 33MB of RAM.
324 */
325 PFN_NUMBER MmSystemCacheWsMinimum = 288;
326 PFN_NUMBER MmSystemCacheWsMaximum = 350;
327
328 /* FIXME: Move to cache/working set code later */
329 BOOLEAN MmLargeSystemCache;
330
331 /* PRIVATE FUNCTIONS **********************************************************/
332
333 //
334 // In Bavaria, this is probably a hate crime
335 //
336 VOID
337 FASTCALL
338 MiSyncARM3WithROS(IN PVOID AddressStart,
339 IN PVOID AddressEnd)
340 {
341 //
342 // Puerile piece of junk-grade carbonized horseshit puss sold to the lowest bidder
343 //
344 ULONG Pde = ADDR_TO_PDE_OFFSET(AddressStart);
345 while (Pde <= ADDR_TO_PDE_OFFSET(AddressEnd))
346 {
347 //
348 // This both odious and heinous
349 //
350 extern ULONG MmGlobalKernelPageDirectory[1024];
351 MmGlobalKernelPageDirectory[Pde] = ((PULONG)PDE_BASE)[Pde];
352 Pde++;
353 }
354 }
355
356 PFN_NUMBER
357 NTAPI
358 MxGetNextPage(IN PFN_NUMBER PageCount)
359 {
360 PFN_NUMBER Pfn;
361
362 /* Make sure we have enough pages */
363 if (PageCount > MxFreeDescriptor->PageCount)
364 {
365 /* Crash the system */
366 KeBugCheckEx(INSTALL_MORE_MEMORY,
367 MmNumberOfPhysicalPages,
368 MxFreeDescriptor->PageCount,
369 MxOldFreeDescriptor.PageCount,
370 PageCount);
371 }
372
373 /* Use our lowest usable free pages */
374 Pfn = MxFreeDescriptor->BasePage;
375 MxFreeDescriptor->BasePage += PageCount;
376 MxFreeDescriptor->PageCount -= PageCount;
377 return Pfn;
378 }
379
380 VOID
381 NTAPI
382 MiComputeColorInformation(VOID)
383 {
384 ULONG L2Associativity;
385
386 /* Check if no setting was provided already */
387 if (!MmSecondaryColors)
388 {
389 /* Get L2 cache information */
390 L2Associativity = KeGetPcr()->SecondLevelCacheAssociativity;
391
392 /* The number of colors is the number of cache bytes by set/way */
393 MmSecondaryColors = KeGetPcr()->SecondLevelCacheSize;
394 if (L2Associativity) MmSecondaryColors /= L2Associativity;
395 }
396
397 /* Now convert cache bytes into pages */
398 MmSecondaryColors >>= PAGE_SHIFT;
399 if (!MmSecondaryColors)
400 {
401 /* If there was no cache data from the KPCR, use the default colors */
402 MmSecondaryColors = MI_SECONDARY_COLORS;
403 }
404 else
405 {
406 /* Otherwise, make sure there aren't too many colors */
407 if (MmSecondaryColors > MI_MAX_SECONDARY_COLORS)
408 {
409 /* Set the maximum */
410 MmSecondaryColors = MI_MAX_SECONDARY_COLORS;
411 }
412
413 /* Make sure there aren't too little colors */
414 if (MmSecondaryColors < MI_MIN_SECONDARY_COLORS)
415 {
416 /* Set the default */
417 MmSecondaryColors = MI_SECONDARY_COLORS;
418 }
419
420 /* Finally make sure the colors are a power of two */
421 if (MmSecondaryColors & (MmSecondaryColors - 1))
422 {
423 /* Set the default */
424 MmSecondaryColors = MI_SECONDARY_COLORS;
425 }
426 }
427
428 /* Compute the mask and store it */
429 MmSecondaryColorMask = MmSecondaryColors - 1;
430 KeGetCurrentPrcb()->SecondaryColorMask = MmSecondaryColorMask;
431 }
432
//
// Places the two free-page color tables (zeroed list and free list) right
// after the second PFN database copy, maps physical pages behind any PTEs
// in that range that are not yet valid, and initializes every color's list
// heads to empty.
//
VOID
NTAPI
MiInitializeColorTables(VOID)
{
    ULONG i;
    PMMPTE PointerPte, LastPte;
    MMPTE TempPte = ValidKernelPte;

    /* The color table starts after the ARM3 PFN database */
    MmFreePagesByColor[0] = (PMMCOLOR_TABLES)&MmPfnDatabase[1][MmHighestPhysicalPage + 1];

    /* Loop the PTEs. We have two color tables for each secondary color */
    PointerPte = MiAddressToPte(&MmFreePagesByColor[0][0]);
    LastPte = MiAddressToPte((ULONG_PTR)MmFreePagesByColor[0] +
                             (2 * MmSecondaryColors * sizeof(MMCOLOR_TABLES))
                             - 1);
    while (PointerPte <= LastPte)
    {
        /* Only map PTEs that are not already valid (the range may partially
         * overlap pages mapped for the PFN database itself) */
        if (PointerPte->u.Hard.Valid == 0)
        {
            /* Grab a boot-time page and map it */
            TempPte.u.Hard.PageFrameNumber = MxGetNextPage(1);
            ASSERT(TempPte.u.Hard.Valid == 1);
            *PointerPte = TempPte;

            /* Zero out the freshly mapped page */
            RtlZeroMemory(MiPteToAddress(PointerPte), PAGE_SIZE);
        }

        /* Next PTE */
        PointerPte++;
    }

    /* The second (free-list) table immediately follows the first */
    MmFreePagesByColor[1] = &MmFreePagesByColor[0][MmSecondaryColors];

    /* Initialize every color's list heads */
    for (i = 0; i < MmSecondaryColors; i++)
    {
        /* 0xFFFFFFFF appears to act as the empty-list sentinel here, with a
         * zero count -- NOTE(review): confirm against the list consumers */
        MmFreePagesByColor[ZeroedPageList][i].Flink = 0xFFFFFFFF;
        MmFreePagesByColor[ZeroedPageList][i].Blink = (PVOID)0xFFFFFFFF;
        MmFreePagesByColor[ZeroedPageList][i].Count = 0;
        MmFreePagesByColor[FreePageList][i].Flink = 0xFFFFFFFF;
        MmFreePagesByColor[FreePageList][i].Blink = (PVOID)0xFFFFFFFF;
        MmFreePagesByColor[FreePageList][i].Count = 0;
    }
}
482
483 BOOLEAN
484 NTAPI
485 MiIsRegularMemory(IN PLOADER_PARAMETER_BLOCK LoaderBlock,
486 IN PFN_NUMBER Pfn)
487 {
488 PLIST_ENTRY NextEntry;
489 PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
490
491 /* Loop the memory descriptors */
492 NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
493 while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
494 {
495 /* Get the memory descriptor */
496 MdBlock = CONTAINING_RECORD(NextEntry,
497 MEMORY_ALLOCATION_DESCRIPTOR,
498 ListEntry);
499
500 /* Check if this PFN could be part of the block */
501 if (Pfn >= (MdBlock->BasePage))
502 {
503 /* Check if it really is part of the block */
504 if (Pfn < (MdBlock->BasePage + MdBlock->PageCount))
505 {
506 /* Check if the block is actually memory we don't map */
507 if ((MdBlock->MemoryType == LoaderFirmwarePermanent) ||
508 (MdBlock->MemoryType == LoaderBBTMemory) ||
509 (MdBlock->MemoryType == LoaderSpecialMemory))
510 {
511 /* We don't need PFN database entries for this memory */
512 break;
513 }
514
515 /* This is memory we want to map */
516 return TRUE;
517 }
518 }
519 else
520 {
521 /* Blocks are ordered, so if it's not here, it doesn't exist */
522 break;
523 }
524
525 /* Get to the next descriptor */
526 NextEntry = MdBlock->ListEntry.Flink;
527 }
528
529 /* Check if this PFN is actually from our free memory descriptor */
530 if ((Pfn >= MxOldFreeDescriptor.BasePage) &&
531 (Pfn < MxOldFreeDescriptor.BasePage + MxOldFreeDescriptor.PageCount))
532 {
533 /* We use these pages for initial mappings, so we do want to count them */
534 return TRUE;
535 }
536
537 /* Otherwise this isn't memory that we describe or care about */
538 return FALSE;
539 }
540
541 VOID
542 NTAPI
543 MiMapPfnDatabase(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
544 {
545 ULONG FreePage, FreePageCount, PagesLeft, BasePage, PageCount;
546 PLIST_ENTRY NextEntry;
547 PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
548 PMMPTE PointerPte, LastPte;
549 MMPTE TempPte = ValidKernelPte;
550
551 /* Get current page data, since we won't be using MxGetNextPage as it would corrupt our state */
552 FreePage = MxFreeDescriptor->BasePage;
553 FreePageCount = MxFreeDescriptor->PageCount;
554 PagesLeft = 0;
555
556 /* Loop the memory descriptors */
557 NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
558 while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
559 {
560 /* Get the descriptor */
561 MdBlock = CONTAINING_RECORD(NextEntry,
562 MEMORY_ALLOCATION_DESCRIPTOR,
563 ListEntry);
564 if ((MdBlock->MemoryType == LoaderFirmwarePermanent) ||
565 (MdBlock->MemoryType == LoaderBBTMemory) ||
566 (MdBlock->MemoryType == LoaderSpecialMemory))
567 {
568 /* These pages are not part of the PFN database */
569 NextEntry = MdBlock->ListEntry.Flink;
570 continue;
571 }
572
573 /* Next, check if this is our special free descriptor we've found */
574 if (MdBlock == MxFreeDescriptor)
575 {
576 /* Use the real numbers instead */
577 BasePage = MxOldFreeDescriptor.BasePage;
578 PageCount = MxOldFreeDescriptor.PageCount;
579 }
580 else
581 {
582 /* Use the descriptor's numbers */
583 BasePage = MdBlock->BasePage;
584 PageCount = MdBlock->PageCount;
585 }
586
587 /* Get the PTEs for this range */
588 PointerPte = MiAddressToPte(&MmPfnDatabase[0][BasePage]);
589 LastPte = MiAddressToPte(((ULONG_PTR)&MmPfnDatabase[0][BasePage + PageCount]) - 1);
590 DPRINT("MD Type: %lx Base: %lx Count: %lx\n", MdBlock->MemoryType, BasePage, PageCount);
591
592 /* Loop them */
593 while (PointerPte <= LastPte)
594 {
595 /* We'll only touch PTEs that aren't already valid */
596 if (PointerPte->u.Hard.Valid == 0)
597 {
598 /* Use the next free page */
599 TempPte.u.Hard.PageFrameNumber = FreePage;
600 ASSERT(FreePageCount != 0);
601
602 /* Consume free pages */
603 FreePage++;
604 FreePageCount--;
605 if (!FreePageCount)
606 {
607 /* Out of memory */
608 KeBugCheckEx(INSTALL_MORE_MEMORY,
609 MmNumberOfPhysicalPages,
610 FreePageCount,
611 MxOldFreeDescriptor.PageCount,
612 1);
613 }
614
615 /* Write out this PTE */
616 PagesLeft++;
617 ASSERT(PointerPte->u.Hard.Valid == 0);
618 ASSERT(TempPte.u.Hard.Valid == 1);
619 *PointerPte = TempPte;
620
621 /* Zero this page */
622 RtlZeroMemory(MiPteToAddress(PointerPte), PAGE_SIZE);
623 }
624
625 /* Next! */
626 PointerPte++;
627 }
628
629 /* Get the PTEs for this range */
630 PointerPte = MiAddressToPte(&MmPfnDatabase[1][BasePage]);
631 LastPte = MiAddressToPte(((ULONG_PTR)&MmPfnDatabase[1][BasePage + PageCount]) - 1);
632 DPRINT("MD Type: %lx Base: %lx Count: %lx\n", MdBlock->MemoryType, BasePage, PageCount);
633
634 /* Loop them */
635 while (PointerPte <= LastPte)
636 {
637 /* We'll only touch PTEs that aren't already valid */
638 if (PointerPte->u.Hard.Valid == 0)
639 {
640 /* Use the next free page */
641 TempPte.u.Hard.PageFrameNumber = FreePage;
642 ASSERT(FreePageCount != 0);
643
644 /* Consume free pages */
645 FreePage++;
646 FreePageCount--;
647 if (!FreePageCount)
648 {
649 /* Out of memory */
650 KeBugCheckEx(INSTALL_MORE_MEMORY,
651 MmNumberOfPhysicalPages,
652 FreePageCount,
653 MxOldFreeDescriptor.PageCount,
654 1);
655 }
656
657 /* Write out this PTE */
658 PagesLeft++;
659 ASSERT(PointerPte->u.Hard.Valid == 0);
660 ASSERT(TempPte.u.Hard.Valid == 1);
661 *PointerPte = TempPte;
662
663 /* Zero this page */
664 RtlZeroMemory(MiPteToAddress(PointerPte), PAGE_SIZE);
665 }
666
667 /* Next! */
668 PointerPte++;
669 }
670
671 /* Do the next address range */
672 NextEntry = MdBlock->ListEntry.Flink;
673 }
674
675 /* Now update the free descriptors to consume the pages we used up during the PFN allocation loop */
676 MxFreeDescriptor->BasePage = FreePage;
677 MxFreeDescriptor->PageCount = FreePageCount;
678 }
679
//
// Scans every currently valid PDE and PTE in the address space and creates
// PFN database entries for the physical pages backing them (page tables and
// mapped pages alike), so that boot-time mappings are properly accounted for.
//
VOID
NTAPI
MiBuildPfnDatabaseFromPages(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
    PMMPDE PointerPde;
    PMMPTE PointerPte;
    ULONG i, Count, j;
    PFN_NUMBER PageFrameIndex, StartupPdIndex, PtePageIndex;
    PMMPFN Pfn1, Pfn2;
    ULONG_PTR BaseAddress = 0;

    /* PFN of the startup page directory (the PteFrame of every page table) */
    StartupPdIndex = PFN_FROM_PTE(MiAddressToPde(PDE_BASE));

    /* Start with the first PDE and scan them all */
    PointerPde = MiAddressToPde(NULL);
    Count = PD_COUNT * PDE_COUNT;
    for (i = 0; i < Count; i++)
    {
        /* Check for valid PDE */
        if (PointerPde->u.Hard.Valid == 1)
        {
            /* Get the PFN of the page table this PDE points to */
            PageFrameIndex = PFN_FROM_PTE(PointerPde);

            /* Do we want a PFN entry for this page? */
            if (MiIsRegularMemory(LoaderBlock, PageFrameIndex))
            {
                /* Yes we do, set it up: owned by the startup page directory */
                Pfn1 = MI_PFN_TO_PFNENTRY(PageFrameIndex);
                Pfn1->u4.PteFrame = StartupPdIndex;
                Pfn1->PteAddress = PointerPde;
                Pfn1->u2.ShareCount++;
                Pfn1->u3.e2.ReferenceCount = 1;
                Pfn1->u3.e1.PageLocation = ActiveAndValid;
                Pfn1->u3.e1.CacheAttribute = MiNonCached;
            }
            else
            {
                /* No PFN entry for non-regular memory */
                Pfn1 = NULL;
            }

            /* Now get the first PTE of this page table and scan its pages */
            PointerPte = MiAddressToPte(BaseAddress);
            for (j = 0; j < PTE_COUNT; j++)
            {
                /* Check for a valid PTE */
                if (PointerPte->u.Hard.Valid == 1)
                {
                    /* Each valid PTE adds a share on the page table's PFN */
                    ASSERT(Pfn1 != NULL);
                    Pfn1->u2.ShareCount++;

                    /* Now check if the PTE points at valid memory too */
                    PtePageIndex = PFN_FROM_PTE(PointerPte);
                    if (MiIsRegularMemory(LoaderBlock, PtePageIndex))
                    {
                        /*
                         * Only add pages above the end of system code or pages
                         * that are part of nonpaged pool
                         */
                        if ((BaseAddress >= 0xA0000000) ||
                            ((BaseAddress >= (ULONG_PTR)MmNonPagedPoolStart) &&
                             (BaseAddress < (ULONG_PTR)MmNonPagedPoolStart +
                                            MmSizeOfNonPagedPoolInBytes)))
                        {
                            /* Get the PFN entry and make sure it too is valid
                             * (its database page may not be mapped) */
                            Pfn2 = MI_PFN_TO_PFNENTRY(PtePageIndex);
                            if ((MmIsAddressValid(Pfn2)) &&
                                (MmIsAddressValid(Pfn2 + 1)))
                            {
                                /* Setup the PFN entry for the mapped page */
                                Pfn2->u4.PteFrame = PageFrameIndex;
                                Pfn2->PteAddress = PointerPte;
                                Pfn2->u2.ShareCount++;
                                Pfn2->u3.e2.ReferenceCount = 1;
                                Pfn2->u3.e1.PageLocation = ActiveAndValid;
                                Pfn2->u3.e1.CacheAttribute = MiNonCached;
                            }
                        }
                    }
                }

                /* Next PTE */
                PointerPte++;
                BaseAddress += PAGE_SIZE;
            }
        }
        else
        {
            /* PDE not valid: skip the whole VA range it would have mapped */
            BaseAddress += PDE_MAPPED_VA;
        }

        /* Next PDE */
        PointerPde++;
    }
}
779
780 VOID
781 NTAPI
782 MiBuildPfnDatabaseZeroPage(VOID)
783 {
784 PMMPFN Pfn1;
785 PMMPDE PointerPde;
786
787 /* Grab the lowest page and check if it has no real references */
788 Pfn1 = MI_PFN_TO_PFNENTRY(MmLowestPhysicalPage);
789 if (!(MmLowestPhysicalPage) && !(Pfn1->u3.e2.ReferenceCount))
790 {
791 /* Make it a bogus page to catch errors */
792 PointerPde = MiAddressToPde(0xFFFFFFFF);
793 Pfn1->u4.PteFrame = PFN_FROM_PTE(PointerPde);
794 Pfn1->PteAddress = PointerPde;
795 Pfn1->u2.ShareCount++;
796 Pfn1->u3.e2.ReferenceCount = 0xFFF0;
797 Pfn1->u3.e1.PageLocation = ActiveAndValid;
798 Pfn1->u3.e1.CacheAttribute = MiNonCached;
799 }
800 }
801
//
// Walks the loader's memory descriptor list and fills in PFN database
// entries for every described page: free-type pages are marked available,
// invisible firmware ranges are skipped, and everything else is marked as
// an in-use, KSEG0-mapped page.
//
VOID
NTAPI
MiBuildPfnDatabaseFromLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
    PLIST_ENTRY NextEntry;
    PFN_NUMBER PageCount = 0;
    PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
    PFN_NUMBER PageFrameIndex;
    PMMPFN Pfn1;
    PMMPTE PointerPte;
    PMMPDE PointerPde;

    /* Now loop through the descriptors */
    NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
    while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
    {
        /* Get the current descriptor */
        MdBlock = CONTAINING_RECORD(NextEntry,
                                    MEMORY_ALLOCATION_DESCRIPTOR,
                                    ListEntry);

        /* Read its data */
        PageCount = MdBlock->PageCount;
        PageFrameIndex = MdBlock->BasePage;

        /* Don't allow memory above what the PFN database is mapping */
        if (PageFrameIndex > MmHighestPhysicalPage)
        {
            /* Since descriptors are ordered, everything past here is larger */
            break;
        }

        /* On the other hand, the end page might be higher up... */
        if ((PageFrameIndex + PageCount) > (MmHighestPhysicalPage + 1))
        {
            /* In which case we'll trim the descriptor to go as high as we can */
            PageCount = MmHighestPhysicalPage + 1 - PageFrameIndex;
            MdBlock->PageCount = PageCount;

            /* But if there's nothing left to trim, we got too high, so quit */
            if (!PageCount) break;
        }

        /* Now check the descriptor type */
        switch (MdBlock->MemoryType)
        {
            /* Check for bad RAM */
            case LoaderBad:

                /* Deliberately halt boot here rather than use damaged RAM */
                DPRINT1("You have damaged RAM modules. Stopping boot\n");
                while (TRUE);
                break;

            /* Check for free RAM */
            case LoaderFree:
            case LoaderLoadedProgram:
            case LoaderFirmwareTemporary:
            case LoaderOsloaderStack:

                /* Get the last page of this descriptor. Note we loop backwards */
                PageFrameIndex += PageCount - 1;
                Pfn1 = MI_PFN_TO_PFNENTRY(PageFrameIndex);
                while (PageCount--)
                {
                    /* If the page really has no references, mark it as free */
                    if (!Pfn1->u3.e2.ReferenceCount)
                    {
                        Pfn1->u3.e1.CacheAttribute = MiNonCached;
                        /* Not hooked up yet -- the free list is still owned
                         * by the legacy Mm at this point */
                        //MiInsertPageInFreeList(PageFrameIndex);
                    }

                    /* Go to the next (lower) page */
                    Pfn1--;
                    PageFrameIndex--;
                }

                /* Done with this block */
                break;

            /* Check for pages that are invisible to us */
            case LoaderFirmwarePermanent:
            case LoaderSpecialMemory:
            case LoaderBBTMemory:

                /* And skip them */
                break;

            default:

                /* Map these pages with the KSEG0 mapping that adds 0x80000000 */
                PointerPte = MiAddressToPte(KSEG0_BASE + (PageFrameIndex << PAGE_SHIFT));
                Pfn1 = MI_PFN_TO_PFNENTRY(PageFrameIndex);
                while (PageCount--)
                {
                    /* Check if the page is really unused */
                    PointerPde = MiAddressToPde(KSEG0_BASE + (PageFrameIndex << PAGE_SHIFT));
                    if (!Pfn1->u3.e2.ReferenceCount)
                    {
                        /* Mark it as being in-use */
                        Pfn1->u4.PteFrame = PFN_FROM_PTE(PointerPde);
                        Pfn1->PteAddress = PointerPte;
                        Pfn1->u2.ShareCount++;
                        Pfn1->u3.e2.ReferenceCount = 1;
                        Pfn1->u3.e1.PageLocation = ActiveAndValid;
                        Pfn1->u3.e1.CacheAttribute = MiNonCached;

                        /* Check for RAM disk page */
                        if (MdBlock->MemoryType == LoaderXIPRom)
                        {
                            /* Make it a pseudo-I/O ROM mapping */
                            Pfn1->u1.Flink = 0;
                            Pfn1->u2.ShareCount = 0;
                            Pfn1->u3.e2.ReferenceCount = 0;
                            Pfn1->u3.e1.PageLocation = 0;
                            Pfn1->u3.e1.Rom = 1;
                            Pfn1->u4.InPageError = 0;
                            Pfn1->u3.e1.PrototypePte = 1;
                        }
                    }

                    /* Advance page structures */
                    Pfn1++;
                    PageFrameIndex++;
                    PointerPte++;
                }
                break;
        }

        /* Next descriptor entry */
        NextEntry = MdBlock->ListEntry.Flink;
    }
}
934
935 VOID
936 NTAPI
937 MiBuildPfnDatabaseSelf(VOID)
938 {
939 PMMPTE PointerPte, LastPte;
940 PMMPFN Pfn1;
941
942 /* Loop the PFN database page */
943 PointerPte = MiAddressToPte(MI_PFN_TO_PFNENTRY(MmLowestPhysicalPage));
944 LastPte = MiAddressToPte(MI_PFN_TO_PFNENTRY(MmHighestPhysicalPage));
945 while (PointerPte <= LastPte)
946 {
947 /* Make sure the page is valid */
948 if (PointerPte->u.Hard.Valid == 1)
949 {
950 /* Get the PFN entry and just mark it referenced */
951 Pfn1 = MI_PFN_TO_PFNENTRY(PointerPte->u.Hard.PageFrameNumber);
952 Pfn1->u2.ShareCount = 1;
953 Pfn1->u3.e2.ReferenceCount = 1;
954 }
955
956 /* Next */
957 PointerPte++;
958 }
959 }
960
//
// Builds the complete PFN database in four order-dependent steps: entries
// for currently mapped pages, the zero page, the loader-described memory,
// and finally the pages backing the database itself.
//
VOID
NTAPI
MiInitializePfnDatabase(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
    /* Scan memory and start setting up PFN entries for mapped pages */
    MiBuildPfnDatabaseFromPages(LoaderBlock);

    /* Add the zero page (pinned with a bogus mapping) */
    MiBuildPfnDatabaseZeroPage();

    /* Scan the loader block and build the rest of the PFN database */
    MiBuildPfnDatabaseFromLoaderBlock(LoaderBlock);

    /* Finally add the pages for the PFN database itself */
    MiBuildPfnDatabaseSelf();
}
977
978 VOID
979 NTAPI
980 MiAdjustWorkingSetManagerParameters(IN BOOLEAN Client)
981 {
982 /* This function needs to do more work, for now, we tune page minimums */
983
984 /* Check for a system with around 64MB RAM or more */
985 if (MmNumberOfPhysicalPages >= (63 * _1MB) / PAGE_SIZE)
986 {
987 /* Double the minimum amount of pages we consider for a "plenty free" scenario */
988 MmPlentyFreePages *= 2;
989 }
990 }
991
992 VOID
993 NTAPI
994 MiNotifyMemoryEvents(VOID)
995 {
996 /* Are we in a low-memory situation? */
997 if (MmAvailablePages < MmLowMemoryThreshold)
998 {
999 /* Clear high, set low */
1000 if (KeReadStateEvent(MiHighMemoryEvent)) KeClearEvent(MiHighMemoryEvent);
1001 if (!KeReadStateEvent(MiLowMemoryEvent)) KeSetEvent(MiLowMemoryEvent, 0, FALSE);
1002 }
1003 else if (MmAvailablePages < MmHighMemoryThreshold)
1004 {
1005 /* We are in between, clear both */
1006 if (KeReadStateEvent(MiHighMemoryEvent)) KeClearEvent(MiHighMemoryEvent);
1007 if (KeReadStateEvent(MiLowMemoryEvent)) KeClearEvent(MiLowMemoryEvent);
1008 }
1009 else
1010 {
1011 /* Clear low, set high */
1012 if (KeReadStateEvent(MiLowMemoryEvent)) KeClearEvent(MiLowMemoryEvent);
1013 if (!KeReadStateEvent(MiHighMemoryEvent)) KeSetEvent(MiHighMemoryEvent, 0, FALSE);
1014 }
1015 }
1016
NTSTATUS
NTAPI
MiCreateMemoryEvent(IN PUNICODE_STRING Name,
                    OUT PKEVENT *Event)
{
    /*
     * Creates a named, permanent notification event (in \KernelObjects, per
     * the callers) with a DACL granting query/synchronize access to everyone
     * and full access to administrators and SYSTEM. On success, *Event
     * receives a referenced pointer to the event object; the temporary
     * handle used to create it is closed before returning.
     */
    PACL Dacl;
    HANDLE EventHandle;
    ULONG DaclLength;
    NTSTATUS Status;
    OBJECT_ATTRIBUTES ObjectAttributes;
    SECURITY_DESCRIPTOR SecurityDescriptor;

    /* Create the SD */
    Status = RtlCreateSecurityDescriptor(&SecurityDescriptor,
                                         SECURITY_DESCRIPTOR_REVISION);
    if (!NT_SUCCESS(Status)) return Status;

    /* One ACL with 3 ACEs, containing each one SID */
    DaclLength = sizeof(ACL) +
                 3 * sizeof(ACCESS_ALLOWED_ACE) +
                 RtlLengthSid(SeLocalSystemSid) +
                 RtlLengthSid(SeAliasAdminsSid) +
                 RtlLengthSid(SeWorldSid);

    /* Allocate space for the DACL */
    Dacl = ExAllocatePoolWithTag(PagedPool, DaclLength, 'lcaD');
    if (!Dacl) return STATUS_INSUFFICIENT_RESOURCES;

    /* Setup the ACL inside it */
    Status = RtlCreateAcl(Dacl, DaclLength, ACL_REVISION);
    if (!NT_SUCCESS(Status)) goto CleanUp;

    /* Add query rights for everyone */
    Status = RtlAddAccessAllowedAce(Dacl,
                                    ACL_REVISION,
                                    SYNCHRONIZE | EVENT_QUERY_STATE | READ_CONTROL,
                                    SeWorldSid);
    if (!NT_SUCCESS(Status)) goto CleanUp;

    /* Full rights for the admin */
    Status = RtlAddAccessAllowedAce(Dacl,
                                    ACL_REVISION,
                                    EVENT_ALL_ACCESS,
                                    SeAliasAdminsSid);
    if (!NT_SUCCESS(Status)) goto CleanUp;

    /* As well as full rights for the system */
    Status = RtlAddAccessAllowedAce(Dacl,
                                    ACL_REVISION,
                                    EVENT_ALL_ACCESS,
                                    SeLocalSystemSid);
    if (!NT_SUCCESS(Status)) goto CleanUp;

    /* Set this DACL inside the SD */
    Status = RtlSetDaclSecurityDescriptor(&SecurityDescriptor,
                                          TRUE,
                                          Dacl,
                                          FALSE);
    if (!NT_SUCCESS(Status)) goto CleanUp;

    /* Setup the event attributes, making sure it's a permanent one */
    InitializeObjectAttributes(&ObjectAttributes,
                               Name,
                               OBJ_KERNEL_HANDLE | OBJ_PERMANENT,
                               NULL,
                               &SecurityDescriptor);

    /* Create the event */
    Status = ZwCreateEvent(&EventHandle,
                           EVENT_ALL_ACCESS,
                           &ObjectAttributes,
                           NotificationEvent,
                           FALSE);
CleanUp:
    /* Free the DACL -- it was captured into the object, so it is released
       on both the success and the failure path */
    ExFreePool(Dacl);

    /* Check if this is the success path */
    if (NT_SUCCESS(Status))
    {
        /* Add a reference to the object, then close the handle we had */
        Status = ObReferenceObjectByHandle(EventHandle,
                                           EVENT_MODIFY_STATE,
                                           ExEventObjectType,
                                           KernelMode,
                                           (PVOID*)Event,
                                           NULL);
        ZwClose (EventHandle);
    }

    /* Return status */
    return Status;
}
1110
BOOLEAN
NTAPI
MiInitializeMemoryEvents(VOID)
{
    /*
     * Computes the low/high available-memory thresholds (from registry
     * overrides when present, otherwise from heuristics based on physical
     * memory size), creates the six named \KernelObjects condition events,
     * and sets their initial state. Returns FALSE if any event creation
     * fails.
     */
    UNICODE_STRING LowString = RTL_CONSTANT_STRING(L"\\KernelObjects\\LowMemoryCondition");
    UNICODE_STRING HighString = RTL_CONSTANT_STRING(L"\\KernelObjects\\HighMemoryCondition");
    UNICODE_STRING LowPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\LowPagedPoolCondition");
    UNICODE_STRING HighPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\HighPagedPoolCondition");
    UNICODE_STRING LowNonPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\LowNonPagedPoolCondition");
    UNICODE_STRING HighNonPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\HighNonPagedPoolCondition");
    NTSTATUS Status;

    /* Check if we have a registry setting (the registry value is in MB) */
    if (MmLowMemoryThreshold)
    {
        /* Convert it to pages */
        MmLowMemoryThreshold *= (_1MB / PAGE_SIZE);
    }
    else
    {
        /* The low memory threshold is hit when we don't consider that we have "plenty" of free pages anymore */
        MmLowMemoryThreshold = MmPlentyFreePages;

        /* More than one GB of memory? (0x40000 pages == 1GB with 4KB pages) */
        if (MmNumberOfPhysicalPages > 0x40000)
        {
            /* Start at 32MB, and add another 16MB for each GB */
            MmLowMemoryThreshold = (32 * _1MB) / PAGE_SIZE;
            MmLowMemoryThreshold += ((MmNumberOfPhysicalPages - 0x40000) >> 7);
        }
        else if (MmNumberOfPhysicalPages > 0x8000)
        {
            /* For systems with > 128MB RAM, add another 4MB for each 128MB */
            MmLowMemoryThreshold += ((MmNumberOfPhysicalPages - 0x8000) >> 5);
        }

        /* Don't let the minimum threshold go past 64MB */
        MmLowMemoryThreshold = min(MmLowMemoryThreshold, (64 * _1MB) / PAGE_SIZE);
    }

    /* Check if we have a registry setting (also in MB) */
    if (MmHighMemoryThreshold)
    {
        /* Convert it into pages */
        MmHighMemoryThreshold *= (_1MB / PAGE_SIZE);
    }
    else
    {
        /* Otherwise, the default is three times the low memory threshold */
        MmHighMemoryThreshold = 3 * MmLowMemoryThreshold;
        ASSERT(MmHighMemoryThreshold > MmLowMemoryThreshold);
    }

    /* Make sure high threshold is actually higher than the low */
    MmHighMemoryThreshold = max(MmHighMemoryThreshold, MmLowMemoryThreshold);

    /* Create the memory events for all the thresholds */
    Status = MiCreateMemoryEvent(&LowString, &MiLowMemoryEvent);
    if (!NT_SUCCESS(Status)) return FALSE;
    Status = MiCreateMemoryEvent(&HighString, &MiHighMemoryEvent);
    if (!NT_SUCCESS(Status)) return FALSE;
    Status = MiCreateMemoryEvent(&LowPagedPoolString, &MiLowPagedPoolEvent);
    if (!NT_SUCCESS(Status)) return FALSE;
    Status = MiCreateMemoryEvent(&HighPagedPoolString, &MiHighPagedPoolEvent);
    if (!NT_SUCCESS(Status)) return FALSE;
    Status = MiCreateMemoryEvent(&LowNonPagedPoolString, &MiLowNonPagedPoolEvent);
    if (!NT_SUCCESS(Status)) return FALSE;
    Status = MiCreateMemoryEvent(&HighNonPagedPoolString, &MiHighNonPagedPoolEvent);
    if (!NT_SUCCESS(Status)) return FALSE;

    /* Now setup the pool events */
    MiInitializePoolEvents();

    /* Set the initial event state */
    MiNotifyMemoryEvents();
    return TRUE;
}
1188
VOID
NTAPI
MiAddHalIoMappings(VOID)
{
    /*
     * Scans the HAL heap region for valid, non-large-page mappings whose
     * page frames fall outside the PFN database (i.e. device/I/O memory)
     * and warns about them, since their cache attributes are not yet
     * tracked for PAT coherency.
     * NOTE(review): the 0xFFC00000 base is x86-specific -- confirm before
     * reusing on other architectures.
     */
    PVOID BaseAddress;
    PMMPTE PointerPde;
    PMMPTE PointerPte;
    ULONG i, j, PdeCount;
    PFN_NUMBER PageFrameIndex;

    /* HAL Heap address -- should be on a PDE boundary */
    BaseAddress = (PVOID)0xFFC00000;
    ASSERT(MiAddressToPteOffset(BaseAddress) == 0);

    /* Check how many PDEs the heap has (from the base to the end of the address space) */
    PointerPde = MiAddressToPde(BaseAddress);
    PdeCount = PDE_COUNT - ADDR_TO_PDE_OFFSET(BaseAddress);
    for (i = 0; i < PdeCount; i++)
    {
        /* Does the HAL own this mapping? */
        if ((PointerPde->u.Hard.Valid == 1) &&
            (PointerPde->u.Hard.LargePage == 0))
        {
            /* Get the PTE for it and scan each page */
            PointerPte = MiAddressToPte(BaseAddress);
            for (j = 0 ; j < PTE_COUNT; j++)
            {
                /* Does the HAL own this page? */
                if (PointerPte->u.Hard.Valid == 1)
                {
                    /* Is the HAL using it for device or I/O mapped memory? */
                    PageFrameIndex = PFN_FROM_PTE(PointerPte);
                    if (!MiGetPfnEntry(PageFrameIndex))
                    {
                        /* FIXME: For PAT, we need to track I/O cache attributes for coherency */
                        DPRINT1("HAL I/O Mapping at %p is unsafe\n", BaseAddress);
                    }
                }

                /* Move to the next page (the inner loop advances a full PDE's worth) */
                BaseAddress = (PVOID)((ULONG_PTR)BaseAddress + PAGE_SIZE);
                PointerPte++;
            }
        }
        else
        {
            /* Move to the next address */
            BaseAddress = (PVOID)((ULONG_PTR)BaseAddress + PDE_MAPPED_VA);
        }

        /* Move to the next PDE */
        PointerPde++;
    }
}
1243
1244 VOID
1245 NTAPI
1246 MmDumpArmPfnDatabase(VOID)
1247 {
1248 ULONG i;
1249 PMMPFN Pfn1;
1250 PCHAR Consumer = "Unknown";
1251 KIRQL OldIrql;
1252 ULONG ActivePages = 0, FreePages = 0, OtherPages = 0;
1253
1254 KeRaiseIrql(HIGH_LEVEL, &OldIrql);
1255
1256 //
1257 // Loop the PFN database
1258 //
1259 for (i = 0; i <= MmHighestPhysicalPage; i++)
1260 {
1261 Pfn1 = MI_PFN_TO_PFNENTRY(i);
1262 if (!Pfn1) continue;
1263
1264 //
1265 // Get the page location
1266 //
1267 switch (Pfn1->u3.e1.PageLocation)
1268 {
1269 case ActiveAndValid:
1270
1271 Consumer = "Active and Valid";
1272 ActivePages++;
1273 break;
1274
1275 case FreePageList:
1276
1277 Consumer = "Free Page List";
1278 FreePages++;
1279 break;
1280
1281 default:
1282
1283 Consumer = "Other (ASSERT!)";
1284 OtherPages++;
1285 break;
1286 }
1287
1288 //
1289 // Pretty-print the page
1290 //
1291 DbgPrint("0x%08p:\t%20s\t(%02d.%02d) [%08p-%08p])\n",
1292 i << PAGE_SHIFT,
1293 Consumer,
1294 Pfn1->u3.e2.ReferenceCount,
1295 Pfn1->u2.ShareCount,
1296 Pfn1->PteAddress,
1297 Pfn1->u4.PteFrame);
1298 }
1299
1300 DbgPrint("Active: %d pages\t[%d KB]\n", ActivePages, (ActivePages << PAGE_SHIFT) / 1024);
1301 DbgPrint("Free: %d pages\t[%d KB]\n", FreePages, (FreePages << PAGE_SHIFT) / 1024);
1302 DbgPrint("Other: %d pages\t[%d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
1303
1304 KeLowerIrql(OldIrql);
1305 }
1306
1307 PFN_NUMBER
1308 NTAPI
1309 MiPagesInLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock,
1310 IN PBOOLEAN IncludeType)
1311 {
1312 PLIST_ENTRY NextEntry;
1313 PFN_NUMBER PageCount = 0;
1314 PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
1315
1316 //
1317 // Now loop through the descriptors
1318 //
1319 NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
1320 while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
1321 {
1322 //
1323 // Grab each one, and check if it's one we should include
1324 //
1325 MdBlock = CONTAINING_RECORD(NextEntry,
1326 MEMORY_ALLOCATION_DESCRIPTOR,
1327 ListEntry);
1328 if ((MdBlock->MemoryType < LoaderMaximum) &&
1329 (IncludeType[MdBlock->MemoryType]))
1330 {
1331 //
1332 // Add this to our running total
1333 //
1334 PageCount += MdBlock->PageCount;
1335 }
1336
1337 //
1338 // Try the next descriptor
1339 //
1340 NextEntry = MdBlock->ListEntry.Flink;
1341 }
1342
1343 //
1344 // Return the total
1345 //
1346 return PageCount;
1347 }
1348
1349 PPHYSICAL_MEMORY_DESCRIPTOR
1350 NTAPI
1351 MmInitializeMemoryLimits(IN PLOADER_PARAMETER_BLOCK LoaderBlock,
1352 IN PBOOLEAN IncludeType)
1353 {
1354 PLIST_ENTRY NextEntry;
1355 ULONG Run = 0, InitialRuns = 0;
1356 PFN_NUMBER NextPage = -1, PageCount = 0;
1357 PPHYSICAL_MEMORY_DESCRIPTOR Buffer, NewBuffer;
1358 PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
1359
1360 //
1361 // Scan the memory descriptors
1362 //
1363 NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
1364 while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
1365 {
1366 //
1367 // For each one, increase the memory allocation estimate
1368 //
1369 InitialRuns++;
1370 NextEntry = NextEntry->Flink;
1371 }
1372
1373 //
1374 // Allocate the maximum we'll ever need
1375 //
1376 Buffer = ExAllocatePoolWithTag(NonPagedPool,
1377 sizeof(PHYSICAL_MEMORY_DESCRIPTOR) +
1378 sizeof(PHYSICAL_MEMORY_RUN) *
1379 (InitialRuns - 1),
1380 'lMmM');
1381 if (!Buffer) return NULL;
1382
1383 //
1384 // For now that's how many runs we have
1385 //
1386 Buffer->NumberOfRuns = InitialRuns;
1387
1388 //
1389 // Now loop through the descriptors again
1390 //
1391 NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
1392 while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
1393 {
1394 //
1395 // Grab each one, and check if it's one we should include
1396 //
1397 MdBlock = CONTAINING_RECORD(NextEntry,
1398 MEMORY_ALLOCATION_DESCRIPTOR,
1399 ListEntry);
1400 if ((MdBlock->MemoryType < LoaderMaximum) &&
1401 (IncludeType[MdBlock->MemoryType]))
1402 {
1403 //
1404 // Add this to our running total
1405 //
1406 PageCount += MdBlock->PageCount;
1407
1408 //
1409 // Check if the next page is described by the next descriptor
1410 //
1411 if (MdBlock->BasePage == NextPage)
1412 {
1413 //
1414 // Combine it into the same physical run
1415 //
1416 ASSERT(MdBlock->PageCount != 0);
1417 Buffer->Run[Run - 1].PageCount += MdBlock->PageCount;
1418 NextPage += MdBlock->PageCount;
1419 }
1420 else
1421 {
1422 //
1423 // Otherwise just duplicate the descriptor's contents
1424 //
1425 Buffer->Run[Run].BasePage = MdBlock->BasePage;
1426 Buffer->Run[Run].PageCount = MdBlock->PageCount;
1427 NextPage = Buffer->Run[Run].BasePage + Buffer->Run[Run].PageCount;
1428
1429 //
1430 // And in this case, increase the number of runs
1431 //
1432 Run++;
1433 }
1434 }
1435
1436 //
1437 // Try the next descriptor
1438 //
1439 NextEntry = MdBlock->ListEntry.Flink;
1440 }
1441
1442 //
1443 // We should not have been able to go past our initial estimate
1444 //
1445 ASSERT(Run <= Buffer->NumberOfRuns);
1446
1447 //
1448 // Our guess was probably exaggerated...
1449 //
1450 if (InitialRuns > Run)
1451 {
1452 //
1453 // Allocate a more accurately sized buffer
1454 //
1455 NewBuffer = ExAllocatePoolWithTag(NonPagedPool,
1456 sizeof(PHYSICAL_MEMORY_DESCRIPTOR) +
1457 sizeof(PHYSICAL_MEMORY_RUN) *
1458 (Run - 1),
1459 'lMmM');
1460 if (NewBuffer)
1461 {
1462 //
1463 // Copy the old buffer into the new, then free it
1464 //
1465 RtlCopyMemory(NewBuffer->Run,
1466 Buffer->Run,
1467 sizeof(PHYSICAL_MEMORY_RUN) * Run);
1468 ExFreePool(Buffer);
1469
1470 //
1471 // Now use the new buffer
1472 //
1473 Buffer = NewBuffer;
1474 }
1475 }
1476
1477 //
1478 // Write the final numbers, and return it
1479 //
1480 Buffer->NumberOfRuns = Run;
1481 Buffer->NumberOfPages = PageCount;
1482 return Buffer;
1483 }
1484
/*
 * MiBuildPagedPool - sizes and bootstraps the paged pool.
 *
 * Captures the system page directory PFN, double-maps the page directory
 * through a system PTE for lazy PDE evaluation, sizes paged pool (twice the
 * maximum nonpaged pool, clamped to the available VA window), maps the first
 * paged pool PDE, sets up the two paged-pool tracking bitmaps, and finally
 * initializes the paged pool allocator and its low/high thresholds.
 */
VOID
NTAPI
MiBuildPagedPool(VOID)
{
    PMMPTE PointerPte, PointerPde;
    MMPTE TempPte = ValidKernelPte;
    PFN_NUMBER PageFrameIndex;
    KIRQL OldIrql;
    ULONG Size, BitMapSize;

    //
    // Get the page frame number for the system page directory
    //
    PointerPte = MiAddressToPte(PDE_BASE);
    ASSERT(PD_COUNT == 1);
    MmSystemPageDirectory[0] = PFN_FROM_PTE(PointerPte);

    //
    // Allocate a system PTE which will hold a copy of the page directory
    //
    PointerPte = MiReserveSystemPtes(1, SystemPteSpace);
    ASSERT(PointerPte);
    MmSystemPagePtes = MiPteToAddress(PointerPte);

    //
    // Make this system PTE point to the system page directory.
    // It is now essentially double-mapped. This will be used later for lazy
    // evaluation of PDEs accross process switches, similarly to how the Global
    // page directory array in the old ReactOS Mm is used (but in a less hacky
    // way).
    //
    TempPte = ValidKernelPte;
    ASSERT(PD_COUNT == 1);
    TempPte.u.Hard.PageFrameNumber = MmSystemPageDirectory[0];
    ASSERT(PointerPte->u.Hard.Valid == 0);
    ASSERT(TempPte.u.Hard.Valid == 1);
    *PointerPte = TempPte;

    //
    // Let's get back to paged pool work: size it up.
    // By default, it should be twice as big as nonpaged pool.
    //
    MmSizeOfPagedPoolInBytes = 2 * MmMaximumNonPagedPoolInBytes;
    if (MmSizeOfPagedPoolInBytes > ((ULONG_PTR)MmNonPagedSystemStart -
                                    (ULONG_PTR)MmPagedPoolStart))
    {
        //
        // On the other hand, we have limited VA space, so make sure that the VA
        // for paged pool doesn't overflow into nonpaged pool VA. Otherwise, set
        // whatever maximum is possible.
        //
        MmSizeOfPagedPoolInBytes = (ULONG_PTR)MmNonPagedSystemStart -
                                   (ULONG_PTR)MmPagedPoolStart;
    }

    //
    // Get the size in pages and make sure paged pool is at least 32MB.
    //
    Size = MmSizeOfPagedPoolInBytes;
    if (Size < MI_MIN_INIT_PAGED_POOLSIZE) Size = MI_MIN_INIT_PAGED_POOLSIZE;
    Size = BYTES_TO_PAGES(Size);

    //
    // Now check how many PTEs will be required for these many pages.
    // (1024 PTEs fit in one page-table page on x86.)
    //
    Size = (Size + (1024 - 1)) / 1024;

    //
    // Recompute the page-aligned size of the paged pool, in bytes and pages.
    //
    MmSizeOfPagedPoolInBytes = Size * PAGE_SIZE * 1024;
    MmSizeOfPagedPoolInPages = MmSizeOfPagedPoolInBytes >> PAGE_SHIFT;

    //
    // Let's be really sure this doesn't overflow into nonpaged system VA
    //
    ASSERT((MmSizeOfPagedPoolInBytes + (ULONG_PTR)MmPagedPoolStart) <=
           (ULONG_PTR)MmNonPagedSystemStart);

    //
    // This is where paged pool ends
    //
    MmPagedPoolEnd = (PVOID)(((ULONG_PTR)MmPagedPoolStart +
                              MmSizeOfPagedPoolInBytes) - 1);

    //
    // So now get the PDE for paged pool and zero it out
    //
    PointerPde = MiAddressToPde(MmPagedPoolStart);
    RtlZeroMemory(PointerPde,
                  (1 + MiAddressToPde(MmPagedPoolEnd) - PointerPde) * sizeof(MMPTE));

    //
    // Next, get the first and last PTE
    //
    PointerPte = MiAddressToPte(MmPagedPoolStart);
    MmPagedPoolInfo.FirstPteForPagedPool = PointerPte;
    MmPagedPoolInfo.LastPteForPagedPool = MiAddressToPte(MmPagedPoolEnd);

    //
    // Lock the PFN database
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    //
    // Allocate a page and map the first paged pool PDE
    //
    PageFrameIndex = MmAllocPage(MC_NPPOOL);
    TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
    ASSERT(PointerPde->u.Hard.Valid == 0);
    ASSERT(TempPte.u.Hard.Valid == 1);
    *PointerPde = TempPte;

    //
    // Release the PFN database lock
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    //
    // We only have one PDE mapped for now... at fault time, additional PDEs
    // will be allocated to handle paged pool growth. This is where they'll have
    // to start.
    //
    MmPagedPoolInfo.NextPdeForPagedPoolExpansion = PointerPde + 1;

    //
    // We keep track of each page via a bit, so check how big the bitmap will
    // have to be (make sure to align our page count such that it fits nicely
    // into a 4-byte aligned bitmap.
    //
    // We'll also allocate the bitmap header itself part of the same buffer.
    //
    Size = Size * 1024;
    ASSERT(Size == MmSizeOfPagedPoolInPages);
    BitMapSize = Size;
    Size = sizeof(RTL_BITMAP) + (((Size + 31) / 32) * sizeof(ULONG));

    //
    // Allocate the allocation bitmap, which tells us which regions have not yet
    // been mapped into memory
    //
    MmPagedPoolInfo.PagedPoolAllocationMap = ExAllocatePoolWithTag(NonPagedPool,
                                                                   Size,
                                                                   '  mM');
    ASSERT(MmPagedPoolInfo.PagedPoolAllocationMap);

    //
    // Initialize it such that at first, only the first page's worth of PTEs is
    // marked as allocated (incidentially, the first PDE we allocated earlier).
    //
    RtlInitializeBitMap(MmPagedPoolInfo.PagedPoolAllocationMap,
                        (PULONG)(MmPagedPoolInfo.PagedPoolAllocationMap + 1),
                        BitMapSize);
    RtlSetAllBits(MmPagedPoolInfo.PagedPoolAllocationMap);
    RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap, 0, 1024);

    //
    // We have a second bitmap, which keeps track of where allocations end.
    // Given the allocation bitmap and a base address, we can therefore figure
    // out which page is the last page of that allocation, and thus how big the
    // entire allocation is.
    //
    MmPagedPoolInfo.EndOfPagedPoolBitmap = ExAllocatePoolWithTag(NonPagedPool,
                                                                 Size,
                                                                 '  mM');
    ASSERT(MmPagedPoolInfo.EndOfPagedPoolBitmap);
    RtlInitializeBitMap(MmPagedPoolInfo.EndOfPagedPoolBitmap,
                        (PULONG)(MmPagedPoolInfo.EndOfPagedPoolBitmap + 1),
                        BitMapSize);

    //
    // Since no allocations have been made yet, there are no bits set as the end
    //
    RtlClearAllBits(MmPagedPoolInfo.EndOfPagedPoolBitmap);

    //
    // Initialize paged pool.
    //
    InitializePool(PagedPool, 0);

    /* Default low threshold of 30MB or one fifth of paged pool */
    MiLowPagedPoolThreshold = (30 * _1MB) >> PAGE_SHIFT;
    MiLowPagedPoolThreshold = min(MiLowPagedPoolThreshold, Size / 5);

    /* Default high threshold of 60MB or 25% */
    MiHighPagedPoolThreshold = (60 * _1MB) >> PAGE_SHIFT;
    MiHighPagedPoolThreshold = min(MiHighPagedPoolThreshold, (Size * 2) / 5);
    ASSERT(MiLowPagedPoolThreshold < MiHighPagedPoolThreshold);
}
1674
1675 NTSTATUS
1676 NTAPI
1677 MmArmInitSystem(IN ULONG Phase,
1678 IN PLOADER_PARAMETER_BLOCK LoaderBlock)
1679 {
1680 ULONG i;
1681 BOOLEAN IncludeType[LoaderMaximum];
1682 PVOID Bitmap;
1683 PPHYSICAL_MEMORY_RUN Run;
1684 PFN_NUMBER PageCount;
1685
1686 //
1687 // Instantiate memory that we don't consider RAM/usable
1688 // We use the same exclusions that Windows does, in order to try to be
1689 // compatible with WinLDR-style booting
1690 //
1691 for (i = 0; i < LoaderMaximum; i++) IncludeType[i] = TRUE;
1692 IncludeType[LoaderBad] = FALSE;
1693 IncludeType[LoaderFirmwarePermanent] = FALSE;
1694 IncludeType[LoaderSpecialMemory] = FALSE;
1695 IncludeType[LoaderBBTMemory] = FALSE;
1696 if (Phase == 0)
1697 {
1698 /* Initialize the phase 0 temporary event */
1699 KeInitializeEvent(&MiTempEvent, NotificationEvent, FALSE);
1700
1701 /* Set all the events to use the temporary event for now */
1702 MiLowMemoryEvent = &MiTempEvent;
1703 MiHighMemoryEvent = &MiTempEvent;
1704 MiLowPagedPoolEvent = &MiTempEvent;
1705 MiHighPagedPoolEvent = &MiTempEvent;
1706 MiLowNonPagedPoolEvent = &MiTempEvent;
1707 MiHighNonPagedPoolEvent = &MiTempEvent;
1708
1709 //
1710 // Define the basic user vs. kernel address space separation
1711 //
1712 MmSystemRangeStart = (PVOID)KSEG0_BASE;
1713 MmUserProbeAddress = (ULONG_PTR)MmSystemRangeStart - 0x10000;
1714 MmHighestUserAddress = (PVOID)(MmUserProbeAddress - 1);
1715
1716 /* Highest PTE and PDE based on the addresses above */
1717 MiHighestUserPte = MiAddressToPte(MmHighestUserAddress);
1718 MiHighestUserPde = MiAddressToPde(MmHighestUserAddress);
1719
1720 //
1721 // Get the size of the boot loader's image allocations and then round
1722 // that region up to a PDE size, so that any PDEs we might create for
1723 // whatever follows are separate from the PDEs that boot loader might've
1724 // already created (and later, we can blow all that away if we want to).
1725 //
1726 MmBootImageSize = KeLoaderBlock->Extension->LoaderPagesSpanned;
1727 MmBootImageSize *= PAGE_SIZE;
1728 MmBootImageSize = (MmBootImageSize + (4 * 1024 * 1024) - 1) & ~((4 * 1024 * 1024) - 1);
1729 ASSERT((MmBootImageSize % (4 * 1024 * 1024)) == 0);
1730
1731 //
1732 // Set the size of session view, pool, and image
1733 //
1734 MmSessionSize = MI_SESSION_SIZE;
1735 MmSessionViewSize = MI_SESSION_VIEW_SIZE;
1736 MmSessionPoolSize = MI_SESSION_POOL_SIZE;
1737 MmSessionImageSize = MI_SESSION_IMAGE_SIZE;
1738
1739 //
1740 // Set the size of system view
1741 //
1742 MmSystemViewSize = MI_SYSTEM_VIEW_SIZE;
1743
1744 //
1745 // This is where it all ends
1746 //
1747 MiSessionImageEnd = (PVOID)PTE_BASE;
1748
1749 //
1750 // This is where we will load Win32k.sys and the video driver
1751 //
1752 MiSessionImageStart = (PVOID)((ULONG_PTR)MiSessionImageEnd -
1753 MmSessionImageSize);
1754
1755 //
1756 // So the view starts right below the session working set (itself below
1757 // the image area)
1758 //
1759 MiSessionViewStart = (PVOID)((ULONG_PTR)MiSessionImageEnd -
1760 MmSessionImageSize -
1761 MI_SESSION_WORKING_SET_SIZE -
1762 MmSessionViewSize);
1763
1764 //
1765 // Session pool follows
1766 //
1767 MiSessionPoolEnd = MiSessionViewStart;
1768 MiSessionPoolStart = (PVOID)((ULONG_PTR)MiSessionPoolEnd -
1769 MmSessionPoolSize);
1770
1771 //
1772 // And it all begins here
1773 //
1774 MmSessionBase = MiSessionPoolStart;
1775
1776 //
1777 // Sanity check that our math is correct
1778 //
1779 ASSERT((ULONG_PTR)MmSessionBase + MmSessionSize == PTE_BASE);
1780
1781 //
1782 // Session space ends wherever image session space ends
1783 //
1784 MiSessionSpaceEnd = MiSessionImageEnd;
1785
1786 //
1787 // System view space ends at session space, so now that we know where
1788 // this is, we can compute the base address of system view space itself.
1789 //
1790 MiSystemViewStart = (PVOID)((ULONG_PTR)MmSessionBase -
1791 MmSystemViewSize);
1792
1793 /* Compute the PTE addresses for all the addresses we carved out */
1794 MiSessionImagePteStart = MiAddressToPte(MiSessionImageStart);
1795 MiSessionImagePteEnd = MiAddressToPte(MiSessionImageEnd);
1796 MiSessionBasePte = MiAddressToPte(MmSessionBase);
1797 MiSessionLastPte = MiAddressToPte(MiSessionSpaceEnd);
1798
1799 /* Initialize the user mode image list */
1800 InitializeListHead(&MmLoadedUserImageList);
1801
1802 /* Initialize the paged pool mutex */
1803 KeInitializeGuardedMutex(&MmPagedPoolMutex);
1804
1805 /* Initialize the Loader Lock */
1806 KeInitializeMutant(&MmSystemLoadLock, FALSE);
1807
1808 //
1809 // Count physical pages on the system
1810 //
1811 PageCount = MiPagesInLoaderBlock(LoaderBlock, IncludeType);
1812
1813 //
1814 // Check if this is a machine with less than 19MB of RAM
1815 //
1816 if (PageCount < MI_MIN_PAGES_FOR_SYSPTE_TUNING)
1817 {
1818 //
1819 // Use the very minimum of system PTEs
1820 //
1821 MmNumberOfSystemPtes = 7000;
1822 }
1823 else
1824 {
1825 //
1826 // Use the default, but check if we have more than 32MB of RAM
1827 //
1828 MmNumberOfSystemPtes = 11000;
1829 if (PageCount > MI_MIN_PAGES_FOR_SYSPTE_BOOST)
1830 {
1831 //
1832 // Double the amount of system PTEs
1833 //
1834 MmNumberOfSystemPtes <<= 1;
1835 }
1836 }
1837
1838 DPRINT("System PTE count has been tuned to %d (%d bytes)\n",
1839 MmNumberOfSystemPtes, MmNumberOfSystemPtes * PAGE_SIZE);
1840
1841 /* Initialize the platform-specific parts */
1842 MiInitMachineDependent(LoaderBlock);
1843
1844 //
1845 // Sync us up with ReactOS Mm
1846 //
1847 MiSyncARM3WithROS(MmNonPagedSystemStart, (PVOID)((ULONG_PTR)MmNonPagedPoolEnd - 1));
1848 MiSyncARM3WithROS(MmPfnDatabase[0], (PVOID)((ULONG_PTR)MmNonPagedPoolStart + MmSizeOfNonPagedPoolInBytes - 1));
1849 MiSyncARM3WithROS((PVOID)HYPER_SPACE, (PVOID)(HYPER_SPACE + PAGE_SIZE - 1));
1850
1851 //
1852 // Build the physical memory block
1853 //
1854 MmPhysicalMemoryBlock = MmInitializeMemoryLimits(LoaderBlock,
1855 IncludeType);
1856
1857 //
1858 // Allocate enough buffer for the PFN bitmap
1859 // Align it up to a 32-bit boundary
1860 //
1861 Bitmap = ExAllocatePoolWithTag(NonPagedPool,
1862 (((MmHighestPhysicalPage + 1) + 31) / 32) * 4,
1863 ' mM');
1864 if (!Bitmap)
1865 {
1866 //
1867 // This is critical
1868 //
1869 KeBugCheckEx(INSTALL_MORE_MEMORY,
1870 MmNumberOfPhysicalPages,
1871 MmLowestPhysicalPage,
1872 MmHighestPhysicalPage,
1873 0x101);
1874 }
1875
1876 //
1877 // Initialize it and clear all the bits to begin with
1878 //
1879 RtlInitializeBitMap(&MiPfnBitMap,
1880 Bitmap,
1881 MmHighestPhysicalPage + 1);
1882 RtlClearAllBits(&MiPfnBitMap);
1883
1884 //
1885 // Loop physical memory runs
1886 //
1887 for (i = 0; i < MmPhysicalMemoryBlock->NumberOfRuns; i++)
1888 {
1889 //
1890 // Get the run
1891 //
1892 Run = &MmPhysicalMemoryBlock->Run[i];
1893 DPRINT("PHYSICAL RAM [0x%08p to 0x%08p]\n",
1894 Run->BasePage << PAGE_SHIFT,
1895 (Run->BasePage + Run->PageCount) << PAGE_SHIFT);
1896
1897 //
1898 // Make sure it has pages inside it
1899 //
1900 if (Run->PageCount)
1901 {
1902 //
1903 // Set the bits in the PFN bitmap
1904 //
1905 RtlSetBits(&MiPfnBitMap, Run->BasePage, Run->PageCount);
1906 }
1907 }
1908
1909 /* Look for large page cache entries that need caching */
1910 MiSyncCachedRanges();
1911
1912 /* Loop for HAL Heap I/O device mappings that need coherency tracking */
1913 MiAddHalIoMappings();
1914
1915 /* Set the initial resident page count */
1916 MmResidentAvailablePages = MmAvailablePages - 32;
1917
1918 /* Initialize large page structures on PAE/x64, and MmProcessList on x86 */
1919 MiInitializeLargePageSupport();
1920
1921 /* Check if the registry says any drivers should be loaded with large pages */
1922 MiInitializeDriverLargePageList();
1923
1924 /* Relocate the boot drivers into system PTE space and fixup their PFNs */
1925 MiReloadBootLoadedDrivers(LoaderBlock);
1926
1927 /* FIXME: Call out into Driver Verifier for initialization */
1928
1929 /* Check how many pages the system has */
1930 if (MmNumberOfPhysicalPages <= (13 * _1MB))
1931 {
1932 /* Set small system */
1933 MmSystemSize = MmSmallSystem;
1934 }
1935 else if (MmNumberOfPhysicalPages <= (19 * _1MB))
1936 {
1937 /* Set small system and add 100 pages for the cache */
1938 MmSystemSize = MmSmallSystem;
1939 MmSystemCacheWsMinimum += 100;
1940 }
1941 else
1942 {
1943 /* Set medium system and add 400 pages for the cache */
1944 MmSystemSize = MmMediumSystem;
1945 MmSystemCacheWsMinimum += 400;
1946 }
1947
1948 /* Check for less than 24MB */
1949 if (MmNumberOfPhysicalPages < ((24 * _1MB) / PAGE_SIZE))
1950 {
1951 /* No more than 32 pages */
1952 MmSystemCacheWsMinimum = 32;
1953 }
1954
1955 /* Check for more than 32MB */
1956 if (MmNumberOfPhysicalPages >= ((32 * _1MB) / PAGE_SIZE))
1957 {
1958 /* Check for product type being "Wi" for WinNT */
1959 if (MmProductType == '\0i\0W')
1960 {
1961 /* Then this is a large system */
1962 MmSystemSize = MmLargeSystem;
1963 }
1964 else
1965 {
1966 /* For servers, we need 64MB to consider this as being large */
1967 if (MmNumberOfPhysicalPages >= ((64 * _1MB) / PAGE_SIZE))
1968 {
1969 /* Set it as large */
1970 MmSystemSize = MmLargeSystem;
1971 }
1972 }
1973 }
1974
1975 /* Check for more than 33 MB */
1976 if (MmNumberOfPhysicalPages > ((33 * _1MB) / PAGE_SIZE))
1977 {
1978 /* Add another 500 pages to the cache */
1979 MmSystemCacheWsMinimum += 500;
1980 }
1981
1982 /* Now setup the shared user data fields */
1983 ASSERT(SharedUserData->NumberOfPhysicalPages == 0);
1984 SharedUserData->NumberOfPhysicalPages = MmNumberOfPhysicalPages;
1985 SharedUserData->LargePageMinimum = 0;
1986
1987 /* Check for workstation (Wi for WinNT) */
1988 if (MmProductType == '\0i\0W')
1989 {
1990 /* Set Windows NT Workstation product type */
1991 SharedUserData->NtProductType = NtProductWinNt;
1992 MmProductType = 0;
1993 }
1994 else
1995 {
1996 /* Check for LanMan server */
1997 if (MmProductType == '\0a\0L')
1998 {
1999 /* This is a domain controller */
2000 SharedUserData->NtProductType = NtProductLanManNt;
2001 }
2002 else
2003 {
2004 /* Otherwise it must be a normal server */
2005 SharedUserData->NtProductType = NtProductServer;
2006 }
2007
2008 /* Set the product type, and make the system more aggressive with low memory */
2009 MmProductType = 1;
2010 MmMinimumFreePages = 81;
2011 }
2012
2013 /* Update working set tuning parameters */
2014 MiAdjustWorkingSetManagerParameters(!MmProductType);
2015
2016 /* Finetune the page count by removing working set and NP expansion */
2017 MmResidentAvailablePages -= MiExpansionPoolPagesInitialCharge;
2018 MmResidentAvailablePages -= MmSystemCacheWsMinimum;
2019 MmResidentAvailableAtInit = MmResidentAvailablePages;
2020 if (MmResidentAvailablePages <= 0)
2021 {
2022 /* This should not happen */
2023 DPRINT1("System cache working set too big\n");
2024 return FALSE;
2025 }
2026
2027 /* Size up paged pool and build the shadow system page directory */
2028 MiBuildPagedPool();
2029 }
2030
2031 //
2032 // Always return success for now
2033 //
2034 return STATUS_SUCCESS;
2035 }
2036
2037 /* EOF */