/*
 * PROJECT: ReactOS Kernel
 * LICENSE: BSD - See COPYING.ARM in the top level directory
 * FILE: ntoskrnl/mm/ARM3/mminit.c
 * PURPOSE: ARM Memory Manager Initialization
 * PROGRAMMERS: ReactOS Portable Systems Group
 */

/* INCLUDES *******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#line 15 "ARM³::INIT"
#define MODULE_INVOLVED_IN_ARM3
#include "miarm.h"

/* GLOBALS ********************************************************************/

//
// These are all registry-configurable, but by default, the memory manager will
// figure out the most appropriate values.
//
ULONG MmMaximumNonPagedPoolPercent;
SIZE_T MmSizeOfNonPagedPoolInBytes;
SIZE_T MmMaximumNonPagedPoolInBytes;

/* Some of the same values, in pages */
PFN_NUMBER MmMaximumNonPagedPoolInPages;

//
// These numbers describe the discrete equation components of the nonpaged
// pool sizing algorithm.
//
// They are described on http://support.microsoft.com/default.aspx/kb/126402/ja
// along with the algorithm that uses them, which is implemented later below.
//
SIZE_T MmMinimumNonPagedPoolSize = 256 * 1024;
ULONG MmMinAdditionNonPagedPoolPerMb = 32 * 1024;
SIZE_T MmDefaultMaximumNonPagedPool = 1024 * 1024;
ULONG MmMaxAdditionNonPagedPoolPerMb = 400 * 1024;

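//
// A rough sketch of the arithmetic these constants drive (the actual sizing
// code lives in the machine-dependent init path, MiInitMachineDependent): per
// the KB article above, for every MB of RAM beyond the first 4MB, the initial
// pool grows by MmMinAdditionNonPagedPoolPerMb and the maximum grows by
// MmMaxAdditionNonPagedPoolPerMb. On a 1GB machine, that works out to roughly:
//
//     Initial: 256KB + 1020 * 32KB  ~= 32MB
//     Maximum: 1MB   + 1020 * 400KB ~= 400MB
//
// which matches the 32MB/400MB figures quoted in the layout discussion below.
//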
//
// The memory layout (and especially variable names) of the NT kernel mode
// components can be a bit hard to twig, especially when it comes to the non
// paged area.
//
// There are really two components to the non-paged pool:
//
// - The initial nonpaged pool, sized dynamically up to a maximum.
// - The expansion nonpaged pool, sized dynamically up to a maximum.
//
// The initial nonpaged pool is physically contiguous for performance, and
// immediately follows the PFN database, typically sharing the same PDE. It is
// a very small resource (32MB on a 1GB system), and capped at 128MB.
//
// Right now we call this the "ARM³ Nonpaged Pool" and it begins somewhere after
// the PFN database (which starts at 0xB0000000).
//
// The expansion nonpaged pool, on the other hand, can grow much bigger (400MB
// for a 1GB system). On ARM³ however, it is currently capped at 128MB.
//
// The address where the initial nonpaged pool starts is aptly named
// MmNonPagedPoolStart, and it describes a range of MmSizeOfNonPagedPoolInBytes
// bytes.
//
// Expansion nonpaged pool starts at an address described by the variable called
// MmNonPagedPoolExpansionStart, and it goes on for MmMaximumNonPagedPoolInBytes
// minus MmSizeOfNonPagedPoolInBytes bytes, always reaching MmNonPagedPoolEnd
// (because of the way it's calculated) at 0xFFBE0000.
//
// Initial nonpaged pool is allocated and mapped early-on during boot, but what
// about the expansion nonpaged pool? It is instead composed of special pages
// which belong to what are called System PTEs. These PTEs are the matter of a
// later discussion, but they are also considered part of the "nonpaged" OS, due
// to the fact that they are never paged out -- once an address is described by
// a System PTE, it is always valid, until the System PTE is torn down.
//
// System PTEs are actually composed of two "spaces", the system space proper,
// and the nonpaged pool expansion space. The latter, as we've already seen,
// begins at MmNonPagedPoolExpansionStart. Based on the number of System PTEs
// that the system will support, the remaining address space below this address
// is used to hold the system space PTEs. This address, in turn, is held in the
// variable named MmNonPagedSystemStart, which itself is never allowed to go
// below 0xEB000000 (thus creating an upper bound on the number of System PTEs).
//
// This means that 330MB are reserved for total nonpaged system VA, on top of
// whatever the initial nonpaged pool allocation is.
//
// The following URLs, valid as of April 23rd, 2008, support this evidence:
//
// http://www.cs.miami.edu/~burt/journal/NT/memory.html
// http://www.ditii.com/2007/09/28/windows-memory-management-x86-virtual-address-space/
//
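//
// (Quick arithmetic check on the 330MB figure above: 0xFFBE0000 - 0xEB000000 =
// 0x14BE0000 bytes, which is just under 332MB; the text rounds it to 330MB.)
//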
PVOID MmNonPagedSystemStart;
PVOID MmNonPagedPoolStart;
PVOID MmNonPagedPoolExpansionStart;
PVOID MmNonPagedPoolEnd = MI_NONPAGED_POOL_END;

//
// This is where paged pool starts by default
//
PVOID MmPagedPoolStart = MI_PAGED_POOL_START;
PVOID MmPagedPoolEnd;

//
// And this is its default size
//
SIZE_T MmSizeOfPagedPoolInBytes = MI_MIN_INIT_PAGED_POOLSIZE;
PFN_NUMBER MmSizeOfPagedPoolInPages = MI_MIN_INIT_PAGED_POOLSIZE / PAGE_SIZE;

//
// Session space starts at 0xBFFFFFFF and grows downwards
// By default, it includes an 8MB image area where we map win32k and video card
// drivers, followed by a 4MB area containing the session's working set. This is
// then followed by a 20MB mapped view area and finally by the session's paged
// pool, by default 16MB.
//
// On a normal system, this results in session space occupying the region from
// 0xBD000000 to 0xC0000000
//
// See miarm.h for the defines that determine the sizing of this region. On an
// NT system, some of these can be configured through the registry, but we don't
// support that yet.
//
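//
// (The region size follows directly from those defaults: 8MB image + 4MB
// working set + 20MB views + 16MB pool = 48MB, and 0xC0000000 - 48MB =
// 0xBD000000, matching the annotations on the variables below.)
//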
PVOID MiSessionSpaceEnd;    // 0xC0000000
PVOID MiSessionImageEnd;    // 0xC0000000
PVOID MiSessionImageStart;  // 0xBF800000
PVOID MiSessionViewStart;   // 0xBE000000
PVOID MiSessionPoolEnd;     // 0xBE000000
PVOID MiSessionPoolStart;   // 0xBD000000
PVOID MmSessionBase;        // 0xBD000000
SIZE_T MmSessionSize;
SIZE_T MmSessionViewSize;
SIZE_T MmSessionPoolSize;
SIZE_T MmSessionImageSize;

/*
 * These are the PTE addresses of the boundaries carved out above
 */
PMMPTE MiSessionImagePteStart;
PMMPTE MiSessionImagePteEnd;
PMMPTE MiSessionBasePte;
PMMPTE MiSessionLastPte;

//
// The system view space, on the other hand, is where sections that are memory
// mapped into "system space" end up.
//
// By default, it is a 16MB region.
//
PVOID MiSystemViewStart;
SIZE_T MmSystemViewSize;

//
// A copy of the system page directory (the page directory associated with the
// System process) is kept (double-mapped) by the manager in order to lazily
// map paged pool PDEs into external processes when they fault on a paged pool
// address.
//
PFN_NUMBER MmSystemPageDirectory[PD_COUNT];
PMMPTE MmSystemPagePtes;

//
// The system cache starts right after hyperspace. The first few pages are for
// keeping track of the system working set list.
//
// This should be 0xC0C00000 -- the cache itself starts at 0xC1000000
//
PMMWSL MmSystemCacheWorkingSetList = MI_SYSTEM_CACHE_WS_START;

//
// Windows NT seems to choose between 7000, 11000 and 50000
// On systems with more than 32MB, this number is then doubled, and further
// aligned up to a PDE boundary (4MB).
//
ULONG_PTR MmNumberOfSystemPtes;

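//
// (Each system PTE maps one 4KB page, so the usual tuned value of 11000,
// doubled to 22000 on larger machines, covers roughly 86MB of virtual
// address space before the PDE-boundary round-up.)
//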
//
// This is how many pages the PFN database will take up
// In Windows, this includes the Quark Color Table, but not in ARM³
//
PFN_NUMBER MxPfnAllocation;

//
// Unlike the old ReactOS Memory Manager, ARM³ (and Windows) does not keep track
// of pages that are not actually valid physical memory, such as ACPI reserved
// regions, BIOS address ranges, or holes in physical memory address space which
// could indicate device-mapped I/O memory.
//
// In fact, the lack of a PFN entry for a page usually indicates that this is
// I/O space instead.
//
// A bitmap, called the PFN bitmap, keeps track of all page frames by assigning
// a bit to each. If the bit is set, then the page is valid physical RAM.
//
RTL_BITMAP MiPfnBitMap;

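//
// For illustration only (this is not code that exists below): once the bitmap
// has been built, a caller holding a page frame number could test it with the
// standard RTL helper, e.g.
//
//     if (RtlCheckBit(&MiPfnBitMap, PageFrameIndex)) { /* real RAM */ }
//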
//
// This structure describes the different pieces of RAM-backed address space
//
PPHYSICAL_MEMORY_DESCRIPTOR MmPhysicalMemoryBlock;

//
// This is where we keep track of the most basic physical layout markers
//
PFN_NUMBER MmNumberOfPhysicalPages, MmHighestPhysicalPage, MmLowestPhysicalPage = -1;

//
// The total number of pages mapped by the boot loader, which include the kernel,
// HAL, boot drivers, registry, NLS files and other loader data structures, is
// kept track of here. This depends on "LoaderPagesSpanned" being correct when
// coming from the loader.
//
// This number is later aligned up to a PDE boundary.
//
SIZE_T MmBootImageSize;

//
// These three variables keep track of the core separation of address space that
// exists between kernel mode and user mode.
//
ULONG_PTR MmUserProbeAddress;
PVOID MmHighestUserAddress;
PVOID MmSystemRangeStart;

/* And these store the respective highest PTE/PDE address */
PMMPTE MiHighestUserPte;
PMMPDE MiHighestUserPde;

/* These variables define the system cache address space */
PVOID MmSystemCacheStart;
PVOID MmSystemCacheEnd;
MMSUPPORT MmSystemCacheWs;

//
// This is where hyperspace ends (followed by the system cache working set)
//
PVOID MmHyperSpaceEnd;

//
// Page coloring algorithm data
//
ULONG MmSecondaryColors;
ULONG MmSecondaryColorMask;

//
// Actual (registry-configurable) size of a GUI thread's stack
//
ULONG MmLargeStackSize = KERNEL_LARGE_STACK_SIZE;

//
// Before we have a PFN database, memory comes straight from our physical memory
// blocks, which is nice because it's guaranteed contiguous and also because once
// we take a page from here, the system doesn't see it anymore.
// However, once the fun is over, those pages must be re-integrated back into
// PFN society life, and that requires us keeping a copy of the original layout
// so that we can parse it later.
//
PMEMORY_ALLOCATION_DESCRIPTOR MxFreeDescriptor;
MEMORY_ALLOCATION_DESCRIPTOR MxOldFreeDescriptor;

/*
 * For each page's worth of L2 cache in a given set/way line, the zero and
 * free lists are organized in what is called a "color".
 *
 * This array points to the two lists, so it can be thought of as a multi-dimensional
 * array of MmFreePagesByColor[2][MmSecondaryColors]. Since the number is dynamic,
 * we describe the array in pointer form instead.
 *
 * On a final note, the color tables themselves are right after the PFN database.
 */
C_ASSERT(FreePageList == 1);
PMMCOLOR_TABLES MmFreePagesByColor[FreePageList + 1];

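//
// So, purely as an illustration of the indexing scheme described above, the
// free-list head for a given page frame would be located with something like:
//
//     Color = PageFrameIndex & MmSecondaryColorMask;
//     Table = &MmFreePagesByColor[FreePageList][Color];
//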
/* An event used in Phase 0 before the rest of the system is ready to go */
KEVENT MiTempEvent;

/* All the events used for memory threshold notifications */
PKEVENT MiLowMemoryEvent;
PKEVENT MiHighMemoryEvent;
PKEVENT MiLowPagedPoolEvent;
PKEVENT MiHighPagedPoolEvent;
PKEVENT MiLowNonPagedPoolEvent;
PKEVENT MiHighNonPagedPoolEvent;

/* The actual thresholds themselves, in page numbers */
PFN_NUMBER MmLowMemoryThreshold;
PFN_NUMBER MmHighMemoryThreshold;
PFN_NUMBER MiLowPagedPoolThreshold;
PFN_NUMBER MiHighPagedPoolThreshold;
PFN_NUMBER MiLowNonPagedPoolThreshold;
PFN_NUMBER MiHighNonPagedPoolThreshold;

/*
 * This number determines how many free pages must exist, at minimum, before we
 * start trimming working sets and flushing modified pages to obtain more free
 * pages.
 *
 * This number changes if the system detects that this is a server product
 */
PFN_NUMBER MmMinimumFreePages = 26;

/*
 * This number indicates how many pages we consider to be a low limit of having
 * "plenty" of free memory.
 *
 * It is doubled on systems that have more than 63MB of memory
 */
PFN_NUMBER MmPlentyFreePages = 400;

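/*
 * (With 4KB pages, 400 pages is 1.6MB; MiAdjustWorkingSetManagerParameters
 * below doubles it to 800 pages, i.e. 3.2MB, on those larger systems.)
 */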
/* These values store the type of system this is (small, med, large) and if server */
ULONG MmProductType;
MM_SYSTEMSIZE MmSystemSize;

/*
 * These values store the cache working set minimums and maximums, in pages
 *
 * The minimum value is boosted on systems with more than 24MB of RAM, and cut
 * down to only 32 pages on embedded (<24MB RAM) systems.
 *
 * An extra boost of 2MB is given on systems with more than 33MB of RAM.
 */
PFN_NUMBER MmSystemCacheWsMinimum = 288;
PFN_NUMBER MmSystemCacheWsMaximum = 350;

/* FIXME: Move to cache/working set code later */
BOOLEAN MmLargeSystemCache;

/*
 * This value determines in how many fragments/chunks the subsection prototype
 * PTEs should be allocated when mapping a section object. It is configurable in
 * the registry through the MapAllocationFragment parameter.
 *
 * The default is 64KB on systems with more than 1GB of RAM, 32KB on systems with
 * more than 256MB of RAM, and 16KB on systems with less than 256MB of RAM.
 *
 * The maximum it can be set to is 2MB, and the minimum is 4KB.
 */
SIZE_T MmAllocationFragment;

/*
 * These two values track how much virtual memory can be committed, and when
 * expansion should happen.
 */
// FIXME: They should be moved elsewhere since it's not an "init" setting?
SIZE_T MmTotalCommitLimit;
SIZE_T MmTotalCommitLimitMaximum;

/* PRIVATE FUNCTIONS **********************************************************/

//
// In Bavaria, this is probably a hate crime
//
VOID
FASTCALL
MiSyncARM3WithROS(IN PVOID AddressStart,
                  IN PVOID AddressEnd)
{
    //
    // Puerile piece of junk-grade carbonized horseshit puss sold to the lowest bidder
    //
    ULONG Pde = ADDR_TO_PDE_OFFSET(AddressStart);
    while (Pde <= ADDR_TO_PDE_OFFSET(AddressEnd))
    {
        //
        // This is both odious and heinous
        //
        extern ULONG MmGlobalKernelPageDirectory[1024];
        MmGlobalKernelPageDirectory[Pde] = ((PULONG)PDE_BASE)[Pde];
        Pde++;
    }
}

PFN_NUMBER
NTAPI
MxGetNextPage(IN PFN_NUMBER PageCount)
{
    PFN_NUMBER Pfn;

    /* Make sure we have enough pages */
    if (PageCount > MxFreeDescriptor->PageCount)
    {
        /* Crash the system */
        KeBugCheckEx(INSTALL_MORE_MEMORY,
                     MmNumberOfPhysicalPages,
                     MxFreeDescriptor->PageCount,
                     MxOldFreeDescriptor.PageCount,
                     PageCount);
    }

    /* Use our lowest usable free pages */
    Pfn = MxFreeDescriptor->BasePage;
    MxFreeDescriptor->BasePage += PageCount;
    MxFreeDescriptor->PageCount -= PageCount;
    return Pfn;
}

VOID
NTAPI
MiComputeColorInformation(VOID)
{
    ULONG L2Associativity;

    /* Check if no setting was provided already */
    if (!MmSecondaryColors)
    {
        /* Get L2 cache information */
        L2Associativity = KeGetPcr()->SecondLevelCacheAssociativity;

        /* The number of colors is the number of cache bytes per set/way */
        MmSecondaryColors = KeGetPcr()->SecondLevelCacheSize;
        if (L2Associativity) MmSecondaryColors /= L2Associativity;
    }

    /* Now convert cache bytes into pages */
    MmSecondaryColors >>= PAGE_SHIFT;
    if (!MmSecondaryColors)
    {
        /* If there was no cache data from the KPCR, use the default colors */
        MmSecondaryColors = MI_SECONDARY_COLORS;
    }
    else
    {
        /* Otherwise, make sure there aren't too many colors */
        if (MmSecondaryColors > MI_MAX_SECONDARY_COLORS)
        {
            /* Set the maximum */
            MmSecondaryColors = MI_MAX_SECONDARY_COLORS;
        }

        /* Make sure there aren't too few colors either */
        if (MmSecondaryColors < MI_MIN_SECONDARY_COLORS)
        {
            /* Set the default */
            MmSecondaryColors = MI_SECONDARY_COLORS;
        }

        /* Finally make sure the colors are a power of two */
        if (MmSecondaryColors & (MmSecondaryColors - 1))
        {
            /* Set the default */
            MmSecondaryColors = MI_SECONDARY_COLORS;
        }
    }

    /* Compute the mask and store it */
    MmSecondaryColorMask = MmSecondaryColors - 1;
    KeGetCurrentPrcb()->SecondaryColorMask = MmSecondaryColorMask;
}

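//
// A worked example of the computation above, assuming a 512KB, 8-way L2
// cache and 4KB pages: 512KB / 8 ways = 64KB per way, and 64KB >> PAGE_SHIFT
// gives 16 colors, so MmSecondaryColorMask ends up as 0xF.
//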
VOID
NTAPI
MiInitializeColorTables(VOID)
{
    ULONG i;
    PMMPTE PointerPte, LastPte;
    MMPTE TempPte = ValidKernelPte;

    /* The color table starts after the ARM3 PFN database */
    MmFreePagesByColor[0] = (PMMCOLOR_TABLES)&MmPfnDatabase[MmHighestPhysicalPage + 1];

    /* Loop the PTEs. We have two color tables for each secondary color */
    PointerPte = MiAddressToPte(&MmFreePagesByColor[0][0]);
    LastPte = MiAddressToPte((ULONG_PTR)MmFreePagesByColor[0] +
                             (2 * MmSecondaryColors * sizeof(MMCOLOR_TABLES))
                             - 1);
    while (PointerPte <= LastPte)
    {
        /* Check for valid PTE */
        if (PointerPte->u.Hard.Valid == 0)
        {
            /* Get a page and map it */
            TempPte.u.Hard.PageFrameNumber = MxGetNextPage(1);
            MI_WRITE_VALID_PTE(PointerPte, TempPte);

            /* Zero out the page */
            RtlZeroMemory(MiPteToAddress(PointerPte), PAGE_SIZE);
        }

        /* Next */
        PointerPte++;
    }

    /* Now set the address of the next list, right after this one */
    MmFreePagesByColor[1] = &MmFreePagesByColor[0][MmSecondaryColors];

    /* Now loop the lists to set them up */
    for (i = 0; i < MmSecondaryColors; i++)
    {
        /* Set both free and zero lists for each color */
        MmFreePagesByColor[ZeroedPageList][i].Flink = 0xFFFFFFFF;
        MmFreePagesByColor[ZeroedPageList][i].Blink = (PVOID)0xFFFFFFFF;
        MmFreePagesByColor[ZeroedPageList][i].Count = 0;
        MmFreePagesByColor[FreePageList][i].Flink = 0xFFFFFFFF;
        MmFreePagesByColor[FreePageList][i].Blink = (PVOID)0xFFFFFFFF;
        MmFreePagesByColor[FreePageList][i].Count = 0;
    }
}

BOOLEAN
NTAPI
MiIsRegularMemory(IN PLOADER_PARAMETER_BLOCK LoaderBlock,
                  IN PFN_NUMBER Pfn)
{
    PLIST_ENTRY NextEntry;
    PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;

    /* Loop the memory descriptors */
    NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
    while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
    {
        /* Get the memory descriptor */
        MdBlock = CONTAINING_RECORD(NextEntry,
                                    MEMORY_ALLOCATION_DESCRIPTOR,
                                    ListEntry);

        /* Check if this PFN could be part of the block */
        if (Pfn >= (MdBlock->BasePage))
        {
            /* Check if it really is part of the block */
            if (Pfn < (MdBlock->BasePage + MdBlock->PageCount))
            {
                /* Check if the block is actually memory we don't map */
                if ((MdBlock->MemoryType == LoaderFirmwarePermanent) ||
                    (MdBlock->MemoryType == LoaderBBTMemory) ||
                    (MdBlock->MemoryType == LoaderSpecialMemory))
                {
                    /* We don't need PFN database entries for this memory */
                    break;
                }

                /* This is memory we want to map */
                return TRUE;
            }
        }
        else
        {
            /* Blocks are ordered, so if it's not here, it doesn't exist */
            break;
        }

        /* Get to the next descriptor */
        NextEntry = MdBlock->ListEntry.Flink;
    }

    /* Check if this PFN is actually from our free memory descriptor */
    if ((Pfn >= MxOldFreeDescriptor.BasePage) &&
        (Pfn < MxOldFreeDescriptor.BasePage + MxOldFreeDescriptor.PageCount))
    {
        /* We use these pages for initial mappings, so we do want to count them */
        return TRUE;
    }

    /* Otherwise this isn't memory that we describe or care about */
    return FALSE;
}

VOID
NTAPI
MiMapPfnDatabase(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
    ULONG FreePage, FreePageCount, PagesLeft, BasePage, PageCount;
    PLIST_ENTRY NextEntry;
    PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
    PMMPTE PointerPte, LastPte;
    MMPTE TempPte = ValidKernelPte;

    /* Get current page data, since we won't be using MxGetNextPage as it would corrupt our state */
    FreePage = MxFreeDescriptor->BasePage;
    FreePageCount = MxFreeDescriptor->PageCount;
    PagesLeft = 0;

    /* Loop the memory descriptors */
    NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
    while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
    {
        /* Get the descriptor */
        MdBlock = CONTAINING_RECORD(NextEntry,
                                    MEMORY_ALLOCATION_DESCRIPTOR,
                                    ListEntry);
        if ((MdBlock->MemoryType == LoaderFirmwarePermanent) ||
            (MdBlock->MemoryType == LoaderBBTMemory) ||
            (MdBlock->MemoryType == LoaderSpecialMemory))
        {
            /* These pages are not part of the PFN database */
            NextEntry = MdBlock->ListEntry.Flink;
            continue;
        }

        /* Next, check if this is our special free descriptor we've found */
        if (MdBlock == MxFreeDescriptor)
        {
            /* Use the real numbers instead */
            BasePage = MxOldFreeDescriptor.BasePage;
            PageCount = MxOldFreeDescriptor.PageCount;
        }
        else
        {
            /* Use the descriptor's numbers */
            BasePage = MdBlock->BasePage;
            PageCount = MdBlock->PageCount;
        }

        /* Get the PTEs for this range */
        PointerPte = MiAddressToPte(&MmPfnDatabase[BasePage]);
        LastPte = MiAddressToPte(((ULONG_PTR)&MmPfnDatabase[BasePage + PageCount]) - 1);
        DPRINT("MD Type: %lx Base: %lx Count: %lx\n", MdBlock->MemoryType, BasePage, PageCount);

        /* Loop them */
        while (PointerPte <= LastPte)
        {
            /* We'll only touch PTEs that aren't already valid */
            if (PointerPte->u.Hard.Valid == 0)
            {
                /* Use the next free page */
                TempPte.u.Hard.PageFrameNumber = FreePage;
                ASSERT(FreePageCount != 0);

                /* Consume free pages */
                FreePage++;
                FreePageCount--;
                if (!FreePageCount)
                {
                    /* Out of memory */
                    KeBugCheckEx(INSTALL_MORE_MEMORY,
                                 MmNumberOfPhysicalPages,
                                 FreePageCount,
                                 MxOldFreeDescriptor.PageCount,
                                 1);
                }

                /* Write out this PTE */
                PagesLeft++;
                MI_WRITE_VALID_PTE(PointerPte, TempPte);

                /* Zero this page */
                RtlZeroMemory(MiPteToAddress(PointerPte), PAGE_SIZE);
            }

            /* Next! */
            PointerPte++;
        }

        /* Do the next address range */
        NextEntry = MdBlock->ListEntry.Flink;
    }

    /* Now update the free descriptors to consume the pages we used up during the PFN allocation loop */
    MxFreeDescriptor->BasePage = FreePage;
    MxFreeDescriptor->PageCount = FreePageCount;
}

VOID
NTAPI
MiBuildPfnDatabaseFromPages(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
    PMMPDE PointerPde;
    PMMPTE PointerPte;
    ULONG i, Count, j;
    PFN_NUMBER PageFrameIndex, StartupPdIndex, PtePageIndex;
    PMMPFN Pfn1, Pfn2;
    ULONG_PTR BaseAddress = 0;

    /* PFN of the startup page directory */
    StartupPdIndex = PFN_FROM_PTE(MiAddressToPde(PDE_BASE));

    /* Start with the first PDE and scan them all */
    PointerPde = MiAddressToPde(NULL);
    Count = PD_COUNT * PDE_COUNT;
    for (i = 0; i < Count; i++)
    {
        /* Check for valid PDE */
        if (PointerPde->u.Hard.Valid == 1)
        {
            /* Get the PFN from it */
            PageFrameIndex = PFN_FROM_PTE(PointerPde);

            /* Do we want a PFN entry for this page? */
            if (MiIsRegularMemory(LoaderBlock, PageFrameIndex))
            {
                /* Yes we do, set it up */
                Pfn1 = MiGetPfnEntry(PageFrameIndex);
                Pfn1->u4.PteFrame = StartupPdIndex;
                Pfn1->PteAddress = PointerPde;
                Pfn1->u2.ShareCount++;
                Pfn1->u3.e2.ReferenceCount = 1;
                Pfn1->u3.e1.PageLocation = ActiveAndValid;
                Pfn1->u3.e1.CacheAttribute = MiNonCached;
            }
            else
            {
                /* No PFN entry */
                Pfn1 = NULL;
            }

            /* Now get the PTE and scan the pages */
            PointerPte = MiAddressToPte(BaseAddress);
            for (j = 0; j < PTE_COUNT; j++)
            {
                /* Check for a valid PTE */
                if (PointerPte->u.Hard.Valid == 1)
                {
                    /* Increase the shared count of the PFN entry for the PDE */
                    ASSERT(Pfn1 != NULL);
                    Pfn1->u2.ShareCount++;

                    /* Now check if the PTE is valid memory too */
                    PtePageIndex = PFN_FROM_PTE(PointerPte);
                    if (MiIsRegularMemory(LoaderBlock, PtePageIndex))
                    {
                        /*
                         * Only add pages above the end of system code or pages
                         * that are part of nonpaged pool
                         */
                        if ((BaseAddress >= 0xA0000000) ||
                            ((BaseAddress >= (ULONG_PTR)MmNonPagedPoolStart) &&
                             (BaseAddress < (ULONG_PTR)MmNonPagedPoolStart +
                                            MmSizeOfNonPagedPoolInBytes)))
                        {
                            /* Get the PFN entry and make sure it too is valid */
                            Pfn2 = MiGetPfnEntry(PtePageIndex);
                            if ((MmIsAddressValid(Pfn2)) &&
                                (MmIsAddressValid(Pfn2 + 1)))
                            {
                                /* Setup the PFN entry */
                                Pfn2->u4.PteFrame = PageFrameIndex;
                                Pfn2->PteAddress = PointerPte;
                                Pfn2->u2.ShareCount++;
                                Pfn2->u3.e2.ReferenceCount = 1;
                                Pfn2->u3.e1.PageLocation = ActiveAndValid;
                                Pfn2->u3.e1.CacheAttribute = MiNonCached;
                            }
                        }
                    }
                }

                /* Next PTE */
                PointerPte++;
                BaseAddress += PAGE_SIZE;
            }
        }
        else
        {
            /* Next PDE mapped address */
            BaseAddress += PDE_MAPPED_VA;
        }

        /* Next PDE */
        PointerPde++;
    }
}

VOID
NTAPI
MiBuildPfnDatabaseZeroPage(VOID)
{
    PMMPFN Pfn1;
    PMMPDE PointerPde;

    /* Grab the lowest page and check if it has no real references */
    Pfn1 = MiGetPfnEntry(MmLowestPhysicalPage);
    if (!(MmLowestPhysicalPage) && !(Pfn1->u3.e2.ReferenceCount))
    {
        /* Make it a bogus page to catch errors */
        PointerPde = MiAddressToPde(0xFFFFFFFF);
        Pfn1->u4.PteFrame = PFN_FROM_PTE(PointerPde);
        Pfn1->PteAddress = PointerPde;
        Pfn1->u2.ShareCount++;
        Pfn1->u3.e2.ReferenceCount = 0xFFF0;
        Pfn1->u3.e1.PageLocation = ActiveAndValid;
        Pfn1->u3.e1.CacheAttribute = MiNonCached;
    }
}

VOID
NTAPI
MiBuildPfnDatabaseFromLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
    PLIST_ENTRY NextEntry;
    PFN_NUMBER PageCount = 0;
    PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
    PFN_NUMBER PageFrameIndex;
    PMMPFN Pfn1;
    PMMPTE PointerPte;
    PMMPDE PointerPde;
    KIRQL OldIrql;

    /* Now loop through the descriptors */
    NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
    while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
    {
        /* Get the current descriptor */
        MdBlock = CONTAINING_RECORD(NextEntry,
                                    MEMORY_ALLOCATION_DESCRIPTOR,
                                    ListEntry);

        /* Read its data */
        PageCount = MdBlock->PageCount;
        PageFrameIndex = MdBlock->BasePage;

        /* Don't allow memory above what the PFN database is mapping */
        if (PageFrameIndex > MmHighestPhysicalPage)
        {
            /* Since they are ordered, everything past here will be larger */
            break;
        }

        /* On the other hand, the end page might be higher up... */
        if ((PageFrameIndex + PageCount) > (MmHighestPhysicalPage + 1))
        {
            /* In which case we'll trim the descriptor to go as high as we can */
            PageCount = MmHighestPhysicalPage + 1 - PageFrameIndex;
            MdBlock->PageCount = PageCount;

            /* But if there's nothing left to trim, we got too high, so quit */
            if (!PageCount) break;
        }

        /* Now check the descriptor type */
        switch (MdBlock->MemoryType)
        {
            /* Check for bad RAM */
            case LoaderBad:

                DPRINT1("You have damaged RAM modules. Stopping boot\n");
                while (TRUE);
                break;

            /* Check for free RAM */
            case LoaderFree:
            case LoaderLoadedProgram:
            case LoaderFirmwareTemporary:
            case LoaderOsloaderStack:

                /* Get the last page of this descriptor. Note we loop backwards */
                PageFrameIndex += PageCount - 1;
                Pfn1 = MiGetPfnEntry(PageFrameIndex);

                /* Lock the PFN Database */
                OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
                while (PageCount--)
                {
                    /* If the page really has no references, mark it as free */
                    if (!Pfn1->u3.e2.ReferenceCount)
                    {
                        /* Add it to the free list */
                        Pfn1->u3.e1.CacheAttribute = MiNonCached;
                        MiInsertPageInFreeList(PageFrameIndex);
                    }

                    /* Go to the next page */
                    Pfn1--;
                    PageFrameIndex--;
                }

                /* Release PFN database */
                KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

                /* Done with this block */
                break;

            /* Check for pages that are invisible to us */
            case LoaderFirmwarePermanent:
            case LoaderSpecialMemory:
            case LoaderBBTMemory:

                /* And skip them */
                break;

            default:

                /* Map these pages with the KSEG0 mapping that adds 0x80000000 */
                PointerPte = MiAddressToPte(KSEG0_BASE + (PageFrameIndex << PAGE_SHIFT));
                Pfn1 = MiGetPfnEntry(PageFrameIndex);
                while (PageCount--)
                {
                    /* Check if the page is really unused */
                    PointerPde = MiAddressToPde(KSEG0_BASE + (PageFrameIndex << PAGE_SHIFT));
                    if (!Pfn1->u3.e2.ReferenceCount)
                    {
                        /* Mark it as being in-use */
                        Pfn1->u4.PteFrame = PFN_FROM_PTE(PointerPde);
                        Pfn1->PteAddress = PointerPte;
                        Pfn1->u2.ShareCount++;
                        Pfn1->u3.e2.ReferenceCount = 1;
                        Pfn1->u3.e1.PageLocation = ActiveAndValid;
                        Pfn1->u3.e1.CacheAttribute = MiNonCached;

                        /* Check for RAM disk page */
                        if (MdBlock->MemoryType == LoaderXIPRom)
                        {
                            /* Make it a pseudo-I/O ROM mapping */
                            Pfn1->u1.Flink = 0;
                            Pfn1->u2.ShareCount = 0;
                            Pfn1->u3.e2.ReferenceCount = 0;
                            Pfn1->u3.e1.PageLocation = 0;
                            Pfn1->u3.e1.Rom = 1;
                            Pfn1->u4.InPageError = 0;
                            Pfn1->u3.e1.PrototypePte = 1;
                        }
                    }

                    /* Advance page structures */
                    Pfn1++;
                    PageFrameIndex++;
                    PointerPte++;
                }
                break;
        }

        /* Next descriptor entry */
        NextEntry = MdBlock->ListEntry.Flink;
    }
}

VOID
NTAPI
MiBuildPfnDatabaseSelf(VOID)
{
    PMMPTE PointerPte, LastPte;
    PMMPFN Pfn1;

    /* Loop the PFN database pages */
    PointerPte = MiAddressToPte(MiGetPfnEntry(MmLowestPhysicalPage));
    LastPte = MiAddressToPte(MiGetPfnEntry(MmHighestPhysicalPage));
    while (PointerPte <= LastPte)
    {
        /* Make sure the page is valid */
        if (PointerPte->u.Hard.Valid == 1)
        {
            /* Get the PFN entry and just mark it referenced */
            Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
            Pfn1->u2.ShareCount = 1;
            Pfn1->u3.e2.ReferenceCount = 1;
        }

        /* Next */
        PointerPte++;
    }
}

VOID
NTAPI
MiInitializePfnDatabase(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
    /* Scan memory and start setting up PFN entries */
    MiBuildPfnDatabaseFromPages(LoaderBlock);

    /* Add the zero page */
    MiBuildPfnDatabaseZeroPage();

    /* Scan the loader block and build the rest of the PFN database */
    MiBuildPfnDatabaseFromLoaderBlock(LoaderBlock);

    /* Finally add the pages for the PFN database itself */
    MiBuildPfnDatabaseSelf();
}

VOID
NTAPI
MiAdjustWorkingSetManagerParameters(IN BOOLEAN Client)
{
    /* This function needs to do more work, for now, we tune page minimums */

    /* Check for a system with around 64MB RAM or more */
    if (MmNumberOfPhysicalPages >= (63 * _1MB) / PAGE_SIZE)
    {
        /* Double the minimum amount of pages we consider for a "plenty free" scenario */
        MmPlentyFreePages *= 2;
    }
}

VOID
NTAPI
MiNotifyMemoryEvents(VOID)
{
    /* Are we in a low-memory situation? */
    if (MmAvailablePages < MmLowMemoryThreshold)
    {
        /* Clear high, set low */
        if (KeReadStateEvent(MiHighMemoryEvent)) KeClearEvent(MiHighMemoryEvent);
        if (!KeReadStateEvent(MiLowMemoryEvent)) KeSetEvent(MiLowMemoryEvent, 0, FALSE);
    }
    else if (MmAvailablePages < MmHighMemoryThreshold)
    {
        /* We are in between, clear both */
        if (KeReadStateEvent(MiHighMemoryEvent)) KeClearEvent(MiHighMemoryEvent);
        if (KeReadStateEvent(MiLowMemoryEvent)) KeClearEvent(MiLowMemoryEvent);
    }
    else
    {
        /* Clear low, set high */
        if (KeReadStateEvent(MiLowMemoryEvent)) KeClearEvent(MiLowMemoryEvent);
        if (!KeReadStateEvent(MiHighMemoryEvent)) KeSetEvent(MiHighMemoryEvent, 0, FALSE);
    }
}

NTSTATUS
NTAPI
MiCreateMemoryEvent(IN PUNICODE_STRING Name,
                    OUT PKEVENT *Event)
{
    PACL Dacl;
    HANDLE EventHandle;
    ULONG DaclLength;
    NTSTATUS Status;
    OBJECT_ATTRIBUTES ObjectAttributes;
    SECURITY_DESCRIPTOR SecurityDescriptor;

    /* Create the SD */
    Status = RtlCreateSecurityDescriptor(&SecurityDescriptor,
                                         SECURITY_DESCRIPTOR_REVISION);
    if (!NT_SUCCESS(Status)) return Status;

    /* One ACL with 3 ACEs, each containing one SID */
    DaclLength = sizeof(ACL) +
                 3 * sizeof(ACCESS_ALLOWED_ACE) +
                 RtlLengthSid(SeLocalSystemSid) +
                 RtlLengthSid(SeAliasAdminsSid) +
                 RtlLengthSid(SeWorldSid);

    /* Allocate space for the DACL */
    Dacl = ExAllocatePoolWithTag(PagedPool, DaclLength, 'lcaD');
    if (!Dacl) return STATUS_INSUFFICIENT_RESOURCES;

    /* Setup the ACL inside it */
    Status = RtlCreateAcl(Dacl, DaclLength, ACL_REVISION);
    if (!NT_SUCCESS(Status)) goto CleanUp;

    /* Add query rights for everyone */
    Status = RtlAddAccessAllowedAce(Dacl,
                                    ACL_REVISION,
                                    SYNCHRONIZE | EVENT_QUERY_STATE | READ_CONTROL,
                                    SeWorldSid);
    if (!NT_SUCCESS(Status)) goto CleanUp;

    /* Full rights for the admin */
    Status = RtlAddAccessAllowedAce(Dacl,
                                    ACL_REVISION,
                                    EVENT_ALL_ACCESS,
                                    SeAliasAdminsSid);
    if (!NT_SUCCESS(Status)) goto CleanUp;

    /* As well as full rights for the system */
    Status = RtlAddAccessAllowedAce(Dacl,
                                    ACL_REVISION,
                                    EVENT_ALL_ACCESS,
                                    SeLocalSystemSid);
    if (!NT_SUCCESS(Status)) goto CleanUp;

    /* Set this DACL inside the SD */
    Status = RtlSetDaclSecurityDescriptor(&SecurityDescriptor,
                                          TRUE,
                                          Dacl,
                                          FALSE);
    if (!NT_SUCCESS(Status)) goto CleanUp;

    /* Setup the event attributes, making sure it's a permanent one */
    InitializeObjectAttributes(&ObjectAttributes,
                               Name,
                               OBJ_KERNEL_HANDLE | OBJ_PERMANENT,
                               NULL,
                               &SecurityDescriptor);

    /* Create the event */
    Status = ZwCreateEvent(&EventHandle,
                           EVENT_ALL_ACCESS,
                           &ObjectAttributes,
                           NotificationEvent,
                           FALSE);
CleanUp:
    /* Free the DACL */
    ExFreePool(Dacl);

    /* Check if this is the success path */
    if (NT_SUCCESS(Status))
    {
        /* Add a reference to the object, then close the handle we had */
        Status = ObReferenceObjectByHandle(EventHandle,
                                           EVENT_MODIFY_STATE,
                                           ExEventObjectType,
                                           KernelMode,
                                           (PVOID*)Event,
                                           NULL);
        ZwClose(EventHandle);
    }

    /* Return status */
    return Status;
}

BOOLEAN
NTAPI
MiInitializeMemoryEvents(VOID)
{
    UNICODE_STRING LowString = RTL_CONSTANT_STRING(L"\\KernelObjects\\LowMemoryCondition");
    UNICODE_STRING HighString = RTL_CONSTANT_STRING(L"\\KernelObjects\\HighMemoryCondition");
    UNICODE_STRING LowPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\LowPagedPoolCondition");
    UNICODE_STRING HighPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\HighPagedPoolCondition");
    UNICODE_STRING LowNonPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\LowNonPagedPoolCondition");
    UNICODE_STRING HighNonPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\HighNonPagedPoolCondition");
    NTSTATUS Status;

    /* Check if we have a registry setting */
    if (MmLowMemoryThreshold)
    {
        /* Convert it to pages */
        MmLowMemoryThreshold *= (_1MB / PAGE_SIZE);
    }
    else
    {
        /* The low memory threshold is hit when we don't consider that we have "plenty" of free pages anymore */
        MmLowMemoryThreshold = MmPlentyFreePages;

        /* More than one GB of memory? */
        if (MmNumberOfPhysicalPages > 0x40000)
        {
            /* Start at 32MB, and add another 16MB for each GB */
            MmLowMemoryThreshold = (32 * _1MB) / PAGE_SIZE;
            MmLowMemoryThreshold += ((MmNumberOfPhysicalPages - 0x40000) >> 7);
        }
        else if (MmNumberOfPhysicalPages > 0x8000)
        {
            /* For systems with > 128MB RAM, add another 4MB for each 128MB */
            MmLowMemoryThreshold += ((MmNumberOfPhysicalPages - 0x8000) >> 5);
        }

        /* Don't let the minimum threshold go past 64MB */
        MmLowMemoryThreshold = min(MmLowMemoryThreshold, (64 * _1MB) / PAGE_SIZE);
    }
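
    /*
     * Worked example of the defaulting above, assuming a 2GB machine with
     * 4KB pages: MmNumberOfPhysicalPages = 0x80000 > 0x40000, so the
     * threshold starts at 32MB (8192 pages), plus (0x80000 - 0x40000) >> 7 =
     * 2048 pages, for 40MB total -- comfortably below the 64MB cap.
     */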

    /* Check if we have a registry setting */
    if (MmHighMemoryThreshold)
    {
        /* Convert it into pages */
        MmHighMemoryThreshold *= (_1MB / PAGE_SIZE);
    }
    else
    {
        /* Otherwise, the default is three times the low memory threshold */
        MmHighMemoryThreshold = 3 * MmLowMemoryThreshold;
        ASSERT(MmHighMemoryThreshold > MmLowMemoryThreshold);
    }

    /* Make sure high threshold is actually higher than the low */
    MmHighMemoryThreshold = max(MmHighMemoryThreshold, MmLowMemoryThreshold);

    /* Create the memory events for all the thresholds */
    Status = MiCreateMemoryEvent(&LowString, &MiLowMemoryEvent);
    if (!NT_SUCCESS(Status)) return FALSE;
    Status = MiCreateMemoryEvent(&HighString, &MiHighMemoryEvent);
    if (!NT_SUCCESS(Status)) return FALSE;
    Status = MiCreateMemoryEvent(&LowPagedPoolString, &MiLowPagedPoolEvent);
    if (!NT_SUCCESS(Status)) return FALSE;
    Status = MiCreateMemoryEvent(&HighPagedPoolString, &MiHighPagedPoolEvent);
    if (!NT_SUCCESS(Status)) return FALSE;
    Status = MiCreateMemoryEvent(&LowNonPagedPoolString, &MiLowNonPagedPoolEvent);
    if (!NT_SUCCESS(Status)) return FALSE;
    Status = MiCreateMemoryEvent(&HighNonPagedPoolString, &MiHighNonPagedPoolEvent);
    if (!NT_SUCCESS(Status)) return FALSE;

    /* Now setup the pool events */
    MiInitializePoolEvents();

    /* Set the initial event state */
    MiNotifyMemoryEvents();
    return TRUE;
}

VOID
NTAPI
MiAddHalIoMappings(VOID)
{
    PVOID BaseAddress;
    PMMPTE PointerPde;
    PMMPTE PointerPte;
    ULONG i, j, PdeCount;
    PFN_NUMBER PageFrameIndex;

    /* HAL Heap address -- should be on a PDE boundary */
    BaseAddress = (PVOID)0xFFC00000;
    ASSERT(MiAddressToPteOffset(BaseAddress) == 0);

    /* Check how many PDEs the heap has */
    PointerPde = MiAddressToPde(BaseAddress);
    PdeCount = PDE_COUNT - ADDR_TO_PDE_OFFSET(BaseAddress);
    for (i = 0; i < PdeCount; i++)
    {
        /* Does the HAL own this mapping? */
        if ((PointerPde->u.Hard.Valid == 1) &&
            (PointerPde->u.Hard.LargePage == 0))
        {
            /* Get the PTE for it and scan each page */
            PointerPte = MiAddressToPte(BaseAddress);
            for (j = 0; j < PTE_COUNT; j++)
            {
                /* Does the HAL own this page? */
                if (PointerPte->u.Hard.Valid == 1)
                {
                    /* Is the HAL using it for device or I/O mapped memory? */
                    PageFrameIndex = PFN_FROM_PTE(PointerPte);
                    if (!MiGetPfnEntry(PageFrameIndex))
                    {
                        /* FIXME: For PAT, we need to track I/O cache attributes for coherency */
                        DPRINT1("HAL I/O Mapping at %p is unsafe\n", BaseAddress);
                    }
                }

                /* Move to the next page */
                BaseAddress = (PVOID)((ULONG_PTR)BaseAddress + PAGE_SIZE);
                PointerPte++;
            }
        }
        else
        {
            /* Move to the next address */
            BaseAddress = (PVOID)((ULONG_PTR)BaseAddress + PDE_MAPPED_VA);
        }

        /* Move to the next PDE */
        PointerPde++;
    }
}

VOID
NTAPI
MmDumpArmPfnDatabase(VOID)
{
    ULONG i;
    PMMPFN Pfn1;
    PCHAR Consumer = "Unknown";
    KIRQL OldIrql;
    ULONG ActivePages = 0, FreePages = 0, OtherPages = 0;

    KeRaiseIrql(HIGH_LEVEL, &OldIrql);

    //
    // Loop the PFN database
    //
    for (i = 0; i <= MmHighestPhysicalPage; i++)
    {
        Pfn1 = MiGetPfnEntry(i);
        if (!Pfn1) continue;

        //
        // Get the page location
        //
        switch (Pfn1->u3.e1.PageLocation)
        {
            case ActiveAndValid:

                Consumer = "Active and Valid";
                ActivePages++;
                break;

            case FreePageList:

                Consumer = "Free Page List";
                FreePages++;
                break;

            default:

                Consumer = "Other (ASSERT!)";
                OtherPages++;
                break;
        }

        //
        // Pretty-print the page
        //
        DbgPrint("0x%08p:\t%20s\t(%02d.%02d) [%08p-%08p])\n",
                 i << PAGE_SHIFT,
                 Consumer,
                 Pfn1->u3.e2.ReferenceCount,
                 Pfn1->u2.ShareCount,
                 Pfn1->PteAddress,
                 Pfn1->u4.PteFrame);
    }

    DbgPrint("Active: %d pages\t[%d KB]\n", ActivePages, (ActivePages << PAGE_SHIFT) / 1024);
    DbgPrint("Free: %d pages\t[%d KB]\n", FreePages, (FreePages << PAGE_SHIFT) / 1024);
    DbgPrint("Other: %d pages\t[%d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);

    KeLowerIrql(OldIrql);
}

PFN_NUMBER
NTAPI
MiPagesInLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock,
                     IN PBOOLEAN IncludeType)
{
    PLIST_ENTRY NextEntry;
    PFN_NUMBER PageCount = 0;
    PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;

    //
    // Now loop through the descriptors
    //
    NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
    while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
    {
        //
        // Grab each one, and check if it's one we should include
        //
        MdBlock = CONTAINING_RECORD(NextEntry,
                                    MEMORY_ALLOCATION_DESCRIPTOR,
                                    ListEntry);
        if ((MdBlock->MemoryType < LoaderMaximum) &&
            (IncludeType[MdBlock->MemoryType]))
        {
            //
            // Add this to our running total
            //
            PageCount += MdBlock->PageCount;
        }

        //
        // Try the next descriptor
        //
        NextEntry = MdBlock->ListEntry.Flink;
    }

    //
    // Return the total
    //
    return PageCount;
}

PPHYSICAL_MEMORY_DESCRIPTOR
NTAPI
MmInitializeMemoryLimits(IN PLOADER_PARAMETER_BLOCK LoaderBlock,
                         IN PBOOLEAN IncludeType)
{
    PLIST_ENTRY NextEntry;
    ULONG Run = 0, InitialRuns = 0;
    PFN_NUMBER NextPage = -1, PageCount = 0;
    PPHYSICAL_MEMORY_DESCRIPTOR Buffer, NewBuffer;
    PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;

    //
    // Scan the memory descriptors
    //
    NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
    while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
    {
        //
        // For each one, increase the memory allocation estimate
        //
        InitialRuns++;
        NextEntry = NextEntry->Flink;
    }

    //
    // Allocate the maximum we'll ever need
    //
    Buffer = ExAllocatePoolWithTag(NonPagedPool,
                                   sizeof(PHYSICAL_MEMORY_DESCRIPTOR) +
                                   sizeof(PHYSICAL_MEMORY_RUN) *
                                   (InitialRuns - 1),
                                   'lMmM');
    if (!Buffer) return NULL;

    //
    // For now that's how many runs we have
    //
    Buffer->NumberOfRuns = InitialRuns;

    //
    // Now loop through the descriptors again
    //
    NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
    while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
    {
        //
        // Grab each one, and check if it's one we should include
        //
        MdBlock = CONTAINING_RECORD(NextEntry,
                                    MEMORY_ALLOCATION_DESCRIPTOR,
                                    ListEntry);
        if ((MdBlock->MemoryType < LoaderMaximum) &&
            (IncludeType[MdBlock->MemoryType]))
        {
            //
            // Add this to our running total
            //
            PageCount += MdBlock->PageCount;

            //
            // Check if the next page is described by the next descriptor
            //
            if (MdBlock->BasePage == NextPage)
            {
                //
                // Combine it into the same physical run
                //
                ASSERT(MdBlock->PageCount != 0);
                Buffer->Run[Run - 1].PageCount += MdBlock->PageCount;
                NextPage += MdBlock->PageCount;
            }
            else
            {
                //
                // Otherwise just duplicate the descriptor's contents
                //
                Buffer->Run[Run].BasePage = MdBlock->BasePage;
                Buffer->Run[Run].PageCount = MdBlock->PageCount;
                NextPage = Buffer->Run[Run].BasePage + Buffer->Run[Run].PageCount;

                //
                // And in this case, increase the number of runs
                //
                Run++;
            }
        }

        //
        // Try the next descriptor
        //
        NextEntry = MdBlock->ListEntry.Flink;
    }

    //
    // We should not have been able to go past our initial estimate
    //
    ASSERT(Run <= Buffer->NumberOfRuns);

    //
    // Our guess was probably exaggerated...
    //
    if (InitialRuns > Run)
    {
        //
        // Allocate a more accurately sized buffer
        //
        NewBuffer = ExAllocatePoolWithTag(NonPagedPool,
                                          sizeof(PHYSICAL_MEMORY_DESCRIPTOR) +
                                          sizeof(PHYSICAL_MEMORY_RUN) *
                                          (Run - 1),
                                          'lMmM');
        if (NewBuffer)
        {
            //
            // Copy the old buffer into the new, then free it
            //
            RtlCopyMemory(NewBuffer->Run,
                          Buffer->Run,
                          sizeof(PHYSICAL_MEMORY_RUN) * Run);
            ExFreePool(Buffer);

            //
            // Now use the new buffer
            //
            Buffer = NewBuffer;
        }
    }

    //
    // Write the final numbers, and return it
    //
    Buffer->NumberOfRuns = Run;
    Buffer->NumberOfPages = PageCount;
    return Buffer;
}

VOID
NTAPI
MiBuildPagedPool(VOID)
{
    PMMPTE PointerPte, PointerPde;
    MMPTE TempPte = ValidKernelPte;
    PFN_NUMBER PageFrameIndex;
    KIRQL OldIrql;
    ULONG Size, BitMapSize;

    //
    // Get the page frame number for the system page directory
    //
    PointerPte = MiAddressToPte(PDE_BASE);
    ASSERT(PD_COUNT == 1);
    MmSystemPageDirectory[0] = PFN_FROM_PTE(PointerPte);

    //
    // Allocate a system PTE which will hold a copy of the page directory
    //
    PointerPte = MiReserveSystemPtes(1, SystemPteSpace);
    ASSERT(PointerPte);
    MmSystemPagePtes = MiPteToAddress(PointerPte);

    //
    // Make this system PTE point to the system page directory.
    // It is now essentially double-mapped. This will be used later for lazy
    // evaluation of PDEs across process switches, similarly to how the Global
    // page directory array in the old ReactOS Mm is used (but in a less hacky
    // way).
    //
    TempPte = ValidKernelPte;
    ASSERT(PD_COUNT == 1);
    TempPte.u.Hard.PageFrameNumber = MmSystemPageDirectory[0];
    MI_WRITE_VALID_PTE(PointerPte, TempPte);

    //
    // Let's get back to paged pool work: size it up.
    // By default, it should be twice as big as nonpaged pool.
    //
    MmSizeOfPagedPoolInBytes = 2 * MmMaximumNonPagedPoolInBytes;
    if (MmSizeOfPagedPoolInBytes > ((ULONG_PTR)MmNonPagedSystemStart -
                                    (ULONG_PTR)MmPagedPoolStart))
    {
        //
        // On the other hand, we have limited VA space, so make sure that the VA
        // for paged pool doesn't overflow into nonpaged pool VA. Otherwise, set
        // whatever maximum is possible.
        //
        MmSizeOfPagedPoolInBytes = (ULONG_PTR)MmNonPagedSystemStart -
                                   (ULONG_PTR)MmPagedPoolStart;
    }

    //
    // Get the size in pages and make sure paged pool is at least 32MB.
    //
    Size = MmSizeOfPagedPoolInBytes;
    if (Size < MI_MIN_INIT_PAGED_POOLSIZE) Size = MI_MIN_INIT_PAGED_POOLSIZE;
    Size = BYTES_TO_PAGES(Size);

    //
    // Now check how many PTEs will be required for this many pages.
    //
    Size = (Size + (1024 - 1)) / 1024;
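
    //
    // (Each PDE covers 1024 PTEs, so as an example a 32MB pool is 8192 pages,
    // and (8192 + 1023) / 1024 = 8 page tables' worth of PTEs.)
    //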

    //
    // Recompute the page-aligned size of the paged pool, in bytes and pages.
    //
    MmSizeOfPagedPoolInBytes = Size * PAGE_SIZE * 1024;
    MmSizeOfPagedPoolInPages = MmSizeOfPagedPoolInBytes >> PAGE_SHIFT;

    //
    // Let's be really sure this doesn't overflow into nonpaged system VA
    //
    ASSERT((MmSizeOfPagedPoolInBytes + (ULONG_PTR)MmPagedPoolStart) <=
           (ULONG_PTR)MmNonPagedSystemStart);

    //
    // This is where paged pool ends
    //
    MmPagedPoolEnd = (PVOID)(((ULONG_PTR)MmPagedPoolStart +
                              MmSizeOfPagedPoolInBytes) - 1);

    //
    // So now get the PDE for paged pool and zero it out
    //
    PointerPde = MiAddressToPde(MmPagedPoolStart);
    RtlZeroMemory(PointerPde,
                  (1 + MiAddressToPde(MmPagedPoolEnd) - PointerPde) * sizeof(MMPTE));

    //
    // Next, get the first and last PTE
    //
    PointerPte = MiAddressToPte(MmPagedPoolStart);
    MmPagedPoolInfo.FirstPteForPagedPool = PointerPte;
    MmPagedPoolInfo.LastPteForPagedPool = MiAddressToPte(MmPagedPoolEnd);

    //
    // Lock the PFN database
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    /* Allocate a page and map the first paged pool PDE */
    PageFrameIndex = MiRemoveZeroPage(0);
    TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
    MI_WRITE_VALID_PTE(PointerPde, TempPte);

    /* Initialize the PFN entry for it */
    MiInitializePfnForOtherProcess(PageFrameIndex,
                                   PointerPde,
                                   MmSystemPageDirectory[(PointerPde - (PMMPTE)PDE_BASE) / PDE_COUNT]);

    //
    // Release the PFN database lock
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    //
    // We only have one PDE mapped for now... at fault time, additional PDEs
    // will be allocated to handle paged pool growth. This is where they'll have
    // to start.
    //
    MmPagedPoolInfo.NextPdeForPagedPoolExpansion = PointerPde + 1;

    //
    // We keep track of each page via a bit, so check how big the bitmap will
    // have to be (make sure to align our page count such that it fits nicely
    // into a 4-byte aligned bitmap).
    //
    // We'll also allocate the bitmap header itself as part of the same buffer.
    //
    Size = Size * 1024;
    ASSERT(Size == MmSizeOfPagedPoolInPages);
    BitMapSize = Size;
    Size = sizeof(RTL_BITMAP) + (((Size + 31) / 32) * sizeof(ULONG));
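
    //
    // (Continuing the 32MB example, that is 8192 bits: 8192 / 32 = 256 ULONGs,
    // i.e. 1KB of bitmap data plus the RTL_BITMAP header.)
    //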

    //
    // Allocate the allocation bitmap, which tells us which regions have not yet
    // been mapped into memory
    //
    MmPagedPoolInfo.PagedPoolAllocationMap = ExAllocatePoolWithTag(NonPagedPool,
                                                                   Size,
                                                                   '  mM');
    ASSERT(MmPagedPoolInfo.PagedPoolAllocationMap);

    //
    // Initialize it such that at first, only the first page's worth of PTEs is
    // marked as allocated (incidentally, the first PDE we allocated earlier).
    //
    RtlInitializeBitMap(MmPagedPoolInfo.PagedPoolAllocationMap,
                        (PULONG)(MmPagedPoolInfo.PagedPoolAllocationMap + 1),
                        BitMapSize);
    RtlSetAllBits(MmPagedPoolInfo.PagedPoolAllocationMap);
    RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap, 0, 1024);

    //
    // We have a second bitmap, which keeps track of where allocations end.
    // Given the allocation bitmap and a base address, we can therefore figure
    // out which page is the last page of that allocation, and thus how big the
    // entire allocation is.
    //
    MmPagedPoolInfo.EndOfPagedPoolBitmap = ExAllocatePoolWithTag(NonPagedPool,
                                                                 Size,
                                                                 '  mM');
    ASSERT(MmPagedPoolInfo.EndOfPagedPoolBitmap);
    RtlInitializeBitMap(MmPagedPoolInfo.EndOfPagedPoolBitmap,
                        (PULONG)(MmPagedPoolInfo.EndOfPagedPoolBitmap + 1),
                        BitMapSize);

    //
    // Since no allocations have been made yet, there are no bits set as the end
    //
    RtlClearAllBits(MmPagedPoolInfo.EndOfPagedPoolBitmap);

    //
    // Initialize paged pool.
    //
    InitializePool(PagedPool, 0);

    /* Default low threshold of 30MB or one fifth of paged pool */
    MiLowPagedPoolThreshold = (30 * _1MB) >> PAGE_SHIFT;
    MiLowPagedPoolThreshold = min(MiLowPagedPoolThreshold, Size / 5);

    /* Default high threshold of 60MB or two fifths of paged pool */
    MiHighPagedPoolThreshold = (60 * _1MB) >> PAGE_SHIFT;
    MiHighPagedPoolThreshold = min(MiHighPagedPoolThreshold, (Size * 2) / 5);
    ASSERT(MiLowPagedPoolThreshold < MiHighPagedPoolThreshold);
}

NTSTATUS
NTAPI
MmArmInitSystem(IN ULONG Phase,
                IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
    ULONG i;
    BOOLEAN IncludeType[LoaderMaximum];
    PVOID Bitmap;
    PPHYSICAL_MEMORY_RUN Run;
    PFN_NUMBER PageCount;

    //
    // Instantiate memory that we don't consider RAM/usable
    // We use the same exclusions that Windows does, in order to try to be
    // compatible with WinLDR-style booting
    //
    for (i = 0; i < LoaderMaximum; i++) IncludeType[i] = TRUE;
    IncludeType[LoaderBad] = FALSE;
    IncludeType[LoaderFirmwarePermanent] = FALSE;
    IncludeType[LoaderSpecialMemory] = FALSE;
    IncludeType[LoaderBBTMemory] = FALSE;
    if (Phase == 0)
    {
        /* Initialize the phase 0 temporary event */
        KeInitializeEvent(&MiTempEvent, NotificationEvent, FALSE);

        /* Set all the events to use the temporary event for now */
        MiLowMemoryEvent = &MiTempEvent;
        MiHighMemoryEvent = &MiTempEvent;
        MiLowPagedPoolEvent = &MiTempEvent;
        MiHighPagedPoolEvent = &MiTempEvent;
        MiLowNonPagedPoolEvent = &MiTempEvent;
        MiHighNonPagedPoolEvent = &MiTempEvent;

        //
        // Define the basic user vs. kernel address space separation
        //
        MmSystemRangeStart = (PVOID)KSEG0_BASE;
        MmUserProbeAddress = (ULONG_PTR)MmSystemRangeStart - 0x10000;
        MmHighestUserAddress = (PVOID)(MmUserProbeAddress - 1);

        /* Highest PTE and PDE based on the addresses above */
        MiHighestUserPte = MiAddressToPte(MmHighestUserAddress);
        MiHighestUserPde = MiAddressToPde(MmHighestUserAddress);

        //
        // Get the size of the boot loader's image allocations and then round
        // that region up to a PDE size, so that any PDEs we might create for
        // whatever follows are separate from the PDEs that boot loader might've
        // already created (and later, we can blow all that away if we want to).
        //
        MmBootImageSize = KeLoaderBlock->Extension->LoaderPagesSpanned;
        MmBootImageSize *= PAGE_SIZE;
        MmBootImageSize = (MmBootImageSize + PDE_MAPPED_VA - 1) & ~(PDE_MAPPED_VA - 1);
        ASSERT((MmBootImageSize % PDE_MAPPED_VA) == 0);
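
        //
        // (Example of that round-up, assuming a hypothetical loader span of
        // 5000 pages: 5000 * 4KB is about 19.5MB, which the mask arithmetic
        // above rounds to the next 4MB boundary, so MmBootImageSize = 20MB.)
        //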
1712
1713 //
1714 // Set the size of session view, pool, and image
1715 //
1716 MmSessionSize = MI_SESSION_SIZE;
1717 MmSessionViewSize = MI_SESSION_VIEW_SIZE;
1718 MmSessionPoolSize = MI_SESSION_POOL_SIZE;
1719 MmSessionImageSize = MI_SESSION_IMAGE_SIZE;
1720
1721 //
1722 // Set the size of system view
1723 //
1724 MmSystemViewSize = MI_SYSTEM_VIEW_SIZE;
1725
1726 //
1727 // This is where it all ends
1728 //
1729 MiSessionImageEnd = (PVOID)PTE_BASE;
1730
1731 //
1732 // This is where we will load Win32k.sys and the video driver
1733 //
1734 MiSessionImageStart = (PVOID)((ULONG_PTR)MiSessionImageEnd -
1735 MmSessionImageSize);
1736
1737 //
1738 // So the view starts right below the session working set (itself below
1739 // the image area)
1740 //
1741 MiSessionViewStart = (PVOID)((ULONG_PTR)MiSessionImageEnd -
1742 MmSessionImageSize -
1743 MI_SESSION_WORKING_SET_SIZE -
1744 MmSessionViewSize);
1745
1746 //
1747 // Session pool follows
1748 //
1749 MiSessionPoolEnd = MiSessionViewStart;
1750 MiSessionPoolStart = (PVOID)((ULONG_PTR)MiSessionPoolEnd -
1751 MmSessionPoolSize);
1752
1753 //
1754 // And it all begins here
1755 //
1756 MmSessionBase = MiSessionPoolStart;
1757
1758 //
1759 // Sanity check that our math is correct
1760 //
1761 ASSERT((ULONG_PTR)MmSessionBase + MmSessionSize == PTE_BASE);
1762
1763 //
1764 // Session space ends wherever image session space ends
1765 //
1766 MiSessionSpaceEnd = MiSessionImageEnd;
1767
1768 //
1769 // System view space ends at session space, so now that we know where
1770 // this is, we can compute the base address of system view space itself.
1771 //
1772 MiSystemViewStart = (PVOID)((ULONG_PTR)MmSessionBase -
1773 MmSystemViewSize);
1774
1775 /* Compute the PTE addresses for all the addresses we carved out */
1776 MiSessionImagePteStart = MiAddressToPte(MiSessionImageStart);
1777 MiSessionImagePteEnd = MiAddressToPte(MiSessionImageEnd);
1778 MiSessionBasePte = MiAddressToPte(MmSessionBase);
1779 MiSessionLastPte = MiAddressToPte(MiSessionSpaceEnd);

        /* Initialize the user mode image list */
        InitializeListHead(&MmLoadedUserImageList);

        /* Initialize the paged pool mutex */
        KeInitializeGuardedMutex(&MmPagedPoolMutex);

        /* Initialize the Loader Lock */
        KeInitializeMutant(&MmSystemLoadLock, FALSE);

        //
        // Count physical pages on the system
        //
        PageCount = MiPagesInLoaderBlock(LoaderBlock, IncludeType);

        //
        // Check if this is a machine with less than 19MB of RAM
        //
        if (PageCount < MI_MIN_PAGES_FOR_SYSPTE_TUNING)
        {
            //
            // Use the very minimum of system PTEs
            //
            MmNumberOfSystemPtes = 7000;
        }
        else
        {
            //
            // Use the default, but check if we have more than 32MB of RAM
            //
            MmNumberOfSystemPtes = 11000;
            if (PageCount > MI_MIN_PAGES_FOR_SYSPTE_BOOST)
            {
                //
                // Double the amount of system PTEs
                //
                MmNumberOfSystemPtes <<= 1;
            }
        }

        DPRINT("System PTE count has been tuned to %d (%d bytes)\n",
               MmNumberOfSystemPtes, MmNumberOfSystemPtes * PAGE_SIZE);
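        //
        // In summary, the tuning above gives three tiers:
        //
        //   RAM < 19MB          -> 7000 system PTEs
        //   19MB <= RAM <= 32MB -> 11000 system PTEs
        //   RAM > 32MB          -> 22000 system PTEs
        //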

        /* Initialize the working set lock */
        ExInitializePushLock((PULONG_PTR)&MmSystemCacheWs.WorkingSetMutex);

        /* Set commit limit */
        MmTotalCommitLimit = 2 * _1GB;
        MmTotalCommitLimitMaximum = MmTotalCommitLimit;
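        //
        // (The 2GB figure is only a placeholder; the limit is recomputed
        // from MmAvailablePages near the end of this routine, once the
        // memory layout is fully known.)
        //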

        /* Has the allocation fragment been set up? */
        if (!MmAllocationFragment)
        {
            /* Use the default value */
            MmAllocationFragment = MI_ALLOCATION_FRAGMENT;
            if (PageCount < ((256 * _1MB) / PAGE_SIZE))
            {
                /* On systems with less than 256MB, divide by 4 */
                MmAllocationFragment = MI_ALLOCATION_FRAGMENT / 4;
            }
            else if (PageCount < (_1GB / PAGE_SIZE))
            {
                /* On systems with less than 1GB, divide by 2 */
                MmAllocationFragment = MI_ALLOCATION_FRAGMENT / 2;
            }
        }
        else
        {
            /* Convert the registry setting from KB to bytes, rounded to pages */
            MmAllocationFragment *= _1KB;
            MmAllocationFragment = ROUND_TO_PAGES(MmAllocationFragment);

            /* Don't let it go past the maximum */
            MmAllocationFragment = min(MmAllocationFragment,
                                       MI_MAX_ALLOCATION_FRAGMENT);

            /* Don't let it get too small either */
            MmAllocationFragment = max(MmAllocationFragment,
                                       MI_MIN_ALLOCATION_FRAGMENT);
        }
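        //
        // For example, a registry override of 96 (interpreted as KB) becomes
        // 96 * 1024 bytes, is rounded up to a whole number of pages, and is
        // then clamped into the [MI_MIN_ALLOCATION_FRAGMENT,
        // MI_MAX_ALLOCATION_FRAGMENT] range.
        //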

        /* Initialize the platform-specific parts */
        MiInitMachineDependent(LoaderBlock);

        //
        // Sync us up with ReactOS Mm
        //
        MiSyncARM3WithROS(MmNonPagedSystemStart, (PVOID)((ULONG_PTR)MmNonPagedPoolEnd - 1));
        MiSyncARM3WithROS(MmPfnDatabase, (PVOID)((ULONG_PTR)MmNonPagedPoolStart + MmSizeOfNonPagedPoolInBytes - 1));
        MiSyncARM3WithROS((PVOID)HYPER_SPACE, (PVOID)(HYPER_SPACE + PAGE_SIZE - 1));
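        //
        // (In order, the ranges synced above are: nonpaged system space up
        // through the end of nonpaged pool, the PFN database together with
        // the initial nonpaged pool, and the first page of hyperspace.)
        //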

        //
        // Build the physical memory block
        //
        MmPhysicalMemoryBlock = MmInitializeMemoryLimits(LoaderBlock,
                                                         IncludeType);

        //
        // Allocate enough buffer for the PFN bitmap,
        // aligned up to a 32-bit boundary
        //
        Bitmap = ExAllocatePoolWithTag(NonPagedPool,
                                       (((MmHighestPhysicalPage + 1) + 31) / 32) * 4,
                                       ' mM');
        if (!Bitmap)
        {
            //
            // This is critical
            //
            KeBugCheckEx(INSTALL_MORE_MEMORY,
                         MmNumberOfPhysicalPages,
                         MmLowestPhysicalPage,
                         MmHighestPhysicalPage,
                         0x101);
        }

        //
        // Initialize it and clear all the bits to begin with
        //
        RtlInitializeBitMap(&MiPfnBitMap,
                            Bitmap,
                            MmHighestPhysicalPage + 1);
        RtlClearAllBits(&MiPfnBitMap);
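        //
        // The bitmap holds one bit per PFN from 0 through
        // MmHighestPhysicalPage, so the allocation above rounds the bit
        // count up to whole 32-bit ULONGs. For example, with a highest PFN
        // of 0x1FFFF (512MB of 4KB pages), that is (0x20000 + 31) / 32 =
        // 4096 ULONGs, i.e. a 16KB buffer.
        //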

        //
        // Loop physical memory runs
        //
        for (i = 0; i < MmPhysicalMemoryBlock->NumberOfRuns; i++)
        {
            //
            // Get the run
            //
            Run = &MmPhysicalMemoryBlock->Run[i];
            DPRINT("PHYSICAL RAM [0x%08p to 0x%08p]\n",
                   Run->BasePage << PAGE_SHIFT,
                   (Run->BasePage + Run->PageCount) << PAGE_SHIFT);

            //
            // Make sure it has pages inside it
            //
            if (Run->PageCount)
            {
                //
                // Set the bits in the PFN bitmap
                //
                RtlSetBits(&MiPfnBitMap, Run->BasePage, Run->PageCount);
            }
        }

        /* Look for large page cache entries that need caching */
        MiSyncCachedRanges();

        /* Look for HAL heap I/O device mappings that need coherency tracking */
        MiAddHalIoMappings();

        /* Set the initial resident page count */
        MmResidentAvailablePages = MmAvailablePages - 32;

        /* Initialize large page structures on PAE/x64, and MmProcessList on x86 */
        MiInitializeLargePageSupport();

        /* Check if the registry says any drivers should be loaded with large pages */
        MiInitializeDriverLargePageList();

        /* Relocate the boot drivers into system PTE space and fixup their PFNs */
        MiReloadBootLoadedDrivers(LoaderBlock);

        /* FIXME: Call out into Driver Verifier for initialization */

        /* Check how many pages the system has */
        if (MmNumberOfPhysicalPages <= ((13 * _1MB) / PAGE_SIZE))
        {
            /* Set small system */
            MmSystemSize = MmSmallSystem;
        }
        else if (MmNumberOfPhysicalPages <= ((19 * _1MB) / PAGE_SIZE))
        {
            /* Set small system and add 100 pages for the cache */
            MmSystemSize = MmSmallSystem;
            MmSystemCacheWsMinimum += 100;
        }
        else
        {
            /* Set medium system and add 400 pages for the cache */
            MmSystemSize = MmMediumSystem;
            MmSystemCacheWsMinimum += 400;
        }

        /* Check for less than 24MB */
        if (MmNumberOfPhysicalPages < ((24 * _1MB) / PAGE_SIZE))
        {
            /* No more than 32 pages */
            MmSystemCacheWsMinimum = 32;
        }

        /* Check for more than 32MB */
        if (MmNumberOfPhysicalPages >= ((32 * _1MB) / PAGE_SIZE))
        {
            /* Check for product type being "Wi" for WinNT */
            if (MmProductType == '\0i\0W')
            {
                /* Then this is a large system */
                MmSystemSize = MmLargeSystem;
            }
            else
            {
                /* For servers, we need 64MB to consider this as being large */
                if (MmNumberOfPhysicalPages >= ((64 * _1MB) / PAGE_SIZE))
                {
                    /* Set it as large */
                    MmSystemSize = MmLargeSystem;
                }
            }
        }
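        //
        // A note on the product check: MmProductType holds the first ULONG
        // of the UNICODE product-type string from the registry. L"WinNT"
        // begins with the wide characters 'W' (0x0057) and 'i' (0x0069),
        // which pack little-endian into the ULONG 0x00690057 -- exactly the
        // multi-character constant '\0i\0W'. L"LanmanNT" likewise begins
        // with 'L' and 'a', giving the '\0a\0L' used further below.
        //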

        /* Check for more than 33MB */
        if (MmNumberOfPhysicalPages > ((33 * _1MB) / PAGE_SIZE))
        {
            /* Add another 500 pages to the cache */
            MmSystemCacheWsMinimum += 500;
        }
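        //
        // Altogether, the checks above run in this order:
        //
        //   <= 13MB -> MmSmallSystem
        //   <= 19MB -> MmSmallSystem, cache WS minimum += 100 pages
        //    > 19MB -> MmMediumSystem, cache WS minimum += 400 pages
        //    < 24MB -> cache WS minimum forced down to 32 pages
        //   >= 32MB -> MmLargeSystem on workstations (servers need 64MB)
        //    > 33MB -> another 500 pages of cache WS minimum
        //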

        /* Now set up the shared user data fields */
        ASSERT(SharedUserData->NumberOfPhysicalPages == 0);
        SharedUserData->NumberOfPhysicalPages = MmNumberOfPhysicalPages;
        SharedUserData->LargePageMinimum = 0;

        /* Check for workstation ("Wi" for WinNT) */
        if (MmProductType == '\0i\0W')
        {
            /* Set Windows NT Workstation product type */
            SharedUserData->NtProductType = NtProductWinNt;
            MmProductType = 0;
        }
        else
        {
            /* Check for LanMan server */
            if (MmProductType == '\0a\0L')
            {
                /* This is a domain controller */
                SharedUserData->NtProductType = NtProductLanManNt;
            }
            else
            {
                /* Otherwise it must be a normal server */
                SharedUserData->NtProductType = NtProductServer;
            }

            /* Set the product type, and make the system more aggressive with low memory */
            MmProductType = 1;
            MmMinimumFreePages = 81;
        }

        /* Update working set tuning parameters */
        MiAdjustWorkingSetManagerParameters(!MmProductType);

        /* Fine-tune the page count by removing the working set and NP expansion */
        MmResidentAvailablePages -= MiExpansionPoolPagesInitialCharge;
        MmResidentAvailablePages -= MmSystemCacheWsMinimum;
        MmResidentAvailableAtInit = MmResidentAvailablePages;
        if (MmResidentAvailablePages <= 0)
        {
            /* This should not happen */
            DPRINT1("System cache working set too big\n");
            return FALSE;
        }

        /* Initialize the system cache */
        //MiInitializeSystemCache(MmSystemCacheWsMinimum, MmAvailablePages);

        /* Update the commit limit */
        MmTotalCommitLimit = MmAvailablePages;
        if (MmTotalCommitLimit > 1024) MmTotalCommitLimit -= 1024;
        MmTotalCommitLimitMaximum = MmTotalCommitLimit;
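        //
        // For example, with 32768 available pages (128MB of 4KB pages), the
        // commit limit becomes 31744 pages after the 1024-page cushion is
        // subtracted -- still a provisional value until paging files are
        // brought online later in boot.
        //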

        /* Size up paged pool and build the shadow system page directory */
        MiBuildPagedPool();

        /* Debugger physical memory support is now ready to be used */
        MmDebugPte = MiAddressToPte(MiDebugMapping);

        /* Initialize the loaded module list */
        MiInitializeLoadedModuleList(LoaderBlock);
    }

    //
    // Always return success for now
    //
    return STATUS_SUCCESS;
}

/* EOF */