/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/mminit.c
 * PURPOSE:         ARM Memory Manager Initialization
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */

/* INCLUDES *******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#line 15 "ARM³::INIT"
#define MODULE_INVOLVED_IN_ARM3
#include "miarm.h"

/* GLOBALS ********************************************************************/

//
// These are all registry-configurable, but by default, the memory manager will
// figure out the most appropriate values.
//
ULONG MmMaximumNonPagedPoolPercent;
SIZE_T MmSizeOfNonPagedPoolInBytes;
SIZE_T MmMaximumNonPagedPoolInBytes;

/* Some of the same values, in pages */
PFN_NUMBER MmMaximumNonPagedPoolInPages;

//
// These numbers describe the discrete equation components of the nonpaged
// pool sizing algorithm.
//
// They are described on http://support.microsoft.com/default.aspx/kb/126402/ja
// along with the algorithm that uses them, which is implemented later below.
//
SIZE_T MmMinimumNonPagedPoolSize = 256 * 1024;
ULONG MmMinAdditionNonPagedPoolPerMb = 32 * 1024;
SIZE_T MmDefaultMaximumNonPagedPool = 1024 * 1024;
ULONG MmMaxAdditionNonPagedPoolPerMb = 400 * 1024;
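
//
// A minimal sketch of how these four constants are meant to combine, per the
// KB article above (illustrative only and kept out of the build; the real
// computation happens in the machine-dependent init path, and "TotalMb" is a
// hypothetical input holding the amount of RAM in megabytes):
//
#if 0
static SIZE_T
MiSketchNonPagedPoolSize(IN SIZE_T TotalMb)
{
    /* Start from the 256KB floor and grow by 32KB per MB of RAM... */
    SIZE_T Size = MmMinimumNonPagedPoolSize +
                  TotalMb * MmMinAdditionNonPagedPoolPerMb;

    /* ...but never past a cap that starts at 1MB and grows by 400KB per MB */
    SIZE_T Maximum = MmDefaultMaximumNonPagedPool +
                     TotalMb * MmMaxAdditionNonPagedPoolPerMb;

    return min(Size, Maximum);
}
#endif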

//
// The memory layout (and especially variable names) of the NT kernel mode
// components can be a bit hard to twig, especially when it comes to the non
// paged area.
//
// There are really two components to the non-paged pool:
//
// - The initial nonpaged pool, sized dynamically up to a maximum.
// - The expansion nonpaged pool, sized dynamically up to a maximum.
//
// The initial nonpaged pool is physically contiguous for performance, and
// immediately follows the PFN database, typically sharing the same PDE. It is
// a very small resource (32MB on a 1GB system), and capped at 128MB.
//
// Right now we call this the "ARM³ Nonpaged Pool" and it begins somewhere after
// the PFN database (which starts at 0xB0000000).
//
// The expansion nonpaged pool, on the other hand, can grow much bigger (400MB
// for a 1GB system). On ARM³ however, it is currently capped at 128MB.
//
// The address where the initial nonpaged pool starts is aptly named
// MmNonPagedPoolStart, and it describes a range of MmSizeOfNonPagedPoolInBytes
// bytes.
//
// Expansion nonpaged pool starts at an address described by the variable called
// MmNonPagedPoolExpansionStart, and it goes on for MmMaximumNonPagedPoolInBytes
// minus MmSizeOfNonPagedPoolInBytes bytes, always reaching MmNonPagedPoolEnd
// (because of the way it's calculated) at 0xFFBE0000.
//
// Initial nonpaged pool is allocated and mapped early-on during boot, but what
// about the expansion nonpaged pool? It is instead composed of special pages
// which belong to what are called System PTEs. These PTEs are the matter of a
// later discussion, but they are also considered part of the "nonpaged" OS, due
// to the fact that they are never paged out -- once an address is described by
// a System PTE, it is always valid, until the System PTE is torn down.
//
// System PTEs are actually composed of two "spaces", the system space proper,
// and the nonpaged pool expansion space. The latter, as we've already seen,
// begins at MmNonPagedPoolExpansionStart. Based on the number of System PTEs
// that the system will support, the remaining address space below this address
// is used to hold the system space PTEs. This address, in turn, is held in the
// variable named MmNonPagedSystemStart, which itself is never allowed to go
// below 0xEB000000 (thus creating an upper bound on the number of System PTEs).
//
// This means that 330MB are reserved for total nonpaged system VA, on top of
// whatever the initial nonpaged pool allocation is.
//
// The following URLs, valid as of April 23rd, 2008, support this evidence:
//
// http://www.cs.miami.edu/~burt/journal/NT/memory.html
// http://www.ditii.com/2007/09/28/windows-memory-management-x86-virtual-address-space/
//
PVOID MmNonPagedSystemStart;
PVOID MmNonPagedPoolStart;
PVOID MmNonPagedPoolExpansionStart;
PVOID MmNonPagedPoolEnd = MI_NONPAGED_POOL_END;
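
//
// A quick sketch of the invariant described above (illustrative only, kept
// out of the build): the expansion range is sized so that it always ends
// exactly at MmNonPagedPoolEnd.
//
#if 0
ASSERT((ULONG_PTR)MmNonPagedPoolExpansionStart +
       (MmMaximumNonPagedPoolInBytes - MmSizeOfNonPagedPoolInBytes) ==
       (ULONG_PTR)MmNonPagedPoolEnd);
#endif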

//
// This is where paged pool starts by default
//
PVOID MmPagedPoolStart = MI_PAGED_POOL_START;
PVOID MmPagedPoolEnd;

//
// And this is its default size
//
SIZE_T MmSizeOfPagedPoolInBytes = MI_MIN_INIT_PAGED_POOLSIZE;
PFN_NUMBER MmSizeOfPagedPoolInPages = MI_MIN_INIT_PAGED_POOLSIZE / PAGE_SIZE;

//
// Session space starts at 0xBFFFFFFF and grows downwards
// By default, it includes an 8MB image area where we map win32k and video card
// drivers, followed by a 4MB area containing the session's working set. This is
// then followed by a 20MB mapped view area and finally by the session's paged
// pool, by default 16MB.
//
// On a normal system, this results in session space occupying the region from
// 0xBD000000 to 0xC0000000
//
// See miarm.h for the defines that determine the sizing of this region. On an
// NT system, some of these can be configured through the registry, but we don't
// support that yet.
//
PVOID MiSessionSpaceEnd;    // 0xC0000000
PVOID MiSessionImageEnd;    // 0xC0000000
PVOID MiSessionImageStart;  // 0xBF800000
PVOID MiSessionViewStart;   // 0xBE000000
PVOID MiSessionPoolEnd;     // 0xBE000000
PVOID MiSessionPoolStart;   // 0xBD000000
PVOID MmSessionBase;        // 0xBD000000
SIZE_T MmSessionSize;
SIZE_T MmSessionViewSize;
SIZE_T MmSessionPoolSize;
SIZE_T MmSessionImageSize;

/*
 * These are the PTE addresses of the boundaries carved out above
 */
PMMPTE MiSessionImagePteStart;
PMMPTE MiSessionImagePteEnd;
PMMPTE MiSessionBasePte;
PMMPTE MiSessionLastPte;

//
// The system view space, on the other hand, is where sections that are memory
// mapped into "system space" end up.
//
// By default, it is a 16MB region.
//
PVOID MiSystemViewStart;
SIZE_T MmSystemViewSize;

#if (_MI_PAGING_LEVELS == 2)
//
// A copy of the system page directory (the page directory associated with the
// System process) is kept (double-mapped) by the manager in order to lazily
// map paged pool PDEs into external processes when they fault on a paged pool
// address.
//
PFN_NUMBER MmSystemPageDirectory[PD_COUNT];
PMMPTE MmSystemPagePtes;
#endif
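
//
// A minimal sketch of the lazy evaluation described above (illustrative only,
// kept out of the build): when a process faults on a paged pool address whose
// PDE is missing from its own page directory, the handler can copy the PDE
// out of the double-mapped system page directory. "FaultingAddress" is a
// hypothetical input, and the indexing shown assumes a single page directory.
//
#if 0
PMMPDE PointerPde = MiAddressToPde(FaultingAddress);
if (PointerPde->u.Hard.Valid == 0)
{
    /* Copy the system's version of this PDE into the faulting process */
    PointerPde->u.Long = MmSystemPagePtes[((ULONG_PTR)PointerPde &
                                           (PAGE_SIZE - 1)) /
                                          sizeof(MMPTE)].u.Long;
}
#endif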

//
// The system cache starts right after hyperspace. The first few pages are for
// keeping track of the system working set list.
//
// This should be 0xC0C00000 -- the cache itself starts at 0xC1000000
//
PMMWSL MmSystemCacheWorkingSetList = MI_SYSTEM_CACHE_WS_START;

//
// Windows NT seems to choose between 7000, 11000 and 50000
// On systems with more than 32MB, this number is then doubled, and further
// aligned up to a PDE boundary (4MB).
//
ULONG_PTR MmNumberOfSystemPtes;

//
// This is how many pages the PFN database will take up
// In Windows, this includes the Quark Color Table, but not in ARM³
//
PFN_NUMBER MxPfnAllocation;

//
// Unlike the old ReactOS Memory Manager, ARM³ (and Windows) does not keep track
// of pages that are not actually valid physical memory, such as ACPI reserved
// regions, BIOS address ranges, or holes in physical memory address space which
// could indicate device-mapped I/O memory.
//
// In fact, the lack of a PFN entry for a page usually indicates that this is
// I/O space instead.
//
// A bitmap, called the PFN bitmap, keeps track of all page frames by assigning
// a bit to each. If the bit is set, then the page is valid physical RAM.
//
RTL_BITMAP MiPfnBitMap;
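
//
// A one-line sketch of that convention (illustrative only, kept out of the
// build; "Pfn" is a hypothetical PFN_NUMBER): asking "is this page frame
// real RAM?" is just a bit test.
//
#if 0
BOOLEAN IsRam = RtlCheckBit(&MiPfnBitMap, (ULONG)Pfn);
#endif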

//
// This structure describes the different pieces of RAM-backed address space
//
PPHYSICAL_MEMORY_DESCRIPTOR MmPhysicalMemoryBlock;

//
// This is where we keep track of the most basic physical layout markers
//
PFN_NUMBER MmNumberOfPhysicalPages, MmHighestPhysicalPage, MmLowestPhysicalPage = -1;

//
// The total number of pages mapped by the boot loader, which includes the
// kernel, HAL, boot drivers, registry, NLS files and other loader data
// structures, is kept track of here. This depends on "LoaderPagesSpanned"
// being correct when coming from the loader.
//
// This number is later aligned up to a PDE boundary.
//
SIZE_T MmBootImageSize;

//
// These three variables keep track of the core separation of address space that
// exists between kernel mode and user mode.
//
ULONG_PTR MmUserProbeAddress;
PVOID MmHighestUserAddress;
PVOID MmSystemRangeStart;

/* And these store the respective highest PTE/PDE address */
PMMPTE MiHighestUserPte;
PMMPDE MiHighestUserPde;
#if (_MI_PAGING_LEVELS >= 3)
/* We need the highest PPE and PXE addresses */
#endif

/* These variables define the system cache address space */
PVOID MmSystemCacheStart;
PVOID MmSystemCacheEnd;
MMSUPPORT MmSystemCacheWs;

//
// This is where hyperspace ends (followed by the system cache working set)
//
PVOID MmHyperSpaceEnd;

//
// Page coloring algorithm data
//
ULONG MmSecondaryColors;
ULONG MmSecondaryColorMask;

//
// Actual (registry-configurable) size of a GUI thread's stack
//
ULONG MmLargeStackSize = KERNEL_LARGE_STACK_SIZE;

//
// Before we have a PFN database, memory comes straight from our physical memory
// blocks, which is nice because it's guaranteed contiguous and also because once
// we take a page from here, the system doesn't see it anymore.
// However, once the fun is over, those pages must be re-integrated back into
// PFN society life, and that requires us keeping a copy of the original layout
// so that we can parse it later.
//
PMEMORY_ALLOCATION_DESCRIPTOR MxFreeDescriptor;
MEMORY_ALLOCATION_DESCRIPTOR MxOldFreeDescriptor;

/*
 * For each page's worth of bytes of L2 cache in a given set/way line, the zero
 * and free lists are organized in what is called a "color".
 *
 * This array points to the two lists, so it can be thought of as a multi-dimensional
 * array of MmFreePagesByColor[2][MmSecondaryColors]. Since the number is dynamic,
 * we describe the array in pointer form instead.
 *
 * On a final note, the color tables themselves are right after the PFN database.
 */
C_ASSERT(FreePageList == 1);
PMMCOLOR_TABLES MmFreePagesByColor[FreePageList + 1];
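
/*
 * A minimal sketch of how the table is indexed (illustrative only, kept out
 * of the build): list first, then color. "Color" is a hypothetical value
 * already masked with MmSecondaryColorMask.
 */
#if 0
PMMCOLOR_TABLES ZeroList = &MmFreePagesByColor[ZeroedPageList][Color];
PMMCOLOR_TABLES FreeList = &MmFreePagesByColor[FreePageList][Color];
#endif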

/* An event used in Phase 0 before the rest of the system is ready to go */
KEVENT MiTempEvent;

/* All the events used for memory threshold notifications */
PKEVENT MiLowMemoryEvent;
PKEVENT MiHighMemoryEvent;
PKEVENT MiLowPagedPoolEvent;
PKEVENT MiHighPagedPoolEvent;
PKEVENT MiLowNonPagedPoolEvent;
PKEVENT MiHighNonPagedPoolEvent;

/* The actual thresholds themselves, in page numbers */
PFN_NUMBER MmLowMemoryThreshold;
PFN_NUMBER MmHighMemoryThreshold;
PFN_NUMBER MiLowPagedPoolThreshold;
PFN_NUMBER MiHighPagedPoolThreshold;
PFN_NUMBER MiLowNonPagedPoolThreshold;
PFN_NUMBER MiHighNonPagedPoolThreshold;

/*
 * This number determines how many free pages must exist, at minimum, before we
 * start trimming working sets and flushing modified pages to obtain more free
 * pages.
 *
 * This number changes if the system detects that this is a server product
 */
PFN_NUMBER MmMinimumFreePages = 26;

/*
 * This number indicates how many pages we consider to be a low limit of having
 * "plenty" of free memory.
 *
 * It is doubled on systems that have more than 63MB of memory
 */
PFN_NUMBER MmPlentyFreePages = 400;

/* These values store the type of system this is (small, med, large) and if server */
ULONG MmProductType;
MM_SYSTEMSIZE MmSystemSize;

/*
 * These values store the cache working set minimums and maximums, in pages
 *
 * The minimum value is boosted on systems with more than 24MB of RAM, and cut
 * down to only 32 pages on embedded (<24MB RAM) systems.
 *
 * An extra boost of 2MB is given on systems with more than 33MB of RAM.
 */
PFN_NUMBER MmSystemCacheWsMinimum = 288;
PFN_NUMBER MmSystemCacheWsMaximum = 350;

/* FIXME: Move to cache/working set code later */
BOOLEAN MmLargeSystemCache;

/*
 * This value determines in how many fragments/chunks the subsection prototype
 * PTEs should be allocated when mapping a section object. It is configurable in
 * the registry through the MapAllocationFragment parameter.
 *
 * The default is 64KB on systems with more than 1GB of RAM, 32KB on systems with
 * more than 256MB of RAM, and 16KB on systems with less than 256MB of RAM.
 *
 * The maximum it can be set to is 2MB, and the minimum is 4KB.
 */
SIZE_T MmAllocationFragment;

/*
 * These two values track how much virtual memory can be committed, and when
 * expansion should happen.
 */
// FIXME: They should be moved elsewhere since it's not an "init" setting?
SIZE_T MmTotalCommitLimit;
SIZE_T MmTotalCommitLimitMaximum;

/* PRIVATE FUNCTIONS **********************************************************/

#ifndef _M_AMD64
//
// In Bavaria, this is probably a hate crime
//
VOID
FASTCALL
MiSyncARM3WithROS(IN PVOID AddressStart,
                  IN PVOID AddressEnd)
{
    //
    // Puerile piece of junk-grade carbonized horseshit puss sold to the lowest bidder
    //
    ULONG Pde = ADDR_TO_PDE_OFFSET(AddressStart);
    while (Pde <= ADDR_TO_PDE_OFFSET(AddressEnd))
    {
        //
        // This is both odious and heinous
        //
        extern ULONG MmGlobalKernelPageDirectory[1024];
        MmGlobalKernelPageDirectory[Pde] = ((PULONG)PDE_BASE)[Pde];
        Pde++;
    }
}
#endif

PFN_NUMBER
NTAPI
MxGetNextPage(IN PFN_NUMBER PageCount)
{
    PFN_NUMBER Pfn;

    /* Make sure we have enough pages */
    if (PageCount > MxFreeDescriptor->PageCount)
    {
        /* Crash the system */
        KeBugCheckEx(INSTALL_MORE_MEMORY,
                     MmNumberOfPhysicalPages,
                     MxFreeDescriptor->PageCount,
                     MxOldFreeDescriptor.PageCount,
                     PageCount);
    }

    /* Use our lowest usable free pages */
    Pfn = MxFreeDescriptor->BasePage;
    MxFreeDescriptor->BasePage += PageCount;
    MxFreeDescriptor->PageCount -= PageCount;
    return Pfn;
}

VOID
NTAPI
MiComputeColorInformation(VOID)
{
    ULONG L2Associativity;

    /* Check if no setting was provided already */
    if (!MmSecondaryColors)
    {
        /* Get L2 cache information */
        L2Associativity = KeGetPcr()->SecondLevelCacheAssociativity;

        /* The number of colors is the number of cache bytes per set/way */
        MmSecondaryColors = KeGetPcr()->SecondLevelCacheSize;
        if (L2Associativity) MmSecondaryColors /= L2Associativity;
    }

    /* Now convert cache bytes into pages */
    MmSecondaryColors >>= PAGE_SHIFT;
    if (!MmSecondaryColors)
    {
        /* If there was no cache data from the KPCR, use the default colors */
        MmSecondaryColors = MI_SECONDARY_COLORS;
    }
    else
    {
        /* Otherwise, make sure there aren't too many colors */
        if (MmSecondaryColors > MI_MAX_SECONDARY_COLORS)
        {
            /* Set the maximum */
            MmSecondaryColors = MI_MAX_SECONDARY_COLORS;
        }

        /* Make sure there aren't too few colors either */
        if (MmSecondaryColors < MI_MIN_SECONDARY_COLORS)
        {
            /* Set the default */
            MmSecondaryColors = MI_SECONDARY_COLORS;
        }

        /* Finally make sure the colors are a power of two */
        if (MmSecondaryColors & (MmSecondaryColors - 1))
        {
            /* Set the default */
            MmSecondaryColors = MI_SECONDARY_COLORS;
        }
    }

    /* Compute the mask and store it */
    MmSecondaryColorMask = MmSecondaryColors - 1;
    KeGetCurrentPrcb()->SecondaryColorMask = MmSecondaryColorMask;
}
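
//
// Because the color count is a power of two, the mask reduces "which color
// is this page?" to a single AND (illustrative only, kept out of the build;
// "Pfn" is a hypothetical PFN_NUMBER): with 64 colors the mask is 0x3F.
//
#if 0
ULONG Color = (ULONG)(Pfn & MmSecondaryColorMask);
#endif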

VOID
NTAPI
MiInitializeColorTables(VOID)
{
    ULONG i;
    PMMPTE PointerPte, LastPte;
    MMPTE TempPte = ValidKernelPte;

    /* The color table starts after the ARM3 PFN database */
    MmFreePagesByColor[0] = (PMMCOLOR_TABLES)&MmPfnDatabase[MmHighestPhysicalPage + 1];

    /* Loop the PTEs. We have two color tables for each secondary color */
    PointerPte = MiAddressToPte(&MmFreePagesByColor[0][0]);
    LastPte = MiAddressToPte((ULONG_PTR)MmFreePagesByColor[0] +
                             (2 * MmSecondaryColors * sizeof(MMCOLOR_TABLES))
                             - 1);
    while (PointerPte <= LastPte)
    {
        /* Check for valid PTE */
        if (PointerPte->u.Hard.Valid == 0)
        {
            /* Get a page and map it */
            TempPte.u.Hard.PageFrameNumber = MxGetNextPage(1);
            MI_WRITE_VALID_PTE(PointerPte, TempPte);

            /* Zero out the page */
            RtlZeroMemory(MiPteToAddress(PointerPte), PAGE_SIZE);
        }

        /* Next */
        PointerPte++;
    }

    /* Now set the address of the next list, right after this one */
    MmFreePagesByColor[1] = &MmFreePagesByColor[0][MmSecondaryColors];

    /* Now loop the lists to set them up */
    for (i = 0; i < MmSecondaryColors; i++)
    {
        /* Set both free and zero lists for each color */
        MmFreePagesByColor[ZeroedPageList][i].Flink = 0xFFFFFFFF;
        MmFreePagesByColor[ZeroedPageList][i].Blink = (PVOID)0xFFFFFFFF;
        MmFreePagesByColor[ZeroedPageList][i].Count = 0;
        MmFreePagesByColor[FreePageList][i].Flink = 0xFFFFFFFF;
        MmFreePagesByColor[FreePageList][i].Blink = (PVOID)0xFFFFFFFF;
        MmFreePagesByColor[FreePageList][i].Count = 0;
    }
}

BOOLEAN
NTAPI
MiIsRegularMemory(IN PLOADER_PARAMETER_BLOCK LoaderBlock,
                  IN PFN_NUMBER Pfn)
{
    PLIST_ENTRY NextEntry;
    PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;

    /* Loop the memory descriptors */
    NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
    while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
    {
        /* Get the memory descriptor */
        MdBlock = CONTAINING_RECORD(NextEntry,
                                    MEMORY_ALLOCATION_DESCRIPTOR,
                                    ListEntry);

        /* Check if this PFN could be part of the block */
        if (Pfn >= (MdBlock->BasePage))
        {
            /* Check if it really is part of the block */
            if (Pfn < (MdBlock->BasePage + MdBlock->PageCount))
            {
                /* Check if the block is actually memory we don't map */
                if ((MdBlock->MemoryType == LoaderFirmwarePermanent) ||
                    (MdBlock->MemoryType == LoaderBBTMemory) ||
                    (MdBlock->MemoryType == LoaderSpecialMemory))
                {
                    /* We don't need PFN database entries for this memory */
                    break;
                }

                /* This is memory we want to map */
                return TRUE;
            }
        }
        else
        {
            /* Blocks are ordered, so if it's not here, it doesn't exist */
            break;
        }

        /* Get to the next descriptor */
        NextEntry = MdBlock->ListEntry.Flink;
    }

    /* Check if this PFN is actually from our free memory descriptor */
    if ((Pfn >= MxOldFreeDescriptor.BasePage) &&
        (Pfn < MxOldFreeDescriptor.BasePage + MxOldFreeDescriptor.PageCount))
    {
        /* We use these pages for initial mappings, so we do want to count them */
        return TRUE;
    }

    /* Otherwise this isn't memory that we describe or care about */
    return FALSE;
}

VOID
NTAPI
MiMapPfnDatabase(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
    ULONG FreePage, FreePageCount, PagesLeft, BasePage, PageCount;
    PLIST_ENTRY NextEntry;
    PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
    PMMPTE PointerPte, LastPte;
    MMPTE TempPte = ValidKernelPte;

    /* Get current page data, since we won't be using MxGetNextPage as it would corrupt our state */
    FreePage = MxFreeDescriptor->BasePage;
    FreePageCount = MxFreeDescriptor->PageCount;
    PagesLeft = 0;

    /* Loop the memory descriptors */
    NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
    while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
    {
        /* Get the descriptor */
        MdBlock = CONTAINING_RECORD(NextEntry,
                                    MEMORY_ALLOCATION_DESCRIPTOR,
                                    ListEntry);
        if ((MdBlock->MemoryType == LoaderFirmwarePermanent) ||
            (MdBlock->MemoryType == LoaderBBTMemory) ||
            (MdBlock->MemoryType == LoaderSpecialMemory))
        {
            /* These pages are not part of the PFN database */
            NextEntry = MdBlock->ListEntry.Flink;
            continue;
        }

        /* Next, check if this is our special free descriptor we've found */
        if (MdBlock == MxFreeDescriptor)
        {
            /* Use the real numbers instead */
            BasePage = MxOldFreeDescriptor.BasePage;
            PageCount = MxOldFreeDescriptor.PageCount;
        }
        else
        {
            /* Use the descriptor's numbers */
            BasePage = MdBlock->BasePage;
            PageCount = MdBlock->PageCount;
        }

        /* Get the PTEs for this range */
        PointerPte = MiAddressToPte(&MmPfnDatabase[BasePage]);
        LastPte = MiAddressToPte(((ULONG_PTR)&MmPfnDatabase[BasePage + PageCount]) - 1);
        DPRINT("MD Type: %lx Base: %lx Count: %lx\n", MdBlock->MemoryType, BasePage, PageCount);

        /* Loop them */
        while (PointerPte <= LastPte)
        {
            /* We'll only touch PTEs that aren't already valid */
            if (PointerPte->u.Hard.Valid == 0)
            {
                /* Use the next free page */
                TempPte.u.Hard.PageFrameNumber = FreePage;
                ASSERT(FreePageCount != 0);

                /* Consume free pages */
                FreePage++;
                FreePageCount--;
                if (!FreePageCount)
                {
                    /* Out of memory */
                    KeBugCheckEx(INSTALL_MORE_MEMORY,
                                 MmNumberOfPhysicalPages,
                                 FreePageCount,
                                 MxOldFreeDescriptor.PageCount,
                                 1);
                }

                /* Write out this PTE */
                PagesLeft++;
                MI_WRITE_VALID_PTE(PointerPte, TempPte);

                /* Zero this page */
                RtlZeroMemory(MiPteToAddress(PointerPte), PAGE_SIZE);
            }

            /* Next! */
            PointerPte++;
        }

        /* Do the next address range */
        NextEntry = MdBlock->ListEntry.Flink;
    }

    /* Now update the free descriptor to consume the pages we used up during the PFN allocation loop */
    MxFreeDescriptor->BasePage = FreePage;
    MxFreeDescriptor->PageCount = FreePageCount;
}

VOID
NTAPI
MiBuildPfnDatabaseFromPages(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
    PMMPDE PointerPde;
    PMMPTE PointerPte;
    ULONG i, Count, j;
    PFN_NUMBER PageFrameIndex, StartupPdIndex, PtePageIndex;
    PMMPFN Pfn1, Pfn2;
    ULONG_PTR BaseAddress = 0;

    /* PFN of the startup page directory */
    StartupPdIndex = PFN_FROM_PTE(MiAddressToPde(PDE_BASE));

    /* Start with the first PDE and scan them all */
    PointerPde = MiAddressToPde(NULL);
    Count = PD_COUNT * PDE_COUNT;
    for (i = 0; i < Count; i++)
    {
        /* Check for valid PDE */
        if (PointerPde->u.Hard.Valid == 1)
        {
            /* Get the PFN from it */
            PageFrameIndex = PFN_FROM_PTE(PointerPde);

            /* Do we want a PFN entry for this page? */
            if (MiIsRegularMemory(LoaderBlock, PageFrameIndex))
            {
                /* Yes we do, set it up */
                Pfn1 = MiGetPfnEntry(PageFrameIndex);
                Pfn1->u4.PteFrame = StartupPdIndex;
                Pfn1->PteAddress = PointerPde;
                Pfn1->u2.ShareCount++;
                Pfn1->u3.e2.ReferenceCount = 1;
                Pfn1->u3.e1.PageLocation = ActiveAndValid;
                Pfn1->u3.e1.CacheAttribute = MiNonCached;
            }
            else
            {
                /* No PFN entry */
                Pfn1 = NULL;
            }

            /* Now get the PTE and scan the pages */
            PointerPte = MiAddressToPte(BaseAddress);
            for (j = 0; j < PTE_COUNT; j++)
            {
                /* Check for a valid PTE */
                if (PointerPte->u.Hard.Valid == 1)
                {
                    /* Increase the shared count of the PFN entry for the PDE */
                    ASSERT(Pfn1 != NULL);
                    Pfn1->u2.ShareCount++;

                    /* Now check if the PTE is valid memory too */
                    PtePageIndex = PFN_FROM_PTE(PointerPte);
                    if (MiIsRegularMemory(LoaderBlock, PtePageIndex))
                    {
                        /*
                         * Only add pages above the end of system code or pages
                         * that are part of nonpaged pool
                         */
                        if ((BaseAddress >= 0xA0000000) ||
                            ((BaseAddress >= (ULONG_PTR)MmNonPagedPoolStart) &&
                             (BaseAddress < (ULONG_PTR)MmNonPagedPoolStart +
                                            MmSizeOfNonPagedPoolInBytes)))
                        {
                            /* Get the PFN entry and make sure it too is valid */
                            Pfn2 = MiGetPfnEntry(PtePageIndex);
                            if ((MmIsAddressValid(Pfn2)) &&
                                (MmIsAddressValid(Pfn2 + 1)))
                            {
                                /* Setup the PFN entry */
                                Pfn2->u4.PteFrame = PageFrameIndex;
                                Pfn2->PteAddress = PointerPte;
                                Pfn2->u2.ShareCount++;
                                Pfn2->u3.e2.ReferenceCount = 1;
                                Pfn2->u3.e1.PageLocation = ActiveAndValid;
                                Pfn2->u3.e1.CacheAttribute = MiNonCached;
                            }
                        }
                    }
                }

                /* Next PTE */
                PointerPte++;
                BaseAddress += PAGE_SIZE;
            }
        }
        else
        {
            /* Next PDE mapped address */
            BaseAddress += PDE_MAPPED_VA;
        }

        /* Next PDE */
        PointerPde++;
    }
}

VOID
NTAPI
MiBuildPfnDatabaseZeroPage(VOID)
{
    PMMPFN Pfn1;
    PMMPDE PointerPde;

    /* Grab the lowest page and check if it has no real references */
    Pfn1 = MiGetPfnEntry(MmLowestPhysicalPage);
    if (!(MmLowestPhysicalPage) && !(Pfn1->u3.e2.ReferenceCount))
    {
        /* Make it a bogus page to catch errors */
        PointerPde = MiAddressToPde(0xFFFFFFFF);
        Pfn1->u4.PteFrame = PFN_FROM_PTE(PointerPde);
        Pfn1->PteAddress = PointerPde;
        Pfn1->u2.ShareCount++;
        Pfn1->u3.e2.ReferenceCount = 0xFFF0;
        Pfn1->u3.e1.PageLocation = ActiveAndValid;
        Pfn1->u3.e1.CacheAttribute = MiNonCached;
    }
}

VOID
NTAPI
MiBuildPfnDatabaseFromLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
    PLIST_ENTRY NextEntry;
    PFN_NUMBER PageCount = 0;
    PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
    PFN_NUMBER PageFrameIndex;
    PMMPFN Pfn1;
    PMMPTE PointerPte;
    PMMPDE PointerPde;
    KIRQL OldIrql;

    /* Now loop through the descriptors */
    NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
    while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
    {
        /* Get the current descriptor */
        MdBlock = CONTAINING_RECORD(NextEntry,
                                    MEMORY_ALLOCATION_DESCRIPTOR,
                                    ListEntry);

        /* Read its data */
        PageCount = MdBlock->PageCount;
        PageFrameIndex = MdBlock->BasePage;

        /* Don't allow memory above what the PFN database is mapping */
        if (PageFrameIndex > MmHighestPhysicalPage)
        {
            /* Since they are ordered, everything past here will be larger */
            break;
        }

        /* On the other hand, the end page might be higher up... */
        if ((PageFrameIndex + PageCount) > (MmHighestPhysicalPage + 1))
        {
            /* In which case we'll trim the descriptor to go as high as we can */
            PageCount = MmHighestPhysicalPage + 1 - PageFrameIndex;
            MdBlock->PageCount = PageCount;

            /* But if there's nothing left to trim, we got too high, so quit */
            if (!PageCount) break;
        }

        /* Now check the descriptor type */
        switch (MdBlock->MemoryType)
        {
            /* Check for bad RAM */
            case LoaderBad:

                DPRINT1("You either have specified /BURNMEMORY or damaged RAM modules.\n");
                break;

            /* Check for free RAM */
            case LoaderFree:
            case LoaderLoadedProgram:
            case LoaderFirmwareTemporary:
            case LoaderOsloaderStack:

                /* Get the last page of this descriptor. Note we loop backwards */
                PageFrameIndex += PageCount - 1;
                Pfn1 = MiGetPfnEntry(PageFrameIndex);

                /* Lock the PFN Database */
                OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
                while (PageCount--)
                {
                    /* If the page really has no references, mark it as free */
                    if (!Pfn1->u3.e2.ReferenceCount)
                    {
                        /* Add it to the free list */
                        Pfn1->u3.e1.CacheAttribute = MiNonCached;
                        MiInsertPageInFreeList(PageFrameIndex);
                    }

                    /* Go to the next page */
                    Pfn1--;
                    PageFrameIndex--;
                }

                /* Release PFN database */
                KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

                /* Done with this block */
                break;

            /* Check for pages that are invisible to us */
            case LoaderFirmwarePermanent:
            case LoaderSpecialMemory:
            case LoaderBBTMemory:

                /* And skip them */
                break;

            default:

                /* Map these pages with the KSEG0 mapping that adds 0x80000000 */
                PointerPte = MiAddressToPte(KSEG0_BASE + (PageFrameIndex << PAGE_SHIFT));
                Pfn1 = MiGetPfnEntry(PageFrameIndex);
                while (PageCount--)
                {
                    /* Check if the page is really unused */
                    PointerPde = MiAddressToPde(KSEG0_BASE + (PageFrameIndex << PAGE_SHIFT));
                    if (!Pfn1->u3.e2.ReferenceCount)
                    {
                        /* Mark it as being in-use */
                        Pfn1->u4.PteFrame = PFN_FROM_PTE(PointerPde);
                        Pfn1->PteAddress = PointerPte;
                        Pfn1->u2.ShareCount++;
                        Pfn1->u3.e2.ReferenceCount = 1;
                        Pfn1->u3.e1.PageLocation = ActiveAndValid;
                        Pfn1->u3.e1.CacheAttribute = MiNonCached;

                        /* Check for RAM disk page */
                        if (MdBlock->MemoryType == LoaderXIPRom)
                        {
                            /* Make it a pseudo-I/O ROM mapping */
                            Pfn1->u1.Flink = 0;
                            Pfn1->u2.ShareCount = 0;
                            Pfn1->u3.e2.ReferenceCount = 0;
                            Pfn1->u3.e1.PageLocation = 0;
                            Pfn1->u3.e1.Rom = 1;
                            Pfn1->u4.InPageError = 0;
                            Pfn1->u3.e1.PrototypePte = 1;
                        }
                    }

                    /* Advance page structures */
                    Pfn1++;
                    PageFrameIndex++;
                    PointerPte++;
                }
                break;
        }

        /* Next descriptor entry */
        NextEntry = MdBlock->ListEntry.Flink;
    }
}

VOID
NTAPI
MiBuildPfnDatabaseSelf(VOID)
{
    PMMPTE PointerPte, LastPte;
    PMMPFN Pfn1;

    /* Loop the PFN database pages */
    PointerPte = MiAddressToPte(MiGetPfnEntry(MmLowestPhysicalPage));
    LastPte = MiAddressToPte(MiGetPfnEntry(MmHighestPhysicalPage));
    while (PointerPte <= LastPte)
    {
        /* Make sure the page is valid */
        if (PointerPte->u.Hard.Valid == 1)
        {
            /* Get the PFN entry and just mark it referenced */
            Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
            Pfn1->u2.ShareCount = 1;
            Pfn1->u3.e2.ReferenceCount = 1;
        }

        /* Next */
        PointerPte++;
    }
}

VOID
NTAPI
MiInitializePfnDatabase(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
    /* Scan memory and start setting up PFN entries */
    MiBuildPfnDatabaseFromPages(LoaderBlock);

    /* Add the zero page */
    MiBuildPfnDatabaseZeroPage();

    /* Scan the loader block and build the rest of the PFN database */
    MiBuildPfnDatabaseFromLoaderBlock(LoaderBlock);

    /* Finally add the pages for the PFN database itself */
    MiBuildPfnDatabaseSelf();
}

VOID
NTAPI
MiAdjustWorkingSetManagerParameters(IN BOOLEAN Client)
{
    /* This function needs to do more work; for now, we only tune page minimums */

    /* Check for a system with around 64MB RAM or more */
    if (MmNumberOfPhysicalPages >= (63 * _1MB) / PAGE_SIZE)
    {
        /* Double the minimum amount of pages we consider for a "plenty free" scenario */
        MmPlentyFreePages *= 2;
    }
}

VOID
NTAPI
MiNotifyMemoryEvents(VOID)
{
    /* Are we in a low-memory situation? */
    if (MmAvailablePages < MmLowMemoryThreshold)
    {
        /* Clear high, set low */
        if (KeReadStateEvent(MiHighMemoryEvent)) KeClearEvent(MiHighMemoryEvent);
        if (!KeReadStateEvent(MiLowMemoryEvent)) KeSetEvent(MiLowMemoryEvent, 0, FALSE);
    }
    else if (MmAvailablePages < MmHighMemoryThreshold)
    {
        /* We are in between, clear both */
        if (KeReadStateEvent(MiHighMemoryEvent)) KeClearEvent(MiHighMemoryEvent);
        if (KeReadStateEvent(MiLowMemoryEvent)) KeClearEvent(MiLowMemoryEvent);
    }
    else
    {
        /* Clear low, set high */
        if (KeReadStateEvent(MiLowMemoryEvent)) KeClearEvent(MiLowMemoryEvent);
        if (!KeReadStateEvent(MiHighMemoryEvent)) KeSetEvent(MiHighMemoryEvent, 0, FALSE);
    }
}

NTSTATUS
NTAPI
MiCreateMemoryEvent(IN PUNICODE_STRING Name,
                    OUT PKEVENT *Event)
{
    PACL Dacl;
    HANDLE EventHandle;
    ULONG DaclLength;
    NTSTATUS Status;
    OBJECT_ATTRIBUTES ObjectAttributes;
    SECURITY_DESCRIPTOR SecurityDescriptor;

    /* Create the SD */
    Status = RtlCreateSecurityDescriptor(&SecurityDescriptor,
                                         SECURITY_DESCRIPTOR_REVISION);
    if (!NT_SUCCESS(Status)) return Status;

    /* One ACL with 3 ACEs, each containing one SID */
    DaclLength = sizeof(ACL) +
                 3 * sizeof(ACCESS_ALLOWED_ACE) +
                 RtlLengthSid(SeLocalSystemSid) +
                 RtlLengthSid(SeAliasAdminsSid) +
                 RtlLengthSid(SeWorldSid);

    /* Allocate space for the DACL */
    Dacl = ExAllocatePoolWithTag(PagedPool, DaclLength, 'lcaD');
    if (!Dacl) return STATUS_INSUFFICIENT_RESOURCES;

    /* Setup the ACL inside it */
    Status = RtlCreateAcl(Dacl, DaclLength, ACL_REVISION);
    if (!NT_SUCCESS(Status)) goto CleanUp;

    /* Add query rights for everyone */
    Status = RtlAddAccessAllowedAce(Dacl,
                                    ACL_REVISION,
                                    SYNCHRONIZE | EVENT_QUERY_STATE | READ_CONTROL,
                                    SeWorldSid);
    if (!NT_SUCCESS(Status)) goto CleanUp;

    /* Full rights for the admin */
    Status = RtlAddAccessAllowedAce(Dacl,
                                    ACL_REVISION,
                                    EVENT_ALL_ACCESS,
                                    SeAliasAdminsSid);
    if (!NT_SUCCESS(Status)) goto CleanUp;

    /* As well as full rights for the system */
    Status = RtlAddAccessAllowedAce(Dacl,
                                    ACL_REVISION,
                                    EVENT_ALL_ACCESS,
                                    SeLocalSystemSid);
    if (!NT_SUCCESS(Status)) goto CleanUp;

    /* Set this DACL inside the SD */
    Status = RtlSetDaclSecurityDescriptor(&SecurityDescriptor,
                                          TRUE,
                                          Dacl,
                                          FALSE);
    if (!NT_SUCCESS(Status)) goto CleanUp;

    /* Setup the event attributes, making sure it's a permanent one */
    InitializeObjectAttributes(&ObjectAttributes,
                               Name,
                               OBJ_KERNEL_HANDLE | OBJ_PERMANENT,
                               NULL,
                               &SecurityDescriptor);

    /* Create the event */
    Status = ZwCreateEvent(&EventHandle,
                           EVENT_ALL_ACCESS,
                           &ObjectAttributes,
                           NotificationEvent,
                           FALSE);
CleanUp:
    /* Free the DACL */
    ExFreePool(Dacl);

    /* Check if this is the success path */
    if (NT_SUCCESS(Status))
    {
        /* Add a reference to the object, then close the handle we had */
        Status = ObReferenceObjectByHandle(EventHandle,
                                           EVENT_MODIFY_STATE,
                                           ExEventObjectType,
                                           KernelMode,
                                           (PVOID*)Event,
                                           NULL);
        ZwClose(EventHandle);
    }

    /* Return status */
    return Status;
}

BOOLEAN
NTAPI
MiInitializeMemoryEvents(VOID)
{
    UNICODE_STRING LowString = RTL_CONSTANT_STRING(L"\\KernelObjects\\LowMemoryCondition");
    UNICODE_STRING HighString = RTL_CONSTANT_STRING(L"\\KernelObjects\\HighMemoryCondition");
    UNICODE_STRING LowPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\LowPagedPoolCondition");
    UNICODE_STRING HighPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\HighPagedPoolCondition");
    UNICODE_STRING LowNonPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\LowNonPagedPoolCondition");
    UNICODE_STRING HighNonPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\HighNonPagedPoolCondition");
    NTSTATUS Status;

    /* Check if we have a registry setting */
    if (MmLowMemoryThreshold)
    {
        /* Convert it to pages */
        MmLowMemoryThreshold *= (_1MB / PAGE_SIZE);
    }
    else
    {
        /* The low memory threshold is hit when we don't consider that we have "plenty" of free pages anymore */
        MmLowMemoryThreshold = MmPlentyFreePages;

        /* More than one GB of memory? */
        if (MmNumberOfPhysicalPages > 0x40000)
        {
            /* Start at 32MB, and add another 8MB for each additional GB */
            MmLowMemoryThreshold = (32 * _1MB) / PAGE_SIZE;
            MmLowMemoryThreshold += ((MmNumberOfPhysicalPages - 0x40000) >> 7);
        }
        else if (MmNumberOfPhysicalPages > 0x8000)
        {
            /* For systems with > 128MB RAM, add another 4MB for each 128MB */
            MmLowMemoryThreshold += ((MmNumberOfPhysicalPages - 0x8000) >> 5);
        }

        /* Don't let the minimum threshold go past 64MB */
        MmLowMemoryThreshold = min(MmLowMemoryThreshold, (64 * _1MB) / PAGE_SIZE);
    }
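
    //
    // Worked example (illustrative, assuming 4KB pages): on a 2GB machine,
    // MmNumberOfPhysicalPages is 0x80000, so the default becomes 8192 +
    // ((0x80000 - 0x40000) >> 7) = 10240 pages, i.e. 40MB, below the 64MB cap.
    //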

    /* Check if we have a registry setting */
    if (MmHighMemoryThreshold)
    {
        /* Convert it into pages */
        MmHighMemoryThreshold *= (_1MB / PAGE_SIZE);
    }
    else
    {
        /* Otherwise, the default is three times the low memory threshold */
        MmHighMemoryThreshold = 3 * MmLowMemoryThreshold;
        ASSERT(MmHighMemoryThreshold > MmLowMemoryThreshold);
    }

    /* Make sure high threshold is actually higher than the low */
    MmHighMemoryThreshold = max(MmHighMemoryThreshold, MmLowMemoryThreshold);

    /* Create the memory events for all the thresholds */
    Status = MiCreateMemoryEvent(&LowString, &MiLowMemoryEvent);
    if (!NT_SUCCESS(Status)) return FALSE;
    Status = MiCreateMemoryEvent(&HighString, &MiHighMemoryEvent);
    if (!NT_SUCCESS(Status)) return FALSE;
    Status = MiCreateMemoryEvent(&LowPagedPoolString, &MiLowPagedPoolEvent);
    if (!NT_SUCCESS(Status)) return FALSE;
    Status = MiCreateMemoryEvent(&HighPagedPoolString, &MiHighPagedPoolEvent);
    if (!NT_SUCCESS(Status)) return FALSE;
    Status = MiCreateMemoryEvent(&LowNonPagedPoolString, &MiLowNonPagedPoolEvent);
    if (!NT_SUCCESS(Status)) return FALSE;
    Status = MiCreateMemoryEvent(&HighNonPagedPoolString, &MiHighNonPagedPoolEvent);
    if (!NT_SUCCESS(Status)) return FALSE;

    /* Now setup the pool events */
    MiInitializePoolEvents();

    /* Set the initial event state */
    MiNotifyMemoryEvents();
    return TRUE;
}

VOID
NTAPI
MiAddHalIoMappings(VOID)
{
    PVOID BaseAddress;
    PMMPTE PointerPde;
    PMMPTE PointerPte;
    ULONG i, j, PdeCount;
    PFN_NUMBER PageFrameIndex;

    /* HAL Heap address -- should be on a PDE boundary */
    BaseAddress = (PVOID)0xFFC00000;
    ASSERT(MiAddressToPteOffset(BaseAddress) == 0);

    /* Check how many PDEs the heap has */
    PointerPde = MiAddressToPde(BaseAddress);
    PdeCount = PDE_COUNT - ADDR_TO_PDE_OFFSET(BaseAddress);
    for (i = 0; i < PdeCount; i++)
    {
        /* Does the HAL own this mapping? */
        if ((PointerPde->u.Hard.Valid == 1) &&
            (PointerPde->u.Hard.LargePage == 0))
        {
            /* Get the PTE for it and scan each page */
            PointerPte = MiAddressToPte(BaseAddress);
            for (j = 0; j < PTE_COUNT; j++)
            {
                /* Does the HAL own this page? */
                if (PointerPte->u.Hard.Valid == 1)
                {
                    /* Is the HAL using it for device or I/O mapped memory? */
                    PageFrameIndex = PFN_FROM_PTE(PointerPte);
                    if (!MiGetPfnEntry(PageFrameIndex))
                    {
                        /* FIXME: For PAT, we need to track I/O cache attributes for coherency */
                        DPRINT1("HAL I/O Mapping at %p is unsafe\n", BaseAddress);
                    }
                }

                /* Move to the next page */
                BaseAddress = (PVOID)((ULONG_PTR)BaseAddress + PAGE_SIZE);
                PointerPte++;
            }
        }
        else
        {
            /* Move to the next address */
            BaseAddress = (PVOID)((ULONG_PTR)BaseAddress + PDE_MAPPED_VA);
        }

        /* Move to the next PDE */
        PointerPde++;
    }
}

VOID
NTAPI
MmDumpArmPfnDatabase(VOID)
{
    ULONG i;
    PMMPFN Pfn1;
    PCHAR Consumer = "Unknown";
    KIRQL OldIrql;
    ULONG ActivePages = 0, FreePages = 0, OtherPages = 0;

    KeRaiseIrql(HIGH_LEVEL, &OldIrql);

    //
    // Loop the PFN database
    //
    for (i = 0; i <= MmHighestPhysicalPage; i++)
    {
        Pfn1 = MiGetPfnEntry(i);
        if (!Pfn1) continue;

        //
        // Get the page location
        //
        switch (Pfn1->u3.e1.PageLocation)
        {
            case ActiveAndValid:

                Consumer = "Active and Valid";
                ActivePages++;
                break;

            case FreePageList:

                Consumer = "Free Page List";
                FreePages++;
                break;

            default:

                Consumer = "Other (ASSERT!)";
                OtherPages++;
                break;
        }

        //
        // Pretty-print the page
        //
        DbgPrint("0x%08p:\t%20s\t(%02d.%02d) [%08p-%08p]\n",
                 i << PAGE_SHIFT,
                 Consumer,
                 Pfn1->u3.e2.ReferenceCount,
                 Pfn1->u2.ShareCount,
                 Pfn1->PteAddress,
                 Pfn1->u4.PteFrame);
    }

    DbgPrint("Active: %d pages\t[%d KB]\n", ActivePages, (ActivePages << PAGE_SHIFT) / 1024);
    DbgPrint("Free: %d pages\t[%d KB]\n", FreePages, (FreePages << PAGE_SHIFT) / 1024);
    DbgPrint("Other: %d pages\t[%d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);

    KeLowerIrql(OldIrql);
}

PFN_NUMBER
NTAPI
MiPagesInLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock,
                     IN PBOOLEAN IncludeType)
{
    PLIST_ENTRY NextEntry;
    PFN_NUMBER PageCount = 0;
    PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;

    //
    // Now loop through the descriptors
    //
    NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
    while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
    {
        //
        // Grab each one, and check if it's one we should include
        //
        MdBlock = CONTAINING_RECORD(NextEntry,
                                    MEMORY_ALLOCATION_DESCRIPTOR,
                                    ListEntry);
        if ((MdBlock->MemoryType < LoaderMaximum) &&
            (IncludeType[MdBlock->MemoryType]))
        {
            //
            // Add this to our running total
            //
            PageCount += MdBlock->PageCount;
        }

        //
        // Try the next descriptor
        //
        NextEntry = MdBlock->ListEntry.Flink;
    }

    //
    // Return the total
    //
    return PageCount;
}

PPHYSICAL_MEMORY_DESCRIPTOR
NTAPI
MmInitializeMemoryLimits(IN PLOADER_PARAMETER_BLOCK LoaderBlock,
                         IN PBOOLEAN IncludeType)
{
    PLIST_ENTRY NextEntry;
    ULONG Run = 0, InitialRuns = 0;
    PFN_NUMBER NextPage = -1, PageCount = 0;
    PPHYSICAL_MEMORY_DESCRIPTOR Buffer, NewBuffer;
    PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;

    //
    // Scan the memory descriptors
    //
    NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
    while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
    {
        //
        // For each one, increase the memory allocation estimate
        //
        InitialRuns++;
        NextEntry = NextEntry->Flink;
    }

    //
    // Allocate the maximum we'll ever need
    //
    Buffer = ExAllocatePoolWithTag(NonPagedPool,
                                   sizeof(PHYSICAL_MEMORY_DESCRIPTOR) +
                                   sizeof(PHYSICAL_MEMORY_RUN) *
                                   (InitialRuns - 1),
                                   'lMmM');
    if (!Buffer) return NULL;

    //
    // For now that's how many runs we have
    //
    Buffer->NumberOfRuns = InitialRuns;

    //
    // Now loop through the descriptors again
    //
    NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
    while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
    {
        //
        // Grab each one, and check if it's one we should include
        //
        MdBlock = CONTAINING_RECORD(NextEntry,
                                    MEMORY_ALLOCATION_DESCRIPTOR,
                                    ListEntry);
        if ((MdBlock->MemoryType < LoaderMaximum) &&
            (IncludeType[MdBlock->MemoryType]))
        {
            //
            // Add this to our running total
            //
            PageCount += MdBlock->PageCount;

            //
            // Check if the next page is described by the next descriptor
            //
            if (MdBlock->BasePage == NextPage)
            {
                //
                // Combine it into the same physical run
                //
                ASSERT(MdBlock->PageCount != 0);
                Buffer->Run[Run - 1].PageCount += MdBlock->PageCount;
                NextPage += MdBlock->PageCount;
            }
            else
            {
                //
                // Otherwise just duplicate the descriptor's contents
                //
                Buffer->Run[Run].BasePage = MdBlock->BasePage;
                Buffer->Run[Run].PageCount = MdBlock->PageCount;
                NextPage = Buffer->Run[Run].BasePage + Buffer->Run[Run].PageCount;

                //
                // And in this case, increase the number of runs
                //
                Run++;
            }
        }

        //
        // Try the next descriptor
        //
        NextEntry = MdBlock->ListEntry.Flink;
    }

    //
    // We should not have been able to go past our initial estimate
    //
    ASSERT(Run <= Buffer->NumberOfRuns);

    //
    // Our guess was probably exaggerated...
    //
    if (InitialRuns > Run)
    {
        //
        // Allocate a more accurately sized buffer
        //
        NewBuffer = ExAllocatePoolWithTag(NonPagedPool,
                                          sizeof(PHYSICAL_MEMORY_DESCRIPTOR) +
                                          sizeof(PHYSICAL_MEMORY_RUN) *
                                          (Run - 1),
                                          'lMmM');
        if (NewBuffer)
        {
            //
            // Copy the old buffer into the new, then free it
            //
            RtlCopyMemory(NewBuffer->Run,
                          Buffer->Run,
                          sizeof(PHYSICAL_MEMORY_RUN) * Run);
            ExFreePool(Buffer);

            //
            // Now use the new buffer
            //
            Buffer = NewBuffer;
        }
    }

    //
    // Write the final numbers, and return it
    //
    Buffer->NumberOfRuns = Run;
    Buffer->NumberOfPages = PageCount;
    return Buffer;
}

VOID
NTAPI
MiBuildPagedPool(VOID)
{
    PMMPTE PointerPte, PointerPde;
    MMPTE TempPte = ValidKernelPte;
    PFN_NUMBER PageFrameIndex;
    KIRQL OldIrql;
    ULONG Size, BitMapSize;
#if (_MI_PAGING_LEVELS == 2)
    //
    // Get the page frame number for the system page directory
    //
    PointerPte = MiAddressToPte(PDE_BASE);
    ASSERT(PD_COUNT == 1);
    MmSystemPageDirectory[0] = PFN_FROM_PTE(PointerPte);

    //
    // Allocate a system PTE which will hold a copy of the page directory
    //
    PointerPte = MiReserveSystemPtes(1, SystemPteSpace);
    ASSERT(PointerPte);
    MmSystemPagePtes = MiPteToAddress(PointerPte);

    //
    // Make this system PTE point to the system page directory.
    // It is now essentially double-mapped. This will be used later for lazy
    // evaluation of PDEs across process switches, similarly to how the Global
    // page directory array in the old ReactOS Mm is used (but in a less hacky
    // way).
    //
    TempPte = ValidKernelPte;
    ASSERT(PD_COUNT == 1);
    TempPte.u.Hard.PageFrameNumber = MmSystemPageDirectory[0];
    MI_WRITE_VALID_PTE(PointerPte, TempPte);
#endif
    //
    // Let's get back to paged pool work: size it up.
    // By default, it should be twice as big as nonpaged pool.
    //
    MmSizeOfPagedPoolInBytes = 2 * MmMaximumNonPagedPoolInBytes;
    if (MmSizeOfPagedPoolInBytes > ((ULONG_PTR)MmNonPagedSystemStart -
                                    (ULONG_PTR)MmPagedPoolStart))
    {
        //
        // On the other hand, we have limited VA space, so make sure that the VA
        // for paged pool doesn't overflow into nonpaged pool VA. Otherwise, set
        // whatever maximum is possible.
        //
        MmSizeOfPagedPoolInBytes = (ULONG_PTR)MmNonPagedSystemStart -
                                   (ULONG_PTR)MmPagedPoolStart;
    }

    //
    // Get the size in pages and make sure paged pool is at least 32MB.
    //
    Size = MmSizeOfPagedPoolInBytes;
    if (Size < MI_MIN_INIT_PAGED_POOLSIZE) Size = MI_MIN_INIT_PAGED_POOLSIZE;
    Size = BYTES_TO_PAGES(Size);

    //
    // Now check how many page tables (of 1024 PTEs each) these pages require.
    //
    Size = (Size + (1024 - 1)) / 1024;

    //
    // Recompute the page-aligned size of the paged pool, in bytes and pages.
    //
    MmSizeOfPagedPoolInBytes = Size * PAGE_SIZE * 1024;
    MmSizeOfPagedPoolInPages = MmSizeOfPagedPoolInBytes >> PAGE_SHIFT;
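
    //
    // Worked example (illustrative, assuming 4KB pages and the 32MB minimum):
    // 32MB is 8192 pages, needing 8192 / 1024 = 8 page tables, so the pool is
    // re-sized to 8 * 1024 * PAGE_SIZE = 32MB exactly.
    //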

    //
    // Let's be really sure this doesn't overflow into nonpaged system VA
    //
    ASSERT((MmSizeOfPagedPoolInBytes + (ULONG_PTR)MmPagedPoolStart) <=
           (ULONG_PTR)MmNonPagedSystemStart);

    //
    // This is where paged pool ends
    //
    MmPagedPoolEnd = (PVOID)(((ULONG_PTR)MmPagedPoolStart +
                              MmSizeOfPagedPoolInBytes) - 1);

    //
    // So now get the PDE for paged pool and zero it out
    //
    PointerPde = MiAddressToPde(MmPagedPoolStart);

#if (_MI_PAGING_LEVELS >= 3)
    /* On these systems, there's no double-mapping, so instead, the PPE and PXEs
     * are setup to span the entire paged pool area, so there's no need for the
     * system PD */
    ASSERT(FALSE);
#endif

    RtlZeroMemory(PointerPde,
                  (1 + MiAddressToPde(MmPagedPoolEnd) - PointerPde) * sizeof(MMPTE));

    //
    // Next, get the first and last PTE
    //
    PointerPte = MiAddressToPte(MmPagedPoolStart);
    MmPagedPoolInfo.FirstPteForPagedPool = PointerPte;
    MmPagedPoolInfo.LastPteForPagedPool = MiAddressToPte(MmPagedPoolEnd);

    //
    // Lock the PFN database
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    /* Allocate a page and map the first paged pool PDE */
    PageFrameIndex = MiRemoveZeroPage(0);
    TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
    MI_WRITE_VALID_PTE(PointerPde, TempPte);
#if (_MI_PAGING_LEVELS >= 3)
    /* Use the PPE of MmPagedPoolStart that was setup above */
    // Bla = PFN_FROM_PTE(PpeAddress(MmPagedPool...));
    ASSERT(FALSE);
#else
    /* Do it this way */
    // Bla = MmSystemPageDirectory[(PointerPde - (PMMPTE)PDE_BASE) / PDE_COUNT]

    /* Initialize the PFN entry for it */
    MiInitializePfnForOtherProcess(PageFrameIndex,
                                   PointerPde,
                                   MmSystemPageDirectory[(PointerPde - (PMMPTE)PDE_BASE) / PDE_COUNT]);
#endif

    //
    // Release the PFN database lock
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    //
    // We only have one PDE mapped for now... at fault time, additional PDEs
    // will be allocated to handle paged pool growth. This is where they'll have
    // to start.
    //
    MmPagedPoolInfo.NextPdeForPagedPoolExpansion = PointerPde + 1;

    //
    // We keep track of each page via a bit, so check how big the bitmap will
    // have to be (make sure to align our page count such that it fits nicely
    // into a 4-byte aligned bitmap).
    //
    // We'll also allocate the bitmap header itself as part of the same buffer.
    //
    Size = Size * 1024;
    ASSERT(Size == MmSizeOfPagedPoolInPages);
    BitMapSize = Size;
    Size = sizeof(RTL_BITMAP) + (((Size + 31) / 32) * sizeof(ULONG));
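
    //
    // Worked example (illustrative, assuming 4KB pages): for a 32MB paged
    // pool, BitMapSize is 8192 bits, so the buffer is sizeof(RTL_BITMAP) +
    // (8192 / 32) * sizeof(ULONG), i.e. the header plus 1KB of bitmap bits.
    //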

    //
    // Allocate the allocation bitmap, which tells us which regions have not yet
    // been mapped into memory
    //
    MmPagedPoolInfo.PagedPoolAllocationMap = ExAllocatePoolWithTag(NonPagedPool,
                                                                   Size,
                                                                   '  mM');
    ASSERT(MmPagedPoolInfo.PagedPoolAllocationMap);

    //
    // Initialize it such that at first, only the first page's worth of PTEs is
    // marked as allocated (incidentally, the first PDE we allocated earlier).
    //
    RtlInitializeBitMap(MmPagedPoolInfo.PagedPoolAllocationMap,
                        (PULONG)(MmPagedPoolInfo.PagedPoolAllocationMap + 1),
                        BitMapSize);
    RtlSetAllBits(MmPagedPoolInfo.PagedPoolAllocationMap);
    RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap, 0, 1024);

    //
    // We have a second bitmap, which keeps track of where allocations end.
    // Given the allocation bitmap and a base address, we can therefore figure
    // out which page is the last page of that allocation, and thus how big the
    // entire allocation is.
    //
    MmPagedPoolInfo.EndOfPagedPoolBitmap = ExAllocatePoolWithTag(NonPagedPool,
                                                                 Size,
                                                                 '  mM');
    ASSERT(MmPagedPoolInfo.EndOfPagedPoolBitmap);
    RtlInitializeBitMap(MmPagedPoolInfo.EndOfPagedPoolBitmap,
                        (PULONG)(MmPagedPoolInfo.EndOfPagedPoolBitmap + 1),
                        BitMapSize);

    //
    // Since no allocations have been made yet, there are no bits set as the end
    //
    RtlClearAllBits(MmPagedPoolInfo.EndOfPagedPoolBitmap);

    //
    // Initialize paged pool.
    //
    InitializePool(PagedPool, 0);

    /* Default low threshold of 30MB or one fifth of paged pool */
    MiLowPagedPoolThreshold = (30 * _1MB) >> PAGE_SHIFT;
    MiLowPagedPoolThreshold = min(MiLowPagedPoolThreshold, Size / 5);

    /* Default high threshold of 60MB or two fifths of paged pool */
    MiHighPagedPoolThreshold = (60 * _1MB) >> PAGE_SHIFT;
    MiHighPagedPoolThreshold = min(MiHighPagedPoolThreshold, (Size * 2) / 5);
    ASSERT(MiLowPagedPoolThreshold < MiHighPagedPoolThreshold);

    /* Setup the global system view space map */
    MiInitializeSystemSpaceMap(NULL);
}

NTSTATUS
NTAPI
MmArmInitSystem(IN ULONG Phase,
                IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
    ULONG i;
    BOOLEAN IncludeType[LoaderMaximum];
    PVOID Bitmap;
    PPHYSICAL_MEMORY_RUN Run;
    PFN_NUMBER PageCount;

    //
    // Instantiate memory that we don't consider RAM/usable
    // We use the same exclusions that Windows does, in order to try to be
    // compatible with WinLDR-style booting
    //
    for (i = 0; i < LoaderMaximum; i++) IncludeType[i] = TRUE;
    IncludeType[LoaderBad] = FALSE;
    IncludeType[LoaderFirmwarePermanent] = FALSE;
    IncludeType[LoaderSpecialMemory] = FALSE;
    IncludeType[LoaderBBTMemory] = FALSE;
    if (Phase == 0)
    {
        /* Initialize the phase 0 temporary event */
        KeInitializeEvent(&MiTempEvent, NotificationEvent, FALSE);

        /* Set all the events to use the temporary event for now */
        MiLowMemoryEvent = &MiTempEvent;
        MiHighMemoryEvent = &MiTempEvent;
        MiLowPagedPoolEvent = &MiTempEvent;
        MiHighPagedPoolEvent = &MiTempEvent;
        MiLowNonPagedPoolEvent = &MiTempEvent;
        MiHighNonPagedPoolEvent = &MiTempEvent;

        //
        // Define the basic user vs. kernel address space separation
        //
        MmSystemRangeStart = (PVOID)KSEG0_BASE;
        MmUserProbeAddress = (ULONG_PTR)MmSystemRangeStart - 0x10000;
        MmHighestUserAddress = (PVOID)(MmUserProbeAddress - 1);

        /* Highest PTE and PDE based on the addresses above */
        MiHighestUserPte = MiAddressToPte(MmHighestUserAddress);
        MiHighestUserPde = MiAddressToPde(MmHighestUserAddress);
#if (_MI_PAGING_LEVELS >= 3)
        /* We need the highest PPE and PXE addresses */
        ASSERT(FALSE);
#endif
        //
        // Get the size of the boot loader's image allocations and then round
        // that region up to a PDE size, so that any PDEs we might create for
        // whatever follows are separate from the PDEs that boot loader might've
        // already created (and later, we can blow all that away if we want to).
        //
        MmBootImageSize = KeLoaderBlock->Extension->LoaderPagesSpanned;
        MmBootImageSize *= PAGE_SIZE;
        MmBootImageSize = (MmBootImageSize + PDE_MAPPED_VA - 1) & ~(PDE_MAPPED_VA - 1);
        ASSERT((MmBootImageSize % PDE_MAPPED_VA) == 0);
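
        //
        // Worked example (illustrative, for x86 where PDE_MAPPED_VA is 4MB):
        // a loader that spanned 0x1A00 pages (26MB) gets rounded up to 28MB,
        // the next PDE boundary.
        //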
1740
1741 //
1742 // Set the size of session view, pool, and image
1743 //
1744 MmSessionSize = MI_SESSION_SIZE;
1745 MmSessionViewSize = MI_SESSION_VIEW_SIZE;
1746 MmSessionPoolSize = MI_SESSION_POOL_SIZE;
1747 MmSessionImageSize = MI_SESSION_IMAGE_SIZE;
1748
1749 //
1750 // Set the size of system view
1751 //
1752 MmSystemViewSize = MI_SYSTEM_VIEW_SIZE;
1753
1754 //
1755 // This is where it all ends
1756 //
1757 MiSessionImageEnd = (PVOID)PTE_BASE;
1758
1759 //
1760 // This is where we will load Win32k.sys and the video driver
1761 //
1762 MiSessionImageStart = (PVOID)((ULONG_PTR)MiSessionImageEnd -
1763 MmSessionImageSize);
1764
1765 //
1766 // So the view starts right below the session working set (itself below
1767 // the image area)
1768 //
1769 MiSessionViewStart = (PVOID)((ULONG_PTR)MiSessionImageEnd -
1770 MmSessionImageSize -
1771 MI_SESSION_WORKING_SET_SIZE -
1772 MmSessionViewSize);
1773
1774 //
1775 // Session pool follows
1776 //
1777 MiSessionPoolEnd = MiSessionViewStart;
1778 MiSessionPoolStart = (PVOID)((ULONG_PTR)MiSessionPoolEnd -
1779 MmSessionPoolSize);
1780
1781 //
1782 // And it all begins here
1783 //
1784 MmSessionBase = MiSessionPoolStart;
1785
1786 //
1787 // Sanity check that our math is correct
1788 //
1789 ASSERT((ULONG_PTR)MmSessionBase + MmSessionSize == PTE_BASE);
1790
1791 //
1792 // Session space ends wherever image session space ends
1793 //
1794 MiSessionSpaceEnd = MiSessionImageEnd;
1795
1796 //
1797 // System view space ends at session space, so now that we know where
1798 // this is, we can compute the base address of system view space itself.
1799 //
1800 MiSystemViewStart = (PVOID)((ULONG_PTR)MmSessionBase -
1801 MmSystemViewSize);
1802
        /* Compute the PTE addresses for all the addresses we carved out */
        MiSessionImagePteStart = MiAddressToPte(MiSessionImageStart);
        MiSessionImagePteEnd = MiAddressToPte(MiSessionImageEnd);
        MiSessionBasePte = MiAddressToPte(MmSessionBase);
        MiSessionLastPte = MiAddressToPte(MiSessionSpaceEnd);

        /* Initialize the user mode image list */
        InitializeListHead(&MmLoadedUserImageList);

        /* Initialize the paged pool mutex */
        KeInitializeGuardedMutex(&MmPagedPoolMutex);

        /* Initialize the Loader Lock */
        KeInitializeMutant(&MmSystemLoadLock, FALSE);

        /* Set the zero page event */
        KeInitializeEvent(&MmZeroingPageEvent, SynchronizationEvent, FALSE);
        MmZeroingPageThreadActive = FALSE;

        //
        // Count physical pages on the system
        //
        PageCount = MiPagesInLoaderBlock(LoaderBlock, IncludeType);

        //
        // Check if this is a machine with less than 19MB of RAM
        //
        if (PageCount < MI_MIN_PAGES_FOR_SYSPTE_TUNING)
        {
            //
            // Use the very minimum of system PTEs
            //
            MmNumberOfSystemPtes = 7000;
        }
        else
        {
            //
            // Use the default, but check if we have more than 32MB of RAM
            //
            MmNumberOfSystemPtes = 11000;
            if (PageCount > MI_MIN_PAGES_FOR_SYSPTE_BOOST)
            {
                //
                // Double the amount of system PTEs
                //
                MmNumberOfSystemPtes <<= 1;
            }
        }
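
        //
        // Back-of-the-envelope (illustrative, 4KB pages): 7000 PTEs map about
        // 27MB of system VA, 11000 about 43MB, and the doubled 22000 about
        // 86MB. The MI_MIN_PAGES_FOR_* thresholds (19MB / 32MB, per the
        // comments above) pick between these tiers.
        //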

        DPRINT("System PTE count has been tuned to %d (%d bytes)\n",
               MmNumberOfSystemPtes, MmNumberOfSystemPtes * PAGE_SIZE);

        /* Initialize the working set lock */
        ExInitializePushLock((PULONG_PTR)&MmSystemCacheWs.WorkingSetMutex);

        /* Set commit limit */
        MmTotalCommitLimit = 2 * _1GB;
        MmTotalCommitLimitMaximum = MmTotalCommitLimit;

        /* Has the allocation fragment been set up? */
        if (!MmAllocationFragment)
        {
            /* Use the default value */
            MmAllocationFragment = MI_ALLOCATION_FRAGMENT;
            if (PageCount < ((256 * _1MB) / PAGE_SIZE))
            {
                /* On systems with less than 256MB of memory, divide it by 4 */
                MmAllocationFragment = MI_ALLOCATION_FRAGMENT / 4;
            }
            else if (PageCount < (_1GB / PAGE_SIZE))
            {
                /* On systems with less than 1GB, divide it by 2 */
                MmAllocationFragment = MI_ALLOCATION_FRAGMENT / 2;
            }
        }
        else
        {
            /* Convert the registry-specified value from KB to bytes, rounded to a page multiple */
            MmAllocationFragment *= _1KB;
            MmAllocationFragment = ROUND_TO_PAGES(MmAllocationFragment);

            /* Don't let it go past the maximum */
            MmAllocationFragment = min(MmAllocationFragment,
                                       MI_MAX_ALLOCATION_FRAGMENT);

            /* Don't let it get too small either */
            MmAllocationFragment = max(MmAllocationFragment,
                                       MI_MIN_ALLOCATION_FRAGMENT);
        }
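
        //
        // Illustrative numbers (assuming MI_ALLOCATION_FRAGMENT is 64KB, as
        // defined in miarm.h -- verify against your tree): a machine under
        // 256MB ends up with a 16KB fragment, under 1GB with 32KB, and 1GB+
        // with the full 64KB. A registry override of, say, 96 (KB) becomes
        // 0x18000 bytes after the KB-to-bytes conversion and page rounding
        // above.
        //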

        /* Initialize the platform-specific parts */
        MiInitMachineDependent(LoaderBlock);

        //
        // Sync us up with ReactOS Mm
        //
        MiSyncARM3WithROS(MmNonPagedSystemStart, (PVOID)((ULONG_PTR)MmNonPagedPoolEnd - 1));
        MiSyncARM3WithROS(MmPfnDatabase, (PVOID)((ULONG_PTR)MmNonPagedPoolStart + MmSizeOfNonPagedPoolInBytes - 1));
        MiSyncARM3WithROS((PVOID)HYPER_SPACE, (PVOID)(HYPER_SPACE + PAGE_SIZE - 1));
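
        //
        // The three calls above make the ARM3-owned ranges (system PTE space,
        // the PFN database plus initial nonpaged pool, and hyperspace) visible
        // to the legacy ReactOS Mm's view of the kernel address map. That this
        // works by mirroring the corresponding PDEs is our reading of
        // MiSyncARM3WithROS, not something guaranteed by this file.
        //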

        //
        // Build the physical memory block
        //
        MmPhysicalMemoryBlock = MmInitializeMemoryLimits(LoaderBlock,
                                                         IncludeType);

        //
        // Allocate a buffer for the PFN bitmap, sized up to a 32-bit boundary
        //
        Bitmap = ExAllocatePoolWithTag(NonPagedPool,
                                       (((MmHighestPhysicalPage + 1) + 31) / 32) * 4,
                                       ' mM');
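
        //
        // Size arithmetic, spelled out: one bit per PFN from 0 through
        // MmHighestPhysicalPage, rounded up to whole 32-bit ULONGs, times 4
        // bytes per ULONG. E.g. (illustrative) a highest PFN of 0x3FFFF
        // (1GB of 4KB pages) needs 0x40000 bits = 0x2000 ULONGs = 32KB.
        //
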
        if (!Bitmap)
        {
            //
            // This is critical
            //
            KeBugCheckEx(INSTALL_MORE_MEMORY,
                         MmNumberOfPhysicalPages,
                         MmLowestPhysicalPage,
                         MmHighestPhysicalPage,
                         0x101);
        }

        //
        // Initialize it and clear all the bits to begin with
        //
        RtlInitializeBitMap(&MiPfnBitMap,
                            Bitmap,
                            MmHighestPhysicalPage + 1);
        RtlClearAllBits(&MiPfnBitMap);

        //
        // Loop physical memory runs
        //
        for (i = 0; i < MmPhysicalMemoryBlock->NumberOfRuns; i++)
        {
            //
            // Get the run
            //
            Run = &MmPhysicalMemoryBlock->Run[i];
            DPRINT("PHYSICAL RAM [0x%08p to 0x%08p]\n",
                   Run->BasePage << PAGE_SHIFT,
                   (Run->BasePage + Run->PageCount) << PAGE_SHIFT);

            //
            // Make sure it has pages inside it
            //
            if (Run->PageCount)
            {
                //
                // Set the bits in the PFN bitmap
                //
                RtlSetBits(&MiPfnBitMap, Run->BasePage, Run->PageCount);
            }
        }
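
        //
        // From here on, a set bit means "this PFN is actual RAM". A minimal
        // usage sketch (illustrative, not from this file):
        //
        //     if (Pfn <= MmHighestPhysicalPage &&
        //         RtlCheckBit(&MiPfnBitMap, Pfn))
        //     {
        //         /* Pfn is backed by a physical memory run */
        //     }
        //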

        /* Look for large page cache entries that need caching */
        MiSyncCachedRanges();

        /* Loop over the HAL heap's I/O device mappings that need coherency tracking */
        MiAddHalIoMappings();

        /* Set the initial resident page count */
        MmResidentAvailablePages = MmAvailablePages - 32;

        /* Initialize large page structures on PAE/x64, and MmProcessList on x86 */
        MiInitializeLargePageSupport();

        /* Check if the registry says any drivers should be loaded with large pages */
        MiInitializeDriverLargePageList();

        /* Relocate the boot drivers into system PTE space and fix up their PFNs */
        MiReloadBootLoadedDrivers(LoaderBlock);

        /* FIXME: Call out into Driver Verifier for initialization */

        /* Check how many pages the system has */
        if (MmNumberOfPhysicalPages <= ((13 * _1MB) / PAGE_SIZE))
        {
            /* Set small system */
            MmSystemSize = MmSmallSystem;
        }
        else if (MmNumberOfPhysicalPages <= ((19 * _1MB) / PAGE_SIZE))
        {
            /* Set small system and add 100 pages for the cache */
            MmSystemSize = MmSmallSystem;
            MmSystemCacheWsMinimum += 100;
        }
        else
        {
            /* Set medium system and add 400 pages for the cache */
            MmSystemSize = MmMediumSystem;
            MmSystemCacheWsMinimum += 400;
        }

        /* Check for less than 24MB */
        if (MmNumberOfPhysicalPages < ((24 * _1MB) / PAGE_SIZE))
        {
            /* Cap the cache working set minimum at 32 pages */
            MmSystemCacheWsMinimum = 32;
        }

        /* Check for more than 32MB */
        if (MmNumberOfPhysicalPages >= ((32 * _1MB) / PAGE_SIZE))
        {
            /* Check for the product type being "Wi" (the first two UNICODE
               characters of "WinNT", stored little-endian) */
            if (MmProductType == '\0i\0W')
            {
                /* Then this is a large system */
                MmSystemSize = MmLargeSystem;
            }
            else
            {
                /* For servers, we need 64MB to consider this as being large */
                if (MmNumberOfPhysicalPages >= ((64 * _1MB) / PAGE_SIZE))
                {
                    /* Set it as large */
                    MmSystemSize = MmLargeSystem;
                }
            }
        }

        /* Check for more than 33MB */
        if (MmNumberOfPhysicalPages > ((33 * _1MB) / PAGE_SIZE))
        {
            /* Add another 500 pages to the cache */
            MmSystemCacheWsMinimum += 500;
        }
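
        //
        // Summary of the sizing ladder implemented above (derived directly
        // from the constants in the code):
        //
        //     <= 13MB          small system
        //     <= 19MB          small system,  WS minimum +100 pages
        //     >  19MB          medium system, WS minimum +400 pages
        //     <  24MB          WS minimum forced back down to 32 pages
        //     >= 32MB (wkst)   large system
        //     >= 64MB (server) large system
        //     >  33MB          WS minimum +500 pages
        //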

        /* Now set up the shared user data fields */
        ASSERT(SharedUserData->NumberOfPhysicalPages == 0);
        SharedUserData->NumberOfPhysicalPages = MmNumberOfPhysicalPages;
        SharedUserData->LargePageMinimum = 0;

        /* Check for workstation ("Wi" for WinNT) */
        if (MmProductType == '\0i\0W')
        {
            /* Set Windows NT Workstation product type */
            SharedUserData->NtProductType = NtProductWinNt;
            MmProductType = 0;
        }
        else
        {
            /* Check for LanMan server */
            if (MmProductType == '\0a\0L')
            {
                /* This is a domain controller */
                SharedUserData->NtProductType = NtProductLanManNt;
            }
            else
            {
                /* Otherwise it must be a normal server */
                SharedUserData->NtProductType = NtProductServer;
            }

            /* Set the product type, and make the system more aggressive with low memory */
            MmProductType = 1;
            MmMinimumFreePages = 81;
        }
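
        //
        // How the multi-character constants above decode (worked example):
        // L"Wi" in memory is the byte sequence 57 00 69 00, which read as a
        // little-endian ULONG is 0x00690057 -- exactly the value of the
        // constant '\0i\0W' (characters '\0', 'i', '\0', 'W'). Likewise
        // '\0a\0L' matches L"La" from "LanmanNT". MmProductType is assumed to
        // have been loaded from the registry ProductType string earlier.
        //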

        /* Update working set tuning parameters */
        MiAdjustWorkingSetManagerParameters(!MmProductType);

        /* Fine-tune the page count by removing the working set and NP expansion charges */
        MmResidentAvailablePages -= MiExpansionPoolPagesInitialCharge;
        MmResidentAvailablePages -= MmSystemCacheWsMinimum;
        MmResidentAvailableAtInit = MmResidentAvailablePages;
        if (MmResidentAvailablePages <= 0)
        {
            /* This should not happen; report a real failure status (FALSE
               would alias STATUS_SUCCESS) */
            DPRINT1("System cache working set too big\n");
            return STATUS_INSUFFICIENT_RESOURCES;
        }

        /* Initialize the system cache */
        //MiInitializeSystemCache(MmSystemCacheWsMinimum, MmAvailablePages);

        /* Update the commit limit */
        MmTotalCommitLimit = MmAvailablePages;
        if (MmTotalCommitLimit > 1024) MmTotalCommitLimit -= 1024;
        MmTotalCommitLimitMaximum = MmTotalCommitLimit;
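
        //
        // Example with round numbers (illustrative): on a machine with
        // 0x1F000 pages (~496MB) available, the commit limit becomes
        // 0x1F000 - 0x400 = 0x1EC00 pages, i.e. ~4MB is held back so commit
        // can never consume every last free page.
        //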

        /* Size up paged pool and build the shadow system page directory */
        MiBuildPagedPool();

        /* Debugger physical memory support is now ready to be used */
        MmDebugPte = MiAddressToPte(MiDebugMapping);

        /* Initialize the loaded module list */
        MiInitializeLoadedModuleList(LoaderBlock);
    }

    //
    // Always return success for now
    //
    return STATUS_SUCCESS;
}

/* EOF */