[NTOSKRNL]
[reactos.git] / reactos / ntoskrnl / mm / ARM3 / mminit.c
1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: BSD - See COPYING.ARM in the top level directory
4 * FILE: ntoskrnl/mm/ARM3/mminit.c
5 * PURPOSE: ARM Memory Manager Initialization
6 * PROGRAMMERS: ReactOS Portable Systems Group
7 */
8
9 /* INCLUDES *******************************************************************/
10
11 #include <ntoskrnl.h>
12 #define NDEBUG
13 #include <debug.h>
14
15 #line 15 "ARM³::INIT"
16 #define MODULE_INVOLVED_IN_ARM3
17 #include "miarm.h"
18
19 /* GLOBALS ********************************************************************/
20
//
// These are all registry-configurable, but by default, the memory manager will
// figure out the most appropriate values.
//
ULONG MmMaximumNonPagedPoolPercent;
SIZE_T MmSizeOfNonPagedPoolInBytes;
SIZE_T MmMaximumNonPagedPoolInBytes;

/* Some of the same values, in pages */
PFN_NUMBER MmMaximumNonPagedPoolInPages;

//
// These numbers describe the discrete equation components of the nonpaged
// pool sizing algorithm.
//
// They are described on http://support.microsoft.com/default.aspx/kb/126402/ja
// along with the algorithm that uses them, which is implemented later below.
//
SIZE_T MmMinimumNonPagedPoolSize = 256 * 1024;
ULONG MmMinAdditionNonPagedPoolPerMb = 32 * 1024;
SIZE_T MmDefaultMaximumNonPagedPool = 1024 * 1024;
ULONG MmMaxAdditionNonPagedPoolPerMb = 400 * 1024;

//
// The memory layout (and especially variable names) of the NT kernel mode
// components can be a bit hard to twig, especially when it comes to the non
// paged area.
//
// There are really two components to the non-paged pool:
//
// - The initial nonpaged pool, sized dynamically up to a maximum.
// - The expansion nonpaged pool, sized dynamically up to a maximum.
//
// The initial nonpaged pool is physically continuous for performance, and
// immediately follows the PFN database, typically sharing the same PDE. It is
// a very small resource (32MB on a 1GB system), and capped at 128MB.
//
// Right now we call this the "ARM³ Nonpaged Pool" and it begins somewhere after
// the PFN database (which starts at 0xB0000000).
//
// The expansion nonpaged pool, on the other hand, can grow much bigger (400MB
// for a 1GB system). On ARM³ however, it is currently capped at 128MB.
//
// The address where the initial nonpaged pool starts is aptly named
// MmNonPagedPoolStart, and it describes a range of MmSizeOfNonPagedPoolInBytes
// bytes.
//
// Expansion nonpaged pool starts at an address described by the variable called
// MmNonPagedPoolExpansionStart, and it goes on for MmMaximumNonPagedPoolInBytes
// minus MmSizeOfNonPagedPoolInBytes bytes, always reaching MmNonPagedPoolEnd
// (because of the way it's calculated) at 0xFFBE0000.
//
// Initial nonpaged pool is allocated and mapped early-on during boot, but what
// about the expansion nonpaged pool? It is instead composed of special pages
// which belong to what are called System PTEs. These PTEs are the matter of a
// later discussion, but they are also considered part of the "nonpaged" OS, due
// to the fact that they are never paged out -- once an address is described by
// a System PTE, it is always valid, until the System PTE is torn down.
//
// System PTEs are actually composed of two "spaces", the system space proper,
// and the nonpaged pool expansion space. The latter, as we've already seen,
// begins at MmNonPagedPoolExpansionStart. Based on the number of System PTEs
// that the system will support, the remaining address space below this address
// is used to hold the system space PTEs. This address, in turn, is held in the
// variable named MmNonPagedSystemStart, which itself is never allowed to go
// below 0xEB000000 (thus creating an upper bound on the number of System PTEs).
//
// This means that 330MB are reserved for total nonpaged system VA, on top of
// whatever the initial nonpaged pool allocation is.
//
// The following URLs, valid as of April 23rd, 2008, support this evidence:
//
// http://www.cs.miami.edu/~burt/journal/NT/memory.html
// http://www.ditii.com/2007/09/28/windows-memory-management-x86-virtual-address-space/
//
PVOID MmNonPagedSystemStart;
PVOID MmNonPagedPoolStart;
PVOID MmNonPagedPoolExpansionStart;
PVOID MmNonPagedPoolEnd = MI_NONPAGED_POOL_END;

//
// This is where paged pool starts by default
//
PVOID MmPagedPoolStart = MI_PAGED_POOL_START;
PVOID MmPagedPoolEnd;

//
// And this is its default size
//
SIZE_T MmSizeOfPagedPoolInBytes = MI_MIN_INIT_PAGED_POOLSIZE;
PFN_NUMBER MmSizeOfPagedPoolInPages = MI_MIN_INIT_PAGED_POOLSIZE / PAGE_SIZE;

//
// Session space starts at 0xBFFFFFFF and grows downwards
// By default, it includes an 8MB image area where we map win32k and video card
// drivers, followed by a 4MB area containing the session's working set. This is
// then followed by a 20MB mapped view area and finally by the session's paged
// pool, by default 16MB.
//
// On a normal system, this results in session space occupying the region from
// 0xBD000000 to 0xC0000000
//
// See miarm.h for the defines that determine the sizing of this region. On an
// NT system, some of these can be configured through the registry, but we don't
// support that yet.
//
PVOID MiSessionSpaceEnd;    // 0xC0000000
PVOID MiSessionImageEnd;    // 0xC0000000
PVOID MiSessionImageStart;  // 0xBF800000
PVOID MiSessionViewStart;   // 0xBE000000
PVOID MiSessionPoolEnd;     // 0xBE000000
PVOID MiSessionPoolStart;   // 0xBD000000
PVOID MmSessionBase;        // 0xBD000000
SIZE_T MmSessionSize;
SIZE_T MmSessionViewSize;
SIZE_T MmSessionPoolSize;
SIZE_T MmSessionImageSize;

/*
 * These are the PTE addresses of the boundaries carved out above
 */
PMMPTE MiSessionImagePteStart;
PMMPTE MiSessionImagePteEnd;
PMMPTE MiSessionBasePte;
PMMPTE MiSessionLastPte;

//
// The system view space, on the other hand, is where sections that are memory
// mapped into "system space" end up.
//
// By default, it is a 16MB region.
//
PVOID MiSystemViewStart;
SIZE_T MmSystemViewSize;

//
// A copy of the system page directory (the page directory associated with the
// System process) is kept (double-mapped) by the manager in order to lazily
// map paged pool PDEs into external processes when they fault on a paged pool
// address.
//
PFN_NUMBER MmSystemPageDirectory[PD_COUNT];
PMMPTE MmSystemPagePtes;

//
// The system cache starts right after hyperspace. The first few pages are for
// keeping track of the system working set list.
//
// This should be 0xC0C00000 -- the cache itself starts at 0xC1000000
//
PMMWSL MmSystemCacheWorkingSetList = MI_SYSTEM_CACHE_WS_START;

//
// Windows NT seems to choose between 7000, 11000 and 50000
// On systems with more than 32MB, this number is then doubled, and further
// aligned up to a PDE boundary (4MB).
//
ULONG_PTR MmNumberOfSystemPtes;

//
// This is how many pages the PFN database will take up
// In Windows, this includes the Quark Color Table, but not in ARM³
//
PFN_NUMBER MxPfnAllocation;

//
// Unlike the old ReactOS Memory Manager, ARM³ (and Windows) does not keep track
// of pages that are not actually valid physical memory, such as ACPI reserved
// regions, BIOS address ranges, or holes in physical memory address space which
// could indicate device-mapped I/O memory.
//
// In fact, the lack of a PFN entry for a page usually indicates that this is
// I/O space instead.
//
// A bitmap, called the PFN bitmap, keeps track of all page frames by assigning
// a bit to each. If the bit is set, then the page is valid physical RAM.
//
RTL_BITMAP MiPfnBitMap;

//
// This structure describes the different pieces of RAM-backed address space
//
PPHYSICAL_MEMORY_DESCRIPTOR MmPhysicalMemoryBlock;

//
// This is where we keep track of the most basic physical layout markers
//
// Note: MmLowestPhysicalPage starts at -1 (all bits set) so that any real
// page number discovered during scanning compares lower and replaces it.
//
PFN_NUMBER MmNumberOfPhysicalPages, MmHighestPhysicalPage, MmLowestPhysicalPage = -1;

//
// The total number of pages mapped by the boot loader, which include the kernel
// HAL, boot drivers, registry, NLS files and other loader data structures is
// kept track of here. This depends on "LoaderPagesSpanned" being correct when
// coming from the loader.
//
// This number is later aligned up to a PDE boundary.
//
SIZE_T MmBootImageSize;

//
// These three variables keep track of the core separation of address space that
// exists between kernel mode and user mode.
//
ULONG_PTR MmUserProbeAddress;
PVOID MmHighestUserAddress;
PVOID MmSystemRangeStart;

/* And these store the respective highest PTE/PDE address */
PMMPTE MiHighestUserPte;
PMMPDE MiHighestUserPde;

/* These variables define the system cache address space */
PVOID MmSystemCacheStart;
PVOID MmSystemCacheEnd;
MMSUPPORT MmSystemCacheWs;

//
// This is where hyperspace ends (followed by the system cache working set)
//
PVOID MmHyperSpaceEnd;

//
// Page coloring algorithm data
//
ULONG MmSecondaryColors;
ULONG MmSecondaryColorMask;

//
// Actual (registry-configurable) size of a GUI thread's stack
//
ULONG MmLargeStackSize = KERNEL_LARGE_STACK_SIZE;

//
// Before we have a PFN database, memory comes straight from our physical memory
// blocks, which is nice because it's guaranteed contiguous and also because once
// we take a page from here, the system doesn't see it anymore.
// However, once the fun is over, those pages must be re-integrated back into
// PFN society life, and that requires us keeping a copy of the original layout
// so that we can parse it later.
//
PMEMORY_ALLOCATION_DESCRIPTOR MxFreeDescriptor;
MEMORY_ALLOCATION_DESCRIPTOR MxOldFreeDescriptor;

/*
 * For each page's worth bytes of L2 cache in a given set/way line, the zero and
 * free lists are organized in what is called a "color".
 *
 * This array points to the two lists, so it can be thought of as a multi-dimensional
 * array of MmFreePagesByColor[2][MmSecondaryColors]. Since the number is dynamic,
 * we describe the array in pointer form instead.
 *
 * On a final note, the color tables themselves are right after the PFN database.
 */
C_ASSERT(FreePageList == 1);
PMMCOLOR_TABLES MmFreePagesByColor[FreePageList + 1];

/* An event used in Phase 0 before the rest of the system is ready to go */
KEVENT MiTempEvent;

/* All the events used for memory threshold notifications */
PKEVENT MiLowMemoryEvent;
PKEVENT MiHighMemoryEvent;
PKEVENT MiLowPagedPoolEvent;
PKEVENT MiHighPagedPoolEvent;
PKEVENT MiLowNonPagedPoolEvent;
PKEVENT MiHighNonPagedPoolEvent;

/* The actual thresholds themselves, in page numbers */
PFN_NUMBER MmLowMemoryThreshold;
PFN_NUMBER MmHighMemoryThreshold;
PFN_NUMBER MiLowPagedPoolThreshold;
PFN_NUMBER MiHighPagedPoolThreshold;
PFN_NUMBER MiLowNonPagedPoolThreshold;
PFN_NUMBER MiHighNonPagedPoolThreshold;

/*
 * This number determines how many free pages must exist, at minimum, until we
 * start trimming working sets and flushing modified pages to obtain more free
 * pages.
 *
 * This number changes if the system detects that this is a server product
 */
PFN_NUMBER MmMinimumFreePages = 26;

/*
 * This number indicates how many pages we consider to be a low limit of having
 * "plenty" of free memory.
 *
 * It is doubled on systems that have more than 63MB of memory
 */
PFN_NUMBER MmPlentyFreePages = 400;

/* These values store the type of system this is (small, med, large) and if server */
ULONG MmProductType;
MM_SYSTEMSIZE MmSystemSize;

/*
 * These values store the cache working set minimums and maximums, in pages
 *
 * The minimum value is boosted on systems with more than 24MB of RAM, and cut
 * down to only 32 pages on embedded (<24MB RAM) systems.
 *
 * An extra boost of 2MB is given on systems with more than 33MB of RAM.
 */
PFN_NUMBER MmSystemCacheWsMinimum = 288;
PFN_NUMBER MmSystemCacheWsMaximum = 350;

/* FIXME: Move to cache/working set code later */
BOOLEAN MmLargeSystemCache;

/*
 * This value determines in how many fragments/chunks the subsection prototype
 * PTEs should be allocated when mapping a section object. It is configurable in
 * the registry through the MapAllocationFragment parameter.
 *
 * The default is 64KB on systems with more than 1GB of RAM, 32KB on systems with
 * more than 256MB of RAM, and 16KB on systems with less than 256MB of RAM.
 *
 * The maximum it can be set to is 2MB, and the minimum is 4KB.
 */
SIZE_T MmAllocationFragment;

/*
 * These two values track how much virtual memory can be committed, and when
 * expansion should happen.
 */
// FIXME: They should be moved elsewhere since it's not an "init" setting?
SIZE_T MmTotalCommitLimit;
SIZE_T MmTotalCommitLimitMaximum;
350
351 /* PRIVATE FUNCTIONS **********************************************************/
352
353 #ifndef _M_AMD64
354 //
355 // In Bavaria, this is probably a hate crime
356 //
357 VOID
358 FASTCALL
359 MiSyncARM3WithROS(IN PVOID AddressStart,
360 IN PVOID AddressEnd)
361 {
362 //
363 // Puerile piece of junk-grade carbonized horseshit puss sold to the lowest bidder
364 //
365 ULONG Pde = ADDR_TO_PDE_OFFSET(AddressStart);
366 while (Pde <= ADDR_TO_PDE_OFFSET(AddressEnd))
367 {
368 //
369 // This both odious and heinous
370 //
371 extern ULONG MmGlobalKernelPageDirectory[1024];
372 MmGlobalKernelPageDirectory[Pde] = ((PULONG)PDE_BASE)[Pde];
373 Pde++;
374 }
375 }
376 #endif
377
378 PFN_NUMBER
379 NTAPI
380 MxGetNextPage(IN PFN_NUMBER PageCount)
381 {
382 PFN_NUMBER Pfn;
383
384 /* Make sure we have enough pages */
385 if (PageCount > MxFreeDescriptor->PageCount)
386 {
387 /* Crash the system */
388 KeBugCheckEx(INSTALL_MORE_MEMORY,
389 MmNumberOfPhysicalPages,
390 MxFreeDescriptor->PageCount,
391 MxOldFreeDescriptor.PageCount,
392 PageCount);
393 }
394
395 /* Use our lowest usable free pages */
396 Pfn = MxFreeDescriptor->BasePage;
397 MxFreeDescriptor->BasePage += PageCount;
398 MxFreeDescriptor->PageCount -= PageCount;
399 return Pfn;
400 }
401
402 VOID
403 NTAPI
404 MiComputeColorInformation(VOID)
405 {
406 ULONG L2Associativity;
407
408 /* Check if no setting was provided already */
409 if (!MmSecondaryColors)
410 {
411 /* Get L2 cache information */
412 L2Associativity = KeGetPcr()->SecondLevelCacheAssociativity;
413
414 /* The number of colors is the number of cache bytes by set/way */
415 MmSecondaryColors = KeGetPcr()->SecondLevelCacheSize;
416 if (L2Associativity) MmSecondaryColors /= L2Associativity;
417 }
418
419 /* Now convert cache bytes into pages */
420 MmSecondaryColors >>= PAGE_SHIFT;
421 if (!MmSecondaryColors)
422 {
423 /* If there was no cache data from the KPCR, use the default colors */
424 MmSecondaryColors = MI_SECONDARY_COLORS;
425 }
426 else
427 {
428 /* Otherwise, make sure there aren't too many colors */
429 if (MmSecondaryColors > MI_MAX_SECONDARY_COLORS)
430 {
431 /* Set the maximum */
432 MmSecondaryColors = MI_MAX_SECONDARY_COLORS;
433 }
434
435 /* Make sure there aren't too little colors */
436 if (MmSecondaryColors < MI_MIN_SECONDARY_COLORS)
437 {
438 /* Set the default */
439 MmSecondaryColors = MI_SECONDARY_COLORS;
440 }
441
442 /* Finally make sure the colors are a power of two */
443 if (MmSecondaryColors & (MmSecondaryColors - 1))
444 {
445 /* Set the default */
446 MmSecondaryColors = MI_SECONDARY_COLORS;
447 }
448 }
449
450 /* Compute the mask and store it */
451 MmSecondaryColorMask = MmSecondaryColors - 1;
452 KeGetCurrentPrcb()->SecondaryColorMask = MmSecondaryColorMask;
453 }
454
455 VOID
456 NTAPI
457 MiInitializeColorTables(VOID)
458 {
459 ULONG i;
460 PMMPTE PointerPte, LastPte;
461 MMPTE TempPte = ValidKernelPte;
462
463 /* The color table starts after the ARM3 PFN database */
464 MmFreePagesByColor[0] = (PMMCOLOR_TABLES)&MmPfnDatabase[MmHighestPhysicalPage + 1];
465
466 /* Loop the PTEs. We have two color tables for each secondary color */
467 PointerPte = MiAddressToPte(&MmFreePagesByColor[0][0]);
468 LastPte = MiAddressToPte((ULONG_PTR)MmFreePagesByColor[0] +
469 (2 * MmSecondaryColors * sizeof(MMCOLOR_TABLES))
470 - 1);
471 while (PointerPte <= LastPte)
472 {
473 /* Check for valid PTE */
474 if (PointerPte->u.Hard.Valid == 0)
475 {
476 /* Get a page and map it */
477 TempPte.u.Hard.PageFrameNumber = MxGetNextPage(1);
478 MI_WRITE_VALID_PTE(PointerPte, TempPte);
479
480 /* Zero out the page */
481 RtlZeroMemory(MiPteToAddress(PointerPte), PAGE_SIZE);
482 }
483
484 /* Next */
485 PointerPte++;
486 }
487
488 /* Now set the address of the next list, right after this one */
489 MmFreePagesByColor[1] = &MmFreePagesByColor[0][MmSecondaryColors];
490
491 /* Now loop the lists to set them up */
492 for (i = 0; i < MmSecondaryColors; i++)
493 {
494 /* Set both free and zero lists for each color */
495 MmFreePagesByColor[ZeroedPageList][i].Flink = 0xFFFFFFFF;
496 MmFreePagesByColor[ZeroedPageList][i].Blink = (PVOID)0xFFFFFFFF;
497 MmFreePagesByColor[ZeroedPageList][i].Count = 0;
498 MmFreePagesByColor[FreePageList][i].Flink = 0xFFFFFFFF;
499 MmFreePagesByColor[FreePageList][i].Blink = (PVOID)0xFFFFFFFF;
500 MmFreePagesByColor[FreePageList][i].Count = 0;
501 }
502 }
503
504 BOOLEAN
505 NTAPI
506 MiIsRegularMemory(IN PLOADER_PARAMETER_BLOCK LoaderBlock,
507 IN PFN_NUMBER Pfn)
508 {
509 PLIST_ENTRY NextEntry;
510 PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
511
512 /* Loop the memory descriptors */
513 NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
514 while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
515 {
516 /* Get the memory descriptor */
517 MdBlock = CONTAINING_RECORD(NextEntry,
518 MEMORY_ALLOCATION_DESCRIPTOR,
519 ListEntry);
520
521 /* Check if this PFN could be part of the block */
522 if (Pfn >= (MdBlock->BasePage))
523 {
524 /* Check if it really is part of the block */
525 if (Pfn < (MdBlock->BasePage + MdBlock->PageCount))
526 {
527 /* Check if the block is actually memory we don't map */
528 if ((MdBlock->MemoryType == LoaderFirmwarePermanent) ||
529 (MdBlock->MemoryType == LoaderBBTMemory) ||
530 (MdBlock->MemoryType == LoaderSpecialMemory))
531 {
532 /* We don't need PFN database entries for this memory */
533 break;
534 }
535
536 /* This is memory we want to map */
537 return TRUE;
538 }
539 }
540 else
541 {
542 /* Blocks are ordered, so if it's not here, it doesn't exist */
543 break;
544 }
545
546 /* Get to the next descriptor */
547 NextEntry = MdBlock->ListEntry.Flink;
548 }
549
550 /* Check if this PFN is actually from our free memory descriptor */
551 if ((Pfn >= MxOldFreeDescriptor.BasePage) &&
552 (Pfn < MxOldFreeDescriptor.BasePage + MxOldFreeDescriptor.PageCount))
553 {
554 /* We use these pages for initial mappings, so we do want to count them */
555 return TRUE;
556 }
557
558 /* Otherwise this isn't memory that we describe or care about */
559 return FALSE;
560 }
561
562 VOID
563 NTAPI
564 MiMapPfnDatabase(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
565 {
566 ULONG FreePage, FreePageCount, PagesLeft, BasePage, PageCount;
567 PLIST_ENTRY NextEntry;
568 PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
569 PMMPTE PointerPte, LastPte;
570 MMPTE TempPte = ValidKernelPte;
571
572 /* Get current page data, since we won't be using MxGetNextPage as it would corrupt our state */
573 FreePage = MxFreeDescriptor->BasePage;
574 FreePageCount = MxFreeDescriptor->PageCount;
575 PagesLeft = 0;
576
577 /* Loop the memory descriptors */
578 NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
579 while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
580 {
581 /* Get the descriptor */
582 MdBlock = CONTAINING_RECORD(NextEntry,
583 MEMORY_ALLOCATION_DESCRIPTOR,
584 ListEntry);
585 if ((MdBlock->MemoryType == LoaderFirmwarePermanent) ||
586 (MdBlock->MemoryType == LoaderBBTMemory) ||
587 (MdBlock->MemoryType == LoaderSpecialMemory))
588 {
589 /* These pages are not part of the PFN database */
590 NextEntry = MdBlock->ListEntry.Flink;
591 continue;
592 }
593
594 /* Next, check if this is our special free descriptor we've found */
595 if (MdBlock == MxFreeDescriptor)
596 {
597 /* Use the real numbers instead */
598 BasePage = MxOldFreeDescriptor.BasePage;
599 PageCount = MxOldFreeDescriptor.PageCount;
600 }
601 else
602 {
603 /* Use the descriptor's numbers */
604 BasePage = MdBlock->BasePage;
605 PageCount = MdBlock->PageCount;
606 }
607
608 /* Get the PTEs for this range */
609 PointerPte = MiAddressToPte(&MmPfnDatabase[BasePage]);
610 LastPte = MiAddressToPte(((ULONG_PTR)&MmPfnDatabase[BasePage + PageCount]) - 1);
611 DPRINT("MD Type: %lx Base: %lx Count: %lx\n", MdBlock->MemoryType, BasePage, PageCount);
612
613 /* Loop them */
614 while (PointerPte <= LastPte)
615 {
616 /* We'll only touch PTEs that aren't already valid */
617 if (PointerPte->u.Hard.Valid == 0)
618 {
619 /* Use the next free page */
620 TempPte.u.Hard.PageFrameNumber = FreePage;
621 ASSERT(FreePageCount != 0);
622
623 /* Consume free pages */
624 FreePage++;
625 FreePageCount--;
626 if (!FreePageCount)
627 {
628 /* Out of memory */
629 KeBugCheckEx(INSTALL_MORE_MEMORY,
630 MmNumberOfPhysicalPages,
631 FreePageCount,
632 MxOldFreeDescriptor.PageCount,
633 1);
634 }
635
636 /* Write out this PTE */
637 PagesLeft++;
638 MI_WRITE_VALID_PTE(PointerPte, TempPte);
639
640 /* Zero this page */
641 RtlZeroMemory(MiPteToAddress(PointerPte), PAGE_SIZE);
642 }
643
644 /* Next! */
645 PointerPte++;
646 }
647
648 /* Do the next address range */
649 NextEntry = MdBlock->ListEntry.Flink;
650 }
651
652 /* Now update the free descriptors to consume the pages we used up during the PFN allocation loop */
653 MxFreeDescriptor->BasePage = FreePage;
654 MxFreeDescriptor->PageCount = FreePageCount;
655 }
656
/*
 * Builds PFN database entries for every physical page that is currently
 * mapped, by walking the entire page directory and, for each valid PDE,
 * its page table. Both the page-table pages themselves and (selected)
 * data pages they map get initialized PFN entries.
 */
VOID
NTAPI
MiBuildPfnDatabaseFromPages(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
    PMMPDE PointerPde;
    PMMPTE PointerPte;
    ULONG i, Count, j;
    PFN_NUMBER PageFrameIndex, StartupPdIndex, PtePageIndex;
    PMMPFN Pfn1, Pfn2;
    ULONG_PTR BaseAddress = 0;  /* VA mapped by the PDE/PTE being scanned */

    /* PFN of the startup page directory */
    StartupPdIndex = PFN_FROM_PTE(MiAddressToPde(PDE_BASE));

    /* Start with the first PDE and scan them all */
    PointerPde = MiAddressToPde(NULL);
    Count = PD_COUNT * PDE_COUNT;
    for (i = 0; i < Count; i++)
    {
        /* Check for valid PDE */
        if (PointerPde->u.Hard.Valid == 1)
        {
            /* Get the PFN of the page table this PDE points to */
            PageFrameIndex = PFN_FROM_PTE(PointerPde);

            /* Do we want a PFN entry for this page? */
            if (MiIsRegularMemory(LoaderBlock, PageFrameIndex))
            {
                /* Yes we do, set it up: owned by the startup page directory,
                   active and valid, noncached */
                Pfn1 = MiGetPfnEntry(PageFrameIndex);
                Pfn1->u4.PteFrame = StartupPdIndex;
                Pfn1->PteAddress = PointerPde;
                Pfn1->u2.ShareCount++;
                Pfn1->u3.e2.ReferenceCount = 1;
                Pfn1->u3.e1.PageLocation = ActiveAndValid;
                Pfn1->u3.e1.CacheAttribute = MiNonCached;
            }
            else
            {
                /* No PFN entry (e.g. device memory) */
                Pfn1 = NULL;
            }

            /* Now get the PTE and scan the pages of this page table */
            PointerPte = MiAddressToPte(BaseAddress);
            for (j = 0; j < PTE_COUNT; j++)
            {
                /* Check for a valid PTE */
                if (PointerPte->u.Hard.Valid == 1)
                {
                    /* Increase the shared count of the PFN entry for the PDE;
                       a valid PTE implies the page table has regular memory */
                    ASSERT(Pfn1 != NULL);
                    Pfn1->u2.ShareCount++;

                    /* Now check if the PTE maps valid memory too */
                    PtePageIndex = PFN_FROM_PTE(PointerPte);
                    if (MiIsRegularMemory(LoaderBlock, PtePageIndex))
                    {
                        /*
                         * Only add pages above the end of system code or pages
                         * that are part of nonpaged pool
                         */
                        if ((BaseAddress >= 0xA0000000) ||
                            ((BaseAddress >= (ULONG_PTR)MmNonPagedPoolStart) &&
                             (BaseAddress < (ULONG_PTR)MmNonPagedPoolStart +
                                            MmSizeOfNonPagedPoolInBytes)))
                        {
                            /* Get the PFN entry and make sure it too is valid
                               (the PFN database may be sparsely mapped) */
                            Pfn2 = MiGetPfnEntry(PtePageIndex);
                            if ((MmIsAddressValid(Pfn2)) &&
                                (MmIsAddressValid(Pfn2 + 1)))
                            {
                                /* Setup the PFN entry: owned by this page
                                   table's page, active and valid, noncached */
                                Pfn2->u4.PteFrame = PageFrameIndex;
                                Pfn2->PteAddress = PointerPte;
                                Pfn2->u2.ShareCount++;
                                Pfn2->u3.e2.ReferenceCount = 1;
                                Pfn2->u3.e1.PageLocation = ActiveAndValid;
                                Pfn2->u3.e1.CacheAttribute = MiNonCached;
                            }
                        }
                    }
                }

                /* Next PTE: advance the VA by one page in lockstep */
                PointerPte++;
                BaseAddress += PAGE_SIZE;
            }
        }
        else
        {
            /* Invalid PDE: skip the whole VA range it would have mapped */
            BaseAddress += PDE_MAPPED_VA;
        }

        /* Next PDE */
        PointerPde++;
    }
}
756
757 VOID
758 NTAPI
759 MiBuildPfnDatabaseZeroPage(VOID)
760 {
761 PMMPFN Pfn1;
762 PMMPDE PointerPde;
763
764 /* Grab the lowest page and check if it has no real references */
765 Pfn1 = MiGetPfnEntry(MmLowestPhysicalPage);
766 if (!(MmLowestPhysicalPage) && !(Pfn1->u3.e2.ReferenceCount))
767 {
768 /* Make it a bogus page to catch errors */
769 PointerPde = MiAddressToPde(0xFFFFFFFF);
770 Pfn1->u4.PteFrame = PFN_FROM_PTE(PointerPde);
771 Pfn1->PteAddress = PointerPde;
772 Pfn1->u2.ShareCount++;
773 Pfn1->u3.e2.ReferenceCount = 0xFFF0;
774 Pfn1->u3.e1.PageLocation = ActiveAndValid;
775 Pfn1->u3.e1.CacheAttribute = MiNonCached;
776 }
777 }
778
/*
 * Completes the PFN database using the loader's memory descriptor list:
 * free descriptor types are released to the free page lists, invisible
 * firmware ranges are skipped, and everything else (boot images, page
 * tables, etc.) gets an in-use PFN entry based on its KSEG0 mapping.
 */
VOID
NTAPI
MiBuildPfnDatabaseFromLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
    PLIST_ENTRY NextEntry;
    PFN_NUMBER PageCount = 0;
    PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
    PFN_NUMBER PageFrameIndex;
    PMMPFN Pfn1;
    PMMPTE PointerPte;
    PMMPDE PointerPde;
    KIRQL OldIrql;

    /* Now loop through the descriptors */
    NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
    while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
    {
        /* Get the current descriptor */
        MdBlock = CONTAINING_RECORD(NextEntry,
                                    MEMORY_ALLOCATION_DESCRIPTOR,
                                    ListEntry);

        /* Read its data */
        PageCount = MdBlock->PageCount;
        PageFrameIndex = MdBlock->BasePage;

        /* Don't allow memory above what the PFN database is mapping */
        if (PageFrameIndex > MmHighestPhysicalPage)
        {
            /* Since they are ordered, everything past here will be larger */
            break;
        }

        /* On the other hand, the end page might be higher up... */
        if ((PageFrameIndex + PageCount) > (MmHighestPhysicalPage + 1))
        {
            /* In which case we'll trim the descriptor to go as high as we can */
            PageCount = MmHighestPhysicalPage + 1 - PageFrameIndex;
            MdBlock->PageCount = PageCount;

            /* But if there's nothing left to trim, we got too high, so quit */
            if (!PageCount) break;
        }

        /* Now check the descriptor type */
        switch (MdBlock->MemoryType)
        {
            /* Check for bad RAM */
            case LoaderBad:

                /* Deliberately hang here -- booting with bad RAM is unsafe.
                   NOTE(review): a bugcheck would arguably be cleaner. */
                DPRINT1("You have damaged RAM modules. Stopping boot\n");
                while (TRUE);
                break;

            /* Check for free RAM */
            case LoaderFree:
            case LoaderLoadedProgram:
            case LoaderFirmwareTemporary:
            case LoaderOsloaderStack:

                /* Get the last page of this descriptor. Note we loop backwards */
                PageFrameIndex += PageCount - 1;
                Pfn1 = MiGetPfnEntry(PageFrameIndex);

                /* Lock the PFN Database */
                OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
                while (PageCount--)
                {
                    /* If the page really has no references, mark it as free */
                    if (!Pfn1->u3.e2.ReferenceCount)
                    {
                        /* Add it to the free list */
                        Pfn1->u3.e1.CacheAttribute = MiNonCached;
                        MiInsertPageInFreeList(PageFrameIndex);
                    }

                    /* Go to the next (lower) page */
                    Pfn1--;
                    PageFrameIndex--;
                }

                /* Release PFN database */
                KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

                /* Done with this block */
                break;

            /* Check for pages that are invisible to us */
            case LoaderFirmwarePermanent:
            case LoaderSpecialMemory:
            case LoaderBBTMemory:

                /* And skip them */
                break;

            default:

                /* Map these pages with the KSEG0 mapping that adds 0x80000000 */
                PointerPte = MiAddressToPte(KSEG0_BASE + (PageFrameIndex << PAGE_SHIFT));
                Pfn1 = MiGetPfnEntry(PageFrameIndex);
                while (PageCount--)
                {
                    /* Check if the page is really unused; recompute the PDE
                       each iteration since the VA advances page by page */
                    PointerPde = MiAddressToPde(KSEG0_BASE + (PageFrameIndex << PAGE_SHIFT));
                    if (!Pfn1->u3.e2.ReferenceCount)
                    {
                        /* Mark it as being in-use */
                        Pfn1->u4.PteFrame = PFN_FROM_PTE(PointerPde);
                        Pfn1->PteAddress = PointerPte;
                        Pfn1->u2.ShareCount++;
                        Pfn1->u3.e2.ReferenceCount = 1;
                        Pfn1->u3.e1.PageLocation = ActiveAndValid;
                        Pfn1->u3.e1.CacheAttribute = MiNonCached;

                        /* Check for RAM disk page */
                        if (MdBlock->MemoryType == LoaderXIPRom)
                        {
                            /* Make it a pseudo-I/O ROM mapping */
                            Pfn1->u1.Flink = 0;
                            Pfn1->u2.ShareCount = 0;
                            Pfn1->u3.e2.ReferenceCount = 0;
                            Pfn1->u3.e1.PageLocation = 0;
                            Pfn1->u3.e1.Rom = 1;
                            Pfn1->u4.InPageError = 0;
                            Pfn1->u3.e1.PrototypePte = 1;
                        }
                    }

                    /* Advance page structures */
                    Pfn1++;
                    PageFrameIndex++;
                    PointerPte++;
                }
                break;
        }

        /* Next descriptor entry */
        NextEntry = MdBlock->ListEntry.Flink;
    }
}
919
920 VOID
921 NTAPI
922 MiBuildPfnDatabaseSelf(VOID)
923 {
924 PMMPTE PointerPte, LastPte;
925 PMMPFN Pfn1;
926
927 /* Loop the PFN database page */
928 PointerPte = MiAddressToPte(MiGetPfnEntry(MmLowestPhysicalPage));
929 LastPte = MiAddressToPte(MiGetPfnEntry(MmHighestPhysicalPage));
930 while (PointerPte <= LastPte)
931 {
932 /* Make sure the page is valid */
933 if (PointerPte->u.Hard.Valid == 1)
934 {
935 /* Get the PFN entry and just mark it referenced */
936 Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
937 Pfn1->u2.ShareCount = 1;
938 Pfn1->u3.e2.ReferenceCount = 1;
939 }
940
941 /* Next */
942 PointerPte++;
943 }
944 }
945
946 VOID
947 NTAPI
948 MiInitializePfnDatabase(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
949 {
950 /* Scan memory and start setting up PFN entries */
951 MiBuildPfnDatabaseFromPages(LoaderBlock);
952
953 /* Add the zero page */
954 MiBuildPfnDatabaseZeroPage();
955
956 /* Scan the loader block and build the rest of the PFN database */
957 MiBuildPfnDatabaseFromLoaderBlock(LoaderBlock);
958
959 /* Finally add the pages for the PFN database itself */
960 MiBuildPfnDatabaseSelf();
961 }
962
963 VOID
964 NTAPI
965 MiAdjustWorkingSetManagerParameters(IN BOOLEAN Client)
966 {
967 /* This function needs to do more work, for now, we tune page minimums */
968
969 /* Check for a system with around 64MB RAM or more */
970 if (MmNumberOfPhysicalPages >= (63 * _1MB) / PAGE_SIZE)
971 {
972 /* Double the minimum amount of pages we consider for a "plenty free" scenario */
973 MmPlentyFreePages *= 2;
974 }
975 }
976
977 VOID
978 NTAPI
979 MiNotifyMemoryEvents(VOID)
980 {
981 /* Are we in a low-memory situation? */
982 if (MmAvailablePages < MmLowMemoryThreshold)
983 {
984 /* Clear high, set low */
985 if (KeReadStateEvent(MiHighMemoryEvent)) KeClearEvent(MiHighMemoryEvent);
986 if (!KeReadStateEvent(MiLowMemoryEvent)) KeSetEvent(MiLowMemoryEvent, 0, FALSE);
987 }
988 else if (MmAvailablePages < MmHighMemoryThreshold)
989 {
990 /* We are in between, clear both */
991 if (KeReadStateEvent(MiHighMemoryEvent)) KeClearEvent(MiHighMemoryEvent);
992 if (KeReadStateEvent(MiLowMemoryEvent)) KeClearEvent(MiLowMemoryEvent);
993 }
994 else
995 {
996 /* Clear low, set high */
997 if (KeReadStateEvent(MiLowMemoryEvent)) KeClearEvent(MiLowMemoryEvent);
998 if (!KeReadStateEvent(MiHighMemoryEvent)) KeSetEvent(MiHighMemoryEvent, 0, FALSE);
999 }
1000 }
1001
1002 NTSTATUS
1003 NTAPI
1004 MiCreateMemoryEvent(IN PUNICODE_STRING Name,
1005 OUT PKEVENT *Event)
1006 {
1007 PACL Dacl;
1008 HANDLE EventHandle;
1009 ULONG DaclLength;
1010 NTSTATUS Status;
1011 OBJECT_ATTRIBUTES ObjectAttributes;
1012 SECURITY_DESCRIPTOR SecurityDescriptor;
1013
1014 /* Create the SD */
1015 Status = RtlCreateSecurityDescriptor(&SecurityDescriptor,
1016 SECURITY_DESCRIPTOR_REVISION);
1017 if (!NT_SUCCESS(Status)) return Status;
1018
1019 /* One ACL with 3 ACEs, containing each one SID */
1020 DaclLength = sizeof(ACL) +
1021 3 * sizeof(ACCESS_ALLOWED_ACE) +
1022 RtlLengthSid(SeLocalSystemSid) +
1023 RtlLengthSid(SeAliasAdminsSid) +
1024 RtlLengthSid(SeWorldSid);
1025
1026 /* Allocate space for the DACL */
1027 Dacl = ExAllocatePoolWithTag(PagedPool, DaclLength, 'lcaD');
1028 if (!Dacl) return STATUS_INSUFFICIENT_RESOURCES;
1029
1030 /* Setup the ACL inside it */
1031 Status = RtlCreateAcl(Dacl, DaclLength, ACL_REVISION);
1032 if (!NT_SUCCESS(Status)) goto CleanUp;
1033
1034 /* Add query rights for everyone */
1035 Status = RtlAddAccessAllowedAce(Dacl,
1036 ACL_REVISION,
1037 SYNCHRONIZE | EVENT_QUERY_STATE | READ_CONTROL,
1038 SeWorldSid);
1039 if (!NT_SUCCESS(Status)) goto CleanUp;
1040
1041 /* Full rights for the admin */
1042 Status = RtlAddAccessAllowedAce(Dacl,
1043 ACL_REVISION,
1044 EVENT_ALL_ACCESS,
1045 SeAliasAdminsSid);
1046 if (!NT_SUCCESS(Status)) goto CleanUp;
1047
1048 /* As well as full rights for the system */
1049 Status = RtlAddAccessAllowedAce(Dacl,
1050 ACL_REVISION,
1051 EVENT_ALL_ACCESS,
1052 SeLocalSystemSid);
1053 if (!NT_SUCCESS(Status)) goto CleanUp;
1054
1055 /* Set this DACL inside the SD */
1056 Status = RtlSetDaclSecurityDescriptor(&SecurityDescriptor,
1057 TRUE,
1058 Dacl,
1059 FALSE);
1060 if (!NT_SUCCESS(Status)) goto CleanUp;
1061
1062 /* Setup the event attributes, making sure it's a permanent one */
1063 InitializeObjectAttributes(&ObjectAttributes,
1064 Name,
1065 OBJ_KERNEL_HANDLE | OBJ_PERMANENT,
1066 NULL,
1067 &SecurityDescriptor);
1068
1069 /* Create the event */
1070 Status = ZwCreateEvent(&EventHandle,
1071 EVENT_ALL_ACCESS,
1072 &ObjectAttributes,
1073 NotificationEvent,
1074 FALSE);
1075 CleanUp:
1076 /* Free the DACL */
1077 ExFreePool(Dacl);
1078
1079 /* Check if this is the success path */
1080 if (NT_SUCCESS(Status))
1081 {
1082 /* Add a reference to the object, then close the handle we had */
1083 Status = ObReferenceObjectByHandle(EventHandle,
1084 EVENT_MODIFY_STATE,
1085 ExEventObjectType,
1086 KernelMode,
1087 (PVOID*)Event,
1088 NULL);
1089 ZwClose (EventHandle);
1090 }
1091
1092 /* Return status */
1093 return Status;
1094 }
1095
1096 BOOLEAN
1097 NTAPI
1098 MiInitializeMemoryEvents(VOID)
1099 {
1100 UNICODE_STRING LowString = RTL_CONSTANT_STRING(L"\\KernelObjects\\LowMemoryCondition");
1101 UNICODE_STRING HighString = RTL_CONSTANT_STRING(L"\\KernelObjects\\HighMemoryCondition");
1102 UNICODE_STRING LowPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\LowPagedPoolCondition");
1103 UNICODE_STRING HighPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\HighPagedPoolCondition");
1104 UNICODE_STRING LowNonPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\LowNonPagedPoolCondition");
1105 UNICODE_STRING HighNonPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\HighNonPagedPoolCondition");
1106 NTSTATUS Status;
1107
1108 /* Check if we have a registry setting */
1109 if (MmLowMemoryThreshold)
1110 {
1111 /* Convert it to pages */
1112 MmLowMemoryThreshold *= (_1MB / PAGE_SIZE);
1113 }
1114 else
1115 {
1116 /* The low memory threshold is hit when we don't consider that we have "plenty" of free pages anymore */
1117 MmLowMemoryThreshold = MmPlentyFreePages;
1118
1119 /* More than one GB of memory? */
1120 if (MmNumberOfPhysicalPages > 0x40000)
1121 {
1122 /* Start at 32MB, and add another 16MB for each GB */
1123 MmLowMemoryThreshold = (32 * _1MB) / PAGE_SIZE;
1124 MmLowMemoryThreshold += ((MmNumberOfPhysicalPages - 0x40000) >> 7);
1125 }
1126 else if (MmNumberOfPhysicalPages > 0x8000)
1127 {
1128 /* For systems with > 128MB RAM, add another 4MB for each 128MB */
1129 MmLowMemoryThreshold += ((MmNumberOfPhysicalPages - 0x8000) >> 5);
1130 }
1131
1132 /* Don't let the minimum threshold go past 64MB */
1133 MmLowMemoryThreshold = min(MmLowMemoryThreshold, (64 * _1MB) / PAGE_SIZE);
1134 }
1135
1136 /* Check if we have a registry setting */
1137 if (MmHighMemoryThreshold)
1138 {
1139 /* Convert it into pages */
1140 MmHighMemoryThreshold *= (_1MB / PAGE_SIZE);
1141 }
1142 else
1143 {
1144 /* Otherwise, the default is three times the low memory threshold */
1145 MmHighMemoryThreshold = 3 * MmLowMemoryThreshold;
1146 ASSERT(MmHighMemoryThreshold > MmLowMemoryThreshold);
1147 }
1148
1149 /* Make sure high threshold is actually higher than the low */
1150 MmHighMemoryThreshold = max(MmHighMemoryThreshold, MmLowMemoryThreshold);
1151
1152 /* Create the memory events for all the thresholds */
1153 Status = MiCreateMemoryEvent(&LowString, &MiLowMemoryEvent);
1154 if (!NT_SUCCESS(Status)) return FALSE;
1155 Status = MiCreateMemoryEvent(&HighString, &MiHighMemoryEvent);
1156 if (!NT_SUCCESS(Status)) return FALSE;
1157 Status = MiCreateMemoryEvent(&LowPagedPoolString, &MiLowPagedPoolEvent);
1158 if (!NT_SUCCESS(Status)) return FALSE;
1159 Status = MiCreateMemoryEvent(&HighPagedPoolString, &MiHighPagedPoolEvent);
1160 if (!NT_SUCCESS(Status)) return FALSE;
1161 Status = MiCreateMemoryEvent(&LowNonPagedPoolString, &MiLowNonPagedPoolEvent);
1162 if (!NT_SUCCESS(Status)) return FALSE;
1163 Status = MiCreateMemoryEvent(&HighNonPagedPoolString, &MiHighNonPagedPoolEvent);
1164 if (!NT_SUCCESS(Status)) return FALSE;
1165
1166 /* Now setup the pool events */
1167 MiInitializePoolEvents();
1168
1169 /* Set the initial event state */
1170 MiNotifyMemoryEvents();
1171 return TRUE;
1172 }
1173
/*
 * Scans the HAL heap region (0xFFC00000 up to the end of the address
 * space) for valid, non-large-page mappings whose physical pages are NOT
 * covered by the PFN database -- i.e. device/IO-mapped memory. For now
 * such mappings are only reported; cache-attribute tracking for PAT
 * coherency is still a FIXME.
 */
VOID
NTAPI
MiAddHalIoMappings(VOID)
{
    PVOID BaseAddress;
    PMMPTE PointerPde;
    PMMPTE PointerPte;
    ULONG i, j, PdeCount;
    PFN_NUMBER PageFrameIndex;

    /* HAL Heap address -- should be on a PDE boundary */
    BaseAddress = (PVOID)0xFFC00000;
    ASSERT(MiAddressToPteOffset(BaseAddress) == 0);

    /* Check how many PDEs the heap has (from its base up to the last PDE
       of the address space) */
    PointerPde = MiAddressToPde(BaseAddress);
    PdeCount = PDE_COUNT - ADDR_TO_PDE_OFFSET(BaseAddress);
    for (i = 0; i < PdeCount; i++)
    {
        /* Does the HAL own this mapping? Large-page PDEs are skipped here.
           NOTE(review): presumably large pages are handled by the cached
           range sync elsewhere -- confirm */
        if ((PointerPde->u.Hard.Valid == 1) &&
            (PointerPde->u.Hard.LargePage == 0))
        {
            /* Get the PTE for it and scan each page in this PDE's range */
            PointerPte = MiAddressToPte(BaseAddress);
            for (j = 0 ; j < PTE_COUNT; j++)
            {
                /* Does the HAL own this page? */
                if (PointerPte->u.Hard.Valid == 1)
                {
                    /* Is the HAL using it for device or I/O mapped memory?
                       (No PFN entry means the frame is outside RAM) */
                    PageFrameIndex = PFN_FROM_PTE(PointerPte);
                    if (!MiGetPfnEntry(PageFrameIndex))
                    {
                        /* FIXME: For PAT, we need to track I/O cache attributes for coherency */
                        DPRINT1("HAL I/O Mapping at %p is unsafe\n", BaseAddress);
                    }
                }

                /* Move to the next page; BaseAddress is kept in sync with
                   PointerPte so the DPRINT above reports the right VA */
                BaseAddress = (PVOID)((ULONG_PTR)BaseAddress + PAGE_SIZE);
                PointerPte++;
            }
        }
        else
        {
            /* Not scanned page-by-page: skip the whole PDE's VA range */
            BaseAddress = (PVOID)((ULONG_PTR)BaseAddress + PDE_MAPPED_VA);
        }

        /* Move to the next PDE */
        PointerPde++;
    }
}
1228
1229 VOID
1230 NTAPI
1231 MmDumpArmPfnDatabase(VOID)
1232 {
1233 ULONG i;
1234 PMMPFN Pfn1;
1235 PCHAR Consumer = "Unknown";
1236 KIRQL OldIrql;
1237 ULONG ActivePages = 0, FreePages = 0, OtherPages = 0;
1238
1239 KeRaiseIrql(HIGH_LEVEL, &OldIrql);
1240
1241 //
1242 // Loop the PFN database
1243 //
1244 for (i = 0; i <= MmHighestPhysicalPage; i++)
1245 {
1246 Pfn1 = MiGetPfnEntry(i);
1247 if (!Pfn1) continue;
1248
1249 //
1250 // Get the page location
1251 //
1252 switch (Pfn1->u3.e1.PageLocation)
1253 {
1254 case ActiveAndValid:
1255
1256 Consumer = "Active and Valid";
1257 ActivePages++;
1258 break;
1259
1260 case FreePageList:
1261
1262 Consumer = "Free Page List";
1263 FreePages++;
1264 break;
1265
1266 default:
1267
1268 Consumer = "Other (ASSERT!)";
1269 OtherPages++;
1270 break;
1271 }
1272
1273 //
1274 // Pretty-print the page
1275 //
1276 DbgPrint("0x%08p:\t%20s\t(%02d.%02d) [%08p-%08p])\n",
1277 i << PAGE_SHIFT,
1278 Consumer,
1279 Pfn1->u3.e2.ReferenceCount,
1280 Pfn1->u2.ShareCount,
1281 Pfn1->PteAddress,
1282 Pfn1->u4.PteFrame);
1283 }
1284
1285 DbgPrint("Active: %d pages\t[%d KB]\n", ActivePages, (ActivePages << PAGE_SHIFT) / 1024);
1286 DbgPrint("Free: %d pages\t[%d KB]\n", FreePages, (FreePages << PAGE_SHIFT) / 1024);
1287 DbgPrint("Other: %d pages\t[%d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
1288
1289 KeLowerIrql(OldIrql);
1290 }
1291
1292 PFN_NUMBER
1293 NTAPI
1294 MiPagesInLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock,
1295 IN PBOOLEAN IncludeType)
1296 {
1297 PLIST_ENTRY NextEntry;
1298 PFN_NUMBER PageCount = 0;
1299 PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
1300
1301 //
1302 // Now loop through the descriptors
1303 //
1304 NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
1305 while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
1306 {
1307 //
1308 // Grab each one, and check if it's one we should include
1309 //
1310 MdBlock = CONTAINING_RECORD(NextEntry,
1311 MEMORY_ALLOCATION_DESCRIPTOR,
1312 ListEntry);
1313 if ((MdBlock->MemoryType < LoaderMaximum) &&
1314 (IncludeType[MdBlock->MemoryType]))
1315 {
1316 //
1317 // Add this to our running total
1318 //
1319 PageCount += MdBlock->PageCount;
1320 }
1321
1322 //
1323 // Try the next descriptor
1324 //
1325 NextEntry = MdBlock->ListEntry.Flink;
1326 }
1327
1328 //
1329 // Return the total
1330 //
1331 return PageCount;
1332 }
1333
/*
 * Builds a PHYSICAL_MEMORY_DESCRIPTOR (nonpaged pool allocation, tag
 * 'MmMl') describing all physical memory runs whose loader memory type
 * is selected in IncludeType. Physically adjacent descriptors are
 * coalesced into a single run. Returns NULL if the initial allocation
 * fails; the caller owns the returned buffer.
 */
PPHYSICAL_MEMORY_DESCRIPTOR
NTAPI
MmInitializeMemoryLimits(IN PLOADER_PARAMETER_BLOCK LoaderBlock,
                         IN PBOOLEAN IncludeType)
{
    PLIST_ENTRY NextEntry;
    ULONG Run = 0, InitialRuns = 0;
    /* NextPage starts at (PFN_NUMBER)-1: a sentinel no descriptor can match */
    PFN_NUMBER NextPage = -1, PageCount = 0;
    PPHYSICAL_MEMORY_DESCRIPTOR Buffer, NewBuffer;
    PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;

    //
    // Scan the memory descriptors
    //
    NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
    while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
    {
        //
        // For each one, increase the memory allocation estimate
        // (worst case: one run per descriptor, no coalescing)
        //
        InitialRuns++;
        NextEntry = NextEntry->Flink;
    }

    //
    // Allocate the maximum we'll ever need
    // (the descriptor already embeds one run, hence InitialRuns - 1)
    //
    Buffer = ExAllocatePoolWithTag(NonPagedPool,
                                   sizeof(PHYSICAL_MEMORY_DESCRIPTOR) +
                                   sizeof(PHYSICAL_MEMORY_RUN) *
                                   (InitialRuns - 1),
                                   'lMmM');
    if (!Buffer) return NULL;

    //
    // For now that's how many runs we have
    //
    Buffer->NumberOfRuns = InitialRuns;

    //
    // Now loop through the descriptors again
    //
    NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
    while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
    {
        //
        // Grab each one, and check if it's one we should include
        //
        MdBlock = CONTAINING_RECORD(NextEntry,
                                    MEMORY_ALLOCATION_DESCRIPTOR,
                                    ListEntry);
        if ((MdBlock->MemoryType < LoaderMaximum) &&
            (IncludeType[MdBlock->MemoryType]))
        {
            //
            // Add this to our running total
            //
            PageCount += MdBlock->PageCount;

            //
            // Check if the next page is described by the next descriptor.
            // NOTE(review): coalescing only works if the loader list is
            // sorted by base page -- assumed here; confirm with the loader.
            //
            if (MdBlock->BasePage == NextPage)
            {
                //
                // Combine it into the same physical run
                // (only reachable once Run > 0, since NextPage started
                // at the unmatched sentinel)
                //
                ASSERT(MdBlock->PageCount != 0);
                Buffer->Run[Run - 1].PageCount += MdBlock->PageCount;
                NextPage += MdBlock->PageCount;
            }
            else
            {
                //
                // Otherwise just duplicate the descriptor's contents
                //
                Buffer->Run[Run].BasePage = MdBlock->BasePage;
                Buffer->Run[Run].PageCount = MdBlock->PageCount;
                NextPage = Buffer->Run[Run].BasePage + Buffer->Run[Run].PageCount;

                //
                // And in this case, increase the number of runs
                //
                Run++;
            }
        }

        //
        // Try the next descriptor
        //
        NextEntry = MdBlock->ListEntry.Flink;
    }

    //
    // We should not have been able to go past our initial estimate
    //
    ASSERT(Run <= Buffer->NumberOfRuns);

    //
    // Our guess was probably exaggerated...
    //
    if (InitialRuns > Run)
    {
        //
        // Allocate a more accurately sized buffer
        //
        NewBuffer = ExAllocatePoolWithTag(NonPagedPool,
                                          sizeof(PHYSICAL_MEMORY_DESCRIPTOR) +
                                          sizeof(PHYSICAL_MEMORY_RUN) *
                                          (Run - 1),
                                          'lMmM');
        if (NewBuffer)
        {
            //
            // Copy the old buffer into the new, then free it
            // (if the allocation failed we just keep the big buffer)
            //
            RtlCopyMemory(NewBuffer->Run,
                          Buffer->Run,
                          sizeof(PHYSICAL_MEMORY_RUN) * Run);
            ExFreePool(Buffer);

            //
            // Now use the new buffer
            //
            Buffer = NewBuffer;
        }
    }

    //
    // Write the final numbers, and return it
    //
    Buffer->NumberOfRuns = Run;
    Buffer->NumberOfPages = PageCount;
    return Buffer;
}
1469
/*
 * Sizes and initializes paged pool: records the system page directory
 * PFN, double-maps the page directory through a system PTE for lazy PDE
 * evaluation, computes the paged pool VA size (twice nonpaged pool,
 * clamped to the available VA window, minimum 32MB), maps the first
 * paged pool PDE, sets up the allocation and end-of-allocation bitmaps,
 * and finally calls into the pool manager and sets pool event thresholds.
 */
VOID
NTAPI
MiBuildPagedPool(VOID)
{
    PMMPTE PointerPte, PointerPde;
    MMPTE TempPte = ValidKernelPte;
    PFN_NUMBER PageFrameIndex;
    KIRQL OldIrql;
    ULONG Size, BitMapSize;

    //
    // Get the page frame number for the system page directory
    // (this code only handles a single page directory page)
    //
    PointerPte = MiAddressToPte(PDE_BASE);
    ASSERT(PD_COUNT == 1);
    MmSystemPageDirectory[0] = PFN_FROM_PTE(PointerPte);

    //
    // Allocate a system PTE which will hold a copy of the page directory
    //
    PointerPte = MiReserveSystemPtes(1, SystemPteSpace);
    ASSERT(PointerPte);
    MmSystemPagePtes = MiPteToAddress(PointerPte);

    //
    // Make this system PTE point to the system page directory.
    // It is now essentially double-mapped. This will be used later for lazy
    // evaluation of PDEs accross process switches, similarly to how the Global
    // page directory array in the old ReactOS Mm is used (but in a less hacky
    // way).
    //
    TempPte = ValidKernelPte;
    ASSERT(PD_COUNT == 1);
    TempPte.u.Hard.PageFrameNumber = MmSystemPageDirectory[0];
    MI_WRITE_VALID_PTE(PointerPte, TempPte);

    //
    // Let's get back to paged pool work: size it up.
    // By default, it should be twice as big as nonpaged pool.
    //
    MmSizeOfPagedPoolInBytes = 2 * MmMaximumNonPagedPoolInBytes;
    if (MmSizeOfPagedPoolInBytes > ((ULONG_PTR)MmNonPagedSystemStart -
                                    (ULONG_PTR)MmPagedPoolStart))
    {
        //
        // On the other hand, we have limited VA space, so make sure that the VA
        // for paged pool doesn't overflow into nonpaged pool VA. Otherwise, set
        // whatever maximum is possible.
        //
        MmSizeOfPagedPoolInBytes = (ULONG_PTR)MmNonPagedSystemStart -
                                   (ULONG_PTR)MmPagedPoolStart;
    }

    //
    // Get the size in pages and make sure paged pool is at least 32MB.
    //
    Size = MmSizeOfPagedPoolInBytes;
    if (Size < MI_MIN_INIT_PAGED_POOLSIZE) Size = MI_MIN_INIT_PAGED_POOLSIZE;
    Size = BYTES_TO_PAGES(Size);

    //
    // Now check how many PTEs will be required for these many pages
    // (1024 PTEs per page table page, rounded up).
    //
    Size = (Size + (1024 - 1)) / 1024;

    //
    // Recompute the page-aligned size of the paged pool, in bytes and pages.
    //
    MmSizeOfPagedPoolInBytes = Size * PAGE_SIZE * 1024;
    MmSizeOfPagedPoolInPages = MmSizeOfPagedPoolInBytes >> PAGE_SHIFT;

    //
    // Let's be really sure this doesn't overflow into nonpaged system VA
    //
    ASSERT((MmSizeOfPagedPoolInBytes + (ULONG_PTR)MmPagedPoolStart) <=
           (ULONG_PTR)MmNonPagedSystemStart);

    //
    // This is where paged pool ends
    //
    MmPagedPoolEnd = (PVOID)(((ULONG_PTR)MmPagedPoolStart +
                              MmSizeOfPagedPoolInBytes) - 1);

    //
    // So now get the PDE for paged pool and zero it out
    // (no paged pool PDEs are mapped yet; they fault in on demand)
    //
    PointerPde = MiAddressToPde(MmPagedPoolStart);
    RtlZeroMemory(PointerPde,
                  (1 + MiAddressToPde(MmPagedPoolEnd) - PointerPde) * sizeof(MMPTE));

    //
    // Next, get the first and last PTE
    //
    PointerPte = MiAddressToPte(MmPagedPoolStart);
    MmPagedPoolInfo.FirstPteForPagedPool = PointerPte;
    MmPagedPoolInfo.LastPteForPagedPool = MiAddressToPte(MmPagedPoolEnd);

    //
    // Lock the PFN database (page allocation below requires it)
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    /* Allocate a page and map the first paged pool PDE */
    PageFrameIndex = MiRemoveZeroPage(0);
    TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
    MI_WRITE_VALID_PTE(PointerPde, TempPte);

    /* Initialize the PFN entry for it, charging it to the system page
       directory rather than to a process */
    MiInitializePfnForOtherProcess(PageFrameIndex,
                                   PointerPde,
                                   MmSystemPageDirectory[(PointerPde - (PMMPTE)PDE_BASE) / PDE_COUNT]);

    //
    // Release the PFN database lock
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    //
    // We only have one PDE mapped for now... at fault time, additional PDEs
    // will be allocated to handle paged pool growth. This is where they'll have
    // to start.
    //
    MmPagedPoolInfo.NextPdeForPagedPoolExpansion = PointerPde + 1;

    //
    // We keep track of each page via a bit, so check how big the bitmap will
    // have to be (make sure to align our page count such that it fits nicely
    // into a 4-byte aligned bitmap.
    //
    // We'll also allocate the bitmap header itself part of the same buffer.
    //
    Size = Size * 1024;
    ASSERT(Size == MmSizeOfPagedPoolInPages);
    BitMapSize = Size;
    Size = sizeof(RTL_BITMAP) + (((Size + 31) / 32) * sizeof(ULONG));

    //
    // Allocate the allocation bitmap, which tells us which regions have not yet
    // been mapped into memory
    //
    MmPagedPoolInfo.PagedPoolAllocationMap = ExAllocatePoolWithTag(NonPagedPool,
                                                                   Size,
                                                                   '  mM');
    ASSERT(MmPagedPoolInfo.PagedPoolAllocationMap);

    //
    // Initialize it such that at first, only the first page's worth of PTEs is
    // marked as allocated (incidentially, the first PDE we allocated earlier).
    // A set bit means "not yet available"; only the first 1024 are cleared.
    //
    RtlInitializeBitMap(MmPagedPoolInfo.PagedPoolAllocationMap,
                        (PULONG)(MmPagedPoolInfo.PagedPoolAllocationMap + 1),
                        BitMapSize);
    RtlSetAllBits(MmPagedPoolInfo.PagedPoolAllocationMap);
    RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap, 0, 1024);

    //
    // We have a second bitmap, which keeps track of where allocations end.
    // Given the allocation bitmap and a base address, we can therefore figure
    // out which page is the last page of that allocation, and thus how big the
    // entire allocation is.
    //
    MmPagedPoolInfo.EndOfPagedPoolBitmap = ExAllocatePoolWithTag(NonPagedPool,
                                                                 Size,
                                                                 '  mM');
    ASSERT(MmPagedPoolInfo.EndOfPagedPoolBitmap);
    RtlInitializeBitMap(MmPagedPoolInfo.EndOfPagedPoolBitmap,
                        (PULONG)(MmPagedPoolInfo.EndOfPagedPoolBitmap + 1),
                        BitMapSize);

    //
    // Since no allocations have been made yet, there are no bits set as the end
    //
    RtlClearAllBits(MmPagedPoolInfo.EndOfPagedPoolBitmap);

    //
    // Initialize paged pool.
    //
    InitializePool(PagedPool, 0);

    /* Default low threshold of 30MB or one fifth of paged pool */
    MiLowPagedPoolThreshold = (30 * _1MB) >> PAGE_SHIFT;
    MiLowPagedPoolThreshold = min(MiLowPagedPoolThreshold, Size / 5);

    /* Default high threshold of 60MB or 25% */
    MiHighPagedPoolThreshold = (60 * _1MB) >> PAGE_SHIFT;
    MiHighPagedPoolThreshold = min(MiHighPagedPoolThreshold, (Size * 2) / 5);
    ASSERT(MiLowPagedPoolThreshold < MiHighPagedPoolThreshold);
}
1658
1659 NTSTATUS
1660 NTAPI
1661 MmArmInitSystem(IN ULONG Phase,
1662 IN PLOADER_PARAMETER_BLOCK LoaderBlock)
1663 {
1664 ULONG i;
1665 BOOLEAN IncludeType[LoaderMaximum];
1666 PVOID Bitmap;
1667 PPHYSICAL_MEMORY_RUN Run;
1668 PFN_NUMBER PageCount;
1669
1670 //
1671 // Instantiate memory that we don't consider RAM/usable
1672 // We use the same exclusions that Windows does, in order to try to be
1673 // compatible with WinLDR-style booting
1674 //
1675 for (i = 0; i < LoaderMaximum; i++) IncludeType[i] = TRUE;
1676 IncludeType[LoaderBad] = FALSE;
1677 IncludeType[LoaderFirmwarePermanent] = FALSE;
1678 IncludeType[LoaderSpecialMemory] = FALSE;
1679 IncludeType[LoaderBBTMemory] = FALSE;
1680 if (Phase == 0)
1681 {
1682 /* Initialize the phase 0 temporary event */
1683 KeInitializeEvent(&MiTempEvent, NotificationEvent, FALSE);
1684
1685 /* Set all the events to use the temporary event for now */
1686 MiLowMemoryEvent = &MiTempEvent;
1687 MiHighMemoryEvent = &MiTempEvent;
1688 MiLowPagedPoolEvent = &MiTempEvent;
1689 MiHighPagedPoolEvent = &MiTempEvent;
1690 MiLowNonPagedPoolEvent = &MiTempEvent;
1691 MiHighNonPagedPoolEvent = &MiTempEvent;
1692
1693 //
1694 // Define the basic user vs. kernel address space separation
1695 //
1696 MmSystemRangeStart = (PVOID)KSEG0_BASE;
1697 MmUserProbeAddress = (ULONG_PTR)MmSystemRangeStart - 0x10000;
1698 MmHighestUserAddress = (PVOID)(MmUserProbeAddress - 1);
1699
1700 /* Highest PTE and PDE based on the addresses above */
1701 MiHighestUserPte = MiAddressToPte(MmHighestUserAddress);
1702 MiHighestUserPde = MiAddressToPde(MmHighestUserAddress);
1703
1704 //
1705 // Get the size of the boot loader's image allocations and then round
1706 // that region up to a PDE size, so that any PDEs we might create for
1707 // whatever follows are separate from the PDEs that boot loader might've
1708 // already created (and later, we can blow all that away if we want to).
1709 //
1710 MmBootImageSize = KeLoaderBlock->Extension->LoaderPagesSpanned;
1711 MmBootImageSize *= PAGE_SIZE;
1712 MmBootImageSize = (MmBootImageSize + PDE_MAPPED_VA - 1) & ~(PDE_MAPPED_VA - 1);
1713 ASSERT((MmBootImageSize % PDE_MAPPED_VA) == 0);
1714
1715 //
1716 // Set the size of session view, pool, and image
1717 //
1718 MmSessionSize = MI_SESSION_SIZE;
1719 MmSessionViewSize = MI_SESSION_VIEW_SIZE;
1720 MmSessionPoolSize = MI_SESSION_POOL_SIZE;
1721 MmSessionImageSize = MI_SESSION_IMAGE_SIZE;
1722
1723 //
1724 // Set the size of system view
1725 //
1726 MmSystemViewSize = MI_SYSTEM_VIEW_SIZE;
1727
1728 //
1729 // This is where it all ends
1730 //
1731 MiSessionImageEnd = (PVOID)PTE_BASE;
1732
1733 //
1734 // This is where we will load Win32k.sys and the video driver
1735 //
1736 MiSessionImageStart = (PVOID)((ULONG_PTR)MiSessionImageEnd -
1737 MmSessionImageSize);
1738
1739 //
1740 // So the view starts right below the session working set (itself below
1741 // the image area)
1742 //
1743 MiSessionViewStart = (PVOID)((ULONG_PTR)MiSessionImageEnd -
1744 MmSessionImageSize -
1745 MI_SESSION_WORKING_SET_SIZE -
1746 MmSessionViewSize);
1747
1748 //
1749 // Session pool follows
1750 //
1751 MiSessionPoolEnd = MiSessionViewStart;
1752 MiSessionPoolStart = (PVOID)((ULONG_PTR)MiSessionPoolEnd -
1753 MmSessionPoolSize);
1754
1755 //
1756 // And it all begins here
1757 //
1758 MmSessionBase = MiSessionPoolStart;
1759
1760 //
1761 // Sanity check that our math is correct
1762 //
1763 ASSERT((ULONG_PTR)MmSessionBase + MmSessionSize == PTE_BASE);
1764
1765 //
1766 // Session space ends wherever image session space ends
1767 //
1768 MiSessionSpaceEnd = MiSessionImageEnd;
1769
1770 //
1771 // System view space ends at session space, so now that we know where
1772 // this is, we can compute the base address of system view space itself.
1773 //
1774 MiSystemViewStart = (PVOID)((ULONG_PTR)MmSessionBase -
1775 MmSystemViewSize);
1776
1777 /* Compute the PTE addresses for all the addresses we carved out */
1778 MiSessionImagePteStart = MiAddressToPte(MiSessionImageStart);
1779 MiSessionImagePteEnd = MiAddressToPte(MiSessionImageEnd);
1780 MiSessionBasePte = MiAddressToPte(MmSessionBase);
1781 MiSessionLastPte = MiAddressToPte(MiSessionSpaceEnd);
1782
1783 /* Initialize the user mode image list */
1784 InitializeListHead(&MmLoadedUserImageList);
1785
1786 /* Initialize the paged pool mutex */
1787 KeInitializeGuardedMutex(&MmPagedPoolMutex);
1788
1789 /* Initialize the Loader Lock */
1790 KeInitializeMutant(&MmSystemLoadLock, FALSE);
1791
1792 //
1793 // Count physical pages on the system
1794 //
1795 PageCount = MiPagesInLoaderBlock(LoaderBlock, IncludeType);
1796
1797 //
1798 // Check if this is a machine with less than 19MB of RAM
1799 //
1800 if (PageCount < MI_MIN_PAGES_FOR_SYSPTE_TUNING)
1801 {
1802 //
1803 // Use the very minimum of system PTEs
1804 //
1805 MmNumberOfSystemPtes = 7000;
1806 }
1807 else
1808 {
1809 //
1810 // Use the default, but check if we have more than 32MB of RAM
1811 //
1812 MmNumberOfSystemPtes = 11000;
1813 if (PageCount > MI_MIN_PAGES_FOR_SYSPTE_BOOST)
1814 {
1815 //
1816 // Double the amount of system PTEs
1817 //
1818 MmNumberOfSystemPtes <<= 1;
1819 }
1820 }
1821
1822 DPRINT("System PTE count has been tuned to %d (%d bytes)\n",
1823 MmNumberOfSystemPtes, MmNumberOfSystemPtes * PAGE_SIZE);
1824
1825 /* Initialize the working set lock */
1826 ExInitializePushLock((PULONG_PTR)&MmSystemCacheWs.WorkingSetMutex);
1827
1828 /* Set commit limit */
1829 MmTotalCommitLimit = 2 * _1GB;
1830 MmTotalCommitLimitMaximum = MmTotalCommitLimit;
1831
1832 /* Has the allocation fragment been setup? */
1833 if (!MmAllocationFragment)
1834 {
1835 /* Use the default value */
1836 MmAllocationFragment = MI_ALLOCATION_FRAGMENT;
1837 if (PageCount < ((256 * _1MB) / PAGE_SIZE))
1838 {
1839 /* On memory systems with less than 256MB, divide by 4 */
1840 MmAllocationFragment = MI_ALLOCATION_FRAGMENT / 4;
1841 }
1842 else if (PageCount < (_1GB / PAGE_SIZE))
1843 {
1844 /* On systems with less than 1GB, divide by 2 */
1845 MmAllocationFragment = MI_ALLOCATION_FRAGMENT / 2;
1846 }
1847 }
1848 else
1849 {
1850 /* Convert from 1KB fragments to pages */
1851 MmAllocationFragment *= _1KB;
1852 MmAllocationFragment = ROUND_TO_PAGES(MmAllocationFragment);
1853
1854 /* Don't let it past the maximum */
1855 MmAllocationFragment = min(MmAllocationFragment,
1856 MI_MAX_ALLOCATION_FRAGMENT);
1857
1858 /* Don't let it too small either */
1859 MmAllocationFragment = max(MmAllocationFragment,
1860 MI_MIN_ALLOCATION_FRAGMENT);
1861 }
1862
1863 /* Initialize the platform-specific parts */
1864 MiInitMachineDependent(LoaderBlock);
1865
1866 //
1867 // Sync us up with ReactOS Mm
1868 //
1869 MiSyncARM3WithROS(MmNonPagedSystemStart, (PVOID)((ULONG_PTR)MmNonPagedPoolEnd - 1));
1870 MiSyncARM3WithROS(MmPfnDatabase, (PVOID)((ULONG_PTR)MmNonPagedPoolStart + MmSizeOfNonPagedPoolInBytes - 1));
1871 MiSyncARM3WithROS((PVOID)HYPER_SPACE, (PVOID)(HYPER_SPACE + PAGE_SIZE - 1));
1872
1873 //
1874 // Build the physical memory block
1875 //
1876 MmPhysicalMemoryBlock = MmInitializeMemoryLimits(LoaderBlock,
1877 IncludeType);
1878
1879 //
1880 // Allocate enough buffer for the PFN bitmap
1881 // Align it up to a 32-bit boundary
1882 //
1883 Bitmap = ExAllocatePoolWithTag(NonPagedPool,
1884 (((MmHighestPhysicalPage + 1) + 31) / 32) * 4,
1885 ' mM');
1886 if (!Bitmap)
1887 {
1888 //
1889 // This is critical
1890 //
1891 KeBugCheckEx(INSTALL_MORE_MEMORY,
1892 MmNumberOfPhysicalPages,
1893 MmLowestPhysicalPage,
1894 MmHighestPhysicalPage,
1895 0x101);
1896 }
1897
1898 //
1899 // Initialize it and clear all the bits to begin with
1900 //
1901 RtlInitializeBitMap(&MiPfnBitMap,
1902 Bitmap,
1903 MmHighestPhysicalPage + 1);
1904 RtlClearAllBits(&MiPfnBitMap);
1905
1906 //
1907 // Loop physical memory runs
1908 //
1909 for (i = 0; i < MmPhysicalMemoryBlock->NumberOfRuns; i++)
1910 {
1911 //
1912 // Get the run
1913 //
1914 Run = &MmPhysicalMemoryBlock->Run[i];
1915 DPRINT("PHYSICAL RAM [0x%08p to 0x%08p]\n",
1916 Run->BasePage << PAGE_SHIFT,
1917 (Run->BasePage + Run->PageCount) << PAGE_SHIFT);
1918
1919 //
1920 // Make sure it has pages inside it
1921 //
1922 if (Run->PageCount)
1923 {
1924 //
1925 // Set the bits in the PFN bitmap
1926 //
1927 RtlSetBits(&MiPfnBitMap, Run->BasePage, Run->PageCount);
1928 }
1929 }
1930
1931 /* Look for large page cache entries that need caching */
1932 MiSyncCachedRanges();
1933
1934 /* Loop for HAL Heap I/O device mappings that need coherency tracking */
1935 MiAddHalIoMappings();
1936
1937 /* Set the initial resident page count */
1938 MmResidentAvailablePages = MmAvailablePages - 32;
1939
1940 /* Initialize large page structures on PAE/x64, and MmProcessList on x86 */
1941 MiInitializeLargePageSupport();
1942
1943 /* Check if the registry says any drivers should be loaded with large pages */
1944 MiInitializeDriverLargePageList();
1945
1946 /* Relocate the boot drivers into system PTE space and fixup their PFNs */
1947 MiReloadBootLoadedDrivers(LoaderBlock);
1948
1949 /* FIXME: Call out into Driver Verifier for initialization */
1950
1951 /* Check how many pages the system has */
1952 if (MmNumberOfPhysicalPages <= ((13 * _1MB) / PAGE_SIZE))
1953 {
1954 /* Set small system */
1955 MmSystemSize = MmSmallSystem;
1956 }
1957 else if (MmNumberOfPhysicalPages <= ((19 * _1MB) / PAGE_SIZE))
1958 {
1959 /* Set small system and add 100 pages for the cache */
1960 MmSystemSize = MmSmallSystem;
1961 MmSystemCacheWsMinimum += 100;
1962 }
1963 else
1964 {
1965 /* Set medium system and add 400 pages for the cache */
1966 MmSystemSize = MmMediumSystem;
1967 MmSystemCacheWsMinimum += 400;
1968 }
1969
1970 /* Check for less than 24MB */
1971 if (MmNumberOfPhysicalPages < ((24 * _1MB) / PAGE_SIZE))
1972 {
1973 /* No more than 32 pages */
1974 MmSystemCacheWsMinimum = 32;
1975 }
1976
1977 /* Check for more than 32MB */
1978 if (MmNumberOfPhysicalPages >= ((32 * _1MB) / PAGE_SIZE))
1979 {
1980 /* Check for product type being "Wi" for WinNT */
1981 if (MmProductType == '\0i\0W')
1982 {
1983 /* Then this is a large system */
1984 MmSystemSize = MmLargeSystem;
1985 }
1986 else
1987 {
1988 /* For servers, we need 64MB to consider this as being large */
1989 if (MmNumberOfPhysicalPages >= ((64 * _1MB) / PAGE_SIZE))
1990 {
1991 /* Set it as large */
1992 MmSystemSize = MmLargeSystem;
1993 }
1994 }
1995 }
1996
1997 /* Check for more than 33 MB */
1998 if (MmNumberOfPhysicalPages > ((33 * _1MB) / PAGE_SIZE))
1999 {
2000 /* Add another 500 pages to the cache */
2001 MmSystemCacheWsMinimum += 500;
2002 }
2003
2004 /* Now setup the shared user data fields */
2005 ASSERT(SharedUserData->NumberOfPhysicalPages == 0);
2006 SharedUserData->NumberOfPhysicalPages = MmNumberOfPhysicalPages;
2007 SharedUserData->LargePageMinimum = 0;
2008
2009 /* Check for workstation (Wi for WinNT) */
2010 if (MmProductType == '\0i\0W')
2011 {
2012 /* Set Windows NT Workstation product type */
2013 SharedUserData->NtProductType = NtProductWinNt;
2014 MmProductType = 0;
2015 }
2016 else
2017 {
2018 /* Check for LanMan server */
2019 if (MmProductType == '\0a\0L')
2020 {
2021 /* This is a domain controller */
2022 SharedUserData->NtProductType = NtProductLanManNt;
2023 }
2024 else
2025 {
2026 /* Otherwise it must be a normal server */
2027 SharedUserData->NtProductType = NtProductServer;
2028 }
2029
2030 /* Set the product type, and make the system more aggressive with low memory */
2031 MmProductType = 1;
2032 MmMinimumFreePages = 81;
2033 }
2034
2035 /* Update working set tuning parameters */
2036 MiAdjustWorkingSetManagerParameters(!MmProductType);
2037
2038 /* Finetune the page count by removing working set and NP expansion */
2039 MmResidentAvailablePages -= MiExpansionPoolPagesInitialCharge;
2040 MmResidentAvailablePages -= MmSystemCacheWsMinimum;
2041 MmResidentAvailableAtInit = MmResidentAvailablePages;
2042 if (MmResidentAvailablePages <= 0)
2043 {
2044 /* This should not happen */
2045 DPRINT1("System cache working set too big\n");
2046 return FALSE;
2047 }
2048
2049 /* Initialize the system cache */
2050 //MiInitializeSystemCache(MmSystemCacheWsMinimum, MmAvailablePages);
2051
2052 /* Update the commit limit */
2053 MmTotalCommitLimit = MmAvailablePages;
2054 if (MmTotalCommitLimit > 1024) MmTotalCommitLimit -= 1024;
2055 MmTotalCommitLimitMaximum = MmTotalCommitLimit;
2056
2057 /* Size up paged pool and build the shadow system page directory */
2058 MiBuildPagedPool();
2059
2060 /* Debugger physical memory support is now ready to be used */
2061 MmDebugPte = MiAddressToPte(MiDebugMapping);
2062
2063 /* Initialize the loaded module list */
2064 MiInitializeLoadedModuleList(LoaderBlock);
2065 }
2066
2067 //
2068 // Always return success for now
2069 //
2070 return STATUS_SUCCESS;
2071 }
2072
2073 /* EOF */