Sync with trunk r58033.
[reactos.git] / ntoskrnl / mm / ARM3 / mminit.c
1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: BSD - See COPYING.ARM in the top level directory
4 * FILE: ntoskrnl/mm/ARM3/mminit.c
5 * PURPOSE: ARM Memory Manager Initialization
6 * PROGRAMMERS: ReactOS Portable Systems Group
7 */
8
9 /* INCLUDES *******************************************************************/
10
11 #include <ntoskrnl.h>
12 #define NDEBUG
13 #include <debug.h>
14
15 #define MODULE_INVOLVED_IN_ARM3
16 #include "miarm.h"
17 #undef MmSystemRangeStart
18
19 /* GLOBALS ********************************************************************/
20
21 //
22 // These are all registry-configurable, but by default, the memory manager will
23 // figure out the most appropriate values.
24 //
25 ULONG MmMaximumNonPagedPoolPercent;
26 SIZE_T MmSizeOfNonPagedPoolInBytes;
27 SIZE_T MmMaximumNonPagedPoolInBytes;
28
29 /* Some of the same values, in pages */
30 PFN_NUMBER MmMaximumNonPagedPoolInPages;
31
32 //
33 // These numbers describe the discrete equation components of the nonpaged
34 // pool sizing algorithm.
35 //
36 // They are described on http://support.microsoft.com/default.aspx/kb/126402/ja
37 // along with the algorithm that uses them, which is implemented later below.
38 //
39 SIZE_T MmMinimumNonPagedPoolSize = 256 * 1024;
40 ULONG MmMinAdditionNonPagedPoolPerMb = 32 * 1024;
41 SIZE_T MmDefaultMaximumNonPagedPool = 1024 * 1024;
42 ULONG MmMaxAdditionNonPagedPoolPerMb = 400 * 1024;
43
44 //
45 // The memory layout (and especially variable names) of the NT kernel mode
46 // components can be a bit hard to twig, especially when it comes to the non
47 // paged area.
48 //
49 // There are really two components to the non-paged pool:
50 //
51 // - The initial nonpaged pool, sized dynamically up to a maximum.
52 // - The expansion nonpaged pool, sized dynamically up to a maximum.
53 //
54 // The initial nonpaged pool is physically continuous for performance, and
55 // immediately follows the PFN database, typically sharing the same PDE. It is
56 // a very small resource (32MB on a 1GB system), and capped at 128MB.
57 //
58 // Right now we call this the "ARM³ Nonpaged Pool" and it begins somewhere after
59 // the PFN database (which starts at 0xB0000000).
60 //
61 // The expansion nonpaged pool, on the other hand, can grow much bigger (400MB
62 // for a 1GB system). On ARM³ however, it is currently capped at 128MB.
63 //
64 // The address where the initial nonpaged pool starts is aptly named
65 // MmNonPagedPoolStart, and it describes a range of MmSizeOfNonPagedPoolInBytes
66 // bytes.
67 //
68 // Expansion nonpaged pool starts at an address described by the variable called
69 // MmNonPagedPoolExpansionStart, and it goes on for MmMaximumNonPagedPoolInBytes
70 // minus MmSizeOfNonPagedPoolInBytes bytes, always reaching MmNonPagedPoolEnd
71 // (because of the way it's calculated) at 0xFFBE0000.
72 //
73 // Initial nonpaged pool is allocated and mapped early-on during boot, but what
74 // about the expansion nonpaged pool? It is instead composed of special pages
75 // which belong to what are called System PTEs. These PTEs are the matter of a
76 // later discussion, but they are also considered part of the "nonpaged" OS, due
77 // to the fact that they are never paged out -- once an address is described by
78 // a System PTE, it is always valid, until the System PTE is torn down.
79 //
80 // System PTEs are actually composed of two "spaces", the system space proper,
81 // and the nonpaged pool expansion space. The latter, as we've already seen,
82 // begins at MmNonPagedPoolExpansionStart. Based on the number of System PTEs
83 // that the system will support, the remaining address space below this address
84 // is used to hold the system space PTEs. This address, in turn, is held in the
85 // variable named MmNonPagedSystemStart, which itself is never allowed to go
86 // below 0xEB000000 (thus creating an upper bound on the number of System PTEs).
87 //
88 // This means that 330MB are reserved for total nonpaged system VA, on top of
89 // whatever the initial nonpaged pool allocation is.
90 //
91 // The following URLs, valid as of April 23rd, 2008, support this evidence:
92 //
93 // http://www.cs.miami.edu/~burt/journal/NT/memory.html
94 // http://www.ditii.com/2007/09/28/windows-memory-management-x86-virtual-address-space/
95 //
96 PVOID MmNonPagedSystemStart;
97 SIZE_T MiNonPagedSystemSize;
98 PVOID MmNonPagedPoolStart;
99 PVOID MmNonPagedPoolExpansionStart;
100 PVOID MmNonPagedPoolEnd = MI_NONPAGED_POOL_END;
101
102 //
103 // This is where paged pool starts by default
104 //
105 PVOID MmPagedPoolStart = MI_PAGED_POOL_START;
106 PVOID MmPagedPoolEnd;
107
108 //
109 // And this is its default size
110 //
111 SIZE_T MmSizeOfPagedPoolInBytes = MI_MIN_INIT_PAGED_POOLSIZE;
112 PFN_NUMBER MmSizeOfPagedPoolInPages = MI_MIN_INIT_PAGED_POOLSIZE / PAGE_SIZE;
113
114 //
115 // Session space starts at 0xBFFFFFFF and grows downwards
116 // By default, it includes an 8MB image area where we map win32k and video card
117 // drivers, followed by a 4MB area containing the session's working set. This is
118 // then followed by a 20MB mapped view area and finally by the session's paged
119 // pool, by default 16MB.
120 //
121 // On a normal system, this results in session space occupying the region from
122 // 0xBD000000 to 0xC0000000
123 //
124 // See miarm.h for the defines that determine the sizing of this region. On an
125 // NT system, some of these can be configured through the registry, but we don't
126 // support that yet.
127 //
128 PVOID MiSessionSpaceEnd; // 0xC0000000
129 PVOID MiSessionImageEnd; // 0xC0000000
130 PVOID MiSessionImageStart; // 0xBF800000
131 PVOID MiSessionSpaceWs;
132 PVOID MiSessionViewStart; // 0xBE000000
133 PVOID MiSessionPoolEnd; // 0xBE000000
134 PVOID MiSessionPoolStart; // 0xBD000000
135 PVOID MmSessionBase; // 0xBD000000
136 SIZE_T MmSessionSize;
137 SIZE_T MmSessionViewSize;
138 SIZE_T MmSessionPoolSize;
139 SIZE_T MmSessionImageSize;
140
141 /*
142 * These are the PTE addresses of the boundaries carved out above
143 */
144 PMMPTE MiSessionImagePteStart;
145 PMMPTE MiSessionImagePteEnd;
146 PMMPTE MiSessionBasePte;
147 PMMPTE MiSessionLastPte;
148
149 //
150 // The system view space, on the other hand, is where sections that are memory
151 // mapped into "system space" end up.
152 //
153 // By default, it is a 16MB region, but we hack it to be 32MB for ReactOS
154 //
155 PVOID MiSystemViewStart;
156 SIZE_T MmSystemViewSize;
157
158 #if (_MI_PAGING_LEVELS == 2)
159 //
160 // A copy of the system page directory (the page directory associated with the
161 // System process) is kept (double-mapped) by the manager in order to lazily
162 // map paged pool PDEs into external processes when they fault on a paged pool
163 // address.
164 //
165 PFN_NUMBER MmSystemPageDirectory[PD_COUNT];
166 PMMPDE MmSystemPagePtes;
167 #endif
168
169 //
170 // The system cache starts right after hyperspace. The first few pages are for
171 // keeping track of the system working set list.
172 //
173 // This should be 0xC0C00000 -- the cache itself starts at 0xC1000000
174 //
175 PMMWSL MmSystemCacheWorkingSetList = (PVOID)MI_SYSTEM_CACHE_WS_START;
176
177 //
178 // Windows NT seems to choose between 7000, 11000 and 50000
179 // On systems with more than 32MB, this number is then doubled, and further
180 // aligned up to a PDE boundary (4MB).
181 //
182 PFN_COUNT MmNumberOfSystemPtes;
183
184 //
185 // This is how many pages the PFN database will take up
186 // In Windows, this includes the Quark Color Table, but not in ARM³
187 //
188 PFN_NUMBER MxPfnAllocation;
189
190 //
191 // Unlike the old ReactOS Memory Manager, ARM³ (and Windows) does not keep track
192 // of pages that are not actually valid physical memory, such as ACPI reserved
193 // regions, BIOS address ranges, or holes in physical memory address space which
194 // could indicate device-mapped I/O memory.
195 //
196 // In fact, the lack of a PFN entry for a page usually indicates that this is
197 // I/O space instead.
198 //
199 // A bitmap, called the PFN bitmap, keeps track of all page frames by assigning
200 // a bit to each. If the bit is set, then the page is valid physical RAM.
201 //
202 RTL_BITMAP MiPfnBitMap;
203
204 //
205 // This structure describes the different pieces of RAM-backed address space
206 //
207 PPHYSICAL_MEMORY_DESCRIPTOR MmPhysicalMemoryBlock;
208
209 //
210 // This is where we keep track of the most basic physical layout markers
211 //
212 PFN_NUMBER MmHighestPhysicalPage, MmLowestPhysicalPage = -1;
213 PFN_COUNT MmNumberOfPhysicalPages;
214
215 //
216 // The total number of pages mapped by the boot loader, which include the kernel
217 // HAL, boot drivers, registry, NLS files and other loader data structures is
218 // kept track of here. This depends on "LoaderPagesSpanned" being correct when
219 // coming from the loader.
220 //
221 // This number is later aligned up to a PDE boundary.
222 //
223 SIZE_T MmBootImageSize;
224
225 //
226 // These three variables keep track of the core separation of address space that
227 // exists between kernel mode and user mode.
228 //
229 ULONG_PTR MmUserProbeAddress;
230 PVOID MmHighestUserAddress;
231 PVOID MmSystemRangeStart;
232
233 /* And these store the respective highest PTE/PDE address */
234 PMMPTE MiHighestUserPte;
235 PMMPDE MiHighestUserPde;
236 #if (_MI_PAGING_LEVELS >= 3)
237 PMMPTE MiHighestUserPpe;
238 #if (_MI_PAGING_LEVELS >= 4)
239 PMMPTE MiHighestUserPxe;
240 #endif
241 #endif
242
243 /* These variables define the system cache address space */
244 PVOID MmSystemCacheStart;
245 PVOID MmSystemCacheEnd;
246 MMSUPPORT MmSystemCacheWs;
247
248 //
249 // This is where hyperspace ends (followed by the system cache working set)
250 //
251 PVOID MmHyperSpaceEnd;
252
253 //
254 // Page coloring algorithm data
255 //
256 ULONG MmSecondaryColors;
257 ULONG MmSecondaryColorMask;
258
259 //
260 // Actual (registry-configurable) size of a GUI thread's stack
261 //
262 ULONG MmLargeStackSize = KERNEL_LARGE_STACK_SIZE;
263
264 //
265 // Before we have a PFN database, memory comes straight from our physical memory
266 // blocks, which is nice because it's guaranteed contiguous and also because once
267 // we take a page from here, the system doesn't see it anymore.
268 // However, once the fun is over, those pages must be re-integrated back into
269 // PFN society life, and that requires us keeping a copy of the original layout
270 // so that we can parse it later.
271 //
272 PMEMORY_ALLOCATION_DESCRIPTOR MxFreeDescriptor;
273 MEMORY_ALLOCATION_DESCRIPTOR MxOldFreeDescriptor;
274
275 /*
276 * For each page's worth bytes of L2 cache in a given set/way line, the zero and
277 * free lists are organized in what is called a "color".
278 *
279 * This array points to the two lists, so it can be thought of as a multi-dimensional
280 * array of MmFreePagesByColor[2][MmSecondaryColors]. Since the number is dynamic,
281 * we describe the array in pointer form instead.
282 *
283 * On a final note, the color tables themselves are right after the PFN database.
284 */
285 C_ASSERT(FreePageList == 1);
286 PMMCOLOR_TABLES MmFreePagesByColor[FreePageList + 1];
287
288 /* An event used in Phase 0 before the rest of the system is ready to go */
289 KEVENT MiTempEvent;
290
291 /* All the events used for memory threshold notifications */
292 PKEVENT MiLowMemoryEvent;
293 PKEVENT MiHighMemoryEvent;
294 PKEVENT MiLowPagedPoolEvent;
295 PKEVENT MiHighPagedPoolEvent;
296 PKEVENT MiLowNonPagedPoolEvent;
297 PKEVENT MiHighNonPagedPoolEvent;
298
299 /* The actual thresholds themselves, in page numbers */
300 PFN_NUMBER MmLowMemoryThreshold;
301 PFN_NUMBER MmHighMemoryThreshold;
302 PFN_NUMBER MiLowPagedPoolThreshold;
303 PFN_NUMBER MiHighPagedPoolThreshold;
304 PFN_NUMBER MiLowNonPagedPoolThreshold;
305 PFN_NUMBER MiHighNonPagedPoolThreshold;
306
307 /*
308 * This number determines how many free pages must exist, at minimum, until we
309 * start trimming working sets and flushing modified pages to obtain more free
310 * pages.
311 *
312 * This number changes if the system detects that this is a server product
313 */
314 PFN_NUMBER MmMinimumFreePages = 26;
315
316 /*
317 * This number indicates how many pages we consider to be a low limit of having
318 * "plenty" of free memory.
319 *
320 * It is doubled on systems that have more than 63MB of memory
321 */
322 PFN_NUMBER MmPlentyFreePages = 400;
323
324 /* These values store the type of system this is (small, med, large) and if server */
325 ULONG MmProductType;
326 MM_SYSTEMSIZE MmSystemSize;
327
328 /*
329 * These values store the cache working set minimums and maximums, in pages
330 *
331 * The minimum value is boosted on systems with more than 24MB of RAM, and cut
332 * down to only 32 pages on embedded (<24MB RAM) systems.
333 *
334 * An extra boost of 2MB is given on systems with more than 33MB of RAM.
335 */
336 PFN_NUMBER MmSystemCacheWsMinimum = 288;
337 PFN_NUMBER MmSystemCacheWsMaximum = 350;
338
339 /* FIXME: Move to cache/working set code later */
340 BOOLEAN MmLargeSystemCache;
341
342 /*
343 * This value determines in how many fragments/chunks the subsection prototype
344 * PTEs should be allocated when mapping a section object. It is configurable in
345 * the registry through the MapAllocationFragment parameter.
346 *
347 * The default is 64KB on systems with more than 1GB of RAM, 32KB on systems with
348 * more than 256MB of RAM, and 16KB on systems with less than 256MB of RAM.
349 *
350 * The maximum it can be set to is 2MB, and the minimum is 4KB.
351 */
352 SIZE_T MmAllocationFragment;
353
354 /*
355 * These two values track how much virtual memory can be committed, and when
356 * expansion should happen.
357 */
358 // FIXME: They should be moved elsewhere since it's not an "init" setting?
359 SIZE_T MmTotalCommitLimit;
360 SIZE_T MmTotalCommitLimitMaximum;
361
362 /* Internal setting used for debugging memory descriptors */
363 BOOLEAN MiDbgEnableMdDump =
364 #ifdef _ARM_
365 TRUE;
366 #else
367 FALSE;
368 #endif
369
370 /* Number of memory descriptors in the loader block */
371 ULONG MiNumberDescriptors = 0;
372
373 /* Number of free pages in the loader block */
374 PFN_NUMBER MiNumberOfFreePages = 0;
375
376
377 /* PRIVATE FUNCTIONS **********************************************************/
378
379 VOID
380 NTAPI
381 MiScanMemoryDescriptors(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
382 {
383 PLIST_ENTRY ListEntry;
384 PMEMORY_ALLOCATION_DESCRIPTOR Descriptor;
385 PFN_NUMBER PageFrameIndex, FreePages = 0;
386
387 /* Loop the memory descriptors */
388 for (ListEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
389 ListEntry != &LoaderBlock->MemoryDescriptorListHead;
390 ListEntry = ListEntry->Flink)
391 {
392 /* Get the descriptor */
393 Descriptor = CONTAINING_RECORD(ListEntry,
394 MEMORY_ALLOCATION_DESCRIPTOR,
395 ListEntry);
396 DPRINT("MD Type: %lx Base: %lx Count: %lx\n",
397 Descriptor->MemoryType, Descriptor->BasePage, Descriptor->PageCount);
398
399 /* Count this descriptor */
400 MiNumberDescriptors++;
401
402 /* Check if this is invisible memory */
403 if ((Descriptor->MemoryType == LoaderFirmwarePermanent) ||
404 (Descriptor->MemoryType == LoaderSpecialMemory) ||
405 (Descriptor->MemoryType == LoaderHALCachedMemory) ||
406 (Descriptor->MemoryType == LoaderBBTMemory))
407 {
408 /* Skip this descriptor */
409 continue;
410 }
411
412 /* Check if this is bad memory */
413 if (Descriptor->MemoryType != LoaderBad)
414 {
415 /* Count this in the total of pages */
416 MmNumberOfPhysicalPages += (PFN_COUNT)Descriptor->PageCount;
417 }
418
419 /* Check if this is the new lowest page */
420 if (Descriptor->BasePage < MmLowestPhysicalPage)
421 {
422 /* Update the lowest page */
423 MmLowestPhysicalPage = Descriptor->BasePage;
424 }
425
426 /* Check if this is the new highest page */
427 PageFrameIndex = Descriptor->BasePage + Descriptor->PageCount;
428 if (PageFrameIndex > MmHighestPhysicalPage)
429 {
430 /* Update the highest page */
431 MmHighestPhysicalPage = PageFrameIndex - 1;
432 }
433
434 /* Check if this is free memory */
435 if ((Descriptor->MemoryType == LoaderFree) ||
436 (Descriptor->MemoryType == LoaderLoadedProgram) ||
437 (Descriptor->MemoryType == LoaderFirmwareTemporary) ||
438 (Descriptor->MemoryType == LoaderOsloaderStack))
439 {
440 /* Count it too free pages */
441 MiNumberOfFreePages += Descriptor->PageCount;
442
443 /* Check if this is the largest memory descriptor */
444 if (Descriptor->PageCount > FreePages)
445 {
446 /* Remember it */
447 MxFreeDescriptor = Descriptor;
448 FreePages = Descriptor->PageCount;
449 }
450 }
451 }
452
453 /* Save original values of the free descriptor, since it'll be
454 * altered by early allocations */
455 MxOldFreeDescriptor = *MxFreeDescriptor;
456 }
457
458 PFN_NUMBER
459 NTAPI
460 INIT_FUNCTION
461 MxGetNextPage(IN PFN_NUMBER PageCount)
462 {
463 PFN_NUMBER Pfn;
464
465 /* Make sure we have enough pages */
466 if (PageCount > MxFreeDescriptor->PageCount)
467 {
468 /* Crash the system */
469 KeBugCheckEx(INSTALL_MORE_MEMORY,
470 MmNumberOfPhysicalPages,
471 MxFreeDescriptor->PageCount,
472 MxOldFreeDescriptor.PageCount,
473 PageCount);
474 }
475
476 /* Use our lowest usable free pages */
477 Pfn = MxFreeDescriptor->BasePage;
478 MxFreeDescriptor->BasePage += PageCount;
479 MxFreeDescriptor->PageCount -= PageCount;
480 return Pfn;
481 }
482
483 VOID
484 NTAPI
485 INIT_FUNCTION
486 MiComputeColorInformation(VOID)
487 {
488 ULONG L2Associativity;
489
490 /* Check if no setting was provided already */
491 if (!MmSecondaryColors)
492 {
493 /* Get L2 cache information */
494 L2Associativity = KeGetPcr()->SecondLevelCacheAssociativity;
495
496 /* The number of colors is the number of cache bytes by set/way */
497 MmSecondaryColors = KeGetPcr()->SecondLevelCacheSize;
498 if (L2Associativity) MmSecondaryColors /= L2Associativity;
499 }
500
501 /* Now convert cache bytes into pages */
502 MmSecondaryColors >>= PAGE_SHIFT;
503 if (!MmSecondaryColors)
504 {
505 /* If there was no cache data from the KPCR, use the default colors */
506 MmSecondaryColors = MI_SECONDARY_COLORS;
507 }
508 else
509 {
510 /* Otherwise, make sure there aren't too many colors */
511 if (MmSecondaryColors > MI_MAX_SECONDARY_COLORS)
512 {
513 /* Set the maximum */
514 MmSecondaryColors = MI_MAX_SECONDARY_COLORS;
515 }
516
517 /* Make sure there aren't too little colors */
518 if (MmSecondaryColors < MI_MIN_SECONDARY_COLORS)
519 {
520 /* Set the default */
521 MmSecondaryColors = MI_SECONDARY_COLORS;
522 }
523
524 /* Finally make sure the colors are a power of two */
525 if (MmSecondaryColors & (MmSecondaryColors - 1))
526 {
527 /* Set the default */
528 MmSecondaryColors = MI_SECONDARY_COLORS;
529 }
530 }
531
532 /* Compute the mask and store it */
533 MmSecondaryColorMask = MmSecondaryColors - 1;
534 KeGetCurrentPrcb()->SecondaryColorMask = MmSecondaryColorMask;
535 }
536
/**
 * Places the two page-color tables (zeroed-page and free-page lists, one
 * entry per secondary color) immediately after the ARM3 PFN database, maps
 * any not-yet-mapped pages backing them, and initializes every color's list
 * to the empty state.
 *
 * NOTE(review): assumes MiComputeColorInformation has already set
 * MmSecondaryColors -- confirm call ordering in the Phase 0 init path.
 */
VOID
NTAPI
INIT_FUNCTION
MiInitializeColorTables(VOID)
{
    ULONG i;
    PMMPTE PointerPte, LastPte;
    MMPTE TempPte = ValidKernelPte;

    /* The color table starts after the ARM3 PFN database */
    MmFreePagesByColor[0] = (PMMCOLOR_TABLES)&MmPfnDatabase[MmHighestPhysicalPage + 1];

    /* Loop the PTEs. We have two color tables for each secondary color */
    PointerPte = MiAddressToPte(&MmFreePagesByColor[0][0]);
    LastPte = MiAddressToPte((ULONG_PTR)MmFreePagesByColor[0] +
                             (2 * MmSecondaryColors * sizeof(MMCOLOR_TABLES))
                             - 1);
    while (PointerPte <= LastPte)
    {
        /* Only map pages that aren't valid yet -- the first page may already
         * be shared with the tail of the PFN database mapping */
        if (PointerPte->u.Hard.Valid == 0)
        {
            /* Take a physical page from the boot free descriptor and map it */
            TempPte.u.Hard.PageFrameNumber = MxGetNextPage(1);
            MI_WRITE_VALID_PTE(PointerPte, TempPte);

            /* Zero out the page */
            RtlZeroMemory(MiPteToAddress(PointerPte), PAGE_SIZE);
        }

        /* Next */
        PointerPte++;
    }

    /* The second table immediately follows the first one */
    MmFreePagesByColor[1] = &MmFreePagesByColor[0][MmSecondaryColors];

    /* Now loop the lists to set them up: both lists of every color start
     * empty (LIST_HEAD is the sentinel value for color chains) */
    for (i = 0; i < MmSecondaryColors; i++)
    {
        /* Set both free and zero lists for each color */
        MmFreePagesByColor[ZeroedPageList][i].Flink = LIST_HEAD;
        MmFreePagesByColor[ZeroedPageList][i].Blink = (PVOID)LIST_HEAD;
        MmFreePagesByColor[ZeroedPageList][i].Count = 0;
        MmFreePagesByColor[FreePageList][i].Flink = LIST_HEAD;
        MmFreePagesByColor[FreePageList][i].Blink = (PVOID)LIST_HEAD;
        MmFreePagesByColor[FreePageList][i].Count = 0;
    }
}
586
587 #ifndef _M_AMD64
588 BOOLEAN
589 NTAPI
590 INIT_FUNCTION
591 MiIsRegularMemory(IN PLOADER_PARAMETER_BLOCK LoaderBlock,
592 IN PFN_NUMBER Pfn)
593 {
594 PLIST_ENTRY NextEntry;
595 PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
596
597 /* Loop the memory descriptors */
598 NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
599 while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
600 {
601 /* Get the memory descriptor */
602 MdBlock = CONTAINING_RECORD(NextEntry,
603 MEMORY_ALLOCATION_DESCRIPTOR,
604 ListEntry);
605
606 /* Check if this PFN could be part of the block */
607 if (Pfn >= (MdBlock->BasePage))
608 {
609 /* Check if it really is part of the block */
610 if (Pfn < (MdBlock->BasePage + MdBlock->PageCount))
611 {
612 /* Check if the block is actually memory we don't map */
613 if ((MdBlock->MemoryType == LoaderFirmwarePermanent) ||
614 (MdBlock->MemoryType == LoaderBBTMemory) ||
615 (MdBlock->MemoryType == LoaderSpecialMemory))
616 {
617 /* We don't need PFN database entries for this memory */
618 break;
619 }
620
621 /* This is memory we want to map */
622 return TRUE;
623 }
624 }
625 else
626 {
627 /* Blocks are ordered, so if it's not here, it doesn't exist */
628 break;
629 }
630
631 /* Get to the next descriptor */
632 NextEntry = MdBlock->ListEntry.Flink;
633 }
634
635 /* Check if this PFN is actually from our free memory descriptor */
636 if ((Pfn >= MxOldFreeDescriptor.BasePage) &&
637 (Pfn < MxOldFreeDescriptor.BasePage + MxOldFreeDescriptor.PageCount))
638 {
639 /* We use these pages for initial mappings, so we do want to count them */
640 return TRUE;
641 }
642
643 /* Otherwise this isn't memory that we describe or care about */
644 return FALSE;
645 }
646
/**
 * Maps the virtual pages of the PFN database that cover every descriptor's
 * physical range, taking backing pages straight from the boot free
 * descriptor.
 *
 * MxGetNextPage cannot be used here: this loop also visits the free
 * descriptor itself, so its base/count are snapshotted into locals up front
 * and written back once at the end.
 *
 * @param LoaderBlock  Loader parameter block with the descriptor list.
 */
VOID
NTAPI
INIT_FUNCTION
MiMapPfnDatabase(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
    PFN_NUMBER FreePage, FreePageCount, PagesLeft, BasePage, PageCount;
    PLIST_ENTRY NextEntry;
    PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
    PMMPTE PointerPte, LastPte;
    MMPTE TempPte = ValidKernelPte;

    /* Get current page data, since we won't be using MxGetNextPage as it would corrupt our state */
    FreePage = MxFreeDescriptor->BasePage;
    FreePageCount = MxFreeDescriptor->PageCount;
    PagesLeft = 0;  /* NOTE(review): counts pages actually mapped, despite the name */

    /* Loop the memory descriptors */
    NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
    while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
    {
        /* Get the descriptor */
        MdBlock = CONTAINING_RECORD(NextEntry,
                                    MEMORY_ALLOCATION_DESCRIPTOR,
                                    ListEntry);
        if ((MdBlock->MemoryType == LoaderFirmwarePermanent) ||
            (MdBlock->MemoryType == LoaderBBTMemory) ||
            (MdBlock->MemoryType == LoaderSpecialMemory))
        {
            /* These pages are not part of the PFN database */
            NextEntry = MdBlock->ListEntry.Flink;
            continue;
        }

        /* Next, check if this is our special free descriptor we've found */
        if (MdBlock == MxFreeDescriptor)
        {
            /* Use the real (pre-consumption) numbers instead, so the PFN
             * entries for already-consumed pages are mapped too */
            BasePage = MxOldFreeDescriptor.BasePage;
            PageCount = MxOldFreeDescriptor.PageCount;
        }
        else
        {
            /* Use the descriptor's numbers */
            BasePage = MdBlock->BasePage;
            PageCount = MdBlock->PageCount;
        }

        /* Get the PTEs covering this range's slice of the PFN database */
        PointerPte = MiAddressToPte(&MmPfnDatabase[BasePage]);
        LastPte = MiAddressToPte(((ULONG_PTR)&MmPfnDatabase[BasePage + PageCount]) - 1);
        DPRINT("MD Type: %lx Base: %lx Count: %lx\n", MdBlock->MemoryType, BasePage, PageCount);

        /* Loop them */
        while (PointerPte <= LastPte)
        {
            /* We'll only touch PTEs that aren't already valid -- ranges of
             * adjacent descriptors can share a PFN database page */
            if (PointerPte->u.Hard.Valid == 0)
            {
                /* Use the next free page */
                TempPte.u.Hard.PageFrameNumber = FreePage;
                ASSERT(FreePageCount != 0);

                /* Consume free pages */
                FreePage++;
                FreePageCount--;
                if (!FreePageCount)
                {
                    /* Out of memory */
                    KeBugCheckEx(INSTALL_MORE_MEMORY,
                                 MmNumberOfPhysicalPages,
                                 FreePageCount,
                                 MxOldFreeDescriptor.PageCount,
                                 1);
                }

                /* Write out this PTE */
                PagesLeft++;
                MI_WRITE_VALID_PTE(PointerPte, TempPte);

                /* Zero this page */
                RtlZeroMemory(MiPteToAddress(PointerPte), PAGE_SIZE);
            }

            /* Next! */
            PointerPte++;
        }

        /* Do the next address range */
        NextEntry = MdBlock->ListEntry.Flink;
    }

    /* Now update the free descriptors to consume the pages we used up during the PFN allocation loop */
    MxFreeDescriptor->BasePage = FreePage;
    MxFreeDescriptor->PageCount = FreePageCount;
}
742
743 VOID
744 NTAPI
745 INIT_FUNCTION
746 MiBuildPfnDatabaseFromPages(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
747 {
748 PMMPDE PointerPde;
749 PMMPTE PointerPte;
750 ULONG i, Count, j;
751 PFN_NUMBER PageFrameIndex, StartupPdIndex, PtePageIndex;
752 PMMPFN Pfn1, Pfn2;
753 ULONG_PTR BaseAddress = 0;
754
755 /* PFN of the startup page directory */
756 StartupPdIndex = PFN_FROM_PTE(MiAddressToPde(PDE_BASE));
757
758 /* Start with the first PDE and scan them all */
759 PointerPde = MiAddressToPde(NULL);
760 Count = PD_COUNT * PDE_COUNT;
761 for (i = 0; i < Count; i++)
762 {
763 /* Check for valid PDE */
764 if (PointerPde->u.Hard.Valid == 1)
765 {
766 /* Get the PFN from it */
767 PageFrameIndex = PFN_FROM_PTE(PointerPde);
768
769 /* Do we want a PFN entry for this page? */
770 if (MiIsRegularMemory(LoaderBlock, PageFrameIndex))
771 {
772 /* Yes we do, set it up */
773 Pfn1 = MiGetPfnEntry(PageFrameIndex);
774 Pfn1->u4.PteFrame = StartupPdIndex;
775 Pfn1->PteAddress = (PMMPTE)PointerPde;
776 Pfn1->u2.ShareCount++;
777 Pfn1->u3.e2.ReferenceCount = 1;
778 Pfn1->u3.e1.PageLocation = ActiveAndValid;
779 Pfn1->u3.e1.CacheAttribute = MiNonCached;
780 #if MI_TRACE_PFNS
781 Pfn1->PfnUsage = MI_USAGE_INIT_MEMORY;
782 memcpy(Pfn1->ProcessName, "Initial PDE", 16);
783 #endif
784 }
785 else
786 {
787 /* No PFN entry */
788 Pfn1 = NULL;
789 }
790
791 /* Now get the PTE and scan the pages */
792 PointerPte = MiAddressToPte(BaseAddress);
793 for (j = 0; j < PTE_COUNT; j++)
794 {
795 /* Check for a valid PTE */
796 if (PointerPte->u.Hard.Valid == 1)
797 {
798 /* Increase the shared count of the PFN entry for the PDE */
799 ASSERT(Pfn1 != NULL);
800 Pfn1->u2.ShareCount++;
801
802 /* Now check if the PTE is valid memory too */
803 PtePageIndex = PFN_FROM_PTE(PointerPte);
804 if (MiIsRegularMemory(LoaderBlock, PtePageIndex))
805 {
806 /*
807 * Only add pages above the end of system code or pages
808 * that are part of nonpaged pool
809 */
810 if ((BaseAddress >= 0xA0000000) ||
811 ((BaseAddress >= (ULONG_PTR)MmNonPagedPoolStart) &&
812 (BaseAddress < (ULONG_PTR)MmNonPagedPoolStart +
813 MmSizeOfNonPagedPoolInBytes)))
814 {
815 /* Get the PFN entry and make sure it too is valid */
816 Pfn2 = MiGetPfnEntry(PtePageIndex);
817 if ((MmIsAddressValid(Pfn2)) &&
818 (MmIsAddressValid(Pfn2 + 1)))
819 {
820 /* Setup the PFN entry */
821 Pfn2->u4.PteFrame = PageFrameIndex;
822 Pfn2->PteAddress = PointerPte;
823 Pfn2->u2.ShareCount++;
824 Pfn2->u3.e2.ReferenceCount = 1;
825 Pfn2->u3.e1.PageLocation = ActiveAndValid;
826 Pfn2->u3.e1.CacheAttribute = MiNonCached;
827 #if MI_TRACE_PFNS
828 Pfn2->PfnUsage = MI_USAGE_INIT_MEMORY;
829 memcpy(Pfn1->ProcessName, "Initial PTE", 16);
830 #endif
831 }
832 }
833 }
834 }
835
836 /* Next PTE */
837 PointerPte++;
838 BaseAddress += PAGE_SIZE;
839 }
840 }
841 else
842 {
843 /* Next PDE mapped address */
844 BaseAddress += PDE_MAPPED_VA;
845 }
846
847 /* Next PTE */
848 PointerPde++;
849 }
850 }
851
852 VOID
853 NTAPI
854 INIT_FUNCTION
855 MiBuildPfnDatabaseZeroPage(VOID)
856 {
857 PMMPFN Pfn1;
858 PMMPDE PointerPde;
859
860 /* Grab the lowest page and check if it has no real references */
861 Pfn1 = MiGetPfnEntry(MmLowestPhysicalPage);
862 if (!(MmLowestPhysicalPage) && !(Pfn1->u3.e2.ReferenceCount))
863 {
864 /* Make it a bogus page to catch errors */
865 PointerPde = MiAddressToPde(0xFFFFFFFF);
866 Pfn1->u4.PteFrame = PFN_FROM_PTE(PointerPde);
867 Pfn1->PteAddress = (PMMPTE)PointerPde;
868 Pfn1->u2.ShareCount++;
869 Pfn1->u3.e2.ReferenceCount = 0xFFF0;
870 Pfn1->u3.e1.PageLocation = ActiveAndValid;
871 Pfn1->u3.e1.CacheAttribute = MiNonCached;
872 }
873 }
874
/**
 * Walks the loader's memory descriptor list and fills in the PFN database:
 * free-memory descriptors have their unreferenced pages inserted into the
 * free list (under the PFN lock), invisible/bad memory is skipped, and all
 * other descriptor types are marked as active in-use pages mapped through
 * the KSEG0 identity mapping.
 *
 * @param LoaderBlock  Loader parameter block with the descriptor list.
 *
 * NOTE(review): descriptors beyond MmHighestPhysicalPage are trimmed or
 * dropped because the PFN database only maps up to that page.
 */
VOID
NTAPI
INIT_FUNCTION
MiBuildPfnDatabaseFromLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
    PLIST_ENTRY NextEntry;
    PFN_NUMBER PageCount = 0;
    PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
    PFN_NUMBER PageFrameIndex;
    PMMPFN Pfn1;
    PMMPTE PointerPte;
    PMMPDE PointerPde;
    KIRQL OldIrql;

    /* Now loop through the descriptors */
    NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
    while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
    {
        /* Get the current descriptor */
        MdBlock = CONTAINING_RECORD(NextEntry,
                                    MEMORY_ALLOCATION_DESCRIPTOR,
                                    ListEntry);

        /* Read its data */
        PageCount = MdBlock->PageCount;
        PageFrameIndex = MdBlock->BasePage;

        /* Don't allow memory above what the PFN database is mapping */
        if (PageFrameIndex > MmHighestPhysicalPage)
        {
            /* Since they are ordered, everything past here will be larger */
            break;
        }

        /* On the other hand, the end page might be higher up... */
        if ((PageFrameIndex + PageCount) > (MmHighestPhysicalPage + 1))
        {
            /* In which case we'll trim the descriptor to go as high as we can */
            PageCount = MmHighestPhysicalPage + 1 - PageFrameIndex;
            MdBlock->PageCount = PageCount;

            /* But if there's nothing left to trim, we got too high, so quit */
            if (!PageCount) break;
        }

        /* Now check the descriptor type */
        switch (MdBlock->MemoryType)
        {
            /* Check for bad RAM */
            case LoaderBad:

                DPRINT1("You either have specified /BURNMEMORY or damaged RAM modules.\n");
                break;

            /* Check for free RAM */
            case LoaderFree:
            case LoaderLoadedProgram:
            case LoaderFirmwareTemporary:
            case LoaderOsloaderStack:

                /* Get the last page of this descriptor. Note we loop backwards */
                PageFrameIndex += PageCount - 1;
                Pfn1 = MiGetPfnEntry(PageFrameIndex);

                /* Lock the PFN Database */
                OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
                while (PageCount--)
                {
                    /* If the page really has no references, mark it as free */
                    if (!Pfn1->u3.e2.ReferenceCount)
                    {
                        /* Add it to the free list */
                        Pfn1->u3.e1.CacheAttribute = MiNonCached;
                        MiInsertPageInFreeList(PageFrameIndex);
                    }

                    /* Go to the next page */
                    Pfn1--;
                    PageFrameIndex--;
                }

                /* Release PFN database */
                KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

                /* Done with this block */
                break;

            /* Check for pages that are invisible to us */
            case LoaderFirmwarePermanent:
            case LoaderSpecialMemory:
            case LoaderBBTMemory:

                /* And skip them */
                break;

            default:

                /* Map these pages with the KSEG0 mapping that adds 0x80000000 */
                PointerPte = MiAddressToPte(KSEG0_BASE + (PageFrameIndex << PAGE_SHIFT));
                Pfn1 = MiGetPfnEntry(PageFrameIndex);
                while (PageCount--)
                {
                    /* Check if the page is really unused */
                    PointerPde = MiAddressToPde(KSEG0_BASE + (PageFrameIndex << PAGE_SHIFT));
                    if (!Pfn1->u3.e2.ReferenceCount)
                    {
                        /* Mark it as being in-use */
                        Pfn1->u4.PteFrame = PFN_FROM_PTE(PointerPde);
                        Pfn1->PteAddress = PointerPte;
                        Pfn1->u2.ShareCount++;
                        Pfn1->u3.e2.ReferenceCount = 1;
                        Pfn1->u3.e1.PageLocation = ActiveAndValid;
                        Pfn1->u3.e1.CacheAttribute = MiNonCached;
#if MI_TRACE_PFNS
                        Pfn1->PfnUsage = MI_USAGE_BOOT_DRIVER;
#endif

                        /* Check for RAM disk page */
                        if (MdBlock->MemoryType == LoaderXIPRom)
                        {
                            /* Make it a pseudo-I/O ROM mapping */
                            Pfn1->u1.Flink = 0;
                            Pfn1->u2.ShareCount = 0;
                            Pfn1->u3.e2.ReferenceCount = 0;
                            Pfn1->u3.e1.PageLocation = 0;
                            Pfn1->u3.e1.Rom = 1;
                            Pfn1->u4.InPageError = 0;
                            Pfn1->u3.e1.PrototypePte = 1;
                        }
                    }

                    /* Advance page structures */
                    Pfn1++;
                    PageFrameIndex++;
                    PointerPte++;
                }
                break;
        }

        /* Next descriptor entry */
        NextEntry = MdBlock->ListEntry.Flink;
    }
}
1018
1019 VOID
1020 NTAPI
1021 INIT_FUNCTION
1022 MiBuildPfnDatabaseSelf(VOID)
1023 {
1024 PMMPTE PointerPte, LastPte;
1025 PMMPFN Pfn1;
1026
1027 /* Loop the PFN database page */
1028 PointerPte = MiAddressToPte(MiGetPfnEntry(MmLowestPhysicalPage));
1029 LastPte = MiAddressToPte(MiGetPfnEntry(MmHighestPhysicalPage));
1030 while (PointerPte <= LastPte)
1031 {
1032 /* Make sure the page is valid */
1033 if (PointerPte->u.Hard.Valid == 1)
1034 {
1035 /* Get the PFN entry and just mark it referenced */
1036 Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
1037 Pfn1->u2.ShareCount = 1;
1038 Pfn1->u3.e2.ReferenceCount = 1;
1039 #if MI_TRACE_PFNS
1040 Pfn1->PfnUsage = MI_USAGE_PFN_DATABASE;
1041 #endif
1042 }
1043
1044 /* Next */
1045 PointerPte++;
1046 }
1047 }
1048
1049 VOID
1050 NTAPI
1051 INIT_FUNCTION
1052 MiInitializePfnDatabase(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
1053 {
1054 /* Scan memory and start setting up PFN entries */
1055 MiBuildPfnDatabaseFromPages(LoaderBlock);
1056
1057 /* Add the zero page */
1058 MiBuildPfnDatabaseZeroPage();
1059
1060 /* Scan the loader block and build the rest of the PFN database */
1061 MiBuildPfnDatabaseFromLoaderBlock(LoaderBlock);
1062
1063 /* Finally add the pages for the PFN database itself */
1064 MiBuildPfnDatabaseSelf();
1065 }
1066 #endif /* !_M_AMD64 */
1067
1068 VOID
1069 NTAPI
1070 INIT_FUNCTION
1071 MmFreeLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
1072 {
1073 PLIST_ENTRY NextMd;
1074 PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
1075 ULONG_PTR i;
1076 PFN_NUMBER BasePage, LoaderPages;
1077 PMMPFN Pfn1;
1078 KIRQL OldIrql;
1079 PPHYSICAL_MEMORY_RUN Buffer, Entry;
1080
1081 /* Loop the descriptors in order to count them */
1082 i = 0;
1083 NextMd = LoaderBlock->MemoryDescriptorListHead.Flink;
1084 while (NextMd != &LoaderBlock->MemoryDescriptorListHead)
1085 {
1086 MdBlock = CONTAINING_RECORD(NextMd,
1087 MEMORY_ALLOCATION_DESCRIPTOR,
1088 ListEntry);
1089 i++;
1090 NextMd = MdBlock->ListEntry.Flink;
1091 }
1092
1093 /* Allocate a structure to hold the physical runs */
1094 Buffer = ExAllocatePoolWithTag(NonPagedPool,
1095 i * sizeof(PHYSICAL_MEMORY_RUN),
1096 'lMmM');
1097 ASSERT(Buffer != NULL);
1098 Entry = Buffer;
1099
1100 /* Loop the descriptors again */
1101 NextMd = LoaderBlock->MemoryDescriptorListHead.Flink;
1102 while (NextMd != &LoaderBlock->MemoryDescriptorListHead)
1103 {
1104 /* Check what kind this was */
1105 MdBlock = CONTAINING_RECORD(NextMd,
1106 MEMORY_ALLOCATION_DESCRIPTOR,
1107 ListEntry);
1108 switch (MdBlock->MemoryType)
1109 {
1110 /* Registry, NLS, and heap data */
1111 case LoaderRegistryData:
1112 case LoaderOsloaderHeap:
1113 case LoaderNlsData:
1114 /* Are all a candidate for deletion */
1115 Entry->BasePage = MdBlock->BasePage;
1116 Entry->PageCount = MdBlock->PageCount;
1117 Entry++;
1118
1119 /* We keep the rest */
1120 default:
1121 break;
1122 }
1123
1124 /* Move to the next descriptor */
1125 NextMd = MdBlock->ListEntry.Flink;
1126 }
1127
1128 /* Acquire the PFN lock */
1129 OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
1130
1131 /* Loop the runs */
1132 LoaderPages = 0;
1133 while (--Entry >= Buffer)
1134 {
1135 /* See how many pages are in this run */
1136 i = Entry->PageCount;
1137 BasePage = Entry->BasePage;
1138
1139 /* Loop each page */
1140 Pfn1 = MiGetPfnEntry(BasePage);
1141 while (i--)
1142 {
1143 /* Check if it has references or is in any kind of list */
1144 if (!(Pfn1->u3.e2.ReferenceCount) && (!Pfn1->u1.Flink))
1145 {
1146 /* Set the new PTE address and put this page into the free list */
1147 Pfn1->PteAddress = (PMMPTE)(BasePage << PAGE_SHIFT);
1148 MiInsertPageInFreeList(BasePage);
1149 LoaderPages++;
1150 }
1151 else if (BasePage)
1152 {
1153 /* It has a reference, so simply drop it */
1154 ASSERT(MI_IS_PHYSICAL_ADDRESS(MiPteToAddress(Pfn1->PteAddress)) == FALSE);
1155
1156 /* Drop a dereference on this page, which should delete it */
1157 Pfn1->PteAddress->u.Long = 0;
1158 MI_SET_PFN_DELETED(Pfn1);
1159 MiDecrementShareCount(Pfn1, BasePage);
1160 LoaderPages++;
1161 }
1162
1163 /* Move to the next page */
1164 Pfn1++;
1165 BasePage++;
1166 }
1167 }
1168
1169 /* Release the PFN lock and flush the TLB */
1170 DPRINT1("Loader pages freed: %lx\n", LoaderPages);
1171 KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
1172 KeFlushCurrentTb();
1173
1174 /* Free our run structure */
1175 ExFreePoolWithTag(Buffer, 'lMmM');
1176 }
1177
1178 VOID
1179 NTAPI
1180 INIT_FUNCTION
1181 MiAdjustWorkingSetManagerParameters(IN BOOLEAN Client)
1182 {
1183 /* This function needs to do more work, for now, we tune page minimums */
1184
1185 /* Check for a system with around 64MB RAM or more */
1186 if (MmNumberOfPhysicalPages >= (63 * _1MB) / PAGE_SIZE)
1187 {
1188 /* Double the minimum amount of pages we consider for a "plenty free" scenario */
1189 MmPlentyFreePages *= 2;
1190 }
1191 }
1192
1193 VOID
1194 NTAPI
1195 INIT_FUNCTION
1196 MiNotifyMemoryEvents(VOID)
1197 {
1198 /* Are we in a low-memory situation? */
1199 if (MmAvailablePages < MmLowMemoryThreshold)
1200 {
1201 /* Clear high, set low */
1202 if (KeReadStateEvent(MiHighMemoryEvent)) KeClearEvent(MiHighMemoryEvent);
1203 if (!KeReadStateEvent(MiLowMemoryEvent)) KeSetEvent(MiLowMemoryEvent, 0, FALSE);
1204 }
1205 else if (MmAvailablePages < MmHighMemoryThreshold)
1206 {
1207 /* We are in between, clear both */
1208 if (KeReadStateEvent(MiHighMemoryEvent)) KeClearEvent(MiHighMemoryEvent);
1209 if (KeReadStateEvent(MiLowMemoryEvent)) KeClearEvent(MiLowMemoryEvent);
1210 }
1211 else
1212 {
1213 /* Clear low, set high */
1214 if (KeReadStateEvent(MiLowMemoryEvent)) KeClearEvent(MiLowMemoryEvent);
1215 if (!KeReadStateEvent(MiHighMemoryEvent)) KeSetEvent(MiHighMemoryEvent, 0, FALSE);
1216 }
1217 }
1218
/**
 * Creates a named, permanent notification event (expected under
 * \KernelObjects) with a DACL granting query/synchronize access to
 * everyone and full access to administrators and the local system,
 * then returns a referenced pointer to the event object.
 *
 * @param Name   Object manager path for the event.
 * @param Event  Receives the referenced KEVENT pointer on success.
 *
 * @return STATUS_SUCCESS, or a failure code from security-descriptor
 *         setup, ACL construction, or event creation/referencing.
 */
NTSTATUS
NTAPI
INIT_FUNCTION
MiCreateMemoryEvent(IN PUNICODE_STRING Name,
                    OUT PKEVENT *Event)
{
    PACL Dacl;
    HANDLE EventHandle;
    ULONG DaclLength;
    NTSTATUS Status;
    OBJECT_ATTRIBUTES ObjectAttributes;
    SECURITY_DESCRIPTOR SecurityDescriptor;

    /* Create the SD (on the stack; the object manager captures it) */
    Status = RtlCreateSecurityDescriptor(&SecurityDescriptor,
                                         SECURITY_DESCRIPTOR_REVISION);
    if (!NT_SUCCESS(Status)) return Status;

    /* One ACL with 3 ACEs, containing each one SID */
    DaclLength = sizeof(ACL) +
                 3 * sizeof(ACCESS_ALLOWED_ACE) +
                 RtlLengthSid(SeLocalSystemSid) +
                 RtlLengthSid(SeAliasAdminsSid) +
                 RtlLengthSid(SeWorldSid);

    /* Allocate space for the DACL */
    Dacl = ExAllocatePoolWithTag(PagedPool, DaclLength, 'lcaD');
    if (!Dacl) return STATUS_INSUFFICIENT_RESOURCES;

    /* Setup the ACL inside it */
    Status = RtlCreateAcl(Dacl, DaclLength, ACL_REVISION);
    if (!NT_SUCCESS(Status)) goto CleanUp;

    /* Add query rights for everyone */
    Status = RtlAddAccessAllowedAce(Dacl,
                                    ACL_REVISION,
                                    SYNCHRONIZE | EVENT_QUERY_STATE | READ_CONTROL,
                                    SeWorldSid);
    if (!NT_SUCCESS(Status)) goto CleanUp;

    /* Full rights for the admin */
    Status = RtlAddAccessAllowedAce(Dacl,
                                    ACL_REVISION,
                                    EVENT_ALL_ACCESS,
                                    SeAliasAdminsSid);
    if (!NT_SUCCESS(Status)) goto CleanUp;

    /* As well as full rights for the system */
    Status = RtlAddAccessAllowedAce(Dacl,
                                    ACL_REVISION,
                                    EVENT_ALL_ACCESS,
                                    SeLocalSystemSid);
    if (!NT_SUCCESS(Status)) goto CleanUp;

    /* Set this DACL inside the SD */
    Status = RtlSetDaclSecurityDescriptor(&SecurityDescriptor,
                                          TRUE,
                                          Dacl,
                                          FALSE);
    if (!NT_SUCCESS(Status)) goto CleanUp;

    /* Setup the event attributes, making sure it's a permanent one */
    InitializeObjectAttributes(&ObjectAttributes,
                               Name,
                               OBJ_KERNEL_HANDLE | OBJ_PERMANENT,
                               NULL,
                               &SecurityDescriptor);

    /* Create the event; on failure we simply fall through into CleanUp */
    Status = ZwCreateEvent(&EventHandle,
                           EVENT_ALL_ACCESS,
                           &ObjectAttributes,
                           NotificationEvent,
                           FALSE);
CleanUp:
    /* Free the DACL (the object manager made its own copy of the SD) */
    ExFreePoolWithTag(Dacl, 'lcaD');

    /* Check if this is the success path */
    if (NT_SUCCESS(Status))
    {
        /* Add a reference to the object, then close the handle we had;
         * the object stays alive because it was created OBJ_PERMANENT */
        Status = ObReferenceObjectByHandle(EventHandle,
                                           EVENT_MODIFY_STATE,
                                           ExEventObjectType,
                                           KernelMode,
                                           (PVOID*)Event,
                                           NULL);
        ZwClose (EventHandle);
    }

    /* Return status */
    return Status;
}
1313
1314 BOOLEAN
1315 NTAPI
1316 INIT_FUNCTION
1317 MiInitializeMemoryEvents(VOID)
1318 {
1319 UNICODE_STRING LowString = RTL_CONSTANT_STRING(L"\\KernelObjects\\LowMemoryCondition");
1320 UNICODE_STRING HighString = RTL_CONSTANT_STRING(L"\\KernelObjects\\HighMemoryCondition");
1321 UNICODE_STRING LowPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\LowPagedPoolCondition");
1322 UNICODE_STRING HighPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\HighPagedPoolCondition");
1323 UNICODE_STRING LowNonPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\LowNonPagedPoolCondition");
1324 UNICODE_STRING HighNonPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\HighNonPagedPoolCondition");
1325 NTSTATUS Status;
1326
1327 /* Check if we have a registry setting */
1328 if (MmLowMemoryThreshold)
1329 {
1330 /* Convert it to pages */
1331 MmLowMemoryThreshold *= (_1MB / PAGE_SIZE);
1332 }
1333 else
1334 {
1335 /* The low memory threshold is hit when we don't consider that we have "plenty" of free pages anymore */
1336 MmLowMemoryThreshold = MmPlentyFreePages;
1337
1338 /* More than one GB of memory? */
1339 if (MmNumberOfPhysicalPages > 0x40000)
1340 {
1341 /* Start at 32MB, and add another 16MB for each GB */
1342 MmLowMemoryThreshold = (32 * _1MB) / PAGE_SIZE;
1343 MmLowMemoryThreshold += ((MmNumberOfPhysicalPages - 0x40000) >> 7);
1344 }
1345 else if (MmNumberOfPhysicalPages > 0x8000)
1346 {
1347 /* For systems with > 128MB RAM, add another 4MB for each 128MB */
1348 MmLowMemoryThreshold += ((MmNumberOfPhysicalPages - 0x8000) >> 5);
1349 }
1350
1351 /* Don't let the minimum threshold go past 64MB */
1352 MmLowMemoryThreshold = min(MmLowMemoryThreshold, (64 * _1MB) / PAGE_SIZE);
1353 }
1354
1355 /* Check if we have a registry setting */
1356 if (MmHighMemoryThreshold)
1357 {
1358 /* Convert it into pages */
1359 MmHighMemoryThreshold *= (_1MB / PAGE_SIZE);
1360 }
1361 else
1362 {
1363 /* Otherwise, the default is three times the low memory threshold */
1364 MmHighMemoryThreshold = 3 * MmLowMemoryThreshold;
1365 ASSERT(MmHighMemoryThreshold > MmLowMemoryThreshold);
1366 }
1367
1368 /* Make sure high threshold is actually higher than the low */
1369 MmHighMemoryThreshold = max(MmHighMemoryThreshold, MmLowMemoryThreshold);
1370
1371 /* Create the memory events for all the thresholds */
1372 Status = MiCreateMemoryEvent(&LowString, &MiLowMemoryEvent);
1373 if (!NT_SUCCESS(Status)) return FALSE;
1374 Status = MiCreateMemoryEvent(&HighString, &MiHighMemoryEvent);
1375 if (!NT_SUCCESS(Status)) return FALSE;
1376 Status = MiCreateMemoryEvent(&LowPagedPoolString, &MiLowPagedPoolEvent);
1377 if (!NT_SUCCESS(Status)) return FALSE;
1378 Status = MiCreateMemoryEvent(&HighPagedPoolString, &MiHighPagedPoolEvent);
1379 if (!NT_SUCCESS(Status)) return FALSE;
1380 Status = MiCreateMemoryEvent(&LowNonPagedPoolString, &MiLowNonPagedPoolEvent);
1381 if (!NT_SUCCESS(Status)) return FALSE;
1382 Status = MiCreateMemoryEvent(&HighNonPagedPoolString, &MiHighNonPagedPoolEvent);
1383 if (!NT_SUCCESS(Status)) return FALSE;
1384
1385 /* Now setup the pool events */
1386 MiInitializePoolEvents();
1387
1388 /* Set the initial event state */
1389 MiNotifyMemoryEvents();
1390 return TRUE;
1391 }
1392
/**
 * Scans the HAL heap virtual range for valid small-page mappings whose
 * physical pages have no PFN entry (i.e. device/IO memory) and reports
 * them, since such mappings are not tracked for cache coherency.
 * Currently diagnostic-only: it prints a warning rather than recording
 * the mapping.
 */
VOID
NTAPI
INIT_FUNCTION
MiAddHalIoMappings(VOID)
{
    PVOID BaseAddress;
    PMMPDE PointerPde, LastPde;
    PMMPTE PointerPte;
    ULONG j;
    PFN_NUMBER PageFrameIndex;

    /* HAL Heap address -- should be on a PDE boundary */
    BaseAddress = (PVOID)MM_HAL_VA_START;
    ASSERT(MiAddressToPteOffset(BaseAddress) == 0);

    /* Check how many PDEs the heap has */
    PointerPde = MiAddressToPde(BaseAddress);
    LastPde = MiAddressToPde((PVOID)MM_HAL_VA_END);

    while (PointerPde <= LastPde)
    {
        /* Does the HAL own this mapping? (only small-page PDEs are walked;
         * large pages have no PTE table to scan) */
        if ((PointerPde->u.Hard.Valid == 1) &&
            (MI_IS_PAGE_LARGE(PointerPde) == FALSE))
        {
            /* Get the PTE for it and scan each page */
            PointerPte = MiAddressToPte(BaseAddress);
            for (j = 0 ; j < PTE_COUNT; j++)
            {
                /* Does the HAL own this page? */
                if (PointerPte->u.Hard.Valid == 1)
                {
                    /* Is the HAL using it for device or I/O mapped memory?
                     * Pages without a PFN entry are outside RAM we manage */
                    PageFrameIndex = PFN_FROM_PTE(PointerPte);
                    if (!MiGetPfnEntry(PageFrameIndex))
                    {
                        /* FIXME: For PAT, we need to track I/O cache attributes for coherency */
                        DPRINT1("HAL I/O Mapping at %p is unsafe\n", BaseAddress);
                    }
                }

                /* Move to the next page (BaseAddress tracks the PTE walk) */
                BaseAddress = (PVOID)((ULONG_PTR)BaseAddress + PAGE_SIZE);
                PointerPte++;
            }
        }
        else
        {
            /* Move to the next address (skip the whole PDE's VA range) */
            BaseAddress = (PVOID)((ULONG_PTR)BaseAddress + PDE_MAPPED_VA);
        }

        /* Move to the next PDE */
        PointerPde++;
    }
}
1449
/**
 * Debug dump of the ARM3 PFN database: walks every PFN entry, tallies
 * pages by location (active/free/other) and — when PFN tracing is built
 * in — by usage bucket, optionally printing one line per page.
 *
 * @param StatusOnly  TRUE to print only the summary totals, FALSE to
 *                    also pretty-print every page.
 *
 * Runs at HIGH_LEVEL so the database cannot change underneath the walk.
 */
VOID
NTAPI
MmDumpArmPfnDatabase(IN BOOLEAN StatusOnly)
{
    ULONG i;
    PMMPFN Pfn1;
    PCHAR Consumer = "Unknown";
    KIRQL OldIrql;
    ULONG ActivePages = 0, FreePages = 0, OtherPages = 0;
#if MI_TRACE_PFNS
    /* Per-usage page counters, indexed by MI_USAGE_* values */
    ULONG UsageBucket[MI_USAGE_FREE_PAGE + 1] = {0};
    PCHAR MI_USAGE_TEXT[MI_USAGE_FREE_PAGE + 1] =
    {
        "Not set",
        "Paged Pool",
        "Nonpaged Pool",
        "Nonpaged Pool Ex",
        "Kernel Stack",
        "Kernel Stack Ex",
        "System PTE",
        "VAD",
        "PEB/TEB",
        "Section",
        "Page Table",
        "Page Directory",
        "Old Page Table",
        "Driver Page",
        "Contiguous Alloc",
        "MDL",
        "Demand Zero",
        "Zero Loop",
        "Cache",
        "PFN Database",
        "Boot Driver",
        "Initial Memory",
        "Free Page"
    };
#endif
    //
    // Loop the PFN database
    //
    KeRaiseIrql(HIGH_LEVEL, &OldIrql);
    for (i = 0; i <= MmHighestPhysicalPage; i++)
    {
        Pfn1 = MiGetPfnEntry(i);
        if (!Pfn1) continue;
#if MI_TRACE_PFNS
        ASSERT(Pfn1->PfnUsage <= MI_USAGE_FREE_PAGE);
#endif
        //
        // Get the page location
        //
        switch (Pfn1->u3.e1.PageLocation)
        {
            case ActiveAndValid:

                Consumer = "Active and Valid";
                ActivePages++;
                break;

            case ZeroedPageList:

                Consumer = "Zero Page List";
                FreePages++;
                break;//continue;

            case FreePageList:

                Consumer = "Free Page List";
                FreePages++;
                break;//continue;

            default:

                Consumer = "Other (ASSERT!)";
                OtherPages++;
                break;
        }

#if MI_TRACE_PFNS
        /* Add into bucket */
        UsageBucket[Pfn1->PfnUsage]++;
#endif

        //
        // Pretty-print the page. Note: the preprocessor selects the last
        // two arguments of this single DbgPrint call.
        //
        if (!StatusOnly)
        DbgPrint("0x%08p:\t%20s\t(%04d.%04d)\t[%16s - %16s])\n",
                 i << PAGE_SHIFT,
                 Consumer,
                 Pfn1->u3.e2.ReferenceCount,
                 Pfn1->u2.ShareCount == LIST_HEAD ? 0xFFFF : Pfn1->u2.ShareCount,
#if MI_TRACE_PFNS
                 MI_USAGE_TEXT[Pfn1->PfnUsage],
                 Pfn1->ProcessName);
#else
                 "Page tracking",
                 "is disabled");
#endif
    }

    DbgPrint("Active:               %5d pages\t[%6d KB]\n", ActivePages,  (ActivePages    << PAGE_SHIFT) / 1024);
    DbgPrint("Free:                 %5d pages\t[%6d KB]\n", FreePages,    (FreePages      << PAGE_SHIFT) / 1024);
    DbgPrint("-----------------------------------------\n");
#if MI_TRACE_PFNS
    OtherPages =                                       UsageBucket[MI_USAGE_BOOT_DRIVER];
    DbgPrint("Boot Images:          %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
    OtherPages =                                       UsageBucket[MI_USAGE_DRIVER_PAGE];
    DbgPrint("System Drivers:       %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
    OtherPages =                                       UsageBucket[MI_USAGE_PFN_DATABASE];
    DbgPrint("PFN Database:         %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
    OtherPages =                                       UsageBucket[MI_USAGE_PAGE_TABLE] + UsageBucket[MI_USAGE_LEGACY_PAGE_DIRECTORY];
    DbgPrint("Page Tables:          %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
    OtherPages =                                       UsageBucket[MI_USAGE_NONPAGED_POOL] + UsageBucket[MI_USAGE_NONPAGED_POOL_EXPANSION];
    DbgPrint("NonPaged Pool:        %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
    OtherPages =                                       UsageBucket[MI_USAGE_PAGED_POOL];
    DbgPrint("Paged Pool:           %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
    OtherPages =                                       UsageBucket[MI_USAGE_KERNEL_STACK] + UsageBucket[MI_USAGE_KERNEL_STACK_EXPANSION];
    DbgPrint("Kernel Stack:         %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
    OtherPages =                                       UsageBucket[MI_USAGE_INIT_MEMORY];
    DbgPrint("Init Memory:          %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
    OtherPages =                                       UsageBucket[MI_USAGE_SECTION];
    DbgPrint("Sections:             %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
    OtherPages =                                       UsageBucket[MI_USAGE_CACHE];
    DbgPrint("Cache:                %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
#endif
    KeLowerIrql(OldIrql);
}
1579
1580 PPHYSICAL_MEMORY_DESCRIPTOR
1581 NTAPI
1582 INIT_FUNCTION
1583 MmInitializeMemoryLimits(IN PLOADER_PARAMETER_BLOCK LoaderBlock,
1584 IN PBOOLEAN IncludeType)
1585 {
1586 PLIST_ENTRY NextEntry;
1587 ULONG Run = 0, InitialRuns;
1588 PFN_NUMBER NextPage = -1, PageCount = 0;
1589 PPHYSICAL_MEMORY_DESCRIPTOR Buffer, NewBuffer;
1590 PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
1591
1592 //
1593 // Start with the maximum we might need
1594 //
1595 InitialRuns = MiNumberDescriptors;
1596
1597 //
1598 // Allocate the maximum we'll ever need
1599 //
1600 Buffer = ExAllocatePoolWithTag(NonPagedPool,
1601 sizeof(PHYSICAL_MEMORY_DESCRIPTOR) +
1602 sizeof(PHYSICAL_MEMORY_RUN) *
1603 (InitialRuns - 1),
1604 'lMmM');
1605 if (!Buffer) return NULL;
1606
1607 //
1608 // For now that's how many runs we have
1609 //
1610 Buffer->NumberOfRuns = InitialRuns;
1611
1612 //
1613 // Now loop through the descriptors again
1614 //
1615 NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
1616 while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
1617 {
1618 //
1619 // Grab each one, and check if it's one we should include
1620 //
1621 MdBlock = CONTAINING_RECORD(NextEntry,
1622 MEMORY_ALLOCATION_DESCRIPTOR,
1623 ListEntry);
1624 if ((MdBlock->MemoryType < LoaderMaximum) &&
1625 (IncludeType[MdBlock->MemoryType]))
1626 {
1627 //
1628 // Add this to our running total
1629 //
1630 PageCount += MdBlock->PageCount;
1631
1632 //
1633 // Check if the next page is described by the next descriptor
1634 //
1635 if (MdBlock->BasePage == NextPage)
1636 {
1637 //
1638 // Combine it into the same physical run
1639 //
1640 ASSERT(MdBlock->PageCount != 0);
1641 Buffer->Run[Run - 1].PageCount += MdBlock->PageCount;
1642 NextPage += MdBlock->PageCount;
1643 }
1644 else
1645 {
1646 //
1647 // Otherwise just duplicate the descriptor's contents
1648 //
1649 Buffer->Run[Run].BasePage = MdBlock->BasePage;
1650 Buffer->Run[Run].PageCount = MdBlock->PageCount;
1651 NextPage = Buffer->Run[Run].BasePage + Buffer->Run[Run].PageCount;
1652
1653 //
1654 // And in this case, increase the number of runs
1655 //
1656 Run++;
1657 }
1658 }
1659
1660 //
1661 // Try the next descriptor
1662 //
1663 NextEntry = MdBlock->ListEntry.Flink;
1664 }
1665
1666 //
1667 // We should not have been able to go past our initial estimate
1668 //
1669 ASSERT(Run <= Buffer->NumberOfRuns);
1670
1671 //
1672 // Our guess was probably exaggerated...
1673 //
1674 if (InitialRuns > Run)
1675 {
1676 //
1677 // Allocate a more accurately sized buffer
1678 //
1679 NewBuffer = ExAllocatePoolWithTag(NonPagedPool,
1680 sizeof(PHYSICAL_MEMORY_DESCRIPTOR) +
1681 sizeof(PHYSICAL_MEMORY_RUN) *
1682 (Run - 1),
1683 'lMmM');
1684 if (NewBuffer)
1685 {
1686 //
1687 // Copy the old buffer into the new, then free it
1688 //
1689 RtlCopyMemory(NewBuffer->Run,
1690 Buffer->Run,
1691 sizeof(PHYSICAL_MEMORY_RUN) * Run);
1692 ExFreePoolWithTag(Buffer, 'lMmM');
1693
1694 //
1695 // Now use the new buffer
1696 //
1697 Buffer = NewBuffer;
1698 }
1699 }
1700
1701 //
1702 // Write the final numbers, and return it
1703 //
1704 Buffer->NumberOfRuns = Run;
1705 Buffer->NumberOfPages = PageCount;
1706 return Buffer;
1707 }
1708
1709 VOID
1710 NTAPI
1711 INIT_FUNCTION
1712 MiBuildPagedPool(VOID)
1713 {
1714 PMMPTE PointerPte;
1715 PMMPDE PointerPde;
1716 MMPDE TempPde = ValidKernelPde;
1717 PFN_NUMBER PageFrameIndex;
1718 KIRQL OldIrql;
1719 SIZE_T Size;
1720 ULONG BitMapSize;
1721 #if (_MI_PAGING_LEVELS >= 3)
1722 MMPPE TempPpe = ValidKernelPpe;
1723 PMMPPE PointerPpe;
1724 #elif (_MI_PAGING_LEVELS == 2)
1725 MMPTE TempPte = ValidKernelPte;
1726
1727 //
1728 // Get the page frame number for the system page directory
1729 //
1730 PointerPte = MiAddressToPte(PDE_BASE);
1731 ASSERT(PD_COUNT == 1);
1732 MmSystemPageDirectory[0] = PFN_FROM_PTE(PointerPte);
1733
1734 //
1735 // Allocate a system PTE which will hold a copy of the page directory
1736 //
1737 PointerPte = MiReserveSystemPtes(1, SystemPteSpace);
1738 ASSERT(PointerPte);
1739 MmSystemPagePtes = MiPteToAddress(PointerPte);
1740
1741 //
1742 // Make this system PTE point to the system page directory.
1743 // It is now essentially double-mapped. This will be used later for lazy
1744 // evaluation of PDEs accross process switches, similarly to how the Global
1745 // page directory array in the old ReactOS Mm is used (but in a less hacky
1746 // way).
1747 //
1748 TempPte = ValidKernelPte;
1749 ASSERT(PD_COUNT == 1);
1750 TempPte.u.Hard.PageFrameNumber = MmSystemPageDirectory[0];
1751 MI_WRITE_VALID_PTE(PointerPte, TempPte);
1752 #endif
1753 //
1754 // Let's get back to paged pool work: size it up.
1755 // By default, it should be twice as big as nonpaged pool.
1756 //
1757 MmSizeOfPagedPoolInBytes = 2 * MmMaximumNonPagedPoolInBytes;
1758 if (MmSizeOfPagedPoolInBytes > ((ULONG_PTR)MmNonPagedSystemStart -
1759 (ULONG_PTR)MmPagedPoolStart))
1760 {
1761 //
1762 // On the other hand, we have limited VA space, so make sure that the VA
1763 // for paged pool doesn't overflow into nonpaged pool VA. Otherwise, set
1764 // whatever maximum is possible.
1765 //
1766 MmSizeOfPagedPoolInBytes = (ULONG_PTR)MmNonPagedSystemStart -
1767 (ULONG_PTR)MmPagedPoolStart;
1768 }
1769
1770 //
1771 // Get the size in pages and make sure paged pool is at least 32MB.
1772 //
1773 Size = MmSizeOfPagedPoolInBytes;
1774 if (Size < MI_MIN_INIT_PAGED_POOLSIZE) Size = MI_MIN_INIT_PAGED_POOLSIZE;
1775 Size = BYTES_TO_PAGES(Size);
1776
1777 //
1778 // Now check how many PTEs will be required for these many pages.
1779 //
1780 Size = (Size + (1024 - 1)) / 1024;
1781
1782 //
1783 // Recompute the page-aligned size of the paged pool, in bytes and pages.
1784 //
1785 MmSizeOfPagedPoolInBytes = Size * PAGE_SIZE * 1024;
1786 MmSizeOfPagedPoolInPages = MmSizeOfPagedPoolInBytes >> PAGE_SHIFT;
1787
1788 //
1789 // Let's be really sure this doesn't overflow into nonpaged system VA
1790 //
1791 ASSERT((MmSizeOfPagedPoolInBytes + (ULONG_PTR)MmPagedPoolStart) <=
1792 (ULONG_PTR)MmNonPagedSystemStart);
1793
1794 //
1795 // This is where paged pool ends
1796 //
1797 MmPagedPoolEnd = (PVOID)(((ULONG_PTR)MmPagedPoolStart +
1798 MmSizeOfPagedPoolInBytes) - 1);
1799
1800 //
1801 // Lock the PFN database
1802 //
1803 OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
1804
1805 #if (_MI_PAGING_LEVELS >= 3)
1806 /* On these systems, there's no double-mapping, so instead, the PPEs
1807 * are setup to span the entire paged pool area, so there's no need for the
1808 * system PD */
1809 for (PointerPpe = MiAddressToPpe(MmPagedPoolStart);
1810 PointerPpe <= MiAddressToPpe(MmPagedPoolEnd);
1811 PointerPpe++)
1812 {
1813 /* Check if the PPE is already valid */
1814 if (!PointerPpe->u.Hard.Valid)
1815 {
1816 /* It is not, so map a fresh zeroed page */
1817 TempPpe.u.Hard.PageFrameNumber = MiRemoveZeroPage(0);
1818 MI_WRITE_VALID_PPE(PointerPpe, TempPpe);
1819 }
1820 }
1821 #endif
1822
1823 //
1824 // So now get the PDE for paged pool and zero it out
1825 //
1826 PointerPde = MiAddressToPde(MmPagedPoolStart);
1827 RtlZeroMemory(PointerPde,
1828 (1 + MiAddressToPde(MmPagedPoolEnd) - PointerPde) * sizeof(MMPDE));
1829
1830 //
1831 // Next, get the first and last PTE
1832 //
1833 PointerPte = MiAddressToPte(MmPagedPoolStart);
1834 MmPagedPoolInfo.FirstPteForPagedPool = PointerPte;
1835 MmPagedPoolInfo.LastPteForPagedPool = MiAddressToPte(MmPagedPoolEnd);
1836
1837 /* Allocate a page and map the first paged pool PDE */
1838 MI_SET_USAGE(MI_USAGE_PAGED_POOL);
1839 MI_SET_PROCESS2("Kernel");
1840 PageFrameIndex = MiRemoveZeroPage(0);
1841 TempPde.u.Hard.PageFrameNumber = PageFrameIndex;
1842 MI_WRITE_VALID_PDE(PointerPde, TempPde);
1843 #if (_MI_PAGING_LEVELS >= 3)
1844 /* Use the PPE of MmPagedPoolStart that was setup above */
1845 // Bla = PFN_FROM_PTE(PpeAddress(MmPagedPool...));
1846
1847 /* Initialize the PFN entry for it */
1848 MiInitializePfnForOtherProcess(PageFrameIndex,
1849 (PMMPTE)PointerPde,
1850 PFN_FROM_PTE(MiAddressToPpe(MmPagedPoolStart)));
1851 #else
1852 /* Do it this way */
1853 // Bla = MmSystemPageDirectory[(PointerPde - (PMMPTE)PDE_BASE) / PDE_COUNT]
1854
1855 /* Initialize the PFN entry for it */
1856 MiInitializePfnForOtherProcess(PageFrameIndex,
1857 (PMMPTE)PointerPde,
1858 MmSystemPageDirectory[(PointerPde - (PMMPDE)PDE_BASE) / PDE_COUNT]);
1859 #endif
1860
1861 //
1862 // Release the PFN database lock
1863 //
1864 KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
1865
1866 //
1867 // We only have one PDE mapped for now... at fault time, additional PDEs
1868 // will be allocated to handle paged pool growth. This is where they'll have
1869 // to start.
1870 //
1871 MmPagedPoolInfo.NextPdeForPagedPoolExpansion = PointerPde + 1;
1872
1873 //
1874 // We keep track of each page via a bit, so check how big the bitmap will
1875 // have to be (make sure to align our page count such that it fits nicely
1876 // into a 4-byte aligned bitmap.
1877 //
1878 // We'll also allocate the bitmap header itself part of the same buffer.
1879 //
1880 Size = Size * 1024;
1881 ASSERT(Size == MmSizeOfPagedPoolInPages);
1882 BitMapSize = (ULONG)Size;
1883 Size = sizeof(RTL_BITMAP) + (((Size + 31) / 32) * sizeof(ULONG));
1884
1885 //
1886 // Allocate the allocation bitmap, which tells us which regions have not yet
1887 // been mapped into memory
1888 //
1889 MmPagedPoolInfo.PagedPoolAllocationMap = ExAllocatePoolWithTag(NonPagedPool,
1890 Size,
1891 ' mM');
1892 ASSERT(MmPagedPoolInfo.PagedPoolAllocationMap);
1893
1894 //
1895 // Initialize it such that at first, only the first page's worth of PTEs is
1896 // marked as allocated (incidentially, the first PDE we allocated earlier).
1897 //
1898 RtlInitializeBitMap(MmPagedPoolInfo.PagedPoolAllocationMap,
1899 (PULONG)(MmPagedPoolInfo.PagedPoolAllocationMap + 1),
1900 BitMapSize);
1901 RtlSetAllBits(MmPagedPoolInfo.PagedPoolAllocationMap);
1902 RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap, 0, 1024);
1903
1904 //
1905 // We have a second bitmap, which keeps track of where allocations end.
1906 // Given the allocation bitmap and a base address, we can therefore figure
1907 // out which page is the last page of that allocation, and thus how big the
1908 // entire allocation is.
1909 //
1910 MmPagedPoolInfo.EndOfPagedPoolBitmap = ExAllocatePoolWithTag(NonPagedPool,
1911 Size,
1912 ' mM');
1913 ASSERT(MmPagedPoolInfo.EndOfPagedPoolBitmap);
1914 RtlInitializeBitMap(MmPagedPoolInfo.EndOfPagedPoolBitmap,
1915 (PULONG)(MmPagedPoolInfo.EndOfPagedPoolBitmap + 1),
1916 BitMapSize);
1917
1918 //
1919 // Since no allocations have been made yet, there are no bits set as the end
1920 //
1921 RtlClearAllBits(MmPagedPoolInfo.EndOfPagedPoolBitmap);
1922
1923 //
1924 // Initialize paged pool.
1925 //
1926 InitializePool(PagedPool, 0);
1927
1928 /* Initialize special pool */
1929 MiInitializeSpecialPool();
1930
1931 /* Default low threshold of 30MB or one fifth of paged pool */
1932 MiLowPagedPoolThreshold = (30 * _1MB) >> PAGE_SHIFT;
1933 MiLowPagedPoolThreshold = min(MiLowPagedPoolThreshold, Size / 5);
1934
1935 /* Default high threshold of 60MB or 25% */
1936 MiHighPagedPoolThreshold = (60 * _1MB) >> PAGE_SHIFT;
1937 MiHighPagedPoolThreshold = min(MiHighPagedPoolThreshold, (Size * 2) / 5);
1938 ASSERT(MiLowPagedPoolThreshold < MiHighPagedPoolThreshold);
1939
1940 /* Setup the global session space */
1941 MiInitializeSystemSpaceMap(NULL);
1942 }
1943
1944 VOID
1945 NTAPI
1946 INIT_FUNCTION
1947 MiDbgDumpMemoryDescriptors(VOID)
1948 {
1949 PLIST_ENTRY NextEntry;
1950 PMEMORY_ALLOCATION_DESCRIPTOR Md;
1951 PFN_NUMBER TotalPages = 0;
1952 PCHAR
1953 MemType[] =
1954 {
1955 "ExceptionBlock ",
1956 "SystemBlock ",
1957 "Free ",
1958 "Bad ",
1959 "LoadedProgram ",
1960 "FirmwareTemporary ",
1961 "FirmwarePermanent ",
1962 "OsloaderHeap ",
1963 "OsloaderStack ",
1964 "SystemCode ",
1965 "HalCode ",
1966 "BootDriver ",
1967 "ConsoleInDriver ",
1968 "ConsoleOutDriver ",
1969 "StartupDpcStack ",
1970 "StartupKernelStack",
1971 "StartupPanicStack ",
1972 "StartupPcrPage ",
1973 "StartupPdrPage ",
1974 "RegistryData ",
1975 "MemoryData ",
1976 "NlsData ",
1977 "SpecialMemory ",
1978 "BBTMemory ",
1979 "LoaderReserve ",
1980 "LoaderXIPRom "
1981 };
1982
1983 DPRINT1("Base\t\tLength\t\tType\n");
1984 for (NextEntry = KeLoaderBlock->MemoryDescriptorListHead.Flink;
1985 NextEntry != &KeLoaderBlock->MemoryDescriptorListHead;
1986 NextEntry = NextEntry->Flink)
1987 {
1988 Md = CONTAINING_RECORD(NextEntry, MEMORY_ALLOCATION_DESCRIPTOR, ListEntry);
1989 DPRINT1("%08lX\t%08lX\t%s\n", Md->BasePage, Md->PageCount, MemType[Md->MemoryType]);
1990 TotalPages += Md->PageCount;
1991 }
1992
1993 DPRINT1("Total: %08lX (%d MB)\n", (ULONG)TotalPages, (ULONG)(TotalPages * PAGE_SIZE) / 1024 / 1024);
1994 }
1995
/*
 * MmArmInitSystem - Boot-time initialization of the ARM3 (NT-style)
 * memory manager.
 *
 * Phase:       Initialization phase. All of the work below runs only when
 *              Phase == 0; any other phase falls straight through to the
 *              success return.
 * LoaderBlock: Loader parameter block describing physical memory runs and
 *              the boot loader's allocations.
 *
 * Phase 0 sets up (in order): memory-type exclusions, descriptor scanning,
 * temporary low/high-memory events, the user/kernel address split and the
 * highest user PTE/PDE(/PPE/PXE), boot image size rounding, session space,
 * standby lists, Mm locks/events, system PTE tuning, commit limits,
 * allocation fragment and large-stack sizing, color info, the PFN database
 * sizing, machine-dependent init, the physical memory block and PFN bitmap,
 * system-size classification, SharedUserData product fields, resident page
 * charges, paged pool, and the loaded module list.
 *
 * Returns TRUE on success, or FALSE if charging the system cache working
 * set minimum leaves no resident pages available.
 */
1996 BOOLEAN
1997 NTAPI
1998 INIT_FUNCTION
1999 MmArmInitSystem(IN ULONG Phase,
2000                 IN PLOADER_PARAMETER_BLOCK LoaderBlock)
2001 {
2002     ULONG i;
2003     BOOLEAN IncludeType[LoaderMaximum];
2004     PVOID Bitmap;
2005     PPHYSICAL_MEMORY_RUN Run;
2006     PFN_NUMBER PageCount;
2007 #if DBG
2008     ULONG j;
2009     PMMPTE PointerPte, TestPte;
2010     MMPTE TempPte;
2011 #endif
2012
2013     /* Dump memory descriptors */
2014     if (MiDbgEnableMdDump) MiDbgDumpMemoryDescriptors();
2015
2016     //
2017     // Instantiate memory that we don't consider RAM/usable
2018     // We use the same exclusions that Windows does, in order to try to be
2019     // compatible with WinLDR-style booting
2020     //
2021     for (i = 0; i < LoaderMaximum; i++) IncludeType[i] = TRUE;
2022     IncludeType[LoaderBad] = FALSE;
2023     IncludeType[LoaderFirmwarePermanent] = FALSE;
2024     IncludeType[LoaderSpecialMemory] = FALSE;
2025     IncludeType[LoaderBBTMemory] = FALSE;
2026     if (Phase == 0)
2027     {
2028         /* Count physical pages on the system */
2029         MiScanMemoryDescriptors(LoaderBlock);
2030
2031         /* Initialize the phase 0 temporary event */
2032         KeInitializeEvent(&MiTempEvent, NotificationEvent, FALSE);
2033
2034         /* Set all the events to use the temporary event for now */
2035         MiLowMemoryEvent = &MiTempEvent;
2036         MiHighMemoryEvent = &MiTempEvent;
2037         MiLowPagedPoolEvent = &MiTempEvent;
2038         MiHighPagedPoolEvent = &MiTempEvent;
2039         MiLowNonPagedPoolEvent = &MiTempEvent;
2040         MiHighNonPagedPoolEvent = &MiTempEvent;
2041
2042         //
2043         // Define the basic user vs. kernel address space separation
2044         //
2045         MmSystemRangeStart = (PVOID)MI_DEFAULT_SYSTEM_RANGE_START;
2046         MmUserProbeAddress = (ULONG_PTR)MI_HIGHEST_USER_ADDRESS;
2047         MmHighestUserAddress = (PVOID)MI_HIGHEST_USER_ADDRESS;
2048
2049         /* Highest PTE and PDE based on the addresses above */
2050         MiHighestUserPte = MiAddressToPte(MmHighestUserAddress);
2051         MiHighestUserPde = MiAddressToPde(MmHighestUserAddress);
2052 #if (_MI_PAGING_LEVELS >= 3)
2053         MiHighestUserPpe = MiAddressToPpe(MmHighestUserAddress);
2054 #if (_MI_PAGING_LEVELS >= 4)
2055         MiHighestUserPxe = MiAddressToPxe(MmHighestUserAddress);
2056 #endif
2057 #endif
2058         //
2059         // Get the size of the boot loader's image allocations and then round
2060         // that region up to a PDE size, so that any PDEs we might create for
2061         // whatever follows are separate from the PDEs that boot loader might've
2062         // already created (and later, we can blow all that away if we want to).
2063         //
2064         MmBootImageSize = KeLoaderBlock->Extension->LoaderPagesSpanned;
2065         MmBootImageSize *= PAGE_SIZE;
2066         MmBootImageSize = (MmBootImageSize + PDE_MAPPED_VA - 1) & ~(PDE_MAPPED_VA - 1);
2067         ASSERT((MmBootImageSize % PDE_MAPPED_VA) == 0);
2068
2069         /* Initialize session space address layout */
2070         MiInitializeSessionSpaceLayout();
2071
2072         /* Set the based section highest address */
2073         MmHighSectionBase = (PVOID)((ULONG_PTR)MmHighestUserAddress - 0x800000);
2074
2075 #if DBG
2076         /* The subsection PTE format depends on things being 8-byte aligned */
2077         ASSERT((sizeof(CONTROL_AREA) % 8) == 0);
2078         ASSERT((sizeof(SUBSECTION) % 8) == 0);
2079
2080         /* Prototype PTEs are assumed to be in paged pool, so check if the math works */
2081         PointerPte = (PMMPTE)MmPagedPoolStart;
2082         MI_MAKE_PROTOTYPE_PTE(&TempPte, PointerPte);
2083         TestPte = MiProtoPteToPte(&TempPte);
2084         ASSERT(PointerPte == TestPte);
2085
2086         /* Try the last nonpaged pool address */
2087         PointerPte = (PMMPTE)MI_NONPAGED_POOL_END;
2088         MI_MAKE_PROTOTYPE_PTE(&TempPte, PointerPte);
2089         TestPte = MiProtoPteToPte(&TempPte);
2090         ASSERT(PointerPte == TestPte);
2091
2092         /* Try a bunch of random addresses near the end of the address space */
2093         PointerPte = (PMMPTE)0xFFFC8000;
2094         for (j = 0; j < 20; j += 1)
2095         {
2096             MI_MAKE_PROTOTYPE_PTE(&TempPte, PointerPte);
2097             TestPte = MiProtoPteToPte(&TempPte);
2098             ASSERT(PointerPte == TestPte);
2099             PointerPte++;
2100         }
2101
2102         /* Subsection PTEs are always in nonpaged pool, pick a random address to try */
2103         PointerPte = (PMMPTE)0xFFAACBB8;
2104         MI_MAKE_SUBSECTION_PTE(&TempPte, PointerPte);
2105         TestPte = MiSubsectionPteToSubsection(&TempPte);
2106         ASSERT(PointerPte == TestPte);
2107 #endif
2108
2109         /* Loop all 8 standby lists */
2110         for (i = 0; i < 8; i++)
2111         {
2112             /* Initialize them */
2113             MmStandbyPageListByPriority[i].Total = 0;
2114             MmStandbyPageListByPriority[i].ListName = StandbyPageList;
2115             MmStandbyPageListByPriority[i].Flink = MM_EMPTY_LIST;
2116             MmStandbyPageListByPriority[i].Blink = MM_EMPTY_LIST;
2117         }
2118
2119         /* Initialize the user mode image list */
2120         InitializeListHead(&MmLoadedUserImageList);
2121
2122         /* Initialize the paged pool mutex and the section commit mutex */
2123         KeInitializeGuardedMutex(&MmPagedPoolMutex);
2124         KeInitializeGuardedMutex(&MmSectionCommitMutex);
2125         KeInitializeGuardedMutex(&MmSectionBasedMutex);
2126
2127         /* Initialize the Loader Lock */
2128         KeInitializeMutant(&MmSystemLoadLock, FALSE);
2129
2130         /* Set the zero page event */
2131         KeInitializeEvent(&MmZeroingPageEvent, SynchronizationEvent, FALSE);
2132         MmZeroingPageThreadActive = FALSE;
2133
2134         /* Initialize the dead stack S-LIST */
2135         InitializeSListHead(&MmDeadStackSListHead);
2136
2137         //
2138         // Check if this is a machine with less than 19MB of RAM
2139         //
2140         PageCount = MmNumberOfPhysicalPages;
2141         if (PageCount < MI_MIN_PAGES_FOR_SYSPTE_TUNING)
2142         {
2143             //
2144             // Use the very minimum of system PTEs
2145             //
2146             MmNumberOfSystemPtes = 7000;
2147         }
2148         else
2149         {
2150             //
2151             // Use the default
2152             //
2153             MmNumberOfSystemPtes = 11000;
2154             if (PageCount > MI_MIN_PAGES_FOR_SYSPTE_BOOST)
2155             {
2156                 //
2157                 // Double the amount of system PTEs
2158                 //
2159                 MmNumberOfSystemPtes <<= 1;
2160             }
2161             if (PageCount > MI_MIN_PAGES_FOR_SYSPTE_BOOST_BOOST)
2162             {
2163                 //
2164                 // Double the amount of system PTEs
2165                 //
2166                 MmNumberOfSystemPtes <<= 1;
2167             }
2168         }
2169
2170         DPRINT("System PTE count has been tuned to %d (%d bytes)\n",
2171                MmNumberOfSystemPtes, MmNumberOfSystemPtes * PAGE_SIZE);
2172
2173         /* Initialize the working set lock */
2174         ExInitializePushLock(&MmSystemCacheWs.WorkingSetMutex);
2175
2176         /* Set commit limit */
2177         MmTotalCommitLimit = 2 * _1GB;
2178         MmTotalCommitLimitMaximum = MmTotalCommitLimit;
2179
2180         /* Has the allocation fragment been setup? */
2181         if (!MmAllocationFragment)
2182         {
2183             /* Use the default value */
2184             MmAllocationFragment = MI_ALLOCATION_FRAGMENT;
2185             if (PageCount < ((256 * _1MB) / PAGE_SIZE))
2186             {
2187                 /* On memory systems with less than 256MB, divide by 4 */
2188                 MmAllocationFragment = MI_ALLOCATION_FRAGMENT / 4;
2189             }
2190             else if (PageCount < (_1GB / PAGE_SIZE))
2191             {
2192                 /* On systems with less than 1GB, divide by 2 */
2193                 MmAllocationFragment = MI_ALLOCATION_FRAGMENT / 2;
2194             }
2195         }
2196         else
2197         {
2198             /* Convert from 1KB fragments to pages */
2199             MmAllocationFragment *= _1KB;
2200             MmAllocationFragment = ROUND_TO_PAGES(MmAllocationFragment);
2201
2202             /* Don't let it past the maximum */
2203             MmAllocationFragment = min(MmAllocationFragment,
2204                                        MI_MAX_ALLOCATION_FRAGMENT);
2205
2206             /* Don't let it too small either */
2207             MmAllocationFragment = max(MmAllocationFragment,
2208                                        MI_MIN_ALLOCATION_FRAGMENT);
2209         }
2210
2211         /* Check for kernel stack size that's too big */
2212         if (MmLargeStackSize > (KERNEL_LARGE_STACK_SIZE / _1KB))
2213         {
2214             /* Sanitize to default value */
2215             MmLargeStackSize = KERNEL_LARGE_STACK_SIZE;
2216         }
2217         else
2218         {
2219             /* Take the registry setting, and convert it into bytes */
2220             MmLargeStackSize *= _1KB;
2221
2222             /* Now align it to a page boundary */
2223             MmLargeStackSize = PAGE_ROUND_UP(MmLargeStackSize);
2224
2225             /* Sanity checks */
2226             ASSERT(MmLargeStackSize <= KERNEL_LARGE_STACK_SIZE);
2227             ASSERT((MmLargeStackSize & (PAGE_SIZE - 1)) == 0);
2228
2229             /* Make sure it's not too low */
2230             if (MmLargeStackSize < KERNEL_STACK_SIZE) MmLargeStackSize = KERNEL_STACK_SIZE;
2231         }
2232
2233         /* Compute color information (L2 cache-separated paging lists) */
2234         MiComputeColorInformation();
2235
2236         // Calculate the number of bytes for the PFN database
2237         // then add the color tables and convert to pages
2238         MxPfnAllocation = (MmHighestPhysicalPage + 1) * sizeof(MMPFN);
2239         MxPfnAllocation += (MmSecondaryColors * sizeof(MMCOLOR_TABLES) * 2);
2240         MxPfnAllocation >>= PAGE_SHIFT;
2241
2242         // We have to add one to the count here, because in the process of
2243         // shifting down to the page size, we actually ended up getting the
2244         // lower aligned size (so say, 0x5FFFF bytes is now 0x5F pages).
2245         // Later on, we'll shift this number back into bytes, which would cause
2246         // us to end up with only 0x5F000 bytes -- when we actually want to have
2247         // 0x60000 bytes.
2248         MxPfnAllocation++;
2249
2250         /* Initialize the platform-specific parts */
2251         MiInitMachineDependent(LoaderBlock);
2252
2253         //
2254         // Build the physical memory block
2255         //
2256         MmPhysicalMemoryBlock = MmInitializeMemoryLimits(LoaderBlock,
2257                                                          IncludeType);
2258
2259         //
2260         // Allocate enough buffer for the PFN bitmap
2261         // Align it up to a 32-bit boundary
2262         //
2263         Bitmap = ExAllocatePoolWithTag(NonPagedPool,
2264                                        (((MmHighestPhysicalPage + 1) + 31) / 32) * 4,
2265                                        ' mM');
2266         if (!Bitmap)
2267         {
2268             //
2269             // This is critical
2270             //
2271             KeBugCheckEx(INSTALL_MORE_MEMORY,
2272                          MmNumberOfPhysicalPages,
2273                          MmLowestPhysicalPage,
2274                          MmHighestPhysicalPage,
2275                          0x101);
2276         }
2277
2278         //
2279         // Initialize it and clear all the bits to begin with
2280         //
2281         RtlInitializeBitMap(&MiPfnBitMap,
2282                             Bitmap,
2283                             (ULONG)MmHighestPhysicalPage + 1);
2284         RtlClearAllBits(&MiPfnBitMap);
2285
2286         //
2287         // Loop physical memory runs
2288         //
2289         for (i = 0; i < MmPhysicalMemoryBlock->NumberOfRuns; i++)
2290         {
2291             //
2292             // Get the run
2293             //
2294             Run = &MmPhysicalMemoryBlock->Run[i];
2295             DPRINT("PHYSICAL RAM [0x%08p to 0x%08p]\n",
2296                    Run->BasePage << PAGE_SHIFT,
2297                    (Run->BasePage + Run->PageCount) << PAGE_SHIFT);
2298
2299             //
2300             // Make sure it has pages inside it
2301             //
2302             if (Run->PageCount)
2303             {
2304                 //
2305                 // Set the bits in the PFN bitmap
2306                 //
2307                 RtlSetBits(&MiPfnBitMap, (ULONG)Run->BasePage, (ULONG)Run->PageCount);
2308             }
2309         }
2310
2311         /* Look for large page cache entries that need caching */
2312         MiSyncCachedRanges();
2313
2314         /* Loop for HAL Heap I/O device mappings that need coherency tracking */
2315         MiAddHalIoMappings();
2316
2317         /* Set the initial resident page count */
         /* NOTE(review): 32 pages are held back here as a cushion --
            presumably reserved for early boot-time needs; confirm the
            rationale for this magic constant */
2318         MmResidentAvailablePages = MmAvailablePages - 32;
2319
2320         /* Initialize large page structures on PAE/x64, and MmProcessList on x86 */
2321         MiInitializeLargePageSupport();
2322
2323         /* Check if the registry says any drivers should be loaded with large pages */
2324         MiInitializeDriverLargePageList();
2325
2326         /* Relocate the boot drivers into system PTE space and fixup their PFNs */
2327         MiReloadBootLoadedDrivers(LoaderBlock);
2328
2329         /* FIXME: Call out into Driver Verifier for initialization  */
2330
2331         /* Check how many pages the system has */
2332         if (MmNumberOfPhysicalPages <= ((13 * _1MB) / PAGE_SIZE))
2333         {
2334             /* Set small system */
2335             MmSystemSize = MmSmallSystem;
2336             MmMaximumDeadKernelStacks = 0;
2337         }
2338         else if (MmNumberOfPhysicalPages <= ((19 * _1MB) / PAGE_SIZE))
2339         {
2340             /* Set small system and add 100 pages for the cache */
2341             MmSystemSize = MmSmallSystem;
2342             MmSystemCacheWsMinimum += 100;
2343             MmMaximumDeadKernelStacks = 2;
2344         }
2345         else
2346         {
2347             /* Set medium system and add 400 pages for the cache */
2348             MmSystemSize = MmMediumSystem;
2349             MmSystemCacheWsMinimum += 400;
2350             MmMaximumDeadKernelStacks = 5;
2351         }
2352
2353         /* Check for less than 24MB */
2354         if (MmNumberOfPhysicalPages < ((24 * _1MB) / PAGE_SIZE))
2355         {
2356             /* No more than 32 pages */
2357             MmSystemCacheWsMinimum = 32;
2358         }
2359
2360         /* Check for more than 32MB */
2361         if (MmNumberOfPhysicalPages >= ((32 * _1MB) / PAGE_SIZE))
2362         {
2363             /* Check for product type being "Wi" for WinNT */
2364             if (MmProductType == '\0i\0W')
2365             {
2366                 /* Then this is a large system */
2367                 MmSystemSize = MmLargeSystem;
2368             }
2369             else
2370             {
2371                 /* For servers, we need 64MB to consider this as being large */
2372                 if (MmNumberOfPhysicalPages >= ((64 * _1MB) / PAGE_SIZE))
2373                 {
2374                     /* Set it as large */
2375                     MmSystemSize = MmLargeSystem;
2376                 }
2377             }
2378         }
2379
2380         /* Check for more than 33 MB */
2381         if (MmNumberOfPhysicalPages > ((33 * _1MB) / PAGE_SIZE))
2382         {
2383             /* Add another 500 pages to the cache */
2384             MmSystemCacheWsMinimum += 500;
2385         }
2386
2387         /* Now setup the shared user data fields */
2388         ASSERT(SharedUserData->NumberOfPhysicalPages == 0);
2389         SharedUserData->NumberOfPhysicalPages = MmNumberOfPhysicalPages;
2390         SharedUserData->LargePageMinimum = 0;
2391
2392         /* Check for workstation (Wi for WinNT) */
2393         if (MmProductType == '\0i\0W')
2394         {
2395             /* Set Windows NT Workstation product type */
2396             SharedUserData->NtProductType = NtProductWinNt;
2397             MmProductType = 0;
2398         }
2399         else
2400         {
2401             /* Check for LanMan server (La for LanmanNT) */
2402             if (MmProductType == '\0a\0L')
2403             {
2404                 /* This is a domain controller */
2405                 SharedUserData->NtProductType = NtProductLanManNt;
2406             }
2407             else
2408             {
2409                 /* Otherwise it must be a normal server (Se for ServerNT) */
2410                 SharedUserData->NtProductType = NtProductServer;
2411             }
2412
2413             /* Set the product type, and make the system more aggressive with low memory */
2414             MmProductType = 1;
2415             MmMinimumFreePages = 81;
2416         }
2417
2418         /* Update working set tuning parameters */
2419         MiAdjustWorkingSetManagerParameters(!MmProductType);
2420
2421         /* Finetune the page count by removing working set and NP expansion */
2422         MmResidentAvailablePages -= MiExpansionPoolPagesInitialCharge;
2423         MmResidentAvailablePages -= MmSystemCacheWsMinimum;
2424         MmResidentAvailableAtInit = MmResidentAvailablePages;
         /* NOTE(review): this guard only fires if MmResidentAvailablePages
            is a signed type -- verify, since the subtractions above would
            otherwise wrap to a huge unsigned value instead of going
            negative */
2425         if (MmResidentAvailablePages <= 0)
2426         {
2427             /* This should not happen */
2428             DPRINT1("System cache working set too big\n");
2429             return FALSE;
2430         }
2431
2432         /* Initialize the system cache */
2433         //MiInitializeSystemCache(MmSystemCacheWsMinimum, MmAvailablePages);
2434
2435         /* Update the commit limit */
2436         MmTotalCommitLimit = MmAvailablePages;
2437         if (MmTotalCommitLimit > 1024) MmTotalCommitLimit -= 1024;
2438         MmTotalCommitLimitMaximum = MmTotalCommitLimit;
2439
2440         /* Size up paged pool and build the shadow system page directory */
2441         MiBuildPagedPool();
2442
2443         /* Debugger physical memory support is now ready to be used */
2444         MmDebugPte = MiAddressToPte(MiDebugMapping);
2445
2446         /* Initialize the loaded module list */
2447         MiInitializeLoadedModuleList(LoaderBlock);
2448     }
2449
2450     //
2451     // Always return success for now
2452     // (no work is performed for phases other than 0 here yet)
2453     //
2454     return TRUE;
2455 }
2455
2456 /* EOF */