Sync with trunk r58740.
[reactos.git] / ntoskrnl / mm / ARM3 / mminit.c
1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: BSD - See COPYING.ARM in the top level directory
4 * FILE: ntoskrnl/mm/ARM3/mminit.c
5 * PURPOSE: ARM Memory Manager Initialization
6 * PROGRAMMERS: ReactOS Portable Systems Group
7 */
8
9 /* INCLUDES *******************************************************************/
10
11 #include <ntoskrnl.h>
12 #define NDEBUG
13 #include <debug.h>
14
15 #define MODULE_INVOLVED_IN_ARM3
16 #include "miarm.h"
17 #undef MmSystemRangeStart
18
19 /* GLOBALS ********************************************************************/
20
21 //
22 // These are all registry-configurable, but by default, the memory manager will
23 // figure out the most appropriate values.
24 //
25 ULONG MmMaximumNonPagedPoolPercent;
26 SIZE_T MmSizeOfNonPagedPoolInBytes;
27 SIZE_T MmMaximumNonPagedPoolInBytes;
28
29 /* Some of the same values, in pages */
30 PFN_NUMBER MmMaximumNonPagedPoolInPages;
31
32 //
33 // These numbers describe the discrete equation components of the nonpaged
34 // pool sizing algorithm.
35 //
36 // They are described on http://support.microsoft.com/default.aspx/kb/126402/ja
37 // along with the algorithm that uses them, which is implemented later below.
38 //
39 SIZE_T MmMinimumNonPagedPoolSize = 256 * 1024;
40 ULONG MmMinAdditionNonPagedPoolPerMb = 32 * 1024;
41 SIZE_T MmDefaultMaximumNonPagedPool = 1024 * 1024;
42 ULONG MmMaxAdditionNonPagedPoolPerMb = 400 * 1024;
43
44 //
45 // The memory layout (and especially variable names) of the NT kernel mode
46 // components can be a bit hard to twig, especially when it comes to the non
47 // paged area.
48 //
49 // There are really two components to the non-paged pool:
50 //
51 // - The initial nonpaged pool, sized dynamically up to a maximum.
52 // - The expansion nonpaged pool, sized dynamically up to a maximum.
53 //
// The initial nonpaged pool is physically contiguous for performance, and
55 // immediately follows the PFN database, typically sharing the same PDE. It is
56 // a very small resource (32MB on a 1GB system), and capped at 128MB.
57 //
58 // Right now we call this the "ARM³ Nonpaged Pool" and it begins somewhere after
59 // the PFN database (which starts at 0xB0000000).
60 //
61 // The expansion nonpaged pool, on the other hand, can grow much bigger (400MB
62 // for a 1GB system). On ARM³ however, it is currently capped at 128MB.
63 //
64 // The address where the initial nonpaged pool starts is aptly named
65 // MmNonPagedPoolStart, and it describes a range of MmSizeOfNonPagedPoolInBytes
66 // bytes.
67 //
68 // Expansion nonpaged pool starts at an address described by the variable called
69 // MmNonPagedPoolExpansionStart, and it goes on for MmMaximumNonPagedPoolInBytes
70 // minus MmSizeOfNonPagedPoolInBytes bytes, always reaching MmNonPagedPoolEnd
71 // (because of the way it's calculated) at 0xFFBE0000.
72 //
73 // Initial nonpaged pool is allocated and mapped early-on during boot, but what
74 // about the expansion nonpaged pool? It is instead composed of special pages
75 // which belong to what are called System PTEs. These PTEs are the matter of a
76 // later discussion, but they are also considered part of the "nonpaged" OS, due
77 // to the fact that they are never paged out -- once an address is described by
78 // a System PTE, it is always valid, until the System PTE is torn down.
79 //
80 // System PTEs are actually composed of two "spaces", the system space proper,
81 // and the nonpaged pool expansion space. The latter, as we've already seen,
82 // begins at MmNonPagedPoolExpansionStart. Based on the number of System PTEs
83 // that the system will support, the remaining address space below this address
84 // is used to hold the system space PTEs. This address, in turn, is held in the
85 // variable named MmNonPagedSystemStart, which itself is never allowed to go
86 // below 0xEB000000 (thus creating an upper bound on the number of System PTEs).
87 //
88 // This means that 330MB are reserved for total nonpaged system VA, on top of
89 // whatever the initial nonpaged pool allocation is.
90 //
91 // The following URLs, valid as of April 23rd, 2008, support this evidence:
92 //
93 // http://www.cs.miami.edu/~burt/journal/NT/memory.html
94 // http://www.ditii.com/2007/09/28/windows-memory-management-x86-virtual-address-space/
95 //
96 PVOID MmNonPagedSystemStart;
97 SIZE_T MiNonPagedSystemSize;
98 PVOID MmNonPagedPoolStart;
99 PVOID MmNonPagedPoolExpansionStart;
100 PVOID MmNonPagedPoolEnd = MI_NONPAGED_POOL_END;
101
102 //
103 // This is where paged pool starts by default
104 //
105 PVOID MmPagedPoolStart = MI_PAGED_POOL_START;
106 PVOID MmPagedPoolEnd;
107
108 //
109 // And this is its default size
110 //
111 SIZE_T MmSizeOfPagedPoolInBytes = MI_MIN_INIT_PAGED_POOLSIZE;
112 PFN_NUMBER MmSizeOfPagedPoolInPages = MI_MIN_INIT_PAGED_POOLSIZE / PAGE_SIZE;
113
114 //
115 // Session space starts at 0xBFFFFFFF and grows downwards
116 // By default, it includes an 8MB image area where we map win32k and video card
117 // drivers, followed by a 4MB area containing the session's working set. This is
118 // then followed by a 20MB mapped view area and finally by the session's paged
119 // pool, by default 16MB.
120 //
121 // On a normal system, this results in session space occupying the region from
122 // 0xBD000000 to 0xC0000000
123 //
124 // See miarm.h for the defines that determine the sizing of this region. On an
125 // NT system, some of these can be configured through the registry, but we don't
126 // support that yet.
127 //
128 PVOID MiSessionSpaceEnd; // 0xC0000000
129 PVOID MiSessionImageEnd; // 0xC0000000
130 PVOID MiSessionImageStart; // 0xBF800000
131 PVOID MiSessionSpaceWs;
132 PVOID MiSessionViewStart; // 0xBE000000
133 PVOID MiSessionPoolEnd; // 0xBE000000
134 PVOID MiSessionPoolStart; // 0xBD000000
135 PVOID MmSessionBase; // 0xBD000000
136 SIZE_T MmSessionSize;
137 SIZE_T MmSessionViewSize;
138 SIZE_T MmSessionPoolSize;
139 SIZE_T MmSessionImageSize;
140
141 /*
142 * These are the PTE addresses of the boundaries carved out above
143 */
144 PMMPTE MiSessionImagePteStart;
145 PMMPTE MiSessionImagePteEnd;
146 PMMPTE MiSessionBasePte;
147 PMMPTE MiSessionLastPte;
148
149 //
150 // The system view space, on the other hand, is where sections that are memory
151 // mapped into "system space" end up.
152 //
153 // By default, it is a 16MB region, but we hack it to be 32MB for ReactOS
154 //
155 PVOID MiSystemViewStart;
156 SIZE_T MmSystemViewSize;
157
158 #if (_MI_PAGING_LEVELS == 2)
159 //
160 // A copy of the system page directory (the page directory associated with the
161 // System process) is kept (double-mapped) by the manager in order to lazily
162 // map paged pool PDEs into external processes when they fault on a paged pool
163 // address.
164 //
165 PFN_NUMBER MmSystemPageDirectory[PD_COUNT];
166 PMMPDE MmSystemPagePtes;
167 #endif
168
169 //
170 // The system cache starts right after hyperspace. The first few pages are for
171 // keeping track of the system working set list.
172 //
173 // This should be 0xC0C00000 -- the cache itself starts at 0xC1000000
174 //
175 PMMWSL MmSystemCacheWorkingSetList = (PVOID)MI_SYSTEM_CACHE_WS_START;
176
177 //
178 // Windows NT seems to choose between 7000, 11000 and 50000
179 // On systems with more than 32MB, this number is then doubled, and further
180 // aligned up to a PDE boundary (4MB).
181 //
182 PFN_COUNT MmNumberOfSystemPtes;
183
184 //
185 // This is how many pages the PFN database will take up
186 // In Windows, this includes the Quark Color Table, but not in ARM³
187 //
188 PFN_NUMBER MxPfnAllocation;
189
190 //
191 // Unlike the old ReactOS Memory Manager, ARM³ (and Windows) does not keep track
192 // of pages that are not actually valid physical memory, such as ACPI reserved
193 // regions, BIOS address ranges, or holes in physical memory address space which
194 // could indicate device-mapped I/O memory.
195 //
196 // In fact, the lack of a PFN entry for a page usually indicates that this is
197 // I/O space instead.
198 //
199 // A bitmap, called the PFN bitmap, keeps track of all page frames by assigning
200 // a bit to each. If the bit is set, then the page is valid physical RAM.
201 //
202 RTL_BITMAP MiPfnBitMap;
203
204 //
205 // This structure describes the different pieces of RAM-backed address space
206 //
207 PPHYSICAL_MEMORY_DESCRIPTOR MmPhysicalMemoryBlock;
208
209 //
210 // This is where we keep track of the most basic physical layout markers
211 //
212 PFN_NUMBER MmHighestPhysicalPage, MmLowestPhysicalPage = -1;
213 PFN_COUNT MmNumberOfPhysicalPages;
214
215 //
216 // The total number of pages mapped by the boot loader, which include the kernel
217 // HAL, boot drivers, registry, NLS files and other loader data structures is
218 // kept track of here. This depends on "LoaderPagesSpanned" being correct when
219 // coming from the loader.
220 //
221 // This number is later aligned up to a PDE boundary.
222 //
223 SIZE_T MmBootImageSize;
224
225 //
226 // These three variables keep track of the core separation of address space that
227 // exists between kernel mode and user mode.
228 //
229 ULONG_PTR MmUserProbeAddress;
230 PVOID MmHighestUserAddress;
231 PVOID MmSystemRangeStart;
232
233 /* And these store the respective highest PTE/PDE address */
234 PMMPTE MiHighestUserPte;
235 PMMPDE MiHighestUserPde;
236 #if (_MI_PAGING_LEVELS >= 3)
237 PMMPTE MiHighestUserPpe;
238 #if (_MI_PAGING_LEVELS >= 4)
239 PMMPTE MiHighestUserPxe;
240 #endif
241 #endif
242
243 /* These variables define the system cache address space */
244 PVOID MmSystemCacheStart;
245 PVOID MmSystemCacheEnd;
246 MMSUPPORT MmSystemCacheWs;
247
248 //
249 // This is where hyperspace ends (followed by the system cache working set)
250 //
251 PVOID MmHyperSpaceEnd;
252
253 //
254 // Page coloring algorithm data
255 //
256 ULONG MmSecondaryColors;
257 ULONG MmSecondaryColorMask;
258
259 //
260 // Actual (registry-configurable) size of a GUI thread's stack
261 //
262 ULONG MmLargeStackSize = KERNEL_LARGE_STACK_SIZE;
263
264 //
265 // Before we have a PFN database, memory comes straight from our physical memory
266 // blocks, which is nice because it's guaranteed contiguous and also because once
267 // we take a page from here, the system doesn't see it anymore.
268 // However, once the fun is over, those pages must be re-integrated back into
269 // PFN society life, and that requires us keeping a copy of the original layout
270 // so that we can parse it later.
271 //
272 PMEMORY_ALLOCATION_DESCRIPTOR MxFreeDescriptor;
273 MEMORY_ALLOCATION_DESCRIPTOR MxOldFreeDescriptor;
274
275 /*
276 * For each page's worth bytes of L2 cache in a given set/way line, the zero and
277 * free lists are organized in what is called a "color".
278 *
279 * This array points to the two lists, so it can be thought of as a multi-dimensional
280 * array of MmFreePagesByColor[2][MmSecondaryColors]. Since the number is dynamic,
281 * we describe the array in pointer form instead.
282 *
283 * On a final note, the color tables themselves are right after the PFN database.
284 */
285 C_ASSERT(FreePageList == 1);
286 PMMCOLOR_TABLES MmFreePagesByColor[FreePageList + 1];
287
288 /* An event used in Phase 0 before the rest of the system is ready to go */
289 KEVENT MiTempEvent;
290
291 /* All the events used for memory threshold notifications */
292 PKEVENT MiLowMemoryEvent;
293 PKEVENT MiHighMemoryEvent;
294 PKEVENT MiLowPagedPoolEvent;
295 PKEVENT MiHighPagedPoolEvent;
296 PKEVENT MiLowNonPagedPoolEvent;
297 PKEVENT MiHighNonPagedPoolEvent;
298
299 /* The actual thresholds themselves, in page numbers */
300 PFN_NUMBER MmLowMemoryThreshold;
301 PFN_NUMBER MmHighMemoryThreshold;
302 PFN_NUMBER MiLowPagedPoolThreshold;
303 PFN_NUMBER MiHighPagedPoolThreshold;
304 PFN_NUMBER MiLowNonPagedPoolThreshold;
305 PFN_NUMBER MiHighNonPagedPoolThreshold;
306
307 /*
308 * This number determines how many free pages must exist, at minimum, until we
309 * start trimming working sets and flushing modified pages to obtain more free
310 * pages.
311 *
312 * This number changes if the system detects that this is a server product
313 */
314 PFN_NUMBER MmMinimumFreePages = 26;
315
316 /*
317 * This number indicates how many pages we consider to be a low limit of having
318 * "plenty" of free memory.
319 *
320 * It is doubled on systems that have more than 63MB of memory
321 */
322 PFN_NUMBER MmPlentyFreePages = 400;
323
324 /* These values store the type of system this is (small, med, large) and if server */
325 ULONG MmProductType;
326 MM_SYSTEMSIZE MmSystemSize;
327
328 /*
329 * These values store the cache working set minimums and maximums, in pages
330 *
331 * The minimum value is boosted on systems with more than 24MB of RAM, and cut
332 * down to only 32 pages on embedded (<24MB RAM) systems.
333 *
334 * An extra boost of 2MB is given on systems with more than 33MB of RAM.
335 */
336 PFN_NUMBER MmSystemCacheWsMinimum = 288;
337 PFN_NUMBER MmSystemCacheWsMaximum = 350;
338
339 /* FIXME: Move to cache/working set code later */
340 BOOLEAN MmLargeSystemCache;
341
342 /*
343 * This value determines in how many fragments/chunks the subsection prototype
344 * PTEs should be allocated when mapping a section object. It is configurable in
345 * the registry through the MapAllocationFragment parameter.
346 *
347 * The default is 64KB on systems with more than 1GB of RAM, 32KB on systems with
348 * more than 256MB of RAM, and 16KB on systems with less than 256MB of RAM.
349 *
350 * The maximum it can be set to is 2MB, and the minimum is 4KB.
351 */
352 SIZE_T MmAllocationFragment;
353
354 /*
355 * These two values track how much virtual memory can be committed, and when
356 * expansion should happen.
357 */
358 // FIXME: They should be moved elsewhere since it's not an "init" setting?
359 SIZE_T MmTotalCommitLimit;
360 SIZE_T MmTotalCommitLimitMaximum;
361
362 /* Internal setting used for debugging memory descriptors */
363 BOOLEAN MiDbgEnableMdDump =
364 #ifdef _ARM_
365 TRUE;
366 #else
367 FALSE;
368 #endif
369
370 /* Number of memory descriptors in the loader block */
371 ULONG MiNumberDescriptors = 0;
372
373 /* Number of free pages in the loader block */
374 PFN_NUMBER MiNumberOfFreePages = 0;
375
376 /* Timeout value for critical sections (2.5 minutes) */
377 ULONG MmCritsectTimeoutSeconds = 150; // NT value: 720 * 60 * 60; (30 days)
378 LARGE_INTEGER MmCriticalSectionTimeout;
379
380 /* PRIVATE FUNCTIONS **********************************************************/
381
382 VOID
383 NTAPI
384 MiScanMemoryDescriptors(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
385 {
386 PLIST_ENTRY ListEntry;
387 PMEMORY_ALLOCATION_DESCRIPTOR Descriptor;
388 PFN_NUMBER PageFrameIndex, FreePages = 0;
389
390 /* Loop the memory descriptors */
391 for (ListEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
392 ListEntry != &LoaderBlock->MemoryDescriptorListHead;
393 ListEntry = ListEntry->Flink)
394 {
395 /* Get the descriptor */
396 Descriptor = CONTAINING_RECORD(ListEntry,
397 MEMORY_ALLOCATION_DESCRIPTOR,
398 ListEntry);
399 DPRINT("MD Type: %lx Base: %lx Count: %lx\n",
400 Descriptor->MemoryType, Descriptor->BasePage, Descriptor->PageCount);
401
402 /* Count this descriptor */
403 MiNumberDescriptors++;
404
405 /* Check if this is invisible memory */
406 if ((Descriptor->MemoryType == LoaderFirmwarePermanent) ||
407 (Descriptor->MemoryType == LoaderSpecialMemory) ||
408 (Descriptor->MemoryType == LoaderHALCachedMemory) ||
409 (Descriptor->MemoryType == LoaderBBTMemory))
410 {
411 /* Skip this descriptor */
412 continue;
413 }
414
415 /* Check if this is bad memory */
416 if (Descriptor->MemoryType != LoaderBad)
417 {
418 /* Count this in the total of pages */
419 MmNumberOfPhysicalPages += (PFN_COUNT)Descriptor->PageCount;
420 }
421
422 /* Check if this is the new lowest page */
423 if (Descriptor->BasePage < MmLowestPhysicalPage)
424 {
425 /* Update the lowest page */
426 MmLowestPhysicalPage = Descriptor->BasePage;
427 }
428
429 /* Check if this is the new highest page */
430 PageFrameIndex = Descriptor->BasePage + Descriptor->PageCount;
431 if (PageFrameIndex > MmHighestPhysicalPage)
432 {
433 /* Update the highest page */
434 MmHighestPhysicalPage = PageFrameIndex - 1;
435 }
436
437 /* Check if this is free memory */
438 if ((Descriptor->MemoryType == LoaderFree) ||
439 (Descriptor->MemoryType == LoaderLoadedProgram) ||
440 (Descriptor->MemoryType == LoaderFirmwareTemporary) ||
441 (Descriptor->MemoryType == LoaderOsloaderStack))
442 {
443 /* Count it too free pages */
444 MiNumberOfFreePages += Descriptor->PageCount;
445
446 /* Check if this is the largest memory descriptor */
447 if (Descriptor->PageCount > FreePages)
448 {
449 /* Remember it */
450 MxFreeDescriptor = Descriptor;
451 FreePages = Descriptor->PageCount;
452 }
453 }
454 }
455
456 /* Save original values of the free descriptor, since it'll be
457 * altered by early allocations */
458 MxOldFreeDescriptor = *MxFreeDescriptor;
459 }
460
461 PFN_NUMBER
462 NTAPI
463 INIT_FUNCTION
464 MxGetNextPage(IN PFN_NUMBER PageCount)
465 {
466 PFN_NUMBER Pfn;
467
468 /* Make sure we have enough pages */
469 if (PageCount > MxFreeDescriptor->PageCount)
470 {
471 /* Crash the system */
472 KeBugCheckEx(INSTALL_MORE_MEMORY,
473 MmNumberOfPhysicalPages,
474 MxFreeDescriptor->PageCount,
475 MxOldFreeDescriptor.PageCount,
476 PageCount);
477 }
478
479 /* Use our lowest usable free pages */
480 Pfn = MxFreeDescriptor->BasePage;
481 MxFreeDescriptor->BasePage += PageCount;
482 MxFreeDescriptor->PageCount -= PageCount;
483 return Pfn;
484 }
485
486 VOID
487 NTAPI
488 INIT_FUNCTION
489 MiComputeColorInformation(VOID)
490 {
491 ULONG L2Associativity;
492
493 /* Check if no setting was provided already */
494 if (!MmSecondaryColors)
495 {
496 /* Get L2 cache information */
497 L2Associativity = KeGetPcr()->SecondLevelCacheAssociativity;
498
499 /* The number of colors is the number of cache bytes by set/way */
500 MmSecondaryColors = KeGetPcr()->SecondLevelCacheSize;
501 if (L2Associativity) MmSecondaryColors /= L2Associativity;
502 }
503
504 /* Now convert cache bytes into pages */
505 MmSecondaryColors >>= PAGE_SHIFT;
506 if (!MmSecondaryColors)
507 {
508 /* If there was no cache data from the KPCR, use the default colors */
509 MmSecondaryColors = MI_SECONDARY_COLORS;
510 }
511 else
512 {
513 /* Otherwise, make sure there aren't too many colors */
514 if (MmSecondaryColors > MI_MAX_SECONDARY_COLORS)
515 {
516 /* Set the maximum */
517 MmSecondaryColors = MI_MAX_SECONDARY_COLORS;
518 }
519
520 /* Make sure there aren't too little colors */
521 if (MmSecondaryColors < MI_MIN_SECONDARY_COLORS)
522 {
523 /* Set the default */
524 MmSecondaryColors = MI_SECONDARY_COLORS;
525 }
526
527 /* Finally make sure the colors are a power of two */
528 if (MmSecondaryColors & (MmSecondaryColors - 1))
529 {
530 /* Set the default */
531 MmSecondaryColors = MI_SECONDARY_COLORS;
532 }
533 }
534
535 /* Compute the mask and store it */
536 MmSecondaryColorMask = MmSecondaryColors - 1;
537 KeGetCurrentPrcb()->SecondaryColorMask = MmSecondaryColorMask;
538 }
539
/* Places the page color tables (two MMCOLOR_TABLES list heads -- zeroed and
 * free -- per secondary color) directly after the PFN database, maps any
 * not-yet-valid PTEs in that range with fresh zeroed pages from the boot
 * allocator, and initializes every list head to the empty state. */
VOID
NTAPI
INIT_FUNCTION
MiInitializeColorTables(VOID)
{
    ULONG i;
    PMMPTE PointerPte, LastPte;
    MMPTE TempPte = ValidKernelPte;

    /* The color table starts after the ARM3 PFN database */
    MmFreePagesByColor[0] = (PMMCOLOR_TABLES)&MmPfnDatabase[MmHighestPhysicalPage + 1];

    /* Loop the PTEs. We have two color tables for each secondary color */
    PointerPte = MiAddressToPte(&MmFreePagesByColor[0][0]);
    LastPte = MiAddressToPte((ULONG_PTR)MmFreePagesByColor[0] +
                             (2 * MmSecondaryColors * sizeof(MMCOLOR_TABLES))
                             - 1);
    while (PointerPte <= LastPte)
    {
        /* Only map PTEs that aren't valid yet (the tail of the PFN database
         * mapping may already cover the start of this range) */
        if (PointerPte->u.Hard.Valid == 0)
        {
            /* Get a page from the boot allocator and map it */
            TempPte.u.Hard.PageFrameNumber = MxGetNextPage(1);
            MI_WRITE_VALID_PTE(PointerPte, TempPte);

            /* Zero out the page */
            RtlZeroMemory(MiPteToAddress(PointerPte), PAGE_SIZE);
        }

        /* Next */
        PointerPte++;
    }

    /* Now set the address of the second (free-list) table, right after the
     * first (zeroed-list) one */
    MmFreePagesByColor[1] = &MmFreePagesByColor[0][MmSecondaryColors];

    /* Now loop the lists to set them up */
    for (i = 0; i < MmSecondaryColors; i++)
    {
        /* Set both free and zero lists for each color to the empty state
         * (LIST_HEAD sentinel links, zero count) */
        MmFreePagesByColor[ZeroedPageList][i].Flink = LIST_HEAD;
        MmFreePagesByColor[ZeroedPageList][i].Blink = (PVOID)LIST_HEAD;
        MmFreePagesByColor[ZeroedPageList][i].Count = 0;
        MmFreePagesByColor[FreePageList][i].Flink = LIST_HEAD;
        MmFreePagesByColor[FreePageList][i].Blink = (PVOID)LIST_HEAD;
        MmFreePagesByColor[FreePageList][i].Count = 0;
    }
}
589
590 #ifndef _M_AMD64
591 BOOLEAN
592 NTAPI
593 INIT_FUNCTION
594 MiIsRegularMemory(IN PLOADER_PARAMETER_BLOCK LoaderBlock,
595 IN PFN_NUMBER Pfn)
596 {
597 PLIST_ENTRY NextEntry;
598 PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
599
600 /* Loop the memory descriptors */
601 NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
602 while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
603 {
604 /* Get the memory descriptor */
605 MdBlock = CONTAINING_RECORD(NextEntry,
606 MEMORY_ALLOCATION_DESCRIPTOR,
607 ListEntry);
608
609 /* Check if this PFN could be part of the block */
610 if (Pfn >= (MdBlock->BasePage))
611 {
612 /* Check if it really is part of the block */
613 if (Pfn < (MdBlock->BasePage + MdBlock->PageCount))
614 {
615 /* Check if the block is actually memory we don't map */
616 if ((MdBlock->MemoryType == LoaderFirmwarePermanent) ||
617 (MdBlock->MemoryType == LoaderBBTMemory) ||
618 (MdBlock->MemoryType == LoaderSpecialMemory))
619 {
620 /* We don't need PFN database entries for this memory */
621 break;
622 }
623
624 /* This is memory we want to map */
625 return TRUE;
626 }
627 }
628 else
629 {
630 /* Blocks are ordered, so if it's not here, it doesn't exist */
631 break;
632 }
633
634 /* Get to the next descriptor */
635 NextEntry = MdBlock->ListEntry.Flink;
636 }
637
638 /* Check if this PFN is actually from our free memory descriptor */
639 if ((Pfn >= MxOldFreeDescriptor.BasePage) &&
640 (Pfn < MxOldFreeDescriptor.BasePage + MxOldFreeDescriptor.PageCount))
641 {
642 /* We use these pages for initial mappings, so we do want to count them */
643 return TRUE;
644 }
645
646 /* Otherwise this isn't memory that we describe or care about */
647 return FALSE;
648 }
649
/* Maps the virtual address range of the PFN database itself. For every loader
 * descriptor that will be described by the database, the PTEs covering its
 * slice of MmPfnDatabase are made valid, backed by pages taken directly off
 * the front of the boot free descriptor. The free descriptor is updated only
 * once at the end, so MxGetNextPage must not be used concurrently. */
VOID
NTAPI
INIT_FUNCTION
MiMapPfnDatabase(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
    PFN_NUMBER FreePage, FreePageCount, PagesLeft, BasePage, PageCount;
    PLIST_ENTRY NextEntry;
    PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
    PMMPTE PointerPte, LastPte;
    MMPTE TempPte = ValidKernelPte;

    /* Get current page data, since we won't be using MxGetNextPage as it would corrupt our state */
    FreePage = MxFreeDescriptor->BasePage;
    FreePageCount = MxFreeDescriptor->PageCount;
    PagesLeft = 0; /* running count of database pages mapped (bookkeeping only) */

    /* Loop the memory descriptors */
    NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
    while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
    {
        /* Get the descriptor */
        MdBlock = CONTAINING_RECORD(NextEntry,
                                    MEMORY_ALLOCATION_DESCRIPTOR,
                                    ListEntry);
        if ((MdBlock->MemoryType == LoaderFirmwarePermanent) ||
            (MdBlock->MemoryType == LoaderBBTMemory) ||
            (MdBlock->MemoryType == LoaderSpecialMemory))
        {
            /* These pages are not part of the PFN database */
            NextEntry = MdBlock->ListEntry.Flink;
            continue;
        }

        /* Next, check if this is our special free descriptor we've found.
         * Its live Base/PageCount have already been consumed by early
         * allocations, so use the saved original extent instead. */
        if (MdBlock == MxFreeDescriptor)
        {
            /* Use the real numbers instead */
            BasePage = MxOldFreeDescriptor.BasePage;
            PageCount = MxOldFreeDescriptor.PageCount;
        }
        else
        {
            /* Use the descriptor's numbers */
            BasePage = MdBlock->BasePage;
            PageCount = MdBlock->PageCount;
        }

        /* Get the PTEs covering this descriptor's slice of the database */
        PointerPte = MiAddressToPte(&MmPfnDatabase[BasePage]);
        LastPte = MiAddressToPte(((ULONG_PTR)&MmPfnDatabase[BasePage + PageCount]) - 1);
        DPRINT("MD Type: %lx Base: %lx Count: %lx\n", MdBlock->MemoryType, BasePage, PageCount);

        /* Loop them */
        while (PointerPte <= LastPte)
        {
            /* We'll only touch PTEs that aren't already valid (descriptors
             * can share a PTE at their boundaries) */
            if (PointerPte->u.Hard.Valid == 0)
            {
                /* Use the next free page */
                TempPte.u.Hard.PageFrameNumber = FreePage;
                ASSERT(FreePageCount != 0);

                /* Consume free pages */
                FreePage++;
                FreePageCount--;
                if (!FreePageCount)
                {
                    /* Out of memory */
                    KeBugCheckEx(INSTALL_MORE_MEMORY,
                                 MmNumberOfPhysicalPages,
                                 FreePageCount,
                                 MxOldFreeDescriptor.PageCount,
                                 1);
                }

                /* Write out this PTE */
                PagesLeft++;
                MI_WRITE_VALID_PTE(PointerPte, TempPte);

                /* Zero this page */
                RtlZeroMemory(MiPteToAddress(PointerPte), PAGE_SIZE);
            }

            /* Next! */
            PointerPte++;
        }

        /* Do the next address range */
        NextEntry = MdBlock->ListEntry.Flink;
    }

    /* Now update the free descriptor to consume the pages we used up during the PFN allocation loop */
    MxFreeDescriptor->BasePage = FreePage;
    MxFreeDescriptor->PageCount = FreePageCount;
}
745
746 VOID
747 NTAPI
748 INIT_FUNCTION
749 MiBuildPfnDatabaseFromPages(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
750 {
751 PMMPDE PointerPde;
752 PMMPTE PointerPte;
753 ULONG i, Count, j;
754 PFN_NUMBER PageFrameIndex, StartupPdIndex, PtePageIndex;
755 PMMPFN Pfn1, Pfn2;
756 ULONG_PTR BaseAddress = 0;
757
758 /* PFN of the startup page directory */
759 StartupPdIndex = PFN_FROM_PTE(MiAddressToPde(PDE_BASE));
760
761 /* Start with the first PDE and scan them all */
762 PointerPde = MiAddressToPde(NULL);
763 Count = PD_COUNT * PDE_COUNT;
764 for (i = 0; i < Count; i++)
765 {
766 /* Check for valid PDE */
767 if (PointerPde->u.Hard.Valid == 1)
768 {
769 /* Get the PFN from it */
770 PageFrameIndex = PFN_FROM_PTE(PointerPde);
771
772 /* Do we want a PFN entry for this page? */
773 if (MiIsRegularMemory(LoaderBlock, PageFrameIndex))
774 {
775 /* Yes we do, set it up */
776 Pfn1 = MiGetPfnEntry(PageFrameIndex);
777 Pfn1->u4.PteFrame = StartupPdIndex;
778 Pfn1->PteAddress = (PMMPTE)PointerPde;
779 Pfn1->u2.ShareCount++;
780 Pfn1->u3.e2.ReferenceCount = 1;
781 Pfn1->u3.e1.PageLocation = ActiveAndValid;
782 Pfn1->u3.e1.CacheAttribute = MiNonCached;
783 #if MI_TRACE_PFNS
784 Pfn1->PfnUsage = MI_USAGE_INIT_MEMORY;
785 memcpy(Pfn1->ProcessName, "Initial PDE", 16);
786 #endif
787 }
788 else
789 {
790 /* No PFN entry */
791 Pfn1 = NULL;
792 }
793
794 /* Now get the PTE and scan the pages */
795 PointerPte = MiAddressToPte(BaseAddress);
796 for (j = 0; j < PTE_COUNT; j++)
797 {
798 /* Check for a valid PTE */
799 if (PointerPte->u.Hard.Valid == 1)
800 {
801 /* Increase the shared count of the PFN entry for the PDE */
802 ASSERT(Pfn1 != NULL);
803 Pfn1->u2.ShareCount++;
804
805 /* Now check if the PTE is valid memory too */
806 PtePageIndex = PFN_FROM_PTE(PointerPte);
807 if (MiIsRegularMemory(LoaderBlock, PtePageIndex))
808 {
809 /*
810 * Only add pages above the end of system code or pages
811 * that are part of nonpaged pool
812 */
813 if ((BaseAddress >= 0xA0000000) ||
814 ((BaseAddress >= (ULONG_PTR)MmNonPagedPoolStart) &&
815 (BaseAddress < (ULONG_PTR)MmNonPagedPoolStart +
816 MmSizeOfNonPagedPoolInBytes)))
817 {
818 /* Get the PFN entry and make sure it too is valid */
819 Pfn2 = MiGetPfnEntry(PtePageIndex);
820 if ((MmIsAddressValid(Pfn2)) &&
821 (MmIsAddressValid(Pfn2 + 1)))
822 {
823 /* Setup the PFN entry */
824 Pfn2->u4.PteFrame = PageFrameIndex;
825 Pfn2->PteAddress = PointerPte;
826 Pfn2->u2.ShareCount++;
827 Pfn2->u3.e2.ReferenceCount = 1;
828 Pfn2->u3.e1.PageLocation = ActiveAndValid;
829 Pfn2->u3.e1.CacheAttribute = MiNonCached;
830 #if MI_TRACE_PFNS
831 Pfn2->PfnUsage = MI_USAGE_INIT_MEMORY;
832 memcpy(Pfn1->ProcessName, "Initial PTE", 16);
833 #endif
834 }
835 }
836 }
837 }
838
839 /* Next PTE */
840 PointerPte++;
841 BaseAddress += PAGE_SIZE;
842 }
843 }
844 else
845 {
846 /* Next PDE mapped address */
847 BaseAddress += PDE_MAPPED_VA;
848 }
849
850 /* Next PTE */
851 PointerPde++;
852 }
853 }
854
855 VOID
856 NTAPI
857 INIT_FUNCTION
858 MiBuildPfnDatabaseZeroPage(VOID)
859 {
860 PMMPFN Pfn1;
861 PMMPDE PointerPde;
862
863 /* Grab the lowest page and check if it has no real references */
864 Pfn1 = MiGetPfnEntry(MmLowestPhysicalPage);
865 if (!(MmLowestPhysicalPage) && !(Pfn1->u3.e2.ReferenceCount))
866 {
867 /* Make it a bogus page to catch errors */
868 PointerPde = MiAddressToPde(0xFFFFFFFF);
869 Pfn1->u4.PteFrame = PFN_FROM_PTE(PointerPde);
870 Pfn1->PteAddress = (PMMPTE)PointerPde;
871 Pfn1->u2.ShareCount++;
872 Pfn1->u3.e2.ReferenceCount = 0xFFF0;
873 Pfn1->u3.e1.PageLocation = ActiveAndValid;
874 Pfn1->u3.e1.CacheAttribute = MiNonCached;
875 }
876 }
877
/* Completes the PFN database from the loader's memory descriptor list:
 * free-type descriptors have their unreferenced pages inserted on the free
 * list (under the PFN lock), invisible firmware ranges are skipped, and all
 * remaining types (boot drivers, registry, NLS data, ...) get in-use PFN
 * entries tied to their KSEG0 mapping. Descriptors reaching past
 * MmHighestPhysicalPage are trimmed; ones entirely above it end the loop
 * (the list is sorted by base page). */
VOID
NTAPI
INIT_FUNCTION
MiBuildPfnDatabaseFromLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
    PLIST_ENTRY NextEntry;
    PFN_NUMBER PageCount = 0;
    PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
    PFN_NUMBER PageFrameIndex;
    PMMPFN Pfn1;
    PMMPTE PointerPte;
    PMMPDE PointerPde;
    KIRQL OldIrql;

    /* Now loop through the descriptors */
    NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
    while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
    {
        /* Get the current descriptor */
        MdBlock = CONTAINING_RECORD(NextEntry,
                                    MEMORY_ALLOCATION_DESCRIPTOR,
                                    ListEntry);

        /* Read its data */
        PageCount = MdBlock->PageCount;
        PageFrameIndex = MdBlock->BasePage;

        /* Don't allow memory above what the PFN database is mapping */
        if (PageFrameIndex > MmHighestPhysicalPage)
        {
            /* Since they are ordered, everything past here will be larger */
            break;
        }

        /* On the other hand, the end page might be higher up... */
        if ((PageFrameIndex + PageCount) > (MmHighestPhysicalPage + 1))
        {
            /* In which case we'll trim the descriptor to go as high as we can */
            PageCount = MmHighestPhysicalPage + 1 - PageFrameIndex;
            MdBlock->PageCount = PageCount;

            /* But if there's nothing left to trim, we got too high, so quit */
            if (!PageCount) break;
        }

        /* Now check the descriptor type */
        switch (MdBlock->MemoryType)
        {
            /* Check for bad RAM */
            case LoaderBad:

                /* Bad pages are simply left out of every list */
                DPRINT1("You either have specified /BURNMEMORY or damaged RAM modules.\n");
                break;

            /* Check for free RAM */
            case LoaderFree:
            case LoaderLoadedProgram:
            case LoaderFirmwareTemporary:
            case LoaderOsloaderStack:

                /* Get the last page of this descriptor. Note we loop backwards */
                PageFrameIndex += PageCount - 1;
                Pfn1 = MiGetPfnEntry(PageFrameIndex);

                /* Lock the PFN Database -- MiInsertPageInFreeList requires it */
                OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
                while (PageCount--)
                {
                    /* If the page really has no references, mark it as free */
                    if (!Pfn1->u3.e2.ReferenceCount)
                    {
                        /* Add it to the free list */
                        Pfn1->u3.e1.CacheAttribute = MiNonCached;
                        MiInsertPageInFreeList(PageFrameIndex);
                    }

                    /* Go to the next page */
                    Pfn1--;
                    PageFrameIndex--;
                }

                /* Release PFN database */
                KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

                /* Done with this block */
                break;

            /* Check for pages that are invisible to us */
            case LoaderFirmwarePermanent:
            case LoaderSpecialMemory:
            case LoaderBBTMemory:

                /* And skip them */
                break;

            default:

                /* Every other type is in-use boot data. These pages are
                 * reached through the KSEG0 mapping that adds 0x80000000 */
                PointerPte = MiAddressToPte(KSEG0_BASE + (PageFrameIndex << PAGE_SHIFT));
                Pfn1 = MiGetPfnEntry(PageFrameIndex);
                while (PageCount--)
                {
                    /* Check if the page is really unused */
                    PointerPde = MiAddressToPde(KSEG0_BASE + (PageFrameIndex << PAGE_SHIFT));
                    if (!Pfn1->u3.e2.ReferenceCount)
                    {
                        /* Mark it as being in-use */
                        Pfn1->u4.PteFrame = PFN_FROM_PTE(PointerPde);
                        Pfn1->PteAddress = PointerPte;
                        Pfn1->u2.ShareCount++;
                        Pfn1->u3.e2.ReferenceCount = 1;
                        Pfn1->u3.e1.PageLocation = ActiveAndValid;
                        Pfn1->u3.e1.CacheAttribute = MiNonCached;
#if MI_TRACE_PFNS
                        Pfn1->PfnUsage = MI_USAGE_BOOT_DRIVER;
#endif

                        /* Check for an execute-in-place ROM page */
                        if (MdBlock->MemoryType == LoaderXIPRom)
                        {
                            /* Make it a pseudo-I/O ROM mapping */
                            Pfn1->u1.Flink = 0;
                            Pfn1->u2.ShareCount = 0;
                            Pfn1->u3.e2.ReferenceCount = 0;
                            Pfn1->u3.e1.PageLocation = 0;
                            Pfn1->u3.e1.Rom = 1;
                            Pfn1->u4.InPageError = 0;
                            Pfn1->u3.e1.PrototypePte = 1;
                        }
                    }

                    /* Advance page structures */
                    Pfn1++;
                    PageFrameIndex++;
                    PointerPte++;
                }
                break;
        }

        /* Next descriptor entry */
        NextEntry = MdBlock->ListEntry.Flink;
    }
}
1021
1022 VOID
1023 NTAPI
1024 INIT_FUNCTION
1025 MiBuildPfnDatabaseSelf(VOID)
1026 {
1027 PMMPTE PointerPte, LastPte;
1028 PMMPFN Pfn1;
1029
1030 /* Loop the PFN database page */
1031 PointerPte = MiAddressToPte(MiGetPfnEntry(MmLowestPhysicalPage));
1032 LastPte = MiAddressToPte(MiGetPfnEntry(MmHighestPhysicalPage));
1033 while (PointerPte <= LastPte)
1034 {
1035 /* Make sure the page is valid */
1036 if (PointerPte->u.Hard.Valid == 1)
1037 {
1038 /* Get the PFN entry and just mark it referenced */
1039 Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
1040 Pfn1->u2.ShareCount = 1;
1041 Pfn1->u3.e2.ReferenceCount = 1;
1042 #if MI_TRACE_PFNS
1043 Pfn1->PfnUsage = MI_USAGE_PFN_DATABASE;
1044 #endif
1045 }
1046
1047 /* Next */
1048 PointerPte++;
1049 }
1050 }
1051
VOID
NTAPI
INIT_FUNCTION
MiInitializePfnDatabase(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
    /*
     * Constructs the ARM3 PFN database from the loader-provided memory map,
     * in four passes. NOTE(review): the sequence looks order-dependent (the
     * self pass last marks the database's own mapping pages) — keep the
     * call order as-is.
     */

    /* Scan memory and start setting up PFN entries */
    MiBuildPfnDatabaseFromPages(LoaderBlock);

    /* Add the zero page */
    MiBuildPfnDatabaseZeroPage();

    /* Scan the loader block and build the rest of the PFN database */
    MiBuildPfnDatabaseFromLoaderBlock(LoaderBlock);

    /* Finally add the pages for the PFN database itself */
    MiBuildPfnDatabaseSelf();
}
1069 #endif /* !_M_AMD64 */
1070
VOID
NTAPI
INIT_FUNCTION
MmFreeLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
    /*
     * Returns the physical pages still owned by the boot loader (registry
     * data, OS loader heap, NLS data descriptors) to the free page list,
     * once the kernel no longer needs the loader block contents. Pages that
     * are still referenced are instead deleted via share-count decrement.
     */
    PLIST_ENTRY NextMd;
    PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
    ULONG_PTR i;
    PFN_NUMBER BasePage, LoaderPages;
    PMMPFN Pfn1;
    KIRQL OldIrql;
    PPHYSICAL_MEMORY_RUN Buffer, Entry;

    /* Loop the descriptors in order to count them */
    i = 0;
    NextMd = LoaderBlock->MemoryDescriptorListHead.Flink;
    while (NextMd != &LoaderBlock->MemoryDescriptorListHead)
    {
        MdBlock = CONTAINING_RECORD(NextMd,
                                    MEMORY_ALLOCATION_DESCRIPTOR,
                                    ListEntry);
        i++;
        NextMd = MdBlock->ListEntry.Flink;
    }

    /* Allocate a structure to hold the physical runs */
    Buffer = ExAllocatePoolWithTag(NonPagedPool,
                                   i * sizeof(PHYSICAL_MEMORY_RUN),
                                   'lMmM');
    /* NOTE(review): allocation failure is only caught by this ASSERT; on a
       free build a NULL here would be dereferenced below — confirm that
       nonpaged pool exhaustion is considered impossible at this boot stage */
    ASSERT(Buffer != NULL);
    Entry = Buffer;

    /* Loop the descriptors again */
    NextMd = LoaderBlock->MemoryDescriptorListHead.Flink;
    while (NextMd != &LoaderBlock->MemoryDescriptorListHead)
    {
        /* Check what kind this was */
        MdBlock = CONTAINING_RECORD(NextMd,
                                    MEMORY_ALLOCATION_DESCRIPTOR,
                                    ListEntry);
        switch (MdBlock->MemoryType)
        {
            /* Registry, NLS, and heap data */
            case LoaderRegistryData:
            case LoaderOsloaderHeap:
            case LoaderNlsData:
                /* Are all a candidate for deletion */
                Entry->BasePage = MdBlock->BasePage;
                Entry->PageCount = MdBlock->PageCount;
                Entry++;
                /* fall through — nothing else to do for these types */

            /* We keep the rest */
            default:
                break;
        }

        /* Move to the next descriptor */
        NextMd = MdBlock->ListEntry.Flink;
    }

    /* Acquire the PFN lock */
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    /* Loop the runs (backwards, from the last entry we recorded) */
    LoaderPages = 0;
    while (--Entry >= Buffer)
    {
        /* See how many pages are in this run */
        i = Entry->PageCount;
        BasePage = Entry->BasePage;

        /* Loop each page */
        Pfn1 = MiGetPfnEntry(BasePage);
        while (i--)
        {
            /* Check if it has references or is in any kind of list */
            if (!(Pfn1->u3.e2.ReferenceCount) && (!Pfn1->u1.Flink))
            {
                /* Set the new PTE address and put this page into the free list */
                Pfn1->PteAddress = (PMMPTE)(BasePage << PAGE_SHIFT);
                MiInsertPageInFreeList(BasePage);
                LoaderPages++;
            }
            else if (BasePage)    /* referenced page frame 0 is left untouched */
            {
                /* It has a reference, so simply drop it */
                ASSERT(MI_IS_PHYSICAL_ADDRESS(MiPteToAddress(Pfn1->PteAddress)) == FALSE);

                /* Drop a dereference on this page, which should delete it */
                Pfn1->PteAddress->u.Long = 0;
                MI_SET_PFN_DELETED(Pfn1);
                MiDecrementShareCount(Pfn1, BasePage);
                LoaderPages++;
            }

            /* Move to the next page */
            Pfn1++;
            BasePage++;
        }
    }

    /* Release the PFN lock and flush the TLB */
    DPRINT1("Loader pages freed: %lx\n", LoaderPages);
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
    KeFlushCurrentTb();

    /* Free our run structure */
    ExFreePoolWithTag(Buffer, 'lMmM');
}
1180
1181 VOID
1182 NTAPI
1183 INIT_FUNCTION
1184 MiAdjustWorkingSetManagerParameters(IN BOOLEAN Client)
1185 {
1186 /* This function needs to do more work, for now, we tune page minimums */
1187
1188 /* Check for a system with around 64MB RAM or more */
1189 if (MmNumberOfPhysicalPages >= (63 * _1MB) / PAGE_SIZE)
1190 {
1191 /* Double the minimum amount of pages we consider for a "plenty free" scenario */
1192 MmPlentyFreePages *= 2;
1193 }
1194 }
1195
1196 VOID
1197 NTAPI
1198 INIT_FUNCTION
1199 MiNotifyMemoryEvents(VOID)
1200 {
1201 /* Are we in a low-memory situation? */
1202 if (MmAvailablePages < MmLowMemoryThreshold)
1203 {
1204 /* Clear high, set low */
1205 if (KeReadStateEvent(MiHighMemoryEvent)) KeClearEvent(MiHighMemoryEvent);
1206 if (!KeReadStateEvent(MiLowMemoryEvent)) KeSetEvent(MiLowMemoryEvent, 0, FALSE);
1207 }
1208 else if (MmAvailablePages < MmHighMemoryThreshold)
1209 {
1210 /* We are in between, clear both */
1211 if (KeReadStateEvent(MiHighMemoryEvent)) KeClearEvent(MiHighMemoryEvent);
1212 if (KeReadStateEvent(MiLowMemoryEvent)) KeClearEvent(MiLowMemoryEvent);
1213 }
1214 else
1215 {
1216 /* Clear low, set high */
1217 if (KeReadStateEvent(MiLowMemoryEvent)) KeClearEvent(MiLowMemoryEvent);
1218 if (!KeReadStateEvent(MiHighMemoryEvent)) KeSetEvent(MiHighMemoryEvent, 0, FALSE);
1219 }
1220 }
1221
NTSTATUS
NTAPI
INIT_FUNCTION
MiCreateMemoryEvent(IN PUNICODE_STRING Name,
                    OUT PKEVENT *Event)
{
    /*
     * Creates a named, permanent notification event protected by a DACL
     * granting query/synchronize rights to Everyone and full access to
     * Administrators and SYSTEM. On success, *Event receives a referenced
     * KEVENT pointer; the temporary handle is closed before returning.
     */
    PACL Dacl;
    HANDLE EventHandle;
    ULONG DaclLength;
    NTSTATUS Status;
    OBJECT_ATTRIBUTES ObjectAttributes;
    SECURITY_DESCRIPTOR SecurityDescriptor;

    /* Create the SD */
    Status = RtlCreateSecurityDescriptor(&SecurityDescriptor,
                                         SECURITY_DESCRIPTOR_REVISION);
    if (!NT_SUCCESS(Status)) return Status;

    /* One ACL with 3 ACEs, containing each one SID */
    DaclLength = sizeof(ACL) +
                 3 * sizeof(ACCESS_ALLOWED_ACE) +
                 RtlLengthSid(SeLocalSystemSid) +
                 RtlLengthSid(SeAliasAdminsSid) +
                 RtlLengthSid(SeWorldSid);

    /* Allocate space for the DACL */
    Dacl = ExAllocatePoolWithTag(PagedPool, DaclLength, 'lcaD');
    if (!Dacl) return STATUS_INSUFFICIENT_RESOURCES;

    /* Setup the ACL inside it */
    Status = RtlCreateAcl(Dacl, DaclLength, ACL_REVISION);
    if (!NT_SUCCESS(Status)) goto CleanUp;

    /* Add query rights for everyone */
    Status = RtlAddAccessAllowedAce(Dacl,
                                    ACL_REVISION,
                                    SYNCHRONIZE | EVENT_QUERY_STATE | READ_CONTROL,
                                    SeWorldSid);
    if (!NT_SUCCESS(Status)) goto CleanUp;

    /* Full rights for the admin */
    Status = RtlAddAccessAllowedAce(Dacl,
                                    ACL_REVISION,
                                    EVENT_ALL_ACCESS,
                                    SeAliasAdminsSid);
    if (!NT_SUCCESS(Status)) goto CleanUp;

    /* As well as full rights for the system */
    Status = RtlAddAccessAllowedAce(Dacl,
                                    ACL_REVISION,
                                    EVENT_ALL_ACCESS,
                                    SeLocalSystemSid);
    if (!NT_SUCCESS(Status)) goto CleanUp;

    /* Set this DACL inside the SD */
    Status = RtlSetDaclSecurityDescriptor(&SecurityDescriptor,
                                          TRUE,
                                          Dacl,
                                          FALSE);
    if (!NT_SUCCESS(Status)) goto CleanUp;

    /* Setup the event attributes, making sure it's a permanent one */
    InitializeObjectAttributes(&ObjectAttributes,
                               Name,
                               OBJ_KERNEL_HANDLE | OBJ_PERMANENT,
                               NULL,
                               &SecurityDescriptor);

    /* Create the event */
    Status = ZwCreateEvent(&EventHandle,
                           EVENT_ALL_ACCESS,
                           &ObjectAttributes,
                           NotificationEvent,
                           FALSE);
CleanUp:
    /* Free the DACL (reached on both success and failure paths) */
    ExFreePoolWithTag(Dacl, 'lcaD');

    /* Check if this is the success path */
    if (NT_SUCCESS(Status))
    {
        /* Add a reference to the object, then close the handle we had */
        Status = ObReferenceObjectByHandle(EventHandle,
                                           EVENT_MODIFY_STATE,
                                           ExEventObjectType,
                                           KernelMode,
                                           (PVOID*)Event,
                                           NULL);
        ZwClose (EventHandle);
    }

    /* Return status */
    return Status;
}
1316
BOOLEAN
NTAPI
INIT_FUNCTION
MiInitializeMemoryEvents(VOID)
{
    /*
     * Computes the low/high memory thresholds in pages — honoring registry
     * overrides (expressed in MB) or deriving RAM-size-based defaults —
     * then creates the six \KernelObjects memory condition events and
     * publishes the initial state. Returns FALSE if any creation fails.
     */
    UNICODE_STRING LowString = RTL_CONSTANT_STRING(L"\\KernelObjects\\LowMemoryCondition");
    UNICODE_STRING HighString = RTL_CONSTANT_STRING(L"\\KernelObjects\\HighMemoryCondition");
    UNICODE_STRING LowPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\LowPagedPoolCondition");
    UNICODE_STRING HighPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\HighPagedPoolCondition");
    UNICODE_STRING LowNonPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\LowNonPagedPoolCondition");
    UNICODE_STRING HighNonPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\HighNonPagedPoolCondition");
    NTSTATUS Status;

    /* Check if we have a registry setting (value is in MB if nonzero) */
    if (MmLowMemoryThreshold)
    {
        /* Convert it to pages */
        MmLowMemoryThreshold *= (_1MB / PAGE_SIZE);
    }
    else
    {
        /* The low memory threshold is hit when we don't consider that we have "plenty" of free pages anymore */
        MmLowMemoryThreshold = MmPlentyFreePages;

        /* More than one GB of memory? (0x40000 pages at 4KB/page) */
        if (MmNumberOfPhysicalPages > 0x40000)
        {
            /* Start at 32MB, and add another 16MB for each GB */
            MmLowMemoryThreshold = (32 * _1MB) / PAGE_SIZE;
            MmLowMemoryThreshold += ((MmNumberOfPhysicalPages - 0x40000) >> 7);
        }
        else if (MmNumberOfPhysicalPages > 0x8000)
        {
            /* For systems with > 128MB RAM, add another 4MB for each 128MB */
            MmLowMemoryThreshold += ((MmNumberOfPhysicalPages - 0x8000) >> 5);
        }

        /* Don't let the minimum threshold go past 64MB */
        MmLowMemoryThreshold = min(MmLowMemoryThreshold, (64 * _1MB) / PAGE_SIZE);
    }

    /* Check if we have a registry setting (value is in MB if nonzero) */
    if (MmHighMemoryThreshold)
    {
        /* Convert it into pages */
        MmHighMemoryThreshold *= (_1MB / PAGE_SIZE);
    }
    else
    {
        /* Otherwise, the default is three times the low memory threshold */
        MmHighMemoryThreshold = 3 * MmLowMemoryThreshold;
        ASSERT(MmHighMemoryThreshold > MmLowMemoryThreshold);
    }

    /* Make sure high threshold is actually higher than the low */
    MmHighMemoryThreshold = max(MmHighMemoryThreshold, MmLowMemoryThreshold);

    /* Create the memory events for all the thresholds */
    Status = MiCreateMemoryEvent(&LowString, &MiLowMemoryEvent);
    if (!NT_SUCCESS(Status)) return FALSE;
    Status = MiCreateMemoryEvent(&HighString, &MiHighMemoryEvent);
    if (!NT_SUCCESS(Status)) return FALSE;
    Status = MiCreateMemoryEvent(&LowPagedPoolString, &MiLowPagedPoolEvent);
    if (!NT_SUCCESS(Status)) return FALSE;
    Status = MiCreateMemoryEvent(&HighPagedPoolString, &MiHighPagedPoolEvent);
    if (!NT_SUCCESS(Status)) return FALSE;
    Status = MiCreateMemoryEvent(&LowNonPagedPoolString, &MiLowNonPagedPoolEvent);
    if (!NT_SUCCESS(Status)) return FALSE;
    Status = MiCreateMemoryEvent(&HighNonPagedPoolString, &MiHighNonPagedPoolEvent);
    if (!NT_SUCCESS(Status)) return FALSE;

    /* Now setup the pool events */
    MiInitializePoolEvents();

    /* Set the initial event state */
    MiNotifyMemoryEvents();
    return TRUE;
}
1395
VOID
NTAPI
INIT_FUNCTION
MiAddHalIoMappings(VOID)
{
    /*
     * Walks the HAL heap VA range (MM_HAL_VA_START..MM_HAL_VA_END) and, for
     * every valid small-page mapping the HAL created, checks whether the
     * target page frame has a PFN entry. Frames without one are device /
     * firmware memory; such mappings are currently only reported as unsafe
     * (no PAT cache-attribute tracking is done yet — see FIXME below).
     */
    PVOID BaseAddress;
    PMMPDE PointerPde, LastPde;
    PMMPTE PointerPte;
    ULONG j;
    PFN_NUMBER PageFrameIndex;

    /* HAL Heap address -- should be on a PDE boundary */
    BaseAddress = (PVOID)MM_HAL_VA_START;
    ASSERT(MiAddressToPteOffset(BaseAddress) == 0);

    /* Check how many PDEs the heap has */
    PointerPde = MiAddressToPde(BaseAddress);
    LastPde = MiAddressToPde((PVOID)MM_HAL_VA_END);

    while (PointerPde <= LastPde)
    {
        /* Does the HAL own this mapping? (large pages are skipped entirely) */
        if ((PointerPde->u.Hard.Valid == 1) &&
            (MI_IS_PAGE_LARGE(PointerPde) == FALSE))
        {
            /* Get the PTE for it and scan each page */
            PointerPte = MiAddressToPte(BaseAddress);
            for (j = 0 ; j < PTE_COUNT; j++)
            {
                /* Does the HAL own this page? */
                if (PointerPte->u.Hard.Valid == 1)
                {
                    /* Is the HAL using it for device or I/O mapped memory? */
                    PageFrameIndex = PFN_FROM_PTE(PointerPte);
                    if (!MiGetPfnEntry(PageFrameIndex))
                    {
                        /* FIXME: For PAT, we need to track I/O cache attributes for coherency */
                        DPRINT1("HAL I/O Mapping at %p is unsafe\n", BaseAddress);
                    }
                }

                /* Move to the next page */
                BaseAddress = (PVOID)((ULONG_PTR)BaseAddress + PAGE_SIZE);
                PointerPte++;
            }
        }
        else
        {
            /* Move to the next address (skip the whole PDE worth of VA) */
            BaseAddress = (PVOID)((ULONG_PTR)BaseAddress + PDE_MAPPED_VA);
        }

        /* Move to the next PDE */
        PointerPde++;
    }
}
1452
VOID
NTAPI
MmDumpArmPfnDatabase(IN BOOLEAN StatusOnly)
{
    /*
     * Debug dump of the ARM3 PFN database. Raises to HIGH_LEVEL while
     * scanning so the database does not change underneath us. When
     * StatusOnly is TRUE only the summary counters are printed; otherwise
     * every PFN entry is listed. With MI_TRACE_PFNS enabled, per-consumer
     * usage buckets are printed as well.
     */
    ULONG i;
    PMMPFN Pfn1;
    PCHAR Consumer = "Unknown";
    KIRQL OldIrql;
    ULONG ActivePages = 0, FreePages = 0, OtherPages = 0;
#if MI_TRACE_PFNS
    ULONG UsageBucket[MI_USAGE_FREE_PAGE + 1] = {0};
    /* Index order must match the MI_USAGE_* enumeration */
    PCHAR MI_USAGE_TEXT[MI_USAGE_FREE_PAGE + 1] =
    {
        "Not set",
        "Paged Pool",
        "Nonpaged Pool",
        "Nonpaged Pool Ex",
        "Kernel Stack",
        "Kernel Stack Ex",
        "System PTE",
        "VAD",
        "PEB/TEB",
        "Section",
        "Page Table",
        "Page Directory",
        "Old Page Table",
        "Driver Page",
        "Contiguous Alloc",
        "MDL",
        "Demand Zero",
        "Zero Loop",
        "Cache",
        "PFN Database",
        "Boot Driver",
        "Initial Memory",
        "Free Page"
    };
#endif
    //
    // Loop the PFN database
    //
    KeRaiseIrql(HIGH_LEVEL, &OldIrql);
    for (i = 0; i <= MmHighestPhysicalPage; i++)
    {
        Pfn1 = MiGetPfnEntry(i);
        if (!Pfn1) continue;
#if MI_TRACE_PFNS
        ASSERT(Pfn1->PfnUsage <= MI_USAGE_FREE_PAGE);
#endif
        //
        // Get the page location
        //
        switch (Pfn1->u3.e1.PageLocation)
        {
            case ActiveAndValid:

                Consumer = "Active and Valid";
                ActivePages++;
                break;

            case ZeroedPageList:

                Consumer = "Zero Page List";
                FreePages++;
                break;//continue;

            case FreePageList:

                Consumer = "Free Page List";
                FreePages++;
                break;//continue;

            default:

                Consumer = "Other (ASSERT!)";
                OtherPages++;
                break;
        }

#if MI_TRACE_PFNS
        /* Add into bucket */
        UsageBucket[Pfn1->PfnUsage]++;
#endif

        //
        // Pretty-print the page
        //
        if (!StatusOnly)
            DbgPrint("0x%08p:\t%20s\t(%04d.%04d)\t[%16s - %16s])\n",
                     i << PAGE_SHIFT,
                     Consumer,
                     Pfn1->u3.e2.ReferenceCount,
                     Pfn1->u2.ShareCount == LIST_HEAD ? 0xFFFF : Pfn1->u2.ShareCount,
#if MI_TRACE_PFNS
                     MI_USAGE_TEXT[Pfn1->PfnUsage],
                     Pfn1->ProcessName);
#else
                     "Page tracking",
                     "is disabled");
#endif
    }

    DbgPrint("Active:               %5d pages\t[%6d KB]\n", ActivePages,  (ActivePages    << PAGE_SHIFT) / 1024);
    DbgPrint("Free:                 %5d pages\t[%6d KB]\n", FreePages,    (FreePages      << PAGE_SHIFT) / 1024);
    DbgPrint("-----------------------------------------\n");
#if MI_TRACE_PFNS
    OtherPages = UsageBucket[MI_USAGE_BOOT_DRIVER];
    DbgPrint("Boot Images:          %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
    OtherPages = UsageBucket[MI_USAGE_DRIVER_PAGE];
    DbgPrint("System Drivers:       %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
    OtherPages = UsageBucket[MI_USAGE_PFN_DATABASE];
    DbgPrint("PFN Database:         %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
    OtherPages = UsageBucket[MI_USAGE_PAGE_TABLE] + UsageBucket[MI_USAGE_LEGACY_PAGE_DIRECTORY];
    DbgPrint("Page Tables:          %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
    OtherPages = UsageBucket[MI_USAGE_NONPAGED_POOL] + UsageBucket[MI_USAGE_NONPAGED_POOL_EXPANSION];
    DbgPrint("NonPaged Pool:        %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
    OtherPages = UsageBucket[MI_USAGE_PAGED_POOL];
    DbgPrint("Paged Pool:           %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
    OtherPages = UsageBucket[MI_USAGE_KERNEL_STACK] + UsageBucket[MI_USAGE_KERNEL_STACK_EXPANSION];
    DbgPrint("Kernel Stack:         %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
    OtherPages = UsageBucket[MI_USAGE_INIT_MEMORY];
    DbgPrint("Init Memory:          %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
    OtherPages = UsageBucket[MI_USAGE_SECTION];
    DbgPrint("Sections:             %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
    OtherPages = UsageBucket[MI_USAGE_CACHE];
    DbgPrint("Cache:                %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
#endif
    KeLowerIrql(OldIrql);
}
1582
1583 PPHYSICAL_MEMORY_DESCRIPTOR
1584 NTAPI
1585 INIT_FUNCTION
1586 MmInitializeMemoryLimits(IN PLOADER_PARAMETER_BLOCK LoaderBlock,
1587 IN PBOOLEAN IncludeType)
1588 {
1589 PLIST_ENTRY NextEntry;
1590 ULONG Run = 0, InitialRuns;
1591 PFN_NUMBER NextPage = -1, PageCount = 0;
1592 PPHYSICAL_MEMORY_DESCRIPTOR Buffer, NewBuffer;
1593 PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
1594
1595 //
1596 // Start with the maximum we might need
1597 //
1598 InitialRuns = MiNumberDescriptors;
1599
1600 //
1601 // Allocate the maximum we'll ever need
1602 //
1603 Buffer = ExAllocatePoolWithTag(NonPagedPool,
1604 sizeof(PHYSICAL_MEMORY_DESCRIPTOR) +
1605 sizeof(PHYSICAL_MEMORY_RUN) *
1606 (InitialRuns - 1),
1607 'lMmM');
1608 if (!Buffer) return NULL;
1609
1610 //
1611 // For now that's how many runs we have
1612 //
1613 Buffer->NumberOfRuns = InitialRuns;
1614
1615 //
1616 // Now loop through the descriptors again
1617 //
1618 NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
1619 while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
1620 {
1621 //
1622 // Grab each one, and check if it's one we should include
1623 //
1624 MdBlock = CONTAINING_RECORD(NextEntry,
1625 MEMORY_ALLOCATION_DESCRIPTOR,
1626 ListEntry);
1627 if ((MdBlock->MemoryType < LoaderMaximum) &&
1628 (IncludeType[MdBlock->MemoryType]))
1629 {
1630 //
1631 // Add this to our running total
1632 //
1633 PageCount += MdBlock->PageCount;
1634
1635 //
1636 // Check if the next page is described by the next descriptor
1637 //
1638 if (MdBlock->BasePage == NextPage)
1639 {
1640 //
1641 // Combine it into the same physical run
1642 //
1643 ASSERT(MdBlock->PageCount != 0);
1644 Buffer->Run[Run - 1].PageCount += MdBlock->PageCount;
1645 NextPage += MdBlock->PageCount;
1646 }
1647 else
1648 {
1649 //
1650 // Otherwise just duplicate the descriptor's contents
1651 //
1652 Buffer->Run[Run].BasePage = MdBlock->BasePage;
1653 Buffer->Run[Run].PageCount = MdBlock->PageCount;
1654 NextPage = Buffer->Run[Run].BasePage + Buffer->Run[Run].PageCount;
1655
1656 //
1657 // And in this case, increase the number of runs
1658 //
1659 Run++;
1660 }
1661 }
1662
1663 //
1664 // Try the next descriptor
1665 //
1666 NextEntry = MdBlock->ListEntry.Flink;
1667 }
1668
1669 //
1670 // We should not have been able to go past our initial estimate
1671 //
1672 ASSERT(Run <= Buffer->NumberOfRuns);
1673
1674 //
1675 // Our guess was probably exaggerated...
1676 //
1677 if (InitialRuns > Run)
1678 {
1679 //
1680 // Allocate a more accurately sized buffer
1681 //
1682 NewBuffer = ExAllocatePoolWithTag(NonPagedPool,
1683 sizeof(PHYSICAL_MEMORY_DESCRIPTOR) +
1684 sizeof(PHYSICAL_MEMORY_RUN) *
1685 (Run - 1),
1686 'lMmM');
1687 if (NewBuffer)
1688 {
1689 //
1690 // Copy the old buffer into the new, then free it
1691 //
1692 RtlCopyMemory(NewBuffer->Run,
1693 Buffer->Run,
1694 sizeof(PHYSICAL_MEMORY_RUN) * Run);
1695 ExFreePoolWithTag(Buffer, 'lMmM');
1696
1697 //
1698 // Now use the new buffer
1699 //
1700 Buffer = NewBuffer;
1701 }
1702 }
1703
1704 //
1705 // Write the final numbers, and return it
1706 //
1707 Buffer->NumberOfRuns = Run;
1708 Buffer->NumberOfPages = PageCount;
1709 return Buffer;
1710 }
1711
VOID
NTAPI
INIT_FUNCTION
MiBuildPagedPool(VOID)
{
    /*
     * Sizes and creates the initial paged pool region. On 2-level paging
     * systems it first double-maps the system page directory (used for lazy
     * PDE evaluation across process switches); it then maps the first paged
     * pool PDE, builds the allocation and end-of-allocation bitmaps that
     * track pool pages, initializes the pool itself, and sets the paged
     * pool low/high event thresholds.
     */
    PMMPTE PointerPte;
    PMMPDE PointerPde;
    MMPDE TempPde = ValidKernelPde;
    PFN_NUMBER PageFrameIndex;
    KIRQL OldIrql;
    SIZE_T Size;
    ULONG BitMapSize;
#if (_MI_PAGING_LEVELS >= 3)
    MMPPE TempPpe = ValidKernelPpe;
    PMMPPE PointerPpe;
#elif (_MI_PAGING_LEVELS == 2)
    MMPTE TempPte = ValidKernelPte;

    //
    // Get the page frame number for the system page directory
    //
    PointerPte = MiAddressToPte(PDE_BASE);
    ASSERT(PD_COUNT == 1);
    MmSystemPageDirectory[0] = PFN_FROM_PTE(PointerPte);

    //
    // Allocate a system PTE which will hold a copy of the page directory
    //
    PointerPte = MiReserveSystemPtes(1, SystemPteSpace);
    ASSERT(PointerPte);
    MmSystemPagePtes = MiPteToAddress(PointerPte);

    //
    // Make this system PTE point to the system page directory.
    // It is now essentially double-mapped. This will be used later for lazy
    // evaluation of PDEs accross process switches, similarly to how the Global
    // page directory array in the old ReactOS Mm is used (but in a less hacky
    // way).
    //
    TempPte = ValidKernelPte;
    ASSERT(PD_COUNT == 1);
    TempPte.u.Hard.PageFrameNumber = MmSystemPageDirectory[0];
    MI_WRITE_VALID_PTE(PointerPte, TempPte);
#endif
    //
    // Let's get back to paged pool work: size it up.
    // By default, it should be twice as big as nonpaged pool.
    //
    MmSizeOfPagedPoolInBytes = 2 * MmMaximumNonPagedPoolInBytes;
    if (MmSizeOfPagedPoolInBytes > ((ULONG_PTR)MmNonPagedSystemStart -
                                    (ULONG_PTR)MmPagedPoolStart))
    {
        //
        // On the other hand, we have limited VA space, so make sure that the VA
        // for paged pool doesn't overflow into nonpaged pool VA. Otherwise, set
        // whatever maximum is possible.
        //
        MmSizeOfPagedPoolInBytes = (ULONG_PTR)MmNonPagedSystemStart -
                                   (ULONG_PTR)MmPagedPoolStart;
    }

    //
    // Get the size in pages and make sure paged pool is at least 32MB.
    //
    Size = MmSizeOfPagedPoolInBytes;
    if (Size < MI_MIN_INIT_PAGED_POOLSIZE) Size = MI_MIN_INIT_PAGED_POOLSIZE;
    Size = BYTES_TO_PAGES(Size);

    //
    // Now check how many PTEs will be required for these many pages.
    // NOTE(review): the 1024 here (and below) is PTEs-per-page-table on x86
    // non-PAE — confirm this for PAE builds, where a page table holds 512.
    //
    Size = (Size + (1024 - 1)) / 1024;

    //
    // Recompute the page-aligned size of the paged pool, in bytes and pages.
    //
    MmSizeOfPagedPoolInBytes = Size * PAGE_SIZE * 1024;
    MmSizeOfPagedPoolInPages = MmSizeOfPagedPoolInBytes >> PAGE_SHIFT;

    //
    // Let's be really sure this doesn't overflow into nonpaged system VA
    //
    ASSERT((MmSizeOfPagedPoolInBytes + (ULONG_PTR)MmPagedPoolStart) <=
           (ULONG_PTR)MmNonPagedSystemStart);

    //
    // This is where paged pool ends
    //
    MmPagedPoolEnd = (PVOID)(((ULONG_PTR)MmPagedPoolStart +
                              MmSizeOfPagedPoolInBytes) - 1);

    //
    // Lock the PFN database
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

#if (_MI_PAGING_LEVELS >= 3)
    /* On these systems, there's no double-mapping, so instead, the PPEs
     * are setup to span the entire paged pool area, so there's no need for the
     * system PD */
    for (PointerPpe = MiAddressToPpe(MmPagedPoolStart);
         PointerPpe <= MiAddressToPpe(MmPagedPoolEnd);
         PointerPpe++)
    {
        /* Check if the PPE is already valid */
        if (!PointerPpe->u.Hard.Valid)
        {
            /* It is not, so map a fresh zeroed page */
            TempPpe.u.Hard.PageFrameNumber = MiRemoveZeroPage(0);
            MI_WRITE_VALID_PPE(PointerPpe, TempPpe);
        }
    }
#endif

    //
    // So now get the PDE for paged pool and zero it out
    //
    PointerPde = MiAddressToPde(MmPagedPoolStart);
    RtlZeroMemory(PointerPde,
                  (1 + MiAddressToPde(MmPagedPoolEnd) - PointerPde) * sizeof(MMPDE));

    //
    // Next, get the first and last PTE
    //
    PointerPte = MiAddressToPte(MmPagedPoolStart);
    MmPagedPoolInfo.FirstPteForPagedPool = PointerPte;
    MmPagedPoolInfo.LastPteForPagedPool = MiAddressToPte(MmPagedPoolEnd);

    /* Allocate a page and map the first paged pool PDE */
    MI_SET_USAGE(MI_USAGE_PAGED_POOL);
    MI_SET_PROCESS2("Kernel");
    PageFrameIndex = MiRemoveZeroPage(0);
    TempPde.u.Hard.PageFrameNumber = PageFrameIndex;
    MI_WRITE_VALID_PDE(PointerPde, TempPde);
#if (_MI_PAGING_LEVELS >= 3)
    /* Use the PPE of MmPagedPoolStart that was setup above */
    // Bla = PFN_FROM_PTE(PpeAddress(MmPagedPool...));

    /* Initialize the PFN entry for it */
    MiInitializePfnForOtherProcess(PageFrameIndex,
                                   (PMMPTE)PointerPde,
                                   PFN_FROM_PTE(MiAddressToPpe(MmPagedPoolStart)));
#else
    /* Do it this way */
    // Bla = MmSystemPageDirectory[(PointerPde - (PMMPTE)PDE_BASE) / PDE_COUNT]

    /* Initialize the PFN entry for it */
    MiInitializePfnForOtherProcess(PageFrameIndex,
                                   (PMMPTE)PointerPde,
                                   MmSystemPageDirectory[(PointerPde - (PMMPDE)PDE_BASE) / PDE_COUNT]);
#endif

    //
    // Release the PFN database lock
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    //
    // We only have one PDE mapped for now... at fault time, additional PDEs
    // will be allocated to handle paged pool growth. This is where they'll have
    // to start.
    //
    MmPagedPoolInfo.NextPdeForPagedPoolExpansion = PointerPde + 1;

    //
    // We keep track of each page via a bit, so check how big the bitmap will
    // have to be (make sure to align our page count such that it fits nicely
    // into a 4-byte aligned bitmap.
    //
    // We'll also allocate the bitmap header itself part of the same buffer.
    //
    Size = Size * 1024;
    ASSERT(Size == MmSizeOfPagedPoolInPages);
    BitMapSize = (ULONG)Size;
    Size = sizeof(RTL_BITMAP) + (((Size + 31) / 32) * sizeof(ULONG));

    //
    // Allocate the allocation bitmap, which tells us which regions have not yet
    // been mapped into memory
    //
    MmPagedPoolInfo.PagedPoolAllocationMap = ExAllocatePoolWithTag(NonPagedPool,
                                                                   Size,
                                                                   '  mM');
    ASSERT(MmPagedPoolInfo.PagedPoolAllocationMap);

    //
    // Initialize it such that at first, only the first page's worth of PTEs is
    // marked as allocated (incidentially, the first PDE we allocated earlier).
    //
    RtlInitializeBitMap(MmPagedPoolInfo.PagedPoolAllocationMap,
                        (PULONG)(MmPagedPoolInfo.PagedPoolAllocationMap + 1),
                        BitMapSize);
    RtlSetAllBits(MmPagedPoolInfo.PagedPoolAllocationMap);
    RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap, 0, 1024);

    //
    // We have a second bitmap, which keeps track of where allocations end.
    // Given the allocation bitmap and a base address, we can therefore figure
    // out which page is the last page of that allocation, and thus how big the
    // entire allocation is.
    //
    MmPagedPoolInfo.EndOfPagedPoolBitmap = ExAllocatePoolWithTag(NonPagedPool,
                                                                 Size,
                                                                 '  mM');
    ASSERT(MmPagedPoolInfo.EndOfPagedPoolBitmap);
    RtlInitializeBitMap(MmPagedPoolInfo.EndOfPagedPoolBitmap,
                        (PULONG)(MmPagedPoolInfo.EndOfPagedPoolBitmap + 1),
                        BitMapSize);

    //
    // Since no allocations have been made yet, there are no bits set as the end
    //
    RtlClearAllBits(MmPagedPoolInfo.EndOfPagedPoolBitmap);

    //
    // Initialize paged pool.
    //
    InitializePool(PagedPool, 0);

    /* Initialize special pool */
    MiInitializeSpecialPool();

    /* Default low threshold of 30MB or one fifth of paged pool */
    MiLowPagedPoolThreshold = (30 * _1MB) >> PAGE_SHIFT;
    MiLowPagedPoolThreshold = min(MiLowPagedPoolThreshold, Size / 5);

    /* Default high threshold of 60MB or 25% */
    MiHighPagedPoolThreshold = (60 * _1MB) >> PAGE_SHIFT;
    MiHighPagedPoolThreshold = min(MiHighPagedPoolThreshold, (Size * 2) / 5);
    ASSERT(MiLowPagedPoolThreshold < MiHighPagedPoolThreshold);

    /* Setup the global session space */
    MiInitializeSystemSpaceMap(NULL);
}
1946
1947 VOID
1948 NTAPI
1949 INIT_FUNCTION
1950 MiDbgDumpMemoryDescriptors(VOID)
1951 {
1952 PLIST_ENTRY NextEntry;
1953 PMEMORY_ALLOCATION_DESCRIPTOR Md;
1954 PFN_NUMBER TotalPages = 0;
1955 PCHAR
1956 MemType[] =
1957 {
1958 "ExceptionBlock ",
1959 "SystemBlock ",
1960 "Free ",
1961 "Bad ",
1962 "LoadedProgram ",
1963 "FirmwareTemporary ",
1964 "FirmwarePermanent ",
1965 "OsloaderHeap ",
1966 "OsloaderStack ",
1967 "SystemCode ",
1968 "HalCode ",
1969 "BootDriver ",
1970 "ConsoleInDriver ",
1971 "ConsoleOutDriver ",
1972 "StartupDpcStack ",
1973 "StartupKernelStack",
1974 "StartupPanicStack ",
1975 "StartupPcrPage ",
1976 "StartupPdrPage ",
1977 "RegistryData ",
1978 "MemoryData ",
1979 "NlsData ",
1980 "SpecialMemory ",
1981 "BBTMemory ",
1982 "LoaderReserve ",
1983 "LoaderXIPRom "
1984 };
1985
1986 DPRINT1("Base\t\tLength\t\tType\n");
1987 for (NextEntry = KeLoaderBlock->MemoryDescriptorListHead.Flink;
1988 NextEntry != &KeLoaderBlock->MemoryDescriptorListHead;
1989 NextEntry = NextEntry->Flink)
1990 {
1991 Md = CONTAINING_RECORD(NextEntry, MEMORY_ALLOCATION_DESCRIPTOR, ListEntry);
1992 DPRINT1("%08lX\t%08lX\t%s\n", Md->BasePage, Md->PageCount, MemType[Md->MemoryType]);
1993 TotalPages += Md->PageCount;
1994 }
1995
1996 DPRINT1("Total: %08lX (%d MB)\n", (ULONG)TotalPages, (ULONG)(TotalPages * PAGE_SIZE) / 1024 / 1024);
1997 }
1998
/*
 * MmArmInitSystem
 *
 * Entry point for ARM3 memory manager initialization.
 *
 * Phase       - System initialization phase. Only Phase 0 does work in this
 *               function; for any other phase the body is skipped and TRUE
 *               is returned.
 * LoaderBlock - Loader parameter block from the boot loader; used for the
 *               memory descriptor list, loader extension data, and the
 *               loaded module list.
 *
 * Returns TRUE on success, or FALSE if the resident available page budget
 * goes non-positive after charging the system cache working set minimum.
 */
BOOLEAN
NTAPI
INIT_FUNCTION
MmArmInitSystem(IN ULONG Phase,
                IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
    ULONG i;
    BOOLEAN IncludeType[LoaderMaximum];   /* Per-descriptor-type RAM inclusion map */
    PVOID Bitmap;                         /* Backing storage for MiPfnBitMap */
    PPHYSICAL_MEMORY_RUN Run;
    PFN_NUMBER PageCount;
#if DBG
    /* Scratch variables for the PTE encoding self-tests below */
    ULONG j;
    PMMPTE PointerPte, TestPte;
    MMPTE TempPte;
#endif

    /* Dump memory descriptors */
    if (MiDbgEnableMdDump) MiDbgDumpMemoryDescriptors();

    //
    // Instantiate memory that we don't consider RAM/usable
    // We use the same exclusions that Windows does, in order to try to be
    // compatible with WinLDR-style booting
    //
    for (i = 0; i < LoaderMaximum; i++) IncludeType[i] = TRUE;
    IncludeType[LoaderBad] = FALSE;
    IncludeType[LoaderFirmwarePermanent] = FALSE;
    IncludeType[LoaderSpecialMemory] = FALSE;
    IncludeType[LoaderBBTMemory] = FALSE;
    if (Phase == 0)
    {
        /* Count physical pages on the system */
        MiScanMemoryDescriptors(LoaderBlock);

        /* Initialize the phase 0 temporary event */
        KeInitializeEvent(&MiTempEvent, NotificationEvent, FALSE);

        /* Set all the events to use the temporary event for now.
           The real per-condition events are created later in init;
           until then they all alias MiTempEvent so signaling is safe. */
        MiLowMemoryEvent = &MiTempEvent;
        MiHighMemoryEvent = &MiTempEvent;
        MiLowPagedPoolEvent = &MiTempEvent;
        MiHighPagedPoolEvent = &MiTempEvent;
        MiLowNonPagedPoolEvent = &MiTempEvent;
        MiHighNonPagedPoolEvent = &MiTempEvent;

        //
        // Define the basic user vs. kernel address space separation
        //
        MmSystemRangeStart = (PVOID)MI_DEFAULT_SYSTEM_RANGE_START;
        MmUserProbeAddress = (ULONG_PTR)MI_USER_PROBE_ADDRESS;
        MmHighestUserAddress = (PVOID)MI_HIGHEST_USER_ADDRESS;

        /* Highest PTE and PDE based on the addresses above */
        MiHighestUserPte = MiAddressToPte(MmHighestUserAddress);
        MiHighestUserPde = MiAddressToPde(MmHighestUserAddress);
#if (_MI_PAGING_LEVELS >= 3)
        /* On 3+ level paging, also compute the highest user PPE/PXE */
        MiHighestUserPpe = MiAddressToPpe(MmHighestUserAddress);
#if (_MI_PAGING_LEVELS >= 4)
        MiHighestUserPxe = MiAddressToPxe(MmHighestUserAddress);
#endif
#endif
        //
        // Get the size of the boot loader's image allocations and then round
        // that region up to a PDE size, so that any PDEs we might create for
        // whatever follows are separate from the PDEs that boot loader might've
        // already created (and later, we can blow all that away if we want to).
        //
        MmBootImageSize = KeLoaderBlock->Extension->LoaderPagesSpanned;
        MmBootImageSize *= PAGE_SIZE;
        MmBootImageSize = (MmBootImageSize + PDE_MAPPED_VA - 1) & ~(PDE_MAPPED_VA - 1);
        ASSERT((MmBootImageSize % PDE_MAPPED_VA) == 0);

        /* Initialize session space address layout */
        MiInitializeSessionSpaceLayout();

        /* Set the based section highest address (8MB below the highest user
           address, reserving room for based-section mappings) */
        MmHighSectionBase = (PVOID)((ULONG_PTR)MmHighestUserAddress - 0x800000);

#if DBG
        /* The subsection PTE format depends on things being 8-byte aligned */
        ASSERT((sizeof(CONTROL_AREA) % 8) == 0);
        ASSERT((sizeof(SUBSECTION) % 8) == 0);

        /* Prototype PTEs are assumed to be in paged pool, so check if the math works */
        PointerPte = (PMMPTE)MmPagedPoolStart;
        MI_MAKE_PROTOTYPE_PTE(&TempPte, PointerPte);
        TestPte = MiProtoPteToPte(&TempPte);
        ASSERT(PointerPte == TestPte);

        /* Try the last nonpaged pool address */
        PointerPte = (PMMPTE)MI_NONPAGED_POOL_END;
        MI_MAKE_PROTOTYPE_PTE(&TempPte, PointerPte);
        TestPte = MiProtoPteToPte(&TempPte);
        ASSERT(PointerPte == TestPte);

        /* Try a bunch of random addresses near the end of the address space */
        PointerPte = (PMMPTE)0xFFFC8000;
        for (j = 0; j < 20; j += 1)
        {
            MI_MAKE_PROTOTYPE_PTE(&TempPte, PointerPte);
            TestPte = MiProtoPteToPte(&TempPte);
            ASSERT(PointerPte == TestPte);
            PointerPte++;
        }

        /* Subsection PTEs are always in nonpaged pool, pick a random address to try */
        PointerPte = (PMMPTE)0xFFAACBB8;
        MI_MAKE_SUBSECTION_PTE(&TempPte, PointerPte);
        TestPte = MiSubsectionPteToSubsection(&TempPte);
        ASSERT(PointerPte == TestPte);
#endif

        /* Loop all 8 standby lists */
        for (i = 0; i < 8; i++)
        {
            /* Initialize them as empty (MM_EMPTY_LIST sentinels, zero count) */
            MmStandbyPageListByPriority[i].Total = 0;
            MmStandbyPageListByPriority[i].ListName = StandbyPageList;
            MmStandbyPageListByPriority[i].Flink = MM_EMPTY_LIST;
            MmStandbyPageListByPriority[i].Blink = MM_EMPTY_LIST;
        }

        /* Initialize the user mode image list */
        InitializeListHead(&MmLoadedUserImageList);

        /* Initialize critical section timeout value (relative time is negative).
           -10000000 is one second in 100ns units. */
        MmCriticalSectionTimeout.QuadPart = MmCritsectTimeoutSeconds * (-10000000LL);

        /* Initialize the paged pool mutex and the section commit mutex */
        KeInitializeGuardedMutex(&MmPagedPoolMutex);
        KeInitializeGuardedMutex(&MmSectionCommitMutex);
        KeInitializeGuardedMutex(&MmSectionBasedMutex);

        /* Initialize the Loader Lock */
        KeInitializeMutant(&MmSystemLoadLock, FALSE);

        /* Set the zero page event */
        KeInitializeEvent(&MmZeroingPageEvent, SynchronizationEvent, FALSE);
        MmZeroingPageThreadActive = FALSE;

        /* Initialize the dead stack S-LIST */
        InitializeSListHead(&MmDeadStackSListHead);

        //
        // Check if this is a machine with less than 19MB of RAM
        //
        PageCount = MmNumberOfPhysicalPages;
        if (PageCount < MI_MIN_PAGES_FOR_SYSPTE_TUNING)
        {
            //
            // Use the very minimum of system PTEs
            //
            MmNumberOfSystemPtes = 7000;
        }
        else
        {
            //
            // Use the default
            //
            MmNumberOfSystemPtes = 11000;
            if (PageCount > MI_MIN_PAGES_FOR_SYSPTE_BOOST)
            {
                //
                // Double the amount of system PTEs
                //
                MmNumberOfSystemPtes <<= 1;
            }
            if (PageCount > MI_MIN_PAGES_FOR_SYSPTE_BOOST_BOOST)
            {
                //
                // Double the amount of system PTEs
                //
                MmNumberOfSystemPtes <<= 1;
            }
        }

        DPRINT("System PTE count has been tuned to %d (%d bytes)\n",
               MmNumberOfSystemPtes, MmNumberOfSystemPtes * PAGE_SIZE);

        /* Initialize the working set lock */
        ExInitializePushLock(&MmSystemCacheWs.WorkingSetMutex);

        /* Set commit limit (provisional 2GB; refined from MmAvailablePages below) */
        MmTotalCommitLimit = 2 * _1GB;
        MmTotalCommitLimitMaximum = MmTotalCommitLimit;

        /* Has the allocation fragment been setup? */
        if (!MmAllocationFragment)
        {
            /* Use the default value */
            MmAllocationFragment = MI_ALLOCATION_FRAGMENT;
            if (PageCount < ((256 * _1MB) / PAGE_SIZE))
            {
                /* On memory systems with less than 256MB, divide by 4 */
                MmAllocationFragment = MI_ALLOCATION_FRAGMENT / 4;
            }
            else if (PageCount < (_1GB / PAGE_SIZE))
            {
                /* On systems with less than 1GB, divide by 2 */
                MmAllocationFragment = MI_ALLOCATION_FRAGMENT / 2;
            }
        }
        else
        {
            /* Registry override is given in KB units; convert to bytes,
               then round up to whole pages */
            MmAllocationFragment *= _1KB;
            MmAllocationFragment = ROUND_TO_PAGES(MmAllocationFragment);

            /* Don't let it past the maximum */
            MmAllocationFragment = min(MmAllocationFragment,
                                       MI_MAX_ALLOCATION_FRAGMENT);

            /* Don't let it too small either */
            MmAllocationFragment = max(MmAllocationFragment,
                                       MI_MIN_ALLOCATION_FRAGMENT);
        }

        /* Check for kernel stack size that's too big (registry value is in KB) */
        if (MmLargeStackSize > (KERNEL_LARGE_STACK_SIZE / _1KB))
        {
            /* Sanitize to default value */
            MmLargeStackSize = KERNEL_LARGE_STACK_SIZE;
        }
        else
        {
            /* Take the registry setting, and convert it into bytes */
            MmLargeStackSize *= _1KB;

            /* Now align it to a page boundary */
            MmLargeStackSize = PAGE_ROUND_UP(MmLargeStackSize);

            /* Sanity checks */
            ASSERT(MmLargeStackSize <= KERNEL_LARGE_STACK_SIZE);
            ASSERT((MmLargeStackSize & (PAGE_SIZE - 1)) == 0);

            /* Make sure it's not too low */
            if (MmLargeStackSize < KERNEL_STACK_SIZE) MmLargeStackSize = KERNEL_STACK_SIZE;
        }

        /* Compute color information (L2 cache-separated paging lists) */
        MiComputeColorInformation();

        // Calculate the number of bytes for the PFN database
        // then add the color tables (two MMCOLOR_TABLES arrays: free and
        // zeroed lists, one entry per secondary color) and convert to pages
        MxPfnAllocation = (MmHighestPhysicalPage + 1) * sizeof(MMPFN);
        MxPfnAllocation += (MmSecondaryColors * sizeof(MMCOLOR_TABLES) * 2);
        MxPfnAllocation >>= PAGE_SHIFT;

        // We have to add one to the count here, because in the process of
        // shifting down to the page size, we actually ended up getting the
        // lower aligned size (so say, 0x5FFFF bytes is now 0x5F pages).
        // Later on, we'll shift this number back into bytes, which would cause
        // us to end up with only 0x5F000 bytes -- when we actually want to have
        // 0x60000 bytes.
        MxPfnAllocation++;

        /* Initialize the platform-specific parts */
        MiInitMachineDependent(LoaderBlock);

        //
        // Build the physical memory block
        //
        MmPhysicalMemoryBlock = MmInitializeMemoryLimits(LoaderBlock,
                                                         IncludeType);

        //
        // Allocate enough buffer for the PFN bitmap (one bit per PFN)
        // Align it up to a 32-bit boundary
        //
        Bitmap = ExAllocatePoolWithTag(NonPagedPool,
                                       (((MmHighestPhysicalPage + 1) + 31) / 32) * 4,
                                       ' mM');
        if (!Bitmap)
        {
            //
            // This is critical
            //
            KeBugCheckEx(INSTALL_MORE_MEMORY,
                         MmNumberOfPhysicalPages,
                         MmLowestPhysicalPage,
                         MmHighestPhysicalPage,
                         0x101);
        }

        //
        // Initialize it and clear all the bits to begin with
        //
        RtlInitializeBitMap(&MiPfnBitMap,
                            Bitmap,
                            (ULONG)MmHighestPhysicalPage + 1);
        RtlClearAllBits(&MiPfnBitMap);

        //
        // Loop physical memory runs
        //
        for (i = 0; i < MmPhysicalMemoryBlock->NumberOfRuns; i++)
        {
            //
            // Get the run
            //
            Run = &MmPhysicalMemoryBlock->Run[i];
            /* NOTE(review): BasePage << PAGE_SHIFT is debug-only output;
               it presumably truncates for PFNs above 4GB on 32-bit — confirm */
            DPRINT("PHYSICAL RAM [0x%08p to 0x%08p]\n",
                   Run->BasePage << PAGE_SHIFT,
                   (Run->BasePage + Run->PageCount) << PAGE_SHIFT);

            //
            // Make sure it has pages inside it
            //
            if (Run->PageCount)
            {
                //
                // Set the bits in the PFN bitmap
                //
                RtlSetBits(&MiPfnBitMap, (ULONG)Run->BasePage, (ULONG)Run->PageCount);
            }
        }

        /* Look for large page cache entries that need caching */
        MiSyncCachedRanges();

        /* Loop for HAL Heap I/O device mappings that need coherency tracking */
        MiAddHalIoMappings();

        /* Set the initial resident page count (32 pages are held back) */
        MmResidentAvailablePages = MmAvailablePages - 32;

        /* Initialize large page structures on PAE/x64, and MmProcessList on x86 */
        MiInitializeLargePageSupport();

        /* Check if the registry says any drivers should be loaded with large pages */
        MiInitializeDriverLargePageList();

        /* Relocate the boot drivers into system PTE space and fixup their PFNs */
        MiReloadBootLoadedDrivers(LoaderBlock);

        /* FIXME: Call out into Driver Verifier for initialization */

        /* Check how many pages the system has to classify its size
           (small / medium / large) and tune cache + dead-stack limits */
        if (MmNumberOfPhysicalPages <= ((13 * _1MB) / PAGE_SIZE))
        {
            /* Set small system */
            MmSystemSize = MmSmallSystem;
            MmMaximumDeadKernelStacks = 0;
        }
        else if (MmNumberOfPhysicalPages <= ((19 * _1MB) / PAGE_SIZE))
        {
            /* Set small system and add 100 pages for the cache */
            MmSystemSize = MmSmallSystem;
            MmSystemCacheWsMinimum += 100;
            MmMaximumDeadKernelStacks = 2;
        }
        else
        {
            /* Set medium system and add 400 pages for the cache */
            MmSystemSize = MmMediumSystem;
            MmSystemCacheWsMinimum += 400;
            MmMaximumDeadKernelStacks = 5;
        }

        /* Check for less than 24MB */
        if (MmNumberOfPhysicalPages < ((24 * _1MB) / PAGE_SIZE))
        {
            /* No more than 32 pages */
            MmSystemCacheWsMinimum = 32;
        }

        /* Check for more than 32MB */
        if (MmNumberOfPhysicalPages >= ((32 * _1MB) / PAGE_SIZE))
        {
            /* Check for product type being "Wi" for WinNT
               (the constant is "Wi" as little-endian UTF-16) */
            if (MmProductType == '\0i\0W')
            {
                /* Then this is a large system */
                MmSystemSize = MmLargeSystem;
            }
            else
            {
                /* For servers, we need 64MB to consider this as being large */
                if (MmNumberOfPhysicalPages >= ((64 * _1MB) / PAGE_SIZE))
                {
                    /* Set it as large */
                    MmSystemSize = MmLargeSystem;
                }
            }
        }

        /* Check for more than 33 MB */
        if (MmNumberOfPhysicalPages > ((33 * _1MB) / PAGE_SIZE))
        {
            /* Add another 500 pages to the cache */
            MmSystemCacheWsMinimum += 500;
        }

        /* Now setup the shared user data fields */
        ASSERT(SharedUserData->NumberOfPhysicalPages == 0);
        SharedUserData->NumberOfPhysicalPages = MmNumberOfPhysicalPages;
        SharedUserData->LargePageMinimum = 0;

        /* Check for workstation (Wi for WinNT) */
        if (MmProductType == '\0i\0W')
        {
            /* Set Windows NT Workstation product type */
            SharedUserData->NtProductType = NtProductWinNt;
            MmProductType = 0;
        }
        else
        {
            /* Check for LanMan server (La for LanmanNT) */
            if (MmProductType == '\0a\0L')
            {
                /* This is a domain controller */
                SharedUserData->NtProductType = NtProductLanManNt;
            }
            else
            {
                /* Otherwise it must be a normal server (Se for ServerNT) */
                SharedUserData->NtProductType = NtProductServer;
            }

            /* Set the product type, and make the system more aggressive with low memory */
            MmProductType = 1;
            MmMinimumFreePages = 81;
        }

        /* Update working set tuning parameters (workstation vs. server) */
        MiAdjustWorkingSetManagerParameters(!MmProductType);

        /* Finetune the page count by removing working set and NP expansion */
        MmResidentAvailablePages -= MiExpansionPoolPagesInitialCharge;
        MmResidentAvailablePages -= MmSystemCacheWsMinimum;
        MmResidentAvailableAtInit = MmResidentAvailablePages;
        if (MmResidentAvailablePages <= 0)
        {
            /* This should not happen */
            DPRINT1("System cache working set too big\n");
            return FALSE;
        }

        /* Initialize the system cache */
        //MiInitializeSystemCache(MmSystemCacheWsMinimum, MmAvailablePages);

        /* Update the commit limit from available pages, keeping 1024 in reserve */
        MmTotalCommitLimit = MmAvailablePages;
        if (MmTotalCommitLimit > 1024) MmTotalCommitLimit -= 1024;
        MmTotalCommitLimitMaximum = MmTotalCommitLimit;

        /* Size up paged pool and build the shadow system page directory */
        MiBuildPagedPool();

        /* Debugger physical memory support is now ready to be used */
        MmDebugPte = MiAddressToPte(MiDebugMapping);

        /* Initialize the loaded module list */
        MiInitializeLoadedModuleList(LoaderBlock);
    }

    //
    // Always return success for now
    //
    return TRUE;
}
2461
2462 /* EOF */