/*
 * Merge from amd64 branch:
 * [reactos.git] / reactos / ntoskrnl / mm / ARM3 / mminit.c
 */
1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: BSD - See COPYING.ARM in the top level directory
4 * FILE: ntoskrnl/mm/ARM3/mminit.c
5 * PURPOSE: ARM Memory Manager Initialization
6 * PROGRAMMERS: ReactOS Portable Systems Group
7 */
8
9 /* INCLUDES *******************************************************************/
10
11 #include <ntoskrnl.h>
12 #define NDEBUG
13 #include <debug.h>
14
15 #line 15 "ARM³::INIT"
16 #define MODULE_INVOLVED_IN_ARM3
17 #include "miarm.h"
18
19 /* GLOBALS ********************************************************************/
20
//
// These are all registry-configurable, but by default, the memory manager will
// figure out the most appropriate values.
//
ULONG MmMaximumNonPagedPoolPercent;
ULONG MmSizeOfNonPagedPoolInBytes;
ULONG MmMaximumNonPagedPoolInBytes;

/* Some of the same values, in pages */
PFN_NUMBER MmMaximumNonPagedPoolInPages;

//
// These numbers describe the discrete equation components of the nonpaged
// pool sizing algorithm.
//
// They are described on http://support.microsoft.com/default.aspx/kb/126402/ja
// along with the algorithm that uses them, which is implemented later below.
//
ULONG MmMinimumNonPagedPoolSize = 256 * 1024;
ULONG MmMinAdditionNonPagedPoolPerMb = 32 * 1024;
ULONG MmDefaultMaximumNonPagedPool = 1024 * 1024;
ULONG MmMaxAdditionNonPagedPoolPerMb = 400 * 1024;

//
// The memory layout (and especially variable names) of the NT kernel mode
// components can be a bit hard to twig, especially when it comes to the non
// paged area.
//
// There are really two components to the non-paged pool:
//
// - The initial nonpaged pool, sized dynamically up to a maximum.
// - The expansion nonpaged pool, sized dynamically up to a maximum.
//
// The initial nonpaged pool is physically continuous for performance, and
// immediately follows the PFN database, typically sharing the same PDE. It is
// a very small resource (32MB on a 1GB system), and capped at 128MB.
//
// Right now we call this the "ARM³ Nonpaged Pool" and it begins somewhere after
// the PFN database (which starts at 0xB0000000).
//
// The expansion nonpaged pool, on the other hand, can grow much bigger (400MB
// for a 1GB system). On ARM³ however, it is currently capped at 128MB.
//
// The address where the initial nonpaged pool starts is aptly named
// MmNonPagedPoolStart, and it describes a range of MmSizeOfNonPagedPoolInBytes
// bytes.
//
// Expansion nonpaged pool starts at an address described by the variable called
// MmNonPagedPoolExpansionStart, and it goes on for MmMaximumNonPagedPoolInBytes
// minus MmSizeOfNonPagedPoolInBytes bytes, always reaching MmNonPagedPoolEnd
// (because of the way it's calculated) at 0xFFBE0000.
//
// Initial nonpaged pool is allocated and mapped early-on during boot, but what
// about the expansion nonpaged pool? It is instead composed of special pages
// which belong to what are called System PTEs. These PTEs are the matter of a
// later discussion, but they are also considered part of the "nonpaged" OS, due
// to the fact that they are never paged out -- once an address is described by
// a System PTE, it is always valid, until the System PTE is torn down.
//
// System PTEs are actually composed of two "spaces", the system space proper,
// and the nonpaged pool expansion space. The latter, as we've already seen,
// begins at MmNonPagedPoolExpansionStart. Based on the number of System PTEs
// that the system will support, the remaining address space below this address
// is used to hold the system space PTEs. This address, in turn, is held in the
// variable named MmNonPagedSystemStart, which itself is never allowed to go
// below 0xEB000000 (thus creating an upper bound on the number of System PTEs).
//
// This means that 330MB are reserved for total nonpaged system VA, on top of
// whatever the initial nonpaged pool allocation is.
//
// The following URLs, valid as of April 23rd, 2008, support this evidence:
//
// http://www.cs.miami.edu/~burt/journal/NT/memory.html
// http://www.ditii.com/2007/09/28/windows-memory-management-x86-virtual-address-space/
//
PVOID MmNonPagedSystemStart;
PVOID MmNonPagedPoolStart;
PVOID MmNonPagedPoolExpansionStart;
PVOID MmNonPagedPoolEnd = MI_NONPAGED_POOL_END;

//
// This is where paged pool starts by default
//
PVOID MmPagedPoolStart = MI_PAGED_POOL_START;
PVOID MmPagedPoolEnd;

//
// And this is its default size (in bytes and in pages)
//
ULONG MmSizeOfPagedPoolInBytes = MI_MIN_INIT_PAGED_POOLSIZE;
PFN_NUMBER MmSizeOfPagedPoolInPages = MI_MIN_INIT_PAGED_POOLSIZE / PAGE_SIZE;

//
// Session space starts at 0xBFFFFFFF and grows downwards
// By default, it includes an 8MB image area where we map win32k and video card
// drivers, followed by a 4MB area containing the session's working set. This is
// then followed by a 20MB mapped view area and finally by the session's paged
// pool, by default 16MB.
//
// On a normal system, this results in session space occupying the region from
// 0xBD000000 to 0xC0000000
//
// See miarm.h for the defines that determine the sizing of this region. On an
// NT system, some of these can be configured through the registry, but we don't
// support that yet.
//
PVOID MiSessionSpaceEnd;    // 0xC0000000
PVOID MiSessionImageEnd;    // 0xC0000000
PVOID MiSessionImageStart;  // 0xBF800000
PVOID MiSessionViewStart;   // 0xBE000000
PVOID MiSessionPoolEnd;     // 0xBE000000
PVOID MiSessionPoolStart;   // 0xBD000000
PVOID MmSessionBase;        // 0xBD000000
ULONG MmSessionSize;
ULONG MmSessionViewSize;
ULONG MmSessionPoolSize;
ULONG MmSessionImageSize;

//
// The system view space, on the other hand, is where sections that are memory
// mapped into "system space" end up.
//
// By default, it is a 16MB region.
//
PVOID MiSystemViewStart;
ULONG MmSystemViewSize;

//
// A copy of the system page directory (the page directory associated with the
// System process) is kept (double-mapped) by the manager in order to lazily
// map paged pool PDEs into external processes when they fault on a paged pool
// address.
//
PFN_NUMBER MmSystemPageDirectory;
PMMPTE MmSystemPagePtes;

//
// The system cache starts right after hyperspace. The first few pages are for
// keeping track of the system working set list.
//
// This should be 0xC0C00000 -- the cache itself starts at 0xC1000000
//
PMMWSL MmSystemCacheWorkingSetList = MI_SYSTEM_CACHE_WS_START;

//
// Number of system PTEs to reserve.
// Windows NT seems to choose between 7000, 11000 and 50000
// On systems with more than 32MB, this number is then doubled, and further
// aligned up to a PDE boundary (4MB).
//
ULONG MmNumberOfSystemPtes;

//
// This is how many pages the PFN database will take up
// In Windows, this includes the Quark Color Table, but not in ARM³
//
ULONG MxPfnAllocation;

//
// Unlike the old ReactOS Memory Manager, ARM³ (and Windows) does not keep track
// of pages that are not actually valid physical memory, such as ACPI reserved
// regions, BIOS address ranges, or holes in physical memory address space which
// could indicate device-mapped I/O memory.
//
// In fact, the lack of a PFN entry for a page usually indicates that this is
// I/O space instead.
//
// A bitmap, called the PFN bitmap, keeps track of all page frames by assigning
// a bit to each. If the bit is set, then the page is valid physical RAM.
//
RTL_BITMAP MiPfnBitMap;

//
// This structure describes the different pieces of RAM-backed address space
//
PPHYSICAL_MEMORY_DESCRIPTOR MmPhysicalMemoryBlock;

//
// This is where we keep track of the most basic physical layout markers.
// MmLowestPhysicalPage starts at -1 (all ones) so the first page seen during
// scanning always lowers it.
//
ULONG MmNumberOfPhysicalPages, MmHighestPhysicalPage, MmLowestPhysicalPage = -1;

//
// The total number of pages mapped by the boot loader, which include the kernel
// HAL, boot drivers, registry, NLS files and other loader data structures is
// kept track of here. This depends on "LoaderPagesSpanned" being correct when
// coming from the loader.
//
// This number is later aligned up to a PDE boundary.
//
ULONG MmBootImageSize;

//
// These three variables keep track of the core separation of address space that
// exists between kernel mode and user mode.
//
ULONG MmUserProbeAddress;
PVOID MmHighestUserAddress;
PVOID MmSystemRangeStart;

/* System cache region bounds and its working-set support structure */
PVOID MmSystemCacheStart;
PVOID MmSystemCacheEnd;
MMSUPPORT MmSystemCacheWs;

//
// This is where hyperspace ends (followed by the system cache working set)
//
PVOID MmHyperSpaceEnd;

//
// Page coloring algorithm data
//
ULONG MmSecondaryColors;
ULONG MmSecondaryColorMask;

//
// Actual (registry-configurable) size of a GUI thread's stack
//
ULONG MmLargeStackSize = KERNEL_LARGE_STACK_SIZE;

//
// Before we have a PFN database, memory comes straight from our physical memory
// blocks, which is nice because it's guaranteed contiguous and also because once
// we take a page from here, the system doesn't see it anymore.
// However, once the fun is over, those pages must be re-integrated back into
// PFN society life, and that requires us keeping a copy of the original layout
// so that we can parse it later.
//
PMEMORY_ALLOCATION_DESCRIPTOR MxFreeDescriptor;
MEMORY_ALLOCATION_DESCRIPTOR MxOldFreeDescriptor;

/*
 * For each page's worth bytes of L2 cache in a given set/way line, the zero and
 * free lists are organized in what is called a "color".
 *
 * This array points to the two lists, so it can be thought of as a multi-dimensional
 * array of MmFreePagesByColor[2][MmSecondaryColors]. Since the number is dynamic,
 * we describe the array in pointer form instead.
 *
 * On a final note, the color tables themselves are right after the PFN database.
 */
C_ASSERT(FreePageList == 1);
PMMCOLOR_TABLES MmFreePagesByColor[FreePageList + 1];

/* An event used in Phase 0 before the rest of the system is ready to go */
KEVENT MiTempEvent;

/* All the events used for memory threshold notifications */
PKEVENT MiLowMemoryEvent;
PKEVENT MiHighMemoryEvent;
PKEVENT MiLowPagedPoolEvent;
PKEVENT MiHighPagedPoolEvent;
PKEVENT MiLowNonPagedPoolEvent;
PKEVENT MiHighNonPagedPoolEvent;

/* The actual thresholds themselves, in page numbers */
PFN_NUMBER MmLowMemoryThreshold;
PFN_NUMBER MmHighMemoryThreshold;
PFN_NUMBER MiLowPagedPoolThreshold;
PFN_NUMBER MiHighPagedPoolThreshold;
PFN_NUMBER MiLowNonPagedPoolThreshold;
PFN_NUMBER MiHighNonPagedPoolThreshold;

/*
 * This number determines how many free pages must exist, at minimum, until we
 * start trimming working sets and flushing modified pages to obtain more free
 * pages.
 *
 * This number changes if the system detects that this is a server product
 */
PFN_NUMBER MmMinimumFreePages = 26;

/*
 * This number indicates how many pages we consider to be a low limit of having
 * "plenty" of free memory.
 *
 * It is doubled on systems that have more than 63MB of memory
 */
PFN_NUMBER MmPlentyFreePages = 400;

/* These values store the type of system this is (small, med, large) and if server */
ULONG MmProductType;
MM_SYSTEMSIZE MmSystemSize;
303
304 /* PRIVATE FUNCTIONS **********************************************************/
305
306 //
307 // In Bavaria, this is probably a hate crime
308 //
309 VOID
310 FASTCALL
311 MiSyncARM3WithROS(IN PVOID AddressStart,
312 IN PVOID AddressEnd)
313 {
314 //
315 // Puerile piece of junk-grade carbonized horseshit puss sold to the lowest bidder
316 //
317 ULONG Pde = ADDR_TO_PDE_OFFSET(AddressStart);
318 while (Pde <= ADDR_TO_PDE_OFFSET(AddressEnd))
319 {
320 //
321 // This both odious and heinous
322 //
323 extern ULONG MmGlobalKernelPageDirectory[1024];
324 MmGlobalKernelPageDirectory[Pde] = ((PULONG)PDE_BASE)[Pde];
325 Pde++;
326 }
327 }
328
329 PFN_NUMBER
330 NTAPI
331 MxGetNextPage(IN PFN_NUMBER PageCount)
332 {
333 PFN_NUMBER Pfn;
334
335 /* Make sure we have enough pages */
336 if (PageCount > MxFreeDescriptor->PageCount)
337 {
338 /* Crash the system */
339 KeBugCheckEx(INSTALL_MORE_MEMORY,
340 MmNumberOfPhysicalPages,
341 MxFreeDescriptor->PageCount,
342 MxOldFreeDescriptor.PageCount,
343 PageCount);
344 }
345
346 /* Use our lowest usable free pages */
347 Pfn = MxFreeDescriptor->BasePage;
348 MxFreeDescriptor->BasePage += PageCount;
349 MxFreeDescriptor->PageCount -= PageCount;
350 return Pfn;
351 }
352
353 VOID
354 NTAPI
355 MiComputeColorInformation(VOID)
356 {
357 ULONG L2Associativity;
358
359 /* Check if no setting was provided already */
360 if (!MmSecondaryColors)
361 {
362 /* Get L2 cache information */
363 L2Associativity = KeGetPcr()->SecondLevelCacheAssociativity;
364
365 /* The number of colors is the number of cache bytes by set/way */
366 MmSecondaryColors = KeGetPcr()->SecondLevelCacheSize;
367 if (L2Associativity) MmSecondaryColors /= L2Associativity;
368 }
369
370 /* Now convert cache bytes into pages */
371 MmSecondaryColors >>= PAGE_SHIFT;
372 if (!MmSecondaryColors)
373 {
374 /* If there was no cache data from the KPCR, use the default colors */
375 MmSecondaryColors = MI_SECONDARY_COLORS;
376 }
377 else
378 {
379 /* Otherwise, make sure there aren't too many colors */
380 if (MmSecondaryColors > MI_MAX_SECONDARY_COLORS)
381 {
382 /* Set the maximum */
383 MmSecondaryColors = MI_MAX_SECONDARY_COLORS;
384 }
385
386 /* Make sure there aren't too little colors */
387 if (MmSecondaryColors < MI_MIN_SECONDARY_COLORS)
388 {
389 /* Set the default */
390 MmSecondaryColors = MI_SECONDARY_COLORS;
391 }
392
393 /* Finally make sure the colors are a power of two */
394 if (MmSecondaryColors & (MmSecondaryColors - 1))
395 {
396 /* Set the default */
397 MmSecondaryColors = MI_SECONDARY_COLORS;
398 }
399 }
400
401 /* Compute the mask and store it */
402 MmSecondaryColorMask = MmSecondaryColors - 1;
403 KeGetCurrentPrcb()->SecondaryColorMask = MmSecondaryColorMask;
404 }
405
/*
 * Maps and initializes the page-color tables. Two MMCOLOR_TABLES arrays (one
 * for the zeroed list, one for the free list), each MmSecondaryColors entries
 * long, are placed directly after the ARM3 PFN database; any PTEs covering
 * that range which aren't already valid are backed with fresh zeroed pages.
 */
VOID
NTAPI
MiInitializeColorTables(VOID)
{
    ULONG i;
    PMMPTE PointerPte, LastPte;
    MMPTE TempPte = ValidKernelPte;

    /* The color table starts after the ARM3 PFN database */
    MmFreePagesByColor[0] = (PMMCOLOR_TABLES)&MmPfnDatabase[1][MmHighestPhysicalPage + 1];

    /* Loop the PTEs. We have two color tables for each secondary color */
    PointerPte = MiAddressToPte(&MmFreePagesByColor[0][0]);
    LastPte = MiAddressToPte((ULONG_PTR)MmFreePagesByColor[0] +
                             (2 * MmSecondaryColors * sizeof(MMCOLOR_TABLES))
                             - 1);
    while (PointerPte <= LastPte)
    {
        /* Only back PTEs that aren't already mapped */
        if (PointerPte->u.Hard.Valid == 0)
        {
            /* Get a page and map it */
            TempPte.u.Hard.PageFrameNumber = MxGetNextPage(1);
            ASSERT(TempPte.u.Hard.Valid == 1);
            *PointerPte = TempPte;

            /* Zero out the page */
            RtlZeroMemory(MiPteToAddress(PointerPte), PAGE_SIZE);
        }

        /* Next */
        PointerPte++;
    }

    /* Now set the address of the next list, right after this one */
    MmFreePagesByColor[1] = &MmFreePagesByColor[0][MmSecondaryColors];

    /* Now loop the lists to set them up */
    for (i = 0; i < MmSecondaryColors; i++)
    {
        /* Set both free and zero lists for each color; 0xFFFFFFFF is the
           end-of-list sentinel, and each list starts out empty */
        MmFreePagesByColor[ZeroedPageList][i].Flink = 0xFFFFFFFF;
        MmFreePagesByColor[ZeroedPageList][i].Blink = (PVOID)0xFFFFFFFF;
        MmFreePagesByColor[ZeroedPageList][i].Count = 0;
        MmFreePagesByColor[FreePageList][i].Flink = 0xFFFFFFFF;
        MmFreePagesByColor[FreePageList][i].Blink = (PVOID)0xFFFFFFFF;
        MmFreePagesByColor[FreePageList][i].Count = 0;
    }
}
455
456 BOOLEAN
457 NTAPI
458 MiIsRegularMemory(IN PLOADER_PARAMETER_BLOCK LoaderBlock,
459 IN PFN_NUMBER Pfn)
460 {
461 PLIST_ENTRY NextEntry;
462 PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
463
464 /* Loop the memory descriptors */
465 NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
466 while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
467 {
468 /* Get the memory descriptor */
469 MdBlock = CONTAINING_RECORD(NextEntry,
470 MEMORY_ALLOCATION_DESCRIPTOR,
471 ListEntry);
472
473 /* Check if this PFN could be part of the block */
474 if (Pfn >= (MdBlock->BasePage))
475 {
476 /* Check if it really is part of the block */
477 if (Pfn < (MdBlock->BasePage + MdBlock->PageCount))
478 {
479 /* Check if the block is actually memory we don't map */
480 if ((MdBlock->MemoryType == LoaderFirmwarePermanent) ||
481 (MdBlock->MemoryType == LoaderBBTMemory) ||
482 (MdBlock->MemoryType == LoaderSpecialMemory))
483 {
484 /* We don't need PFN database entries for this memory */
485 break;
486 }
487
488 /* This is memory we want to map */
489 return TRUE;
490 }
491 }
492 else
493 {
494 /* Blocks are ordered, so if it's not here, it doesn't exist */
495 break;
496 }
497
498 /* Get to the next descriptor */
499 NextEntry = MdBlock->ListEntry.Flink;
500 }
501
502 /* Check if this PFN is actually from our free memory descriptor */
503 if ((Pfn >= MxOldFreeDescriptor.BasePage) &&
504 (Pfn < MxOldFreeDescriptor.BasePage + MxOldFreeDescriptor.PageCount))
505 {
506 /* We use these pages for initial mappings, so we do want to count them */
507 return TRUE;
508 }
509
510 /* Otherwise this isn't memory that we describe or care about */
511 return FALSE;
512 }
513
514 VOID
515 NTAPI
516 MiMapPfnDatabase(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
517 {
518 ULONG FreePage, FreePageCount, PagesLeft, BasePage, PageCount;
519 PLIST_ENTRY NextEntry;
520 PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
521 PMMPTE PointerPte, LastPte;
522 MMPTE TempPte = ValidKernelPte;
523
524 /* Get current page data, since we won't be using MxGetNextPage as it would corrupt our state */
525 FreePage = MxFreeDescriptor->BasePage;
526 FreePageCount = MxFreeDescriptor->PageCount;
527 PagesLeft = 0;
528
529 /* Loop the memory descriptors */
530 NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
531 while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
532 {
533 /* Get the descriptor */
534 MdBlock = CONTAINING_RECORD(NextEntry,
535 MEMORY_ALLOCATION_DESCRIPTOR,
536 ListEntry);
537 if ((MdBlock->MemoryType == LoaderFirmwarePermanent) ||
538 (MdBlock->MemoryType == LoaderBBTMemory) ||
539 (MdBlock->MemoryType == LoaderSpecialMemory))
540 {
541 /* These pages are not part of the PFN database */
542 NextEntry = MdBlock->ListEntry.Flink;
543 continue;
544 }
545
546 /* Next, check if this is our special free descriptor we've found */
547 if (MdBlock == MxFreeDescriptor)
548 {
549 /* Use the real numbers instead */
550 BasePage = MxOldFreeDescriptor.BasePage;
551 PageCount = MxOldFreeDescriptor.PageCount;
552 }
553 else
554 {
555 /* Use the descriptor's numbers */
556 BasePage = MdBlock->BasePage;
557 PageCount = MdBlock->PageCount;
558 }
559
560 /* Get the PTEs for this range */
561 PointerPte = MiAddressToPte(&MmPfnDatabase[0][BasePage]);
562 LastPte = MiAddressToPte(((ULONG_PTR)&MmPfnDatabase[0][BasePage + PageCount]) - 1);
563 DPRINT("MD Type: %lx Base: %lx Count: %lx\n", MdBlock->MemoryType, BasePage, PageCount);
564
565 /* Loop them */
566 while (PointerPte <= LastPte)
567 {
568 /* We'll only touch PTEs that aren't already valid */
569 if (PointerPte->u.Hard.Valid == 0)
570 {
571 /* Use the next free page */
572 TempPte.u.Hard.PageFrameNumber = FreePage;
573 ASSERT(FreePageCount != 0);
574
575 /* Consume free pages */
576 FreePage++;
577 FreePageCount--;
578 if (!FreePageCount)
579 {
580 /* Out of memory */
581 KeBugCheckEx(INSTALL_MORE_MEMORY,
582 MmNumberOfPhysicalPages,
583 FreePageCount,
584 MxOldFreeDescriptor.PageCount,
585 1);
586 }
587
588 /* Write out this PTE */
589 PagesLeft++;
590 ASSERT(PointerPte->u.Hard.Valid == 0);
591 ASSERT(TempPte.u.Hard.Valid == 1);
592 *PointerPte = TempPte;
593
594 /* Zero this page */
595 RtlZeroMemory(MiPteToAddress(PointerPte), PAGE_SIZE);
596 }
597
598 /* Next! */
599 PointerPte++;
600 }
601
602 /* Get the PTEs for this range */
603 PointerPte = MiAddressToPte(&MmPfnDatabase[1][BasePage]);
604 LastPte = MiAddressToPte(((ULONG_PTR)&MmPfnDatabase[1][BasePage + PageCount]) - 1);
605 DPRINT("MD Type: %lx Base: %lx Count: %lx\n", MdBlock->MemoryType, BasePage, PageCount);
606
607 /* Loop them */
608 while (PointerPte <= LastPte)
609 {
610 /* We'll only touch PTEs that aren't already valid */
611 if (PointerPte->u.Hard.Valid == 0)
612 {
613 /* Use the next free page */
614 TempPte.u.Hard.PageFrameNumber = FreePage;
615 ASSERT(FreePageCount != 0);
616
617 /* Consume free pages */
618 FreePage++;
619 FreePageCount--;
620 if (!FreePageCount)
621 {
622 /* Out of memory */
623 KeBugCheckEx(INSTALL_MORE_MEMORY,
624 MmNumberOfPhysicalPages,
625 FreePageCount,
626 MxOldFreeDescriptor.PageCount,
627 1);
628 }
629
630 /* Write out this PTE */
631 PagesLeft++;
632 ASSERT(PointerPte->u.Hard.Valid == 0);
633 ASSERT(TempPte.u.Hard.Valid == 1);
634 *PointerPte = TempPte;
635
636 /* Zero this page */
637 RtlZeroMemory(MiPteToAddress(PointerPte), PAGE_SIZE);
638 }
639
640 /* Next! */
641 PointerPte++;
642 }
643
644 /* Do the next address range */
645 NextEntry = MdBlock->ListEntry.Flink;
646 }
647
648 /* Now update the free descriptors to consume the pages we used up during the PFN allocation loop */
649 MxFreeDescriptor->BasePage = FreePage;
650 MxFreeDescriptor->PageCount = FreePageCount;
651 }
652
/*
 * Builds PFN database entries for pages currently mapped by the boot page
 * tables: every valid PDE whose page is regular memory gets an entry (owned
 * by the startup page directory), and every valid PTE beneath it bumps that
 * entry's share count and may get an entry of its own.
 */
VOID
NTAPI
MiBuildPfnDatabaseFromPages(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
    PMMPDE PointerPde;
    PMMPTE PointerPte;
    ULONG i, Count, j;
    PFN_NUMBER PageFrameIndex, StartupPdIndex, PtePageIndex;
    PMMPFN Pfn1, Pfn2;
    ULONG_PTR BaseAddress = 0;

    /* PFN of the startup page directory */
    StartupPdIndex = PFN_FROM_PTE(MiAddressToPde(PDE_BASE));

    /* Start with the first PDE and scan them all */
    PointerPde = MiAddressToPde(NULL);
    Count = PD_COUNT * PDE_COUNT;
    for (i = 0; i < Count; i++)
    {
        /* Check for valid PDE */
        if (PointerPde->u.Hard.Valid == 1)
        {
            /* Get the PFN from it */
            PageFrameIndex = PFN_FROM_PTE(PointerPde);

            /* Do we want a PFN entry for this page? */
            if (MiIsRegularMemory(LoaderBlock, PageFrameIndex))
            {
                /* Yes we do: the page-table page is owned by the startup
                   page directory and is active and valid */
                Pfn1 = MI_PFN_TO_PFNENTRY(PageFrameIndex);
                Pfn1->u4.PteFrame = StartupPdIndex;
                Pfn1->PteAddress = PointerPde;
                Pfn1->u2.ShareCount++;
                Pfn1->u3.e2.ReferenceCount = 1;
                Pfn1->u3.e1.PageLocation = ActiveAndValid;
                Pfn1->u3.e1.CacheAttribute = MiNonCached;
            }
            else
            {
                /* No PFN entry (not regular memory) */
                Pfn1 = NULL;
            }

            /* Now get the PTE and scan the pages */
            PointerPte = MiAddressToPte(BaseAddress);
            for (j = 0; j < PTE_COUNT; j++)
            {
                /* Check for a valid PTE */
                if (PointerPte->u.Hard.Valid == 1)
                {
                    /* Increase the shared count of the PFN entry for the PDE */
                    ASSERT(Pfn1 != NULL);
                    Pfn1->u2.ShareCount++;

                    /* Now check if the PTE is valid memory too */
                    PtePageIndex = PFN_FROM_PTE(PointerPte);
                    if (MiIsRegularMemory(LoaderBlock, PtePageIndex))
                    {
                        /*
                         * Only add pages above the end of system code or pages
                         * that are part of nonpaged pool
                         */
                        if ((BaseAddress >= 0xA0000000) ||
                            ((BaseAddress >= (ULONG_PTR)MmNonPagedPoolStart) &&
                             (BaseAddress < (ULONG_PTR)MmNonPagedPoolStart +
                                            MmSizeOfNonPagedPoolInBytes)))
                        {
                            /* Get the PFN entry and make sure it too is valid */
                            Pfn2 = MI_PFN_TO_PFNENTRY(PtePageIndex);
                            if ((MmIsAddressValid(Pfn2)) &&
                                (MmIsAddressValid(Pfn2 + 1)))
                            {
                                /* Setup the PFN entry, owned by the page-table
                                   page found above */
                                Pfn2->u4.PteFrame = PageFrameIndex;
                                Pfn2->PteAddress = PointerPte;
                                Pfn2->u2.ShareCount++;
                                Pfn2->u3.e2.ReferenceCount = 1;
                                Pfn2->u3.e1.PageLocation = ActiveAndValid;
                                Pfn2->u3.e1.CacheAttribute = MiNonCached;
                            }
                        }
                    }
                }

                /* Next PTE */
                PointerPte++;
                BaseAddress += PAGE_SIZE;
            }
        }
        else
        {
            /* PDE not valid: skip the whole address range it would map */
            BaseAddress += PTE_COUNT * PAGE_SIZE;
        }

        /* Next PDE */
        PointerPde++;
    }
}
752
753 VOID
754 NTAPI
755 MiBuildPfnDatabaseZeroPage(VOID)
756 {
757 PMMPFN Pfn1;
758 PMMPDE PointerPde;
759
760 /* Grab the lowest page and check if it has no real references */
761 Pfn1 = MI_PFN_TO_PFNENTRY(MmLowestPhysicalPage);
762 if (!(MmLowestPhysicalPage) && !(Pfn1->u3.e2.ReferenceCount))
763 {
764 /* Make it a bogus page to catch errors */
765 PointerPde = MiAddressToPde(0xFFFFFFFF);
766 Pfn1->u4.PteFrame = PFN_FROM_PTE(PointerPde);
767 Pfn1->PteAddress = PointerPde;
768 Pfn1->u2.ShareCount++;
769 Pfn1->u3.e2.ReferenceCount = 0xFFF0;
770 Pfn1->u3.e1.PageLocation = ActiveAndValid;
771 Pfn1->u3.e1.CacheAttribute = MiNonCached;
772 }
773 }
774
/*
 * Walks the loader's (sorted) memory descriptor list and finishes populating
 * the PFN database: free-style descriptors have their unreferenced pages
 * marked non-cached (free-list insertion is still TODO), invisible ranges are
 * skipped, and everything else is marked in-use via its KSEG0 mapping.
 * Descriptors extending past MmHighestPhysicalPage are trimmed.
 */
VOID
NTAPI
MiBuildPfnDatabaseFromLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
    PLIST_ENTRY NextEntry;
    PFN_NUMBER PageCount = 0;
    PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
    PFN_NUMBER PageFrameIndex;
    PMMPFN Pfn1;
    PMMPTE PointerPte;
    PMMPDE PointerPde;

    /* Now loop through the descriptors */
    NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
    while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
    {
        /* Get the current descriptor */
        MdBlock = CONTAINING_RECORD(NextEntry,
                                    MEMORY_ALLOCATION_DESCRIPTOR,
                                    ListEntry);

        /* Read its data */
        PageCount = MdBlock->PageCount;
        PageFrameIndex = MdBlock->BasePage;

        /* Don't allow memory above what the PFN database is mapping */
        if (PageFrameIndex > MmHighestPhysicalPage)
        {
            /* Since they are ordered, everything past here will be larger */
            break;
        }

        /* On the other hand, the end page might be higher up... */
        if ((PageFrameIndex + PageCount) > (MmHighestPhysicalPage + 1))
        {
            /* In which case we'll trim the descriptor to go as high as we can */
            PageCount = MmHighestPhysicalPage + 1 - PageFrameIndex;
            MdBlock->PageCount = PageCount;

            /* But if there's nothing left to trim, we got too high, so quit */
            if (!PageCount) break;
        }

        /* Now check the descriptor type */
        switch (MdBlock->MemoryType)
        {
            /* Check for bad RAM */
            case LoaderBad:

                DPRINT1("You have damaged RAM modules. Stopping boot\n");
                while (TRUE);
                break;

            /* Check for free RAM */
            case LoaderFree:
            case LoaderLoadedProgram:
            case LoaderFirmwareTemporary:
            case LoaderOsloaderStack:

                /* Get the last page of this descriptor. Note we loop backwards */
                PageFrameIndex += PageCount - 1;
                Pfn1 = MI_PFN_TO_PFNENTRY(PageFrameIndex);
                while (PageCount--)
                {
                    /* If the page really has no references, mark it as free */
                    if (!Pfn1->u3.e2.ReferenceCount)
                    {
                        Pfn1->u3.e1.CacheAttribute = MiNonCached;
                        //MiInsertPageInFreeList(PageFrameIndex);
                    }

                    /* Go to the next page */
                    Pfn1--;
                    PageFrameIndex--;
                }

                /* Done with this block */
                break;

            /* Check for pages that are invisible to us */
            case LoaderFirmwarePermanent:
            case LoaderSpecialMemory:
            case LoaderBBTMemory:

                /* And skip them */
                break;

            default:

                /* Map these pages with the KSEG0 mapping that adds 0x80000000 */
                PointerPte = MiAddressToPte(KSEG0_BASE + (PageFrameIndex << PAGE_SHIFT));
                Pfn1 = MI_PFN_TO_PFNENTRY(PageFrameIndex);
                while (PageCount--)
                {
                    /* Check if the page is really unused */
                    PointerPde = MiAddressToPde(KSEG0_BASE + (PageFrameIndex << PAGE_SHIFT));
                    if (!Pfn1->u3.e2.ReferenceCount)
                    {
                        /* Mark it as being in-use */
                        Pfn1->u4.PteFrame = PFN_FROM_PTE(PointerPde);
                        Pfn1->PteAddress = PointerPte;
                        Pfn1->u2.ShareCount++;
                        Pfn1->u3.e2.ReferenceCount = 1;
                        Pfn1->u3.e1.PageLocation = ActiveAndValid;
                        Pfn1->u3.e1.CacheAttribute = MiNonCached;

                        /* Check for an execute-in-place ROM page */
                        if (MdBlock->MemoryType == LoaderXIPRom)
                        {
                            /* Make it a pseudo-I/O ROM mapping */
                            Pfn1->u1.Flink = 0;
                            Pfn1->u2.ShareCount = 0;
                            Pfn1->u3.e2.ReferenceCount = 0;
                            Pfn1->u3.e1.PageLocation = 0;
                            Pfn1->u3.e1.Rom = 1;
                            Pfn1->u4.InPageError = 0;
                            Pfn1->u3.e1.PrototypePte = 1;
                        }
                    }

                    /* Advance page structures */
                    Pfn1++;
                    PageFrameIndex++;
                    PointerPte++;
                }
                break;
        }

        /* Next descriptor entry */
        NextEntry = MdBlock->ListEntry.Flink;
    }
}
907
908 VOID
909 NTAPI
910 MiBuildPfnDatabaseSelf(VOID)
911 {
912 PMMPTE PointerPte, LastPte;
913 PMMPFN Pfn1;
914
915 /* Loop the PFN database page */
916 PointerPte = MiAddressToPte(MI_PFN_TO_PFNENTRY(MmLowestPhysicalPage));
917 LastPte = MiAddressToPte(MI_PFN_TO_PFNENTRY(MmHighestPhysicalPage));
918 while (PointerPte <= LastPte)
919 {
920 /* Make sure the page is valid */
921 if (PointerPte->u.Hard.Valid == 1)
922 {
923 /* Get the PFN entry and just mark it referenced */
924 Pfn1 = MI_PFN_TO_PFNENTRY(PointerPte->u.Hard.PageFrameNumber);
925 Pfn1->u2.ShareCount = 1;
926 Pfn1->u3.e2.ReferenceCount = 1;
927 }
928
929 /* Next */
930 PointerPte++;
931 }
932 }
933
934 VOID
935 NTAPI
936 MiInitializePfnDatabase(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
937 {
938 /* Scan memory and start setting up PFN entries */
939 MiBuildPfnDatabaseFromPages(LoaderBlock);
940
941 /* Add the zero page */
942 MiBuildPfnDatabaseZeroPage();
943
944 /* Scan the loader block and build the rest of the PFN database */
945 MiBuildPfnDatabaseFromLoaderBlock(LoaderBlock);
946
947 /* Finally add the pages for the PFN database itself */
948 MiBuildPfnDatabaseSelf();
949 }
950
951 VOID
952 NTAPI
953 MiAdjustWorkingSetManagerParameters(IN BOOLEAN Client)
954 {
955 /* This function needs to do more work, for now, we tune page minimums */
956
957 /* Check for a system with around 64MB RAM or more */
958 if (MmNumberOfPhysicalPages >= (63 * _1MB) / PAGE_SIZE)
959 {
960 /* Double the minimum amount of pages we consider for a "plenty free" scenario */
961 MmPlentyFreePages *= 2;
962 }
963 }
964
965 VOID
966 NTAPI
967 MiNotifyMemoryEvents(VOID)
968 {
969 /* Are we in a low-memory situation? */
970 if (MmAvailablePages < MmLowMemoryThreshold)
971 {
972 /* Clear high, set low */
973 if (KeReadStateEvent(MiHighMemoryEvent)) KeClearEvent(MiHighMemoryEvent);
974 if (!KeReadStateEvent(MiLowMemoryEvent)) KeSetEvent(MiLowMemoryEvent, 0, FALSE);
975 }
976 else if (MmAvailablePages < MmHighMemoryThreshold)
977 {
978 /* We are in between, clear both */
979 if (KeReadStateEvent(MiHighMemoryEvent)) KeClearEvent(MiHighMemoryEvent);
980 if (KeReadStateEvent(MiLowMemoryEvent)) KeClearEvent(MiLowMemoryEvent);
981 }
982 else
983 {
984 /* Clear low, set high */
985 if (KeReadStateEvent(MiLowMemoryEvent)) KeClearEvent(MiLowMemoryEvent);
986 if (!KeReadStateEvent(MiHighMemoryEvent)) KeSetEvent(MiHighMemoryEvent, 0, FALSE);
987 }
988 }
989
990 NTSTATUS
991 NTAPI
992 MiCreateMemoryEvent(IN PUNICODE_STRING Name,
993 OUT PKEVENT *Event)
994 {
995 PACL Dacl;
996 HANDLE EventHandle;
997 ULONG DaclLength;
998 NTSTATUS Status;
999 OBJECT_ATTRIBUTES ObjectAttributes;
1000 SECURITY_DESCRIPTOR SecurityDescriptor;
1001
1002 /* Create the SD */
1003 Status = RtlCreateSecurityDescriptor(&SecurityDescriptor,
1004 SECURITY_DESCRIPTOR_REVISION);
1005 if (!NT_SUCCESS(Status)) return Status;
1006
1007 /* One ACL with 3 ACEs, containing each one SID */
1008 DaclLength = sizeof(ACL) +
1009 3 * sizeof(ACCESS_ALLOWED_ACE) +
1010 RtlLengthSid(SeLocalSystemSid) +
1011 RtlLengthSid(SeAliasAdminsSid) +
1012 RtlLengthSid(SeWorldSid);
1013
1014 /* Allocate space for the DACL */
1015 Dacl = ExAllocatePoolWithTag(PagedPool, DaclLength, 'lcaD');
1016 if (!Dacl) return STATUS_INSUFFICIENT_RESOURCES;
1017
1018 /* Setup the ACL inside it */
1019 Status = RtlCreateAcl(Dacl, DaclLength, ACL_REVISION);
1020 if (!NT_SUCCESS(Status)) goto CleanUp;
1021
1022 /* Add query rights for everyone */
1023 Status = RtlAddAccessAllowedAce(Dacl,
1024 ACL_REVISION,
1025 SYNCHRONIZE | EVENT_QUERY_STATE | READ_CONTROL,
1026 SeWorldSid);
1027 if (!NT_SUCCESS(Status)) goto CleanUp;
1028
1029 /* Full rights for the admin */
1030 Status = RtlAddAccessAllowedAce(Dacl,
1031 ACL_REVISION,
1032 EVENT_ALL_ACCESS,
1033 SeAliasAdminsSid);
1034 if (!NT_SUCCESS(Status)) goto CleanUp;
1035
1036 /* As well as full rights for the system */
1037 Status = RtlAddAccessAllowedAce(Dacl,
1038 ACL_REVISION,
1039 EVENT_ALL_ACCESS,
1040 SeLocalSystemSid);
1041 if (!NT_SUCCESS(Status)) goto CleanUp;
1042
1043 /* Set this DACL inside the SD */
1044 Status = RtlSetDaclSecurityDescriptor(&SecurityDescriptor,
1045 TRUE,
1046 Dacl,
1047 FALSE);
1048 if (!NT_SUCCESS(Status)) goto CleanUp;
1049
1050 /* Setup the event attributes, making sure it's a permanent one */
1051 InitializeObjectAttributes(&ObjectAttributes,
1052 Name,
1053 OBJ_KERNEL_HANDLE | OBJ_PERMANENT,
1054 NULL,
1055 &SecurityDescriptor);
1056
1057 /* Create the event */
1058 Status = ZwCreateEvent(&EventHandle,
1059 EVENT_ALL_ACCESS,
1060 &ObjectAttributes,
1061 NotificationEvent,
1062 FALSE);
1063 CleanUp:
1064 /* Free the DACL */
1065 ExFreePool(Dacl);
1066
1067 /* Check if this is the success path */
1068 if (NT_SUCCESS(Status))
1069 {
1070 /* Add a reference to the object, then close the handle we had */
1071 Status = ObReferenceObjectByHandle(EventHandle,
1072 EVENT_MODIFY_STATE,
1073 ExEventObjectType,
1074 KernelMode,
1075 (PVOID*)Event,
1076 NULL);
1077 ZwClose (EventHandle);
1078 }
1079
1080 /* Return status */
1081 return Status;
1082 }
1083
1084 BOOLEAN
1085 NTAPI
1086 MiInitializeMemoryEvents(VOID)
1087 {
1088 UNICODE_STRING LowString = RTL_CONSTANT_STRING(L"\\KernelObjects\\LowMemoryCondition");
1089 UNICODE_STRING HighString = RTL_CONSTANT_STRING(L"\\KernelObjects\\HighMemoryCondition");
1090 UNICODE_STRING LowPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\LowPagedPoolCondition");
1091 UNICODE_STRING HighPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\HighPagedPoolCondition");
1092 UNICODE_STRING LowNonPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\LowNonPagedPoolCondition");
1093 UNICODE_STRING HighNonPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\HighNonPagedPoolCondition");
1094 NTSTATUS Status;
1095
1096 /* Check if we have a registry setting */
1097 if (MmLowMemoryThreshold)
1098 {
1099 /* Convert it to pages */
1100 MmLowMemoryThreshold *= (_1MB / PAGE_SIZE);
1101 }
1102 else
1103 {
1104 /* The low memory threshold is hit when we don't consider that we have "plenty" of free pages anymore */
1105 MmLowMemoryThreshold = MmPlentyFreePages;
1106
1107 /* More than one GB of memory? */
1108 if (MmNumberOfPhysicalPages > 0x40000)
1109 {
1110 /* Start at 32MB, and add another 16MB for each GB */
1111 MmLowMemoryThreshold = (32 * _1MB) / PAGE_SIZE;
1112 MmLowMemoryThreshold += ((MmNumberOfPhysicalPages - 0x40000) >> 7);
1113 }
1114 else if (MmNumberOfPhysicalPages > 0x8000)
1115 {
1116 /* For systems with > 128MB RAM, add another 4MB for each 128MB */
1117 MmLowMemoryThreshold += ((MmNumberOfPhysicalPages - 0x8000) >> 5);
1118 }
1119
1120 /* Don't let the minimum threshold go past 64MB */
1121 MmLowMemoryThreshold = min(MmLowMemoryThreshold, (64 * _1MB) / PAGE_SIZE);
1122 }
1123
1124 /* Check if we have a registry setting */
1125 if (MmHighMemoryThreshold)
1126 {
1127 /* Convert it into pages */
1128 MmHighMemoryThreshold *= (_1MB / PAGE_SIZE);
1129 }
1130 else
1131 {
1132 /* Otherwise, the default is three times the low memory threshold */
1133 MmHighMemoryThreshold = 3 * MmLowMemoryThreshold;
1134 ASSERT(MmHighMemoryThreshold > MmLowMemoryThreshold);
1135 }
1136
1137 /* Make sure high threshold is actually higher than the low */
1138 MmHighMemoryThreshold = max(MmHighMemoryThreshold, MmLowMemoryThreshold);
1139
1140 /* Create the memory events for all the thresholds */
1141 Status = MiCreateMemoryEvent(&LowString, &MiLowMemoryEvent);
1142 if (!NT_SUCCESS(Status)) return FALSE;
1143 Status = MiCreateMemoryEvent(&HighString, &MiHighMemoryEvent);
1144 if (!NT_SUCCESS(Status)) return FALSE;
1145 Status = MiCreateMemoryEvent(&LowPagedPoolString, &MiLowPagedPoolEvent);
1146 if (!NT_SUCCESS(Status)) return FALSE;
1147 Status = MiCreateMemoryEvent(&HighPagedPoolString, &MiHighPagedPoolEvent);
1148 if (!NT_SUCCESS(Status)) return FALSE;
1149 Status = MiCreateMemoryEvent(&LowNonPagedPoolString, &MiLowNonPagedPoolEvent);
1150 if (!NT_SUCCESS(Status)) return FALSE;
1151 Status = MiCreateMemoryEvent(&HighNonPagedPoolString, &MiHighNonPagedPoolEvent);
1152 if (!NT_SUCCESS(Status)) return FALSE;
1153
1154 /* Now setup the pool events */
1155 MiInitializePoolEvents();
1156
1157 /* Set the initial event state */
1158 MiNotifyMemoryEvents();
1159 return TRUE;
1160 }
1161
1162 VOID
1163 NTAPI
1164 MmDumpArmPfnDatabase(VOID)
1165 {
1166 ULONG i;
1167 PMMPFN Pfn1;
1168 PCHAR Consumer = "Unknown";
1169 KIRQL OldIrql;
1170 ULONG ActivePages = 0, FreePages = 0, OtherPages = 0;
1171
1172 KeRaiseIrql(HIGH_LEVEL, &OldIrql);
1173
1174 //
1175 // Loop the PFN database
1176 //
1177 for (i = 0; i <= MmHighestPhysicalPage; i++)
1178 {
1179 Pfn1 = MI_PFN_TO_PFNENTRY(i);
1180 if (!Pfn1) continue;
1181
1182 //
1183 // Get the page location
1184 //
1185 switch (Pfn1->u3.e1.PageLocation)
1186 {
1187 case ActiveAndValid:
1188
1189 Consumer = "Active and Valid";
1190 ActivePages++;
1191 break;
1192
1193 case FreePageList:
1194
1195 Consumer = "Free Page List";
1196 FreePages++;
1197 break;
1198
1199 default:
1200
1201 Consumer = "Other (ASSERT!)";
1202 OtherPages++;
1203 break;
1204 }
1205
1206 //
1207 // Pretty-print the page
1208 //
1209 DbgPrint("0x%08p:\t%20s\t(%02d.%02d) [%08p-%08p])\n",
1210 i << PAGE_SHIFT,
1211 Consumer,
1212 Pfn1->u3.e2.ReferenceCount,
1213 Pfn1->u2.ShareCount,
1214 Pfn1->PteAddress,
1215 Pfn1->u4.PteFrame);
1216 }
1217
1218 DbgPrint("Active: %d pages\t[%d KB]\n", ActivePages, (ActivePages << PAGE_SHIFT) / 1024);
1219 DbgPrint("Free: %d pages\t[%d KB]\n", FreePages, (FreePages << PAGE_SHIFT) / 1024);
1220 DbgPrint("Other: %d pages\t[%d KB]\n", OtherPages, (OtherPages << PAGE_SHIFT) / 1024);
1221
1222 KeLowerIrql(OldIrql);
1223 }
1224
1225 PFN_NUMBER
1226 NTAPI
1227 MiPagesInLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock,
1228 IN PBOOLEAN IncludeType)
1229 {
1230 PLIST_ENTRY NextEntry;
1231 PFN_NUMBER PageCount = 0;
1232 PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
1233
1234 //
1235 // Now loop through the descriptors
1236 //
1237 NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
1238 while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
1239 {
1240 //
1241 // Grab each one, and check if it's one we should include
1242 //
1243 MdBlock = CONTAINING_RECORD(NextEntry,
1244 MEMORY_ALLOCATION_DESCRIPTOR,
1245 ListEntry);
1246 if ((MdBlock->MemoryType < LoaderMaximum) &&
1247 (IncludeType[MdBlock->MemoryType]))
1248 {
1249 //
1250 // Add this to our running total
1251 //
1252 PageCount += MdBlock->PageCount;
1253 }
1254
1255 //
1256 // Try the next descriptor
1257 //
1258 NextEntry = MdBlock->ListEntry.Flink;
1259 }
1260
1261 //
1262 // Return the total
1263 //
1264 return PageCount;
1265 }
1266
1267 PPHYSICAL_MEMORY_DESCRIPTOR
1268 NTAPI
1269 MmInitializeMemoryLimits(IN PLOADER_PARAMETER_BLOCK LoaderBlock,
1270 IN PBOOLEAN IncludeType)
1271 {
1272 PLIST_ENTRY NextEntry;
1273 ULONG Run = 0, InitialRuns = 0;
1274 PFN_NUMBER NextPage = -1, PageCount = 0;
1275 PPHYSICAL_MEMORY_DESCRIPTOR Buffer, NewBuffer;
1276 PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
1277
1278 //
1279 // Scan the memory descriptors
1280 //
1281 NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
1282 while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
1283 {
1284 //
1285 // For each one, increase the memory allocation estimate
1286 //
1287 InitialRuns++;
1288 NextEntry = NextEntry->Flink;
1289 }
1290
1291 //
1292 // Allocate the maximum we'll ever need
1293 //
1294 Buffer = ExAllocatePoolWithTag(NonPagedPool,
1295 sizeof(PHYSICAL_MEMORY_DESCRIPTOR) +
1296 sizeof(PHYSICAL_MEMORY_RUN) *
1297 (InitialRuns - 1),
1298 'lMmM');
1299 if (!Buffer) return NULL;
1300
1301 //
1302 // For now that's how many runs we have
1303 //
1304 Buffer->NumberOfRuns = InitialRuns;
1305
1306 //
1307 // Now loop through the descriptors again
1308 //
1309 NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
1310 while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
1311 {
1312 //
1313 // Grab each one, and check if it's one we should include
1314 //
1315 MdBlock = CONTAINING_RECORD(NextEntry,
1316 MEMORY_ALLOCATION_DESCRIPTOR,
1317 ListEntry);
1318 if ((MdBlock->MemoryType < LoaderMaximum) &&
1319 (IncludeType[MdBlock->MemoryType]))
1320 {
1321 //
1322 // Add this to our running total
1323 //
1324 PageCount += MdBlock->PageCount;
1325
1326 //
1327 // Check if the next page is described by the next descriptor
1328 //
1329 if (MdBlock->BasePage == NextPage)
1330 {
1331 //
1332 // Combine it into the same physical run
1333 //
1334 ASSERT(MdBlock->PageCount != 0);
1335 Buffer->Run[Run - 1].PageCount += MdBlock->PageCount;
1336 NextPage += MdBlock->PageCount;
1337 }
1338 else
1339 {
1340 //
1341 // Otherwise just duplicate the descriptor's contents
1342 //
1343 Buffer->Run[Run].BasePage = MdBlock->BasePage;
1344 Buffer->Run[Run].PageCount = MdBlock->PageCount;
1345 NextPage = Buffer->Run[Run].BasePage + Buffer->Run[Run].PageCount;
1346
1347 //
1348 // And in this case, increase the number of runs
1349 //
1350 Run++;
1351 }
1352 }
1353
1354 //
1355 // Try the next descriptor
1356 //
1357 NextEntry = MdBlock->ListEntry.Flink;
1358 }
1359
1360 //
1361 // We should not have been able to go past our initial estimate
1362 //
1363 ASSERT(Run <= Buffer->NumberOfRuns);
1364
1365 //
1366 // Our guess was probably exaggerated...
1367 //
1368 if (InitialRuns > Run)
1369 {
1370 //
1371 // Allocate a more accurately sized buffer
1372 //
1373 NewBuffer = ExAllocatePoolWithTag(NonPagedPool,
1374 sizeof(PHYSICAL_MEMORY_DESCRIPTOR) +
1375 sizeof(PHYSICAL_MEMORY_RUN) *
1376 (Run - 1),
1377 'lMmM');
1378 if (NewBuffer)
1379 {
1380 //
1381 // Copy the old buffer into the new, then free it
1382 //
1383 RtlCopyMemory(NewBuffer->Run,
1384 Buffer->Run,
1385 sizeof(PHYSICAL_MEMORY_RUN) * Run);
1386 ExFreePool(Buffer);
1387
1388 //
1389 // Now use the new buffer
1390 //
1391 Buffer = NewBuffer;
1392 }
1393 }
1394
1395 //
1396 // Write the final numbers, and return it
1397 //
1398 Buffer->NumberOfRuns = Run;
1399 Buffer->NumberOfPages = PageCount;
1400 return Buffer;
1401 }
1402
/**
 * Sizes and creates the initial paged pool. Also double-maps the system
 * page directory through a reserved system PTE (for lazy PDE evaluation
 * across process switches), maps the first paged pool PDE, sets up the
 * paged pool allocation/end bitmaps, and establishes the default low and
 * high paged pool thresholds.
 */
VOID
NTAPI
MiBuildPagedPool(VOID)
{
    PMMPTE PointerPte, PointerPde;
    MMPTE TempPte = ValidKernelPte;
    PFN_NUMBER PageFrameIndex;
    KIRQL OldIrql;
    ULONG Size, BitMapSize;

    //
    // Get the page frame number for the system page directory
    //
    PointerPte = MiAddressToPte(PDE_BASE);
    MmSystemPageDirectory = PFN_FROM_PTE(PointerPte);

    //
    // Allocate a system PTE which will hold a copy of the page directory
    //
    PointerPte = MiReserveSystemPtes(1, SystemPteSpace);
    ASSERT(PointerPte);
    MmSystemPagePtes = MiPteToAddress(PointerPte);

    //
    // Make this system PTE point to the system page directory.
    // It is now essentially double-mapped. This will be used later for lazy
    // evaluation of PDEs accross process switches, similarly to how the Global
    // page directory array in the old ReactOS Mm is used (but in a less hacky
    // way).
    //
    // (TempPte was already initialized to ValidKernelPte at declaration;
    // the re-assignment below is redundant but harmless.)
    //
    TempPte = ValidKernelPte;
    TempPte.u.Hard.PageFrameNumber = MmSystemPageDirectory;
    ASSERT(PointerPte->u.Hard.Valid == 0);
    ASSERT(TempPte.u.Hard.Valid == 1);
    *PointerPte = TempPte;

    //
    // Let's get back to paged pool work: size it up.
    // By default, it should be twice as big as nonpaged pool.
    //
    MmSizeOfPagedPoolInBytes = 2 * MmMaximumNonPagedPoolInBytes;
    if (MmSizeOfPagedPoolInBytes > ((ULONG_PTR)MmNonPagedSystemStart -
                                    (ULONG_PTR)MmPagedPoolStart))
    {
        //
        // On the other hand, we have limited VA space, so make sure that the VA
        // for paged pool doesn't overflow into nonpaged pool VA. Otherwise, set
        // whatever maximum is possible.
        //
        MmSizeOfPagedPoolInBytes = (ULONG_PTR)MmNonPagedSystemStart -
                                   (ULONG_PTR)MmPagedPoolStart;
    }

    //
    // Get the size in pages and make sure paged pool is at least 32MB.
    //
    Size = MmSizeOfPagedPoolInBytes;
    if (Size < MI_MIN_INIT_PAGED_POOLSIZE) Size = MI_MIN_INIT_PAGED_POOLSIZE;
    Size = BYTES_TO_PAGES(Size);

    //
    // Now check how many PTEs will be required for these many pages.
    // (1024 PTEs fit in one page-table page; this rounds up to whole PDEs.)
    //
    Size = (Size + (1024 - 1)) / 1024;

    //
    // Recompute the page-aligned size of the paged pool, in bytes and pages.
    //
    MmSizeOfPagedPoolInBytes = Size * PAGE_SIZE * 1024;
    MmSizeOfPagedPoolInPages = MmSizeOfPagedPoolInBytes >> PAGE_SHIFT;

    //
    // Let's be really sure this doesn't overflow into nonpaged system VA
    //
    ASSERT((MmSizeOfPagedPoolInBytes + (ULONG_PTR)MmPagedPoolStart) <=
           (ULONG_PTR)MmNonPagedSystemStart);

    //
    // This is where paged pool ends
    //
    MmPagedPoolEnd = (PVOID)(((ULONG_PTR)MmPagedPoolStart +
                              MmSizeOfPagedPoolInBytes) - 1);

    //
    // So now get the PDE for paged pool and zero it out
    //
    PointerPde = MiAddressToPde(MmPagedPoolStart);
    RtlZeroMemory(PointerPde,
                  (1 + MiAddressToPde(MmPagedPoolEnd) - PointerPde) * sizeof(MMPTE));

    //
    // Next, get the first and last PTE
    //
    PointerPte = MiAddressToPte(MmPagedPoolStart);
    MmPagedPoolInfo.FirstPteForPagedPool = PointerPte;
    MmPagedPoolInfo.LastPteForPagedPool = MiAddressToPte(MmPagedPoolEnd);

    //
    // Lock the PFN database
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    //
    // Allocate a page and map the first paged pool PDE
    //
    // NOTE(review): this assumes the page returned by MmAllocPage is zeroed
    // before being used as a page-table page -- verify against MmAllocPage.
    //
    PageFrameIndex = MmAllocPage(MC_NPPOOL);
    TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
    ASSERT(PointerPde->u.Hard.Valid == 0);
    ASSERT(TempPte.u.Hard.Valid == 1);
    *PointerPde = TempPte;

    //
    // Release the PFN database lock
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    //
    // We only have one PDE mapped for now... at fault time, additional PDEs
    // will be allocated to handle paged pool growth. This is where they'll have
    // to start.
    //
    MmPagedPoolInfo.NextPdeForPagedPoolExpansion = PointerPde + 1;

    //
    // We keep track of each page via a bit, so check how big the bitmap will
    // have to be (make sure to align our page count such that it fits nicely
    // into a 4-byte aligned bitmap.
    //
    // We'll also allocate the bitmap header itself part of the same buffer.
    //
    // (Size is restored here from PDE count back to the page count.)
    //
    Size = Size * 1024;
    ASSERT(Size == MmSizeOfPagedPoolInPages);
    BitMapSize = Size;
    Size = sizeof(RTL_BITMAP) + (((Size + 31) / 32) * sizeof(ULONG));

    //
    // Allocate the allocation bitmap, which tells us which regions have not yet
    // been mapped into memory
    //
    MmPagedPoolInfo.PagedPoolAllocationMap = ExAllocatePoolWithTag(NonPagedPool,
                                                                   Size,
                                                                   '  mM');
    ASSERT(MmPagedPoolInfo.PagedPoolAllocationMap);

    //
    // Initialize it such that at first, only the first page's worth of PTEs is
    // marked as allocated (incidentially, the first PDE we allocated earlier).
    //
    RtlInitializeBitMap(MmPagedPoolInfo.PagedPoolAllocationMap,
                        (PULONG)(MmPagedPoolInfo.PagedPoolAllocationMap + 1),
                        BitMapSize);
    RtlSetAllBits(MmPagedPoolInfo.PagedPoolAllocationMap);
    RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap, 0, 1024);

    //
    // We have a second bitmap, which keeps track of where allocations end.
    // Given the allocation bitmap and a base address, we can therefore figure
    // out which page is the last page of that allocation, and thus how big the
    // entire allocation is.
    //
    MmPagedPoolInfo.EndOfPagedPoolBitmap = ExAllocatePoolWithTag(NonPagedPool,
                                                                 Size,
                                                                 '  mM');
    ASSERT(MmPagedPoolInfo.EndOfPagedPoolBitmap);
    RtlInitializeBitMap(MmPagedPoolInfo.EndOfPagedPoolBitmap,
                        (PULONG)(MmPagedPoolInfo.EndOfPagedPoolBitmap + 1),
                        BitMapSize);

    //
    // Since no allocations have been made yet, there are no bits set as the end
    //
    RtlClearAllBits(MmPagedPoolInfo.EndOfPagedPoolBitmap);

    //
    // Initialize paged pool.
    //
    InitializePool(PagedPool, 0);

    /* Default low threshold of 30MB or one fifth of paged pool */
    MiLowPagedPoolThreshold = (30 * _1MB) >> PAGE_SHIFT;
    MiLowPagedPoolThreshold = min(MiLowPagedPoolThreshold, Size / 5);

    /* Default high threshold of 60MB or 25% */
    MiHighPagedPoolThreshold = (60 * _1MB) >> PAGE_SHIFT;
    MiHighPagedPoolThreshold = min(MiHighPagedPoolThreshold, (Size * 2) / 5);
    ASSERT(MiLowPagedPoolThreshold < MiHighPagedPoolThreshold);
}
1590
1591 NTSTATUS
1592 NTAPI
1593 MmArmInitSystem(IN ULONG Phase,
1594 IN PLOADER_PARAMETER_BLOCK LoaderBlock)
1595 {
1596 ULONG i;
1597 BOOLEAN IncludeType[LoaderMaximum];
1598 PVOID Bitmap;
1599 PPHYSICAL_MEMORY_RUN Run;
1600 PFN_NUMBER PageCount;
1601
1602 //
1603 // Instantiate memory that we don't consider RAM/usable
1604 // We use the same exclusions that Windows does, in order to try to be
1605 // compatible with WinLDR-style booting
1606 //
1607 for (i = 0; i < LoaderMaximum; i++) IncludeType[i] = TRUE;
1608 IncludeType[LoaderBad] = FALSE;
1609 IncludeType[LoaderFirmwarePermanent] = FALSE;
1610 IncludeType[LoaderSpecialMemory] = FALSE;
1611 IncludeType[LoaderBBTMemory] = FALSE;
1612 if (Phase == 0)
1613 {
1614 /* Initialize the phase 0 temporary event */
1615 KeInitializeEvent(&MiTempEvent, NotificationEvent, FALSE);
1616
1617 /* Set all the events to use the temporary event for now */
1618 MiLowMemoryEvent = &MiTempEvent;
1619 MiHighMemoryEvent = &MiTempEvent;
1620 MiLowPagedPoolEvent = &MiTempEvent;
1621 MiHighPagedPoolEvent = &MiTempEvent;
1622 MiLowNonPagedPoolEvent = &MiTempEvent;
1623 MiHighNonPagedPoolEvent = &MiTempEvent;
1624
1625 //
1626 // Define the basic user vs. kernel address space separation
1627 //
1628 MmSystemRangeStart = (PVOID)KSEG0_BASE;
1629 MmUserProbeAddress = (ULONG_PTR)MmSystemRangeStart - 0x10000;
1630 MmHighestUserAddress = (PVOID)(MmUserProbeAddress - 1);
1631
1632 //
1633 // Get the size of the boot loader's image allocations and then round
1634 // that region up to a PDE size, so that any PDEs we might create for
1635 // whatever follows are separate from the PDEs that boot loader might've
1636 // already created (and later, we can blow all that away if we want to).
1637 //
1638 MmBootImageSize = KeLoaderBlock->Extension->LoaderPagesSpanned;
1639 MmBootImageSize *= PAGE_SIZE;
1640 MmBootImageSize = (MmBootImageSize + (4 * 1024 * 1024) - 1) & ~((4 * 1024 * 1024) - 1);
1641 ASSERT((MmBootImageSize % (4 * 1024 * 1024)) == 0);
1642
1643 //
1644 // Set the size of session view, pool, and image
1645 //
1646 MmSessionSize = MI_SESSION_SIZE;
1647 MmSessionViewSize = MI_SESSION_VIEW_SIZE;
1648 MmSessionPoolSize = MI_SESSION_POOL_SIZE;
1649 MmSessionImageSize = MI_SESSION_IMAGE_SIZE;
1650
1651 //
1652 // Set the size of system view
1653 //
1654 MmSystemViewSize = MI_SYSTEM_VIEW_SIZE;
1655
1656 //
1657 // This is where it all ends
1658 //
1659 MiSessionImageEnd = (PVOID)PTE_BASE;
1660
1661 //
1662 // This is where we will load Win32k.sys and the video driver
1663 //
1664 MiSessionImageStart = (PVOID)((ULONG_PTR)MiSessionImageEnd -
1665 MmSessionImageSize);
1666
1667 //
1668 // So the view starts right below the session working set (itself below
1669 // the image area)
1670 //
1671 MiSessionViewStart = (PVOID)((ULONG_PTR)MiSessionImageEnd -
1672 MmSessionImageSize -
1673 MI_SESSION_WORKING_SET_SIZE -
1674 MmSessionViewSize);
1675
1676 //
1677 // Session pool follows
1678 //
1679 MiSessionPoolEnd = MiSessionViewStart;
1680 MiSessionPoolStart = (PVOID)((ULONG_PTR)MiSessionPoolEnd -
1681 MmSessionPoolSize);
1682
1683 //
1684 // And it all begins here
1685 //
1686 MmSessionBase = MiSessionPoolStart;
1687
1688 //
1689 // Sanity check that our math is correct
1690 //
1691 ASSERT((ULONG_PTR)MmSessionBase + MmSessionSize == PTE_BASE);
1692
1693 //
1694 // Session space ends wherever image session space ends
1695 //
1696 MiSessionSpaceEnd = MiSessionImageEnd;
1697
1698 //
1699 // System view space ends at session space, so now that we know where
1700 // this is, we can compute the base address of system view space itself.
1701 //
1702 MiSystemViewStart = (PVOID)((ULONG_PTR)MmSessionBase -
1703 MmSystemViewSize);
1704
1705
1706 /* Initialize the user mode image list */
1707 InitializeListHead(&MmLoadedUserImageList);
1708
1709 /* Initialize the paged pool mutex */
1710 KeInitializeGuardedMutex(&MmPagedPoolMutex);
1711
1712 /* Initialize the Loader Lock */
1713 KeInitializeMutant(&MmSystemLoadLock, FALSE);
1714
1715 //
1716 // Count physical pages on the system
1717 //
1718 PageCount = MiPagesInLoaderBlock(LoaderBlock, IncludeType);
1719
1720 //
1721 // Check if this is a machine with less than 19MB of RAM
1722 //
1723 if (PageCount < MI_MIN_PAGES_FOR_SYSPTE_TUNING)
1724 {
1725 //
1726 // Use the very minimum of system PTEs
1727 //
1728 MmNumberOfSystemPtes = 7000;
1729 }
1730 else
1731 {
1732 //
1733 // Use the default, but check if we have more than 32MB of RAM
1734 //
1735 MmNumberOfSystemPtes = 11000;
1736 if (PageCount > MI_MIN_PAGES_FOR_SYSPTE_BOOST)
1737 {
1738 //
1739 // Double the amount of system PTEs
1740 //
1741 MmNumberOfSystemPtes <<= 1;
1742 }
1743 }
1744
1745 DPRINT("System PTE count has been tuned to %d (%d bytes)\n",
1746 MmNumberOfSystemPtes, MmNumberOfSystemPtes * PAGE_SIZE);
1747
1748 /* Initialize the platform-specific parts */
1749 MiInitMachineDependent(LoaderBlock);
1750
1751 //
1752 // Sync us up with ReactOS Mm
1753 //
1754 MiSyncARM3WithROS(MmNonPagedSystemStart, (PVOID)((ULONG_PTR)MmNonPagedPoolEnd - 1));
1755 MiSyncARM3WithROS(MmPfnDatabase[0], (PVOID)((ULONG_PTR)MmNonPagedPoolStart + MmSizeOfNonPagedPoolInBytes - 1));
1756 MiSyncARM3WithROS((PVOID)HYPER_SPACE, (PVOID)(HYPER_SPACE + PAGE_SIZE - 1));
1757
1758 //
1759 // Build the physical memory block
1760 //
1761 MmPhysicalMemoryBlock = MmInitializeMemoryLimits(LoaderBlock,
1762 IncludeType);
1763
1764 //
1765 // Allocate enough buffer for the PFN bitmap
1766 // Align it up to a 32-bit boundary
1767 //
1768 Bitmap = ExAllocatePoolWithTag(NonPagedPool,
1769 (((MmHighestPhysicalPage + 1) + 31) / 32) * 4,
1770 ' mM');
1771 if (!Bitmap)
1772 {
1773 //
1774 // This is critical
1775 //
1776 KeBugCheckEx(INSTALL_MORE_MEMORY,
1777 MmNumberOfPhysicalPages,
1778 MmLowestPhysicalPage,
1779 MmHighestPhysicalPage,
1780 0x101);
1781 }
1782
1783 //
1784 // Initialize it and clear all the bits to begin with
1785 //
1786 RtlInitializeBitMap(&MiPfnBitMap,
1787 Bitmap,
1788 MmHighestPhysicalPage + 1);
1789 RtlClearAllBits(&MiPfnBitMap);
1790
1791 //
1792 // Loop physical memory runs
1793 //
1794 for (i = 0; i < MmPhysicalMemoryBlock->NumberOfRuns; i++)
1795 {
1796 //
1797 // Get the run
1798 //
1799 Run = &MmPhysicalMemoryBlock->Run[i];
1800 DPRINT("PHYSICAL RAM [0x%08p to 0x%08p]\n",
1801 Run->BasePage << PAGE_SHIFT,
1802 (Run->BasePage + Run->PageCount) << PAGE_SHIFT);
1803
1804 //
1805 // Make sure it has pages inside it
1806 //
1807 if (Run->PageCount)
1808 {
1809 //
1810 // Set the bits in the PFN bitmap
1811 //
1812 RtlSetBits(&MiPfnBitMap, Run->BasePage, Run->PageCount);
1813 }
1814 }
1815
1816 //
1817 // Size up paged pool and build the shadow system page directory
1818 //
1819 MiBuildPagedPool();
1820
1821 /* Check how many pages the system has */
1822 if (MmNumberOfPhysicalPages <= (13 * _1MB))
1823 {
1824 /* Set small system */
1825 MmSystemSize = MmSmallSystem;
1826 }
1827 else if (MmNumberOfPhysicalPages <= (19 * _1MB))
1828 {
1829 /* Set small system */
1830 MmSystemSize = MmSmallSystem;
1831 }
1832 else
1833 {
1834 /* Set medium system */
1835 MmSystemSize = MmMediumSystem;
1836 }
1837
1838 /* Check for more than 32MB */
1839 if (MmNumberOfPhysicalPages >= ((32 * _1MB) / PAGE_SIZE))
1840 {
1841 /* Check for product type being "Wi" for WinNT */
1842 if (MmProductType == '\0i\0W')
1843 {
1844 /* Then this is a large system */
1845 MmSystemSize = MmLargeSystem;
1846 }
1847 else
1848 {
1849 /* For servers, we need 64MB to consider this as being large */
1850 if (MmNumberOfPhysicalPages >= ((64 * _1MB) / PAGE_SIZE))
1851 {
1852 /* Set it as large */
1853 MmSystemSize = MmLargeSystem;
1854 }
1855 }
1856 }
1857
1858 /* Now setup the shared user data fields */
1859 ASSERT(SharedUserData->NumberOfPhysicalPages == 0);
1860 SharedUserData->NumberOfPhysicalPages = MmNumberOfPhysicalPages;
1861 SharedUserData->LargePageMinimum = 0;
1862
1863 /* Check for workstation (Wi for WinNT) */
1864 if (MmProductType == '\0i\0W')
1865 {
1866 /* Set Windows NT Workstation product type */
1867 SharedUserData->NtProductType = NtProductWinNt;
1868 MmProductType = 0;
1869 }
1870 else
1871 {
1872 /* Check for LanMan server */
1873 if (MmProductType == '\0a\0L')
1874 {
1875 /* This is a domain controller */
1876 SharedUserData->NtProductType = NtProductLanManNt;
1877 }
1878 else
1879 {
1880 /* Otherwise it must be a normal server */
1881 SharedUserData->NtProductType = NtProductServer;
1882 }
1883
1884 /* Set the product type, and make the system more aggressive with low memory */
1885 MmProductType = 1;
1886 MmMinimumFreePages = 81;
1887 }
1888
1889 /* Update working set tuning parameters */
1890 MiAdjustWorkingSetManagerParameters(!MmProductType);
1891 }
1892
1893 //
1894 // Always return success for now
1895 //
1896 return STATUS_SUCCESS;
1897 }
1898
1899 /* EOF */