[NTOSKRNL]
[reactos.git] / reactos / ntoskrnl / mm / ARM3 / mminit.c
1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: BSD - See COPYING.ARM in the top level directory
4 * FILE: ntoskrnl/mm/ARM3/mminit.c
5 * PURPOSE: ARM Memory Manager Initialization
6 * PROGRAMMERS: ReactOS Portable Systems Group
7 */
8
9 /* INCLUDES *******************************************************************/
10
11 #include <ntoskrnl.h>
12 #define NDEBUG
13 #include <debug.h>
14
15 #define MODULE_INVOLVED_IN_ARM3
16 #include "miarm.h"
17 #undef MmSystemRangeStart
18
/* GLOBALS ********************************************************************/

//
// These are all registry-configurable, but by default, the memory manager will
// figure out the most appropriate values.
//
ULONG MmMaximumNonPagedPoolPercent;
SIZE_T MmSizeOfNonPagedPoolInBytes;
SIZE_T MmMaximumNonPagedPoolInBytes;

/* Some of the same values, in pages */
PFN_NUMBER MmMaximumNonPagedPoolInPages;

//
// These numbers describe the discrete equation components of the nonpaged
// pool sizing algorithm.
//
// They are described on http://support.microsoft.com/default.aspx/kb/126402/ja
// along with the algorithm that uses them, which is implemented later below.
//
SIZE_T MmMinimumNonPagedPoolSize = 256 * 1024;
ULONG MmMinAdditionNonPagedPoolPerMb = 32 * 1024;
SIZE_T MmDefaultMaximumNonPagedPool = 1024 * 1024;
ULONG MmMaxAdditionNonPagedPoolPerMb = 400 * 1024;

//
// The memory layout (and especially variable names) of the NT kernel mode
// components can be a bit hard to twig, especially when it comes to the non
// paged area.
//
// There are really two components to the non-paged pool:
//
// - The initial nonpaged pool, sized dynamically up to a maximum.
// - The expansion nonpaged pool, sized dynamically up to a maximum.
//
// The initial nonpaged pool is physically continuous for performance, and
// immediately follows the PFN database, typically sharing the same PDE. It is
// a very small resource (32MB on a 1GB system), and capped at 128MB.
//
// Right now we call this the "ARM³ Nonpaged Pool" and it begins somewhere after
// the PFN database (which starts at 0xB0000000).
//
// The expansion nonpaged pool, on the other hand, can grow much bigger (400MB
// for a 1GB system). On ARM³ however, it is currently capped at 128MB.
//
// The address where the initial nonpaged pool starts is aptly named
// MmNonPagedPoolStart, and it describes a range of MmSizeOfNonPagedPoolInBytes
// bytes.
//
// Expansion nonpaged pool starts at an address described by the variable called
// MmNonPagedPoolExpansionStart, and it goes on for MmMaximumNonPagedPoolInBytes
// minus MmSizeOfNonPagedPoolInBytes bytes, always reaching MmNonPagedPoolEnd
// (because of the way it's calculated) at 0xFFBE0000.
//
// Initial nonpaged pool is allocated and mapped early-on during boot, but what
// about the expansion nonpaged pool? It is instead composed of special pages
// which belong to what are called System PTEs. These PTEs are the matter of a
// later discussion, but they are also considered part of the "nonpaged" OS, due
// to the fact that they are never paged out -- once an address is described by
// a System PTE, it is always valid, until the System PTE is torn down.
//
// System PTEs are actually composed of two "spaces", the system space proper,
// and the nonpaged pool expansion space. The latter, as we've already seen,
// begins at MmNonPagedPoolExpansionStart. Based on the number of System PTEs
// that the system will support, the remaining address space below this address
// is used to hold the system space PTEs. This address, in turn, is held in the
// variable named MmNonPagedSystemStart, which itself is never allowed to go
// below 0xEB000000 (thus creating an upper bound on the number of System PTEs).
//
// This means that 330MB are reserved for total nonpaged system VA, on top of
// whatever the initial nonpaged pool allocation is.
//
// The following URLs, valid as of April 23rd, 2008, support this evidence:
//
// http://www.cs.miami.edu/~burt/journal/NT/memory.html
// http://www.ditii.com/2007/09/28/windows-memory-management-x86-virtual-address-space/
//
PVOID MmNonPagedSystemStart;
SIZE_T MiNonPagedSystemSize;
PVOID MmNonPagedPoolStart;
PVOID MmNonPagedPoolExpansionStart;
PVOID MmNonPagedPoolEnd = MI_NONPAGED_POOL_END;

//
// This is where paged pool starts by default
//
PVOID MmPagedPoolStart = MI_PAGED_POOL_START;
PVOID MmPagedPoolEnd;

//
// And this is its default size
//
SIZE_T MmSizeOfPagedPoolInBytes = MI_MIN_INIT_PAGED_POOLSIZE;
PFN_NUMBER MmSizeOfPagedPoolInPages = MI_MIN_INIT_PAGED_POOLSIZE / PAGE_SIZE;

//
// Session space starts at 0xBFFFFFFF and grows downwards
// By default, it includes an 8MB image area where we map win32k and video card
// drivers, followed by a 4MB area containing the session's working set. This is
// then followed by a 20MB mapped view area and finally by the session's paged
// pool, by default 16MB.
//
// On a normal system, this results in session space occupying the region from
// 0xBD000000 to 0xC0000000
//
// See miarm.h for the defines that determine the sizing of this region. On an
// NT system, some of these can be configured through the registry, but we don't
// support that yet.
//
PVOID MiSessionSpaceEnd;    // 0xC0000000
PVOID MiSessionImageEnd;    // 0xC0000000
PVOID MiSessionImageStart;  // 0xBF800000
PVOID MiSessionViewStart;   // 0xBE000000
PVOID MiSessionPoolEnd;     // 0xBE000000
PVOID MiSessionPoolStart;   // 0xBD000000
PVOID MmSessionBase;        // 0xBD000000

/* Sizes, in bytes, of the session regions delimited by the addresses above */
SIZE_T MmSessionSize;       // Total size of session space
SIZE_T MmSessionViewSize;   // Size of the mapped view area
SIZE_T MmSessionPoolSize;   // Size of the session paged pool
SIZE_T MmSessionImageSize;  // Size of the win32k/driver image area

/*
 * These are the PTE addresses of the boundaries carved out above
 */
PMMPTE MiSessionImagePteStart;
PMMPTE MiSessionImagePteEnd;
PMMPTE MiSessionBasePte;
PMMPTE MiSessionLastPte;

//
// The system view space, on the other hand, is where sections that are memory
// mapped into "system space" end up.
//
// By default, it is a 16MB region, but we hack it to be 32MB for ReactOS
//
PVOID MiSystemViewStart;
SIZE_T MmSystemViewSize;

#if (_MI_PAGING_LEVELS == 2)
//
// A copy of the system page directory (the page directory associated with the
// System process) is kept (double-mapped) by the manager in order to lazily
// map paged pool PDEs into external processes when they fault on a paged pool
// address.
//
PFN_NUMBER MmSystemPageDirectory[PD_COUNT];
PMMPDE MmSystemPagePtes;
#endif

//
// The system cache starts right after hyperspace. The first few pages are for
// keeping track of the system working set list.
//
// This should be 0xC0C00000 -- the cache itself starts at 0xC1000000
//
PMMWSL MmSystemCacheWorkingSetList = (PVOID)MI_SYSTEM_CACHE_WS_START;

//
// Windows NT seems to choose between 7000, 11000 and 50000
// On systems with more than 32MB, this number is then doubled, and further
// aligned up to a PDE boundary (4MB).
//
PFN_COUNT MmNumberOfSystemPtes;

//
// This is how many pages the PFN database will take up
// In Windows, this includes the Quark Color Table, but not in ARM³
//
PFN_NUMBER MxPfnAllocation;

//
// Unlike the old ReactOS Memory Manager, ARM³ (and Windows) does not keep track
// of pages that are not actually valid physical memory, such as ACPI reserved
// regions, BIOS address ranges, or holes in physical memory address space which
// could indicate device-mapped I/O memory.
//
// In fact, the lack of a PFN entry for a page usually indicates that this is
// I/O space instead.
//
// A bitmap, called the PFN bitmap, keeps track of all page frames by assigning
// a bit to each. If the bit is set, then the page is valid physical RAM.
//
RTL_BITMAP MiPfnBitMap;

//
// This structure describes the different pieces of RAM-backed address space
//
PPHYSICAL_MEMORY_DESCRIPTOR MmPhysicalMemoryBlock;

//
// This is where we keep track of the most basic physical layout markers
//
// Note: MmLowestPhysicalPage starts at -1 (i.e. the maximum PFN value) so that
// the first descriptor scanned always lowers it.
//
PFN_NUMBER MmHighestPhysicalPage, MmLowestPhysicalPage = -1;
PFN_COUNT MmNumberOfPhysicalPages;

//
// The total number of pages mapped by the boot loader, which include the kernel
// HAL, boot drivers, registry, NLS files and other loader data structures is
// kept track of here. This depends on "LoaderPagesSpanned" being correct when
// coming from the loader.
//
// This number is later aligned up to a PDE boundary.
//
SIZE_T MmBootImageSize;

//
// These three variables keep track of the core separation of address space that
// exists between kernel mode and user mode.
//
ULONG_PTR MmUserProbeAddress;
PVOID MmHighestUserAddress;
PVOID MmSystemRangeStart;

/* And these store the respective highest PTE/PDE address */
PMMPTE MiHighestUserPte;
PMMPDE MiHighestUserPde;
#if (_MI_PAGING_LEVELS >= 3)
PMMPTE MiHighestUserPpe;
#if (_MI_PAGING_LEVELS >= 4)
PMMPTE MiHighestUserPxe;
#endif
#endif

/* These variables define the system cache address space */
PVOID MmSystemCacheStart;
PVOID MmSystemCacheEnd;
MMSUPPORT MmSystemCacheWs;

//
// This is where hyperspace ends (followed by the system cache working set)
//
PVOID MmHyperSpaceEnd;

//
// Page coloring algorithm data
//
ULONG MmSecondaryColors;
ULONG MmSecondaryColorMask;

//
// Actual (registry-configurable) size of a GUI thread's stack
//
ULONG MmLargeStackSize = KERNEL_LARGE_STACK_SIZE;

//
// Before we have a PFN database, memory comes straight from our physical memory
// blocks, which is nice because it's guaranteed contiguous and also because once
// we take a page from here, the system doesn't see it anymore.
// However, once the fun is over, those pages must be re-integrated back into
// PFN society life, and that requires us keeping a copy of the original layout
// so that we can parse it later.
//
PMEMORY_ALLOCATION_DESCRIPTOR MxFreeDescriptor;
MEMORY_ALLOCATION_DESCRIPTOR MxOldFreeDescriptor;

/*
 * For each page's worth of bytes of L2 cache in a given set/way line, the zero
 * and free lists are organized in what is called a "color".
 *
 * This array points to the two lists, so it can be thought of as a multi-dimensional
 * array of MmFreePagesByColor[2][MmSecondaryColors]. Since the number is dynamic,
 * we describe the array in pointer form instead.
 *
 * On a final note, the color tables themselves are right after the PFN database.
 */
C_ASSERT(FreePageList == 1);
PMMCOLOR_TABLES MmFreePagesByColor[FreePageList + 1];

/* An event used in Phase 0 before the rest of the system is ready to go */
KEVENT MiTempEvent;

/* All the events used for memory threshold notifications */
PKEVENT MiLowMemoryEvent;
PKEVENT MiHighMemoryEvent;
PKEVENT MiLowPagedPoolEvent;
PKEVENT MiHighPagedPoolEvent;
PKEVENT MiLowNonPagedPoolEvent;
PKEVENT MiHighNonPagedPoolEvent;

/* The actual thresholds themselves, in page numbers */
PFN_NUMBER MmLowMemoryThreshold;
PFN_NUMBER MmHighMemoryThreshold;
PFN_NUMBER MiLowPagedPoolThreshold;
PFN_NUMBER MiHighPagedPoolThreshold;
PFN_NUMBER MiLowNonPagedPoolThreshold;
PFN_NUMBER MiHighNonPagedPoolThreshold;

/*
 * This number determines how many free pages must exist, at minimum, until we
 * start trimming working sets and flushing modified pages to obtain more free
 * pages.
 *
 * This number changes if the system detects that this is a server product
 */
PFN_NUMBER MmMinimumFreePages = 26;

/*
 * This number indicates how many pages we consider to be a low limit of having
 * "plenty" of free memory.
 *
 * It is doubled on systems that have more than 63MB of memory
 */
PFN_NUMBER MmPlentyFreePages = 400;

/* These values store the type of system this is (small, med, large) and if server */
ULONG MmProductType;
MM_SYSTEMSIZE MmSystemSize;

/*
 * These values store the cache working set minimums and maximums, in pages
 *
 * The minimum value is boosted on systems with more than 24MB of RAM, and cut
 * down to only 32 pages on embedded (<24MB RAM) systems.
 *
 * An extra boost of 2MB is given on systems with more than 33MB of RAM.
 */
PFN_NUMBER MmSystemCacheWsMinimum = 288;
PFN_NUMBER MmSystemCacheWsMaximum = 350;

/* FIXME: Move to cache/working set code later */
BOOLEAN MmLargeSystemCache;

/*
 * This value determines in how many fragments/chunks the subsection prototype
 * PTEs should be allocated when mapping a section object. It is configurable in
 * the registry through the MapAllocationFragment parameter.
 *
 * The default is 64KB on systems with more than 1GB of RAM, 32KB on systems with
 * more than 256MB of RAM, and 16KB on systems with less than 256MB of RAM.
 *
 * The maximum it can be set to is 2MB, and the minimum is 4KB.
 */
SIZE_T MmAllocationFragment;

/*
 * These two values track how much virtual memory can be committed, and when
 * expansion should happen.
 */
// FIXME: They should be moved elsewhere since it's not an "init" setting?
SIZE_T MmTotalCommitLimit;
SIZE_T MmTotalCommitLimitMaximum;

/* Internal setting used for debugging memory descriptors */
BOOLEAN MiDbgEnableMdDump =
#ifdef _ARM_
TRUE;
#else
FALSE;
#endif

/* Number of memory descriptors in the loader block */
ULONG MiNumberDescriptors = 0;

/* Number of free pages in the loader block */
PFN_NUMBER MiNumberOfFreePages = 0;

376 /* PRIVATE FUNCTIONS **********************************************************/
377
/*
 * Performs the very first pass over the loader's memory descriptor list:
 * counts descriptors and physical pages, records the lowest/highest PFNs,
 * tallies free pages, and remembers the largest free descriptor in
 * MxFreeDescriptor (with a pristine copy in MxOldFreeDescriptor) so that
 * early boot allocations (MxGetNextPage) can carve pages out of it.
 */
VOID
NTAPI
MiScanMemoryDescriptors(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
    PLIST_ENTRY ListEntry;
    PMEMORY_ALLOCATION_DESCRIPTOR Descriptor;
    PFN_NUMBER PageFrameIndex, FreePages = 0;

    /* Loop the memory descriptors */
    for (ListEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
         ListEntry != &LoaderBlock->MemoryDescriptorListHead;
         ListEntry = ListEntry->Flink)
    {
        /* Get the descriptor */
        Descriptor = CONTAINING_RECORD(ListEntry,
                                       MEMORY_ALLOCATION_DESCRIPTOR,
                                       ListEntry);
        DPRINT("MD Type: %lx Base: %lx Count: %lx\n",
               Descriptor->MemoryType, Descriptor->BasePage, Descriptor->PageCount);

        /* Count this descriptor */
        MiNumberDescriptors++;

        /* Check if this is invisible memory (firmware/HAL/BBT ranges that
           the OS never maps and never describes in the PFN database) */
        if ((Descriptor->MemoryType == LoaderFirmwarePermanent) ||
            (Descriptor->MemoryType == LoaderSpecialMemory) ||
            (Descriptor->MemoryType == LoaderHALCachedMemory) ||
            (Descriptor->MemoryType == LoaderBBTMemory))
        {
            /* Skip this descriptor */
            continue;
        }

        /* Check if this is bad memory (bad RAM is visible but not counted) */
        if (Descriptor->MemoryType != LoaderBad)
        {
            /* Count this in the total of pages */
            MmNumberOfPhysicalPages += (PFN_COUNT)Descriptor->PageCount;
        }

        /* Check if this is the new lowest page */
        if (Descriptor->BasePage < MmLowestPhysicalPage)
        {
            /* Update the lowest page */
            MmLowestPhysicalPage = Descriptor->BasePage;
        }

        /* Check if this is the new highest page */
        PageFrameIndex = Descriptor->BasePage + Descriptor->PageCount;
        if (PageFrameIndex > MmHighestPhysicalPage)
        {
            /* Update the highest page (PageFrameIndex is one past the end) */
            MmHighestPhysicalPage = PageFrameIndex - 1;
        }

        /* Check if this is free memory (all types reusable after boot) */
        if ((Descriptor->MemoryType == LoaderFree) ||
            (Descriptor->MemoryType == LoaderLoadedProgram) ||
            (Descriptor->MemoryType == LoaderFirmwareTemporary) ||
            (Descriptor->MemoryType == LoaderOsloaderStack))
        {
            /* Count it toward the total of free pages */
            MiNumberOfFreePages += Descriptor->PageCount;

            /* Check if this is the largest memory descriptor */
            if (Descriptor->PageCount > FreePages)
            {
                /* Remember it */
                MxFreeDescriptor = Descriptor;
                FreePages = Descriptor->PageCount;
            }
        }
    }

    /* Save original values of the free descriptor, since it'll be
     * altered by early allocations.
     * NOTE(review): if the loader provided no free descriptors at all,
     * MxFreeDescriptor is still NULL here and this dereference faults --
     * presumably such a system can't boot anyway; verify assumption. */
    MxOldFreeDescriptor = *MxFreeDescriptor;
}
456
457 PFN_NUMBER
458 NTAPI
459 INIT_FUNCTION
460 MxGetNextPage(IN PFN_NUMBER PageCount)
461 {
462 PFN_NUMBER Pfn;
463
464 /* Make sure we have enough pages */
465 if (PageCount > MxFreeDescriptor->PageCount)
466 {
467 /* Crash the system */
468 KeBugCheckEx(INSTALL_MORE_MEMORY,
469 MmNumberOfPhysicalPages,
470 MxFreeDescriptor->PageCount,
471 MxOldFreeDescriptor.PageCount,
472 PageCount);
473 }
474
475 /* Use our lowest usable free pages */
476 Pfn = MxFreeDescriptor->BasePage;
477 MxFreeDescriptor->BasePage += PageCount;
478 MxFreeDescriptor->PageCount -= PageCount;
479 return Pfn;
480 }
481
482 VOID
483 NTAPI
484 INIT_FUNCTION
485 MiComputeColorInformation(VOID)
486 {
487 ULONG L2Associativity;
488
489 /* Check if no setting was provided already */
490 if (!MmSecondaryColors)
491 {
492 /* Get L2 cache information */
493 L2Associativity = KeGetPcr()->SecondLevelCacheAssociativity;
494
495 /* The number of colors is the number of cache bytes by set/way */
496 MmSecondaryColors = KeGetPcr()->SecondLevelCacheSize;
497 if (L2Associativity) MmSecondaryColors /= L2Associativity;
498 }
499
500 /* Now convert cache bytes into pages */
501 MmSecondaryColors >>= PAGE_SHIFT;
502 if (!MmSecondaryColors)
503 {
504 /* If there was no cache data from the KPCR, use the default colors */
505 MmSecondaryColors = MI_SECONDARY_COLORS;
506 }
507 else
508 {
509 /* Otherwise, make sure there aren't too many colors */
510 if (MmSecondaryColors > MI_MAX_SECONDARY_COLORS)
511 {
512 /* Set the maximum */
513 MmSecondaryColors = MI_MAX_SECONDARY_COLORS;
514 }
515
516 /* Make sure there aren't too little colors */
517 if (MmSecondaryColors < MI_MIN_SECONDARY_COLORS)
518 {
519 /* Set the default */
520 MmSecondaryColors = MI_SECONDARY_COLORS;
521 }
522
523 /* Finally make sure the colors are a power of two */
524 if (MmSecondaryColors & (MmSecondaryColors - 1))
525 {
526 /* Set the default */
527 MmSecondaryColors = MI_SECONDARY_COLORS;
528 }
529 }
530
531 /* Compute the mask and store it */
532 MmSecondaryColorMask = MmSecondaryColors - 1;
533 KeGetCurrentPrcb()->SecondaryColorMask = MmSecondaryColorMask;
534 }
535
536 VOID
537 NTAPI
538 INIT_FUNCTION
539 MiInitializeColorTables(VOID)
540 {
541 ULONG i;
542 PMMPTE PointerPte, LastPte;
543 MMPTE TempPte = ValidKernelPte;
544
545 /* The color table starts after the ARM3 PFN database */
546 MmFreePagesByColor[0] = (PMMCOLOR_TABLES)&MmPfnDatabase[MmHighestPhysicalPage + 1];
547
548 /* Loop the PTEs. We have two color tables for each secondary color */
549 PointerPte = MiAddressToPte(&MmFreePagesByColor[0][0]);
550 LastPte = MiAddressToPte((ULONG_PTR)MmFreePagesByColor[0] +
551 (2 * MmSecondaryColors * sizeof(MMCOLOR_TABLES))
552 - 1);
553 while (PointerPte <= LastPte)
554 {
555 /* Check for valid PTE */
556 if (PointerPte->u.Hard.Valid == 0)
557 {
558 /* Get a page and map it */
559 TempPte.u.Hard.PageFrameNumber = MxGetNextPage(1);
560 MI_WRITE_VALID_PTE(PointerPte, TempPte);
561
562 /* Zero out the page */
563 RtlZeroMemory(MiPteToAddress(PointerPte), PAGE_SIZE);
564 }
565
566 /* Next */
567 PointerPte++;
568 }
569
570 /* Now set the address of the next list, right after this one */
571 MmFreePagesByColor[1] = &MmFreePagesByColor[0][MmSecondaryColors];
572
573 /* Now loop the lists to set them up */
574 for (i = 0; i < MmSecondaryColors; i++)
575 {
576 /* Set both free and zero lists for each color */
577 MmFreePagesByColor[ZeroedPageList][i].Flink = LIST_HEAD;
578 MmFreePagesByColor[ZeroedPageList][i].Blink = (PVOID)LIST_HEAD;
579 MmFreePagesByColor[ZeroedPageList][i].Count = 0;
580 MmFreePagesByColor[FreePageList][i].Flink = LIST_HEAD;
581 MmFreePagesByColor[FreePageList][i].Blink = (PVOID)LIST_HEAD;
582 MmFreePagesByColor[FreePageList][i].Count = 0;
583 }
584 }
585
586 #ifndef _M_AMD64
587 BOOLEAN
588 NTAPI
589 INIT_FUNCTION
590 MiIsRegularMemory(IN PLOADER_PARAMETER_BLOCK LoaderBlock,
591 IN PFN_NUMBER Pfn)
592 {
593 PLIST_ENTRY NextEntry;
594 PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
595
596 /* Loop the memory descriptors */
597 NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
598 while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
599 {
600 /* Get the memory descriptor */
601 MdBlock = CONTAINING_RECORD(NextEntry,
602 MEMORY_ALLOCATION_DESCRIPTOR,
603 ListEntry);
604
605 /* Check if this PFN could be part of the block */
606 if (Pfn >= (MdBlock->BasePage))
607 {
608 /* Check if it really is part of the block */
609 if (Pfn < (MdBlock->BasePage + MdBlock->PageCount))
610 {
611 /* Check if the block is actually memory we don't map */
612 if ((MdBlock->MemoryType == LoaderFirmwarePermanent) ||
613 (MdBlock->MemoryType == LoaderBBTMemory) ||
614 (MdBlock->MemoryType == LoaderSpecialMemory))
615 {
616 /* We don't need PFN database entries for this memory */
617 break;
618 }
619
620 /* This is memory we want to map */
621 return TRUE;
622 }
623 }
624 else
625 {
626 /* Blocks are ordered, so if it's not here, it doesn't exist */
627 break;
628 }
629
630 /* Get to the next descriptor */
631 NextEntry = MdBlock->ListEntry.Flink;
632 }
633
634 /* Check if this PFN is actually from our free memory descriptor */
635 if ((Pfn >= MxOldFreeDescriptor.BasePage) &&
636 (Pfn < MxOldFreeDescriptor.BasePage + MxOldFreeDescriptor.PageCount))
637 {
638 /* We use these pages for initial mappings, so we do want to count them */
639 return TRUE;
640 }
641
642 /* Otherwise this isn't memory that we describe or care about */
643 return FALSE;
644 }
645
/*
 * Maps the virtual address range of the PFN database: for every loader
 * descriptor that will be described by the database, the PTEs covering the
 * corresponding MMPFN entries are backed with zeroed physical pages taken
 * directly from the boot free descriptor.
 *
 * MxGetNextPage cannot be used here because this routine walks the
 * descriptor list while consuming pages from MxFreeDescriptor itself; the
 * consumption is therefore done on local copies and written back at the end.
 */
VOID
NTAPI
INIT_FUNCTION
MiMapPfnDatabase(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
    PFN_NUMBER FreePage, FreePageCount, PagesLeft, BasePage, PageCount;
    PLIST_ENTRY NextEntry;
    PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
    PMMPTE PointerPte, LastPte;
    MMPTE TempPte = ValidKernelPte;

    /* Get current page data, since we won't be using MxGetNextPage as it would corrupt our state */
    FreePage = MxFreeDescriptor->BasePage;
    FreePageCount = MxFreeDescriptor->PageCount;
    PagesLeft = 0;  /* counts PTEs we back here; not read afterwards */

    /* Loop the memory descriptors */
    NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
    while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
    {
        /* Get the descriptor */
        MdBlock = CONTAINING_RECORD(NextEntry,
                                    MEMORY_ALLOCATION_DESCRIPTOR,
                                    ListEntry);
        if ((MdBlock->MemoryType == LoaderFirmwarePermanent) ||
            (MdBlock->MemoryType == LoaderBBTMemory) ||
            (MdBlock->MemoryType == LoaderSpecialMemory))
        {
            /* These pages are not part of the PFN database */
            NextEntry = MdBlock->ListEntry.Flink;
            continue;
        }

        /* Next, check if this is our special free descriptor we've found */
        if (MdBlock == MxFreeDescriptor)
        {
            /* Use the real (pre-consumption) numbers instead, so the PFN
               entries for pages already used during early boot get mapped */
            BasePage = MxOldFreeDescriptor.BasePage;
            PageCount = MxOldFreeDescriptor.PageCount;
        }
        else
        {
            /* Use the descriptor's numbers */
            BasePage = MdBlock->BasePage;
            PageCount = MdBlock->PageCount;
        }

        /* Get the PTEs covering the PFN entries for this page range */
        PointerPte = MiAddressToPte(&MmPfnDatabase[BasePage]);
        LastPte = MiAddressToPte(((ULONG_PTR)&MmPfnDatabase[BasePage + PageCount]) - 1);
        DPRINT("MD Type: %lx Base: %lx Count: %lx\n", MdBlock->MemoryType, BasePage, PageCount);

        /* Loop them */
        while (PointerPte <= LastPte)
        {
            /* We'll only touch PTEs that aren't already valid
               (ranges may share a PTE with a previously mapped range) */
            if (PointerPte->u.Hard.Valid == 0)
            {
                /* Use the next free page */
                TempPte.u.Hard.PageFrameNumber = FreePage;
                ASSERT(FreePageCount != 0);

                /* Consume free pages */
                FreePage++;
                FreePageCount--;
                if (!FreePageCount)
                {
                    /* Out of memory -- fatal this early in boot */
                    KeBugCheckEx(INSTALL_MORE_MEMORY,
                                 MmNumberOfPhysicalPages,
                                 FreePageCount,
                                 MxOldFreeDescriptor.PageCount,
                                 1);
                }

                /* Write out this PTE */
                PagesLeft++;
                MI_WRITE_VALID_PTE(PointerPte, TempPte);

                /* Zero this page so the PFN entries start out clean */
                RtlZeroMemory(MiPteToAddress(PointerPte), PAGE_SIZE);
            }

            /* Next! */
            PointerPte++;
        }

        /* Do the next address range */
        NextEntry = MdBlock->ListEntry.Flink;
    }

    /* Now update the free descriptors to consume the pages we used up during the PFN allocation loop */
    MxFreeDescriptor->BasePage = FreePage;
    MxFreeDescriptor->PageCount = FreePageCount;
}
741
742 VOID
743 NTAPI
744 INIT_FUNCTION
745 MiBuildPfnDatabaseFromPages(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
746 {
747 PMMPDE PointerPde;
748 PMMPTE PointerPte;
749 ULONG i, Count, j;
750 PFN_NUMBER PageFrameIndex, StartupPdIndex, PtePageIndex;
751 PMMPFN Pfn1, Pfn2;
752 ULONG_PTR BaseAddress = 0;
753
754 /* PFN of the startup page directory */
755 StartupPdIndex = PFN_FROM_PTE(MiAddressToPde(PDE_BASE));
756
757 /* Start with the first PDE and scan them all */
758 PointerPde = MiAddressToPde(NULL);
759 Count = PD_COUNT * PDE_COUNT;
760 for (i = 0; i < Count; i++)
761 {
762 /* Check for valid PDE */
763 if (PointerPde->u.Hard.Valid == 1)
764 {
765 /* Get the PFN from it */
766 PageFrameIndex = PFN_FROM_PTE(PointerPde);
767
768 /* Do we want a PFN entry for this page? */
769 if (MiIsRegularMemory(LoaderBlock, PageFrameIndex))
770 {
771 /* Yes we do, set it up */
772 Pfn1 = MiGetPfnEntry(PageFrameIndex);
773 Pfn1->u4.PteFrame = StartupPdIndex;
774 Pfn1->PteAddress = (PMMPTE)PointerPde;
775 Pfn1->u2.ShareCount++;
776 Pfn1->u3.e2.ReferenceCount = 1;
777 Pfn1->u3.e1.PageLocation = ActiveAndValid;
778 Pfn1->u3.e1.CacheAttribute = MiNonCached;
779 #if MI_TRACE_PFNS
780 Pfn1->PfnUsage = MI_USAGE_INIT_MEMORY;
781 memcpy(Pfn1->ProcessName, "Initial PDE", 16);
782 #endif
783 }
784 else
785 {
786 /* No PFN entry */
787 Pfn1 = NULL;
788 }
789
790 /* Now get the PTE and scan the pages */
791 PointerPte = MiAddressToPte(BaseAddress);
792 for (j = 0; j < PTE_COUNT; j++)
793 {
794 /* Check for a valid PTE */
795 if (PointerPte->u.Hard.Valid == 1)
796 {
797 /* Increase the shared count of the PFN entry for the PDE */
798 ASSERT(Pfn1 != NULL);
799 Pfn1->u2.ShareCount++;
800
801 /* Now check if the PTE is valid memory too */
802 PtePageIndex = PFN_FROM_PTE(PointerPte);
803 if (MiIsRegularMemory(LoaderBlock, PtePageIndex))
804 {
805 /*
806 * Only add pages above the end of system code or pages
807 * that are part of nonpaged pool
808 */
809 if ((BaseAddress >= 0xA0000000) ||
810 ((BaseAddress >= (ULONG_PTR)MmNonPagedPoolStart) &&
811 (BaseAddress < (ULONG_PTR)MmNonPagedPoolStart +
812 MmSizeOfNonPagedPoolInBytes)))
813 {
814 /* Get the PFN entry and make sure it too is valid */
815 Pfn2 = MiGetPfnEntry(PtePageIndex);
816 if ((MmIsAddressValid(Pfn2)) &&
817 (MmIsAddressValid(Pfn2 + 1)))
818 {
819 /* Setup the PFN entry */
820 Pfn2->u4.PteFrame = PageFrameIndex;
821 Pfn2->PteAddress = PointerPte;
822 Pfn2->u2.ShareCount++;
823 Pfn2->u3.e2.ReferenceCount = 1;
824 Pfn2->u3.e1.PageLocation = ActiveAndValid;
825 Pfn2->u3.e1.CacheAttribute = MiNonCached;
826 #if MI_TRACE_PFNS
827 Pfn2->PfnUsage = MI_USAGE_INIT_MEMORY;
828 memcpy(Pfn1->ProcessName, "Initial PTE", 16);
829 #endif
830 }
831 }
832 }
833 }
834
835 /* Next PTE */
836 PointerPte++;
837 BaseAddress += PAGE_SIZE;
838 }
839 }
840 else
841 {
842 /* Next PDE mapped address */
843 BaseAddress += PDE_MAPPED_VA;
844 }
845
846 /* Next PTE */
847 PointerPde++;
848 }
849 }
850
851 VOID
852 NTAPI
853 INIT_FUNCTION
854 MiBuildPfnDatabaseZeroPage(VOID)
855 {
856 PMMPFN Pfn1;
857 PMMPDE PointerPde;
858
859 /* Grab the lowest page and check if it has no real references */
860 Pfn1 = MiGetPfnEntry(MmLowestPhysicalPage);
861 if (!(MmLowestPhysicalPage) && !(Pfn1->u3.e2.ReferenceCount))
862 {
863 /* Make it a bogus page to catch errors */
864 PointerPde = MiAddressToPde(0xFFFFFFFF);
865 Pfn1->u4.PteFrame = PFN_FROM_PTE(PointerPde);
866 Pfn1->PteAddress = (PMMPTE)PointerPde;
867 Pfn1->u2.ShareCount++;
868 Pfn1->u3.e2.ReferenceCount = 0xFFF0;
869 Pfn1->u3.e1.PageLocation = ActiveAndValid;
870 Pfn1->u3.e1.CacheAttribute = MiNonCached;
871 }
872 }
873
/*
 * Completes the PFN database from the loader's memory descriptor list:
 * free-type pages are inserted into the free page lists (under the PFN
 * lock), invisible ranges are skipped, and everything else (boot drivers,
 * kernel, registry, ...) is marked in-use via its KSEG0 mapping. Descriptors
 * extending past MmHighestPhysicalPage are trimmed to fit the database.
 */
VOID
NTAPI
INIT_FUNCTION
MiBuildPfnDatabaseFromLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
    PLIST_ENTRY NextEntry;
    PFN_NUMBER PageCount = 0;
    PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
    PFN_NUMBER PageFrameIndex;
    PMMPFN Pfn1;
    PMMPTE PointerPte;
    PMMPDE PointerPde;
    KIRQL OldIrql;

    /* Now loop through the descriptors */
    NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
    while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
    {
        /* Get the current descriptor */
        MdBlock = CONTAINING_RECORD(NextEntry,
                                    MEMORY_ALLOCATION_DESCRIPTOR,
                                    ListEntry);

        /* Read its data */
        PageCount = MdBlock->PageCount;
        PageFrameIndex = MdBlock->BasePage;

        /* Don't allow memory above what the PFN database is mapping */
        if (PageFrameIndex > MmHighestPhysicalPage)
        {
            /* Since they are ordered, everything past here will be larger */
            break;
        }

        /* On the other hand, the end page might be higher up... */
        if ((PageFrameIndex + PageCount) > (MmHighestPhysicalPage + 1))
        {
            /* In which case we'll trim the descriptor to go as high as we can */
            PageCount = MmHighestPhysicalPage + 1 - PageFrameIndex;
            MdBlock->PageCount = PageCount;

            /* But if there's nothing left to trim, we got too high, so quit */
            if (!PageCount) break;
        }

        /* Now check the descriptor type */
        switch (MdBlock->MemoryType)
        {
            /* Check for bad RAM */
            case LoaderBad:

                /* Bad pages get no PFN treatment at all -- they stay unused */
                DPRINT1("You either have specified /BURNMEMORY or damaged RAM modules.\n");
                break;

            /* Check for free RAM */
            case LoaderFree:
            case LoaderLoadedProgram:
            case LoaderFirmwareTemporary:
            case LoaderOsloaderStack:

                /* Get the last page of this descriptor. Note we loop backwards */
                PageFrameIndex += PageCount - 1;
                Pfn1 = MiGetPfnEntry(PageFrameIndex);

                /* Lock the PFN Database */
                OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
                while (PageCount--)
                {
                    /* If the page really has no references, mark it as free
                       (pages consumed during early boot already have one) */
                    if (!Pfn1->u3.e2.ReferenceCount)
                    {
                        /* Add it to the free list */
                        Pfn1->u3.e1.CacheAttribute = MiNonCached;
                        MiInsertPageInFreeList(PageFrameIndex);
                    }

                    /* Go to the next page */
                    Pfn1--;
                    PageFrameIndex--;
                }

                /* Release PFN database */
                KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

                /* Done with this block */
                break;

            /* Check for pages that are invisible to us */
            case LoaderFirmwarePermanent:
            case LoaderSpecialMemory:
            case LoaderBBTMemory:

                /* And skip them */
                break;

            default:

                /* Map these pages with the KSEG0 mapping that adds 0x80000000 */
                PointerPte = MiAddressToPte(KSEG0_BASE + (PageFrameIndex << PAGE_SHIFT));
                Pfn1 = MiGetPfnEntry(PageFrameIndex);
                while (PageCount--)
                {
                    /* Check if the page is really unused */
                    PointerPde = MiAddressToPde(KSEG0_BASE + (PageFrameIndex << PAGE_SHIFT));
                    if (!Pfn1->u3.e2.ReferenceCount)
                    {
                        /* Mark it as being in-use */
                        Pfn1->u4.PteFrame = PFN_FROM_PTE(PointerPde);
                        Pfn1->PteAddress = PointerPte;
                        Pfn1->u2.ShareCount++;
                        Pfn1->u3.e2.ReferenceCount = 1;
                        Pfn1->u3.e1.PageLocation = ActiveAndValid;
                        Pfn1->u3.e1.CacheAttribute = MiNonCached;
#if MI_TRACE_PFNS
                        Pfn1->PfnUsage = MI_USAGE_BOOT_DRIVER;
#endif

                        /* Check for RAM disk page */
                        if (MdBlock->MemoryType == LoaderXIPRom)
                        {
                            /* Make it a pseudo-I/O ROM mapping: no location,
                               no references, flagged as ROM/prototype */
                            Pfn1->u1.Flink = 0;
                            Pfn1->u2.ShareCount = 0;
                            Pfn1->u3.e2.ReferenceCount = 0;
                            Pfn1->u3.e1.PageLocation = 0;
                            Pfn1->u3.e1.Rom = 1;
                            Pfn1->u4.InPageError = 0;
                            Pfn1->u3.e1.PrototypePte = 1;
                        }
                    }

                    /* Advance page structures */
                    Pfn1++;
                    PageFrameIndex++;
                    PointerPte++;
                }
                break;
        }

        /* Next descriptor entry */
        NextEntry = MdBlock->ListEntry.Flink;
    }
}
1017
1018 VOID
1019 NTAPI
1020 INIT_FUNCTION
1021 MiBuildPfnDatabaseSelf(VOID)
1022 {
1023 PMMPTE PointerPte, LastPte;
1024 PMMPFN Pfn1;
1025
1026 /* Loop the PFN database page */
1027 PointerPte = MiAddressToPte(MiGetPfnEntry(MmLowestPhysicalPage));
1028 LastPte = MiAddressToPte(MiGetPfnEntry(MmHighestPhysicalPage));
1029 while (PointerPte <= LastPte)
1030 {
1031 /* Make sure the page is valid */
1032 if (PointerPte->u.Hard.Valid == 1)
1033 {
1034 /* Get the PFN entry and just mark it referenced */
1035 Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
1036 Pfn1->u2.ShareCount = 1;
1037 Pfn1->u3.e2.ReferenceCount = 1;
1038 #if MI_TRACE_PFNS
1039 Pfn1->PfnUsage = MI_USAGE_PFN_DATABASE;
1040 #endif
1041 }
1042
1043 /* Next */
1044 PointerPte++;
1045 }
1046 }
1047
/*
 * Builds the entire PFN database in four sequential passes over the
 * machine's memory state as described by the boot loader.
 */
VOID
NTAPI
INIT_FUNCTION
MiInitializePfnDatabase(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
    /* Scan memory and start setting up PFN entries */
    MiBuildPfnDatabaseFromPages(LoaderBlock);

    /* Add the zero page */
    MiBuildPfnDatabaseZeroPage();

    /* Scan the loader block and build the rest of the PFN database */
    MiBuildPfnDatabaseFromLoaderBlock(LoaderBlock);

    /* Finally add the pages for the PFN database itself */
    MiBuildPfnDatabaseSelf();
}
1065 #endif /* !_M_AMD64 */
1066
1067 VOID
1068 NTAPI
1069 INIT_FUNCTION
1070 MmFreeLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
1071 {
1072 PLIST_ENTRY NextMd;
1073 PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
1074 ULONG_PTR i;
1075 PFN_NUMBER BasePage, LoaderPages;
1076 PMMPFN Pfn1;
1077 KIRQL OldIrql;
1078 PPHYSICAL_MEMORY_RUN Buffer, Entry;
1079
1080 /* Loop the descriptors in order to count them */
1081 i = 0;
1082 NextMd = LoaderBlock->MemoryDescriptorListHead.Flink;
1083 while (NextMd != &LoaderBlock->MemoryDescriptorListHead)
1084 {
1085 MdBlock = CONTAINING_RECORD(NextMd,
1086 MEMORY_ALLOCATION_DESCRIPTOR,
1087 ListEntry);
1088 i++;
1089 NextMd = MdBlock->ListEntry.Flink;
1090 }
1091
1092 /* Allocate a structure to hold the physical runs */
1093 Buffer = ExAllocatePoolWithTag(NonPagedPool,
1094 i * sizeof(PHYSICAL_MEMORY_RUN),
1095 'lMmM');
1096 ASSERT(Buffer != NULL);
1097 Entry = Buffer;
1098
1099 /* Loop the descriptors again */
1100 NextMd = LoaderBlock->MemoryDescriptorListHead.Flink;
1101 while (NextMd != &LoaderBlock->MemoryDescriptorListHead)
1102 {
1103 /* Check what kind this was */
1104 MdBlock = CONTAINING_RECORD(NextMd,
1105 MEMORY_ALLOCATION_DESCRIPTOR,
1106 ListEntry);
1107 switch (MdBlock->MemoryType)
1108 {
1109 /* Registry, NLS, and heap data */
1110 case LoaderRegistryData:
1111 case LoaderOsloaderHeap:
1112 case LoaderNlsData:
1113 /* Are all a candidate for deletion */
1114 Entry->BasePage = MdBlock->BasePage;
1115 Entry->PageCount = MdBlock->PageCount;
1116 Entry++;
1117
1118 /* We keep the rest */
1119 default:
1120 break;
1121 }
1122
1123 /* Move to the next descriptor */
1124 NextMd = MdBlock->ListEntry.Flink;
1125 }
1126
1127 /* Acquire the PFN lock */
1128 OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
1129
1130 /* Loop the runs */
1131 LoaderPages = 0;
1132 while (--Entry >= Buffer)
1133 {
1134 /* See how many pages are in this run */
1135 i = Entry->PageCount;
1136 BasePage = Entry->BasePage;
1137
1138 /* Loop each page */
1139 Pfn1 = MiGetPfnEntry(BasePage);
1140 while (i--)
1141 {
1142 /* Check if it has references or is in any kind of list */
1143 if (!(Pfn1->u3.e2.ReferenceCount) && (!Pfn1->u1.Flink))
1144 {
1145 /* Set the new PTE address and put this page into the free list */
1146 Pfn1->PteAddress = (PMMPTE)(BasePage << PAGE_SHIFT);
1147 MiInsertPageInFreeList(BasePage);
1148 LoaderPages++;
1149 }
1150 else if (BasePage)
1151 {
1152 /* It has a reference, so simply drop it */
1153 ASSERT(MI_IS_PHYSICAL_ADDRESS(MiPteToAddress(Pfn1->PteAddress)) == FALSE);
1154
1155 /* Drop a dereference on this page, which should delete it */
1156 Pfn1->PteAddress->u.Long = 0;
1157 MI_SET_PFN_DELETED(Pfn1);
1158 MiDecrementShareCount(Pfn1, BasePage);
1159 LoaderPages++;
1160 }
1161
1162 /* Move to the next page */
1163 Pfn1++;
1164 BasePage++;
1165 }
1166 }
1167
1168 /* Release the PFN lock and flush the TLB */
1169 DPRINT1("Loader pages freed: %lx\n", LoaderPages);
1170 KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
1171 KeFlushCurrentTb();
1172
1173 /* Free our run structure */
1174 ExFreePool(Buffer);
1175 }
1176
1177 VOID
1178 NTAPI
1179 INIT_FUNCTION
1180 MiAdjustWorkingSetManagerParameters(IN BOOLEAN Client)
1181 {
1182 /* This function needs to do more work, for now, we tune page minimums */
1183
1184 /* Check for a system with around 64MB RAM or more */
1185 if (MmNumberOfPhysicalPages >= (63 * _1MB) / PAGE_SIZE)
1186 {
1187 /* Double the minimum amount of pages we consider for a "plenty free" scenario */
1188 MmPlentyFreePages *= 2;
1189 }
1190 }
1191
1192 VOID
1193 NTAPI
1194 INIT_FUNCTION
1195 MiNotifyMemoryEvents(VOID)
1196 {
1197 /* Are we in a low-memory situation? */
1198 if (MmAvailablePages < MmLowMemoryThreshold)
1199 {
1200 /* Clear high, set low */
1201 if (KeReadStateEvent(MiHighMemoryEvent)) KeClearEvent(MiHighMemoryEvent);
1202 if (!KeReadStateEvent(MiLowMemoryEvent)) KeSetEvent(MiLowMemoryEvent, 0, FALSE);
1203 }
1204 else if (MmAvailablePages < MmHighMemoryThreshold)
1205 {
1206 /* We are in between, clear both */
1207 if (KeReadStateEvent(MiHighMemoryEvent)) KeClearEvent(MiHighMemoryEvent);
1208 if (KeReadStateEvent(MiLowMemoryEvent)) KeClearEvent(MiLowMemoryEvent);
1209 }
1210 else
1211 {
1212 /* Clear low, set high */
1213 if (KeReadStateEvent(MiLowMemoryEvent)) KeClearEvent(MiLowMemoryEvent);
1214 if (!KeReadStateEvent(MiHighMemoryEvent)) KeSetEvent(MiHighMemoryEvent, 0, FALSE);
1215 }
1216 }
1217
/*
 * Creates one of the permanent \KernelObjects notification events with a
 * DACL that grants query rights to everyone and full access to the
 * administrators group and the local system account.
 *
 * Name  - fully qualified object-manager path of the event to create.
 * Event - on success, receives a referenced pointer to the event object.
 *
 * Returns an NTSTATUS code; on failure no event pointer is returned.
 */
NTSTATUS
NTAPI
INIT_FUNCTION
MiCreateMemoryEvent(IN PUNICODE_STRING Name,
                    OUT PKEVENT *Event)
{
    PACL Dacl;
    HANDLE EventHandle;
    ULONG DaclLength;
    NTSTATUS Status;
    OBJECT_ATTRIBUTES ObjectAttributes;
    SECURITY_DESCRIPTOR SecurityDescriptor;

    /* Create the SD */
    Status = RtlCreateSecurityDescriptor(&SecurityDescriptor,
                                         SECURITY_DESCRIPTOR_REVISION);
    if (!NT_SUCCESS(Status)) return Status;

    /* One ACL with 3 ACEs, containing each one SID */
    DaclLength = sizeof(ACL) +
                 3 * sizeof(ACCESS_ALLOWED_ACE) +
                 RtlLengthSid(SeLocalSystemSid) +
                 RtlLengthSid(SeAliasAdminsSid) +
                 RtlLengthSid(SeWorldSid);

    /* Allocate space for the DACL */
    Dacl = ExAllocatePoolWithTag(PagedPool, DaclLength, 'lcaD');
    if (!Dacl) return STATUS_INSUFFICIENT_RESOURCES;

    /* Setup the ACL inside it */
    Status = RtlCreateAcl(Dacl, DaclLength, ACL_REVISION);
    if (!NT_SUCCESS(Status)) goto CleanUp;

    /* Add query rights for everyone */
    Status = RtlAddAccessAllowedAce(Dacl,
                                    ACL_REVISION,
                                    SYNCHRONIZE | EVENT_QUERY_STATE | READ_CONTROL,
                                    SeWorldSid);
    if (!NT_SUCCESS(Status)) goto CleanUp;

    /* Full rights for the admin */
    Status = RtlAddAccessAllowedAce(Dacl,
                                    ACL_REVISION,
                                    EVENT_ALL_ACCESS,
                                    SeAliasAdminsSid);
    if (!NT_SUCCESS(Status)) goto CleanUp;

    /* As well as full rights for the system */
    Status = RtlAddAccessAllowedAce(Dacl,
                                    ACL_REVISION,
                                    EVENT_ALL_ACCESS,
                                    SeLocalSystemSid);
    if (!NT_SUCCESS(Status)) goto CleanUp;

    /* Set this DACL inside the SD */
    Status = RtlSetDaclSecurityDescriptor(&SecurityDescriptor,
                                          TRUE,
                                          Dacl,
                                          FALSE);
    if (!NT_SUCCESS(Status)) goto CleanUp;

    /* Setup the event attributes, making sure it's a permanent one */
    InitializeObjectAttributes(&ObjectAttributes,
                               Name,
                               OBJ_KERNEL_HANDLE | OBJ_PERMANENT,
                               NULL,
                               &SecurityDescriptor);

    /* Create the event */
    Status = ZwCreateEvent(&EventHandle,
                           EVENT_ALL_ACCESS,
                           &ObjectAttributes,
                           NotificationEvent,
                           FALSE);
CleanUp:
    /* Free the DACL; the SD only points at it, so it is safe to free now */
    ExFreePoolWithTag(Dacl, 'lcaD');

    /* Check if this is the success path */
    if (NT_SUCCESS(Status))
    {
        /* Add a reference to the object, then close the handle we had.
         * The object survives the close because it was created with
         * OBJ_PERMANENT, and the caller keeps the referenced pointer. */
        Status = ObReferenceObjectByHandle(EventHandle,
                                           EVENT_MODIFY_STATE,
                                           ExEventObjectType,
                                           KernelMode,
                                           (PVOID*)Event,
                                           NULL);
        ZwClose (EventHandle);
    }

    /* Return status */
    return Status;
}
1312
1313 BOOLEAN
1314 NTAPI
1315 INIT_FUNCTION
1316 MiInitializeMemoryEvents(VOID)
1317 {
1318 UNICODE_STRING LowString = RTL_CONSTANT_STRING(L"\\KernelObjects\\LowMemoryCondition");
1319 UNICODE_STRING HighString = RTL_CONSTANT_STRING(L"\\KernelObjects\\HighMemoryCondition");
1320 UNICODE_STRING LowPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\LowPagedPoolCondition");
1321 UNICODE_STRING HighPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\HighPagedPoolCondition");
1322 UNICODE_STRING LowNonPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\LowNonPagedPoolCondition");
1323 UNICODE_STRING HighNonPagedPoolString = RTL_CONSTANT_STRING(L"\\KernelObjects\\HighNonPagedPoolCondition");
1324 NTSTATUS Status;
1325
1326 /* Check if we have a registry setting */
1327 if (MmLowMemoryThreshold)
1328 {
1329 /* Convert it to pages */
1330 MmLowMemoryThreshold *= (_1MB / PAGE_SIZE);
1331 }
1332 else
1333 {
1334 /* The low memory threshold is hit when we don't consider that we have "plenty" of free pages anymore */
1335 MmLowMemoryThreshold = MmPlentyFreePages;
1336
1337 /* More than one GB of memory? */
1338 if (MmNumberOfPhysicalPages > 0x40000)
1339 {
1340 /* Start at 32MB, and add another 16MB for each GB */
1341 MmLowMemoryThreshold = (32 * _1MB) / PAGE_SIZE;
1342 MmLowMemoryThreshold += ((MmNumberOfPhysicalPages - 0x40000) >> 7);
1343 }
1344 else if (MmNumberOfPhysicalPages > 0x8000)
1345 {
1346 /* For systems with > 128MB RAM, add another 4MB for each 128MB */
1347 MmLowMemoryThreshold += ((MmNumberOfPhysicalPages - 0x8000) >> 5);
1348 }
1349
1350 /* Don't let the minimum threshold go past 64MB */
1351 MmLowMemoryThreshold = min(MmLowMemoryThreshold, (64 * _1MB) / PAGE_SIZE);
1352 }
1353
1354 /* Check if we have a registry setting */
1355 if (MmHighMemoryThreshold)
1356 {
1357 /* Convert it into pages */
1358 MmHighMemoryThreshold *= (_1MB / PAGE_SIZE);
1359 }
1360 else
1361 {
1362 /* Otherwise, the default is three times the low memory threshold */
1363 MmHighMemoryThreshold = 3 * MmLowMemoryThreshold;
1364 ASSERT(MmHighMemoryThreshold > MmLowMemoryThreshold);
1365 }
1366
1367 /* Make sure high threshold is actually higher than the low */
1368 MmHighMemoryThreshold = max(MmHighMemoryThreshold, MmLowMemoryThreshold);
1369
1370 /* Create the memory events for all the thresholds */
1371 Status = MiCreateMemoryEvent(&LowString, &MiLowMemoryEvent);
1372 if (!NT_SUCCESS(Status)) return FALSE;
1373 Status = MiCreateMemoryEvent(&HighString, &MiHighMemoryEvent);
1374 if (!NT_SUCCESS(Status)) return FALSE;
1375 Status = MiCreateMemoryEvent(&LowPagedPoolString, &MiLowPagedPoolEvent);
1376 if (!NT_SUCCESS(Status)) return FALSE;
1377 Status = MiCreateMemoryEvent(&HighPagedPoolString, &MiHighPagedPoolEvent);
1378 if (!NT_SUCCESS(Status)) return FALSE;
1379 Status = MiCreateMemoryEvent(&LowNonPagedPoolString, &MiLowNonPagedPoolEvent);
1380 if (!NT_SUCCESS(Status)) return FALSE;
1381 Status = MiCreateMemoryEvent(&HighNonPagedPoolString, &MiHighNonPagedPoolEvent);
1382 if (!NT_SUCCESS(Status)) return FALSE;
1383
1384 /* Now setup the pool events */
1385 MiInitializePoolEvents();
1386
1387 /* Set the initial event state */
1388 MiNotifyMemoryEvents();
1389 return TRUE;
1390 }
1391
/*
 * Scans the HAL heap virtual range (MM_HAL_VA_START..MM_HAL_VA_END) for
 * valid small-page mappings whose physical frames have no PFN database
 * entry, and warns about them (such frames lie outside known RAM, so we
 * cannot track their cache attributes).
 */
VOID
NTAPI
INIT_FUNCTION
MiAddHalIoMappings(VOID)
{
    PVOID BaseAddress;
    PMMPDE PointerPde, LastPde;
    PMMPTE PointerPte;
    ULONG j;
    PFN_NUMBER PageFrameIndex;

    /* HAL Heap address -- should be on a PDE boundary */
    BaseAddress = (PVOID)MM_HAL_VA_START;
    ASSERT(MiAddressToPteOffset(BaseAddress) == 0);

    /* Check how many PDEs the heap has */
    PointerPde = MiAddressToPde(BaseAddress);
    LastPde = MiAddressToPde((PVOID)MM_HAL_VA_END);

    while (PointerPde <= LastPde)
    {
        /* Does the HAL own this mapping? Large pages are skipped entirely */
        if ((PointerPde->u.Hard.Valid == 1) &&
            (MI_IS_PAGE_LARGE(PointerPde) == FALSE))
        {
            /* Get the PTE for it and scan each page in this page table */
            PointerPte = MiAddressToPte(BaseAddress);
            for (j = 0 ; j < PTE_COUNT; j++)
            {
                /* Does the HAL own this page? */
                if (PointerPte->u.Hard.Valid == 1)
                {
                    /* Is the HAL using it for device or I/O mapped memory?
                     * (no PFN entry means the frame is not in known RAM) */
                    PageFrameIndex = PFN_FROM_PTE(PointerPte);
                    if (!MiGetPfnEntry(PageFrameIndex))
                    {
                        /* FIXME: For PAT, we need to track I/O cache attributes for coherency */
                        DPRINT1("HAL I/O Mapping at %p is unsafe\n", BaseAddress);
                    }
                }

                /* Move to the next page; BaseAddress tracks PointerPte */
                BaseAddress = (PVOID)((ULONG_PTR)BaseAddress + PAGE_SIZE);
                PointerPte++;
            }
        }
        else
        {
            /* Move to the next address (skip the whole PDE's span) */
            BaseAddress = (PVOID)((ULONG_PTR)BaseAddress + PDE_MAPPED_VA);
        }

        /* Move to the next PDE */
        PointerPde++;
    }
}
1448
/*
 * Dumps the state of the ARM3 PFN database to the kernel debugger.
 *
 * StatusOnly - when TRUE, only the summary counters are printed;
 *              when FALSE, every PFN entry is listed individually.
 *
 * Runs at HIGH_LEVEL to keep the database stable while it is walked.
 * With MI_TRACE_PFNS enabled, per-usage page counts are also reported.
 */
VOID
NTAPI
MmDumpArmPfnDatabase(IN BOOLEAN StatusOnly)
{
    ULONG i;
    PMMPFN Pfn1;
    PCHAR Consumer = "Unknown";
    KIRQL OldIrql;
    ULONG ActivePages = 0, FreePages = 0, OtherPages = 0;
#if MI_TRACE_PFNS
    /* One bucket per tracked usage type, indexed by Pfn1->PfnUsage */
    ULONG UsageBucket[MI_USAGE_FREE_PAGE + 1] = {0};
    PCHAR MI_USAGE_TEXT[MI_USAGE_FREE_PAGE + 1] =
    {
        "Not set",
        "Paged Pool",
        "Nonpaged Pool",
        "Nonpaged Pool Ex",
        "Kernel Stack",
        "Kernel Stack Ex",
        "System PTE",
        "VAD",
        "PEB/TEB",
        "Section",
        "Page Table",
        "Page Directory",
        "Old Page Table",
        "Driver Page",
        "Contiguous Alloc",
        "MDL",
        "Demand Zero",
        "Zero Loop",
        "Cache",
        "PFN Database",
        "Boot Driver",
        "Initial Memory",
        "Free Page"
    };
#endif
    //
    // Loop the PFN database
    //
    KeRaiseIrql(HIGH_LEVEL, &OldIrql);
    for (i = 0; i <= MmHighestPhysicalPage; i++)
    {
        /* Skip physical pages that have no PFN entry (holes in RAM) */
        Pfn1 = MiGetPfnEntry(i);
        if (!Pfn1) continue;
#if MI_TRACE_PFNS
        ASSERT(Pfn1->PfnUsage <= MI_USAGE_FREE_PAGE);
#endif
        //
        // Get the page location
        //
        switch (Pfn1->u3.e1.PageLocation)
        {
            case ActiveAndValid:

                Consumer = "Active and Valid";
                ActivePages++;
                break;

            case ZeroedPageList:

                Consumer = "Zero Page List";
                FreePages++;
                break;//continue;

            case FreePageList:

                Consumer = "Free Page List";
                FreePages++;
                break;//continue;

            default:

                Consumer = "Other (ASSERT!)";
                OtherPages++;
                break;
        }

#if MI_TRACE_PFNS
        /* Add into bucket */
        UsageBucket[Pfn1->PfnUsage]++;
#endif

        //
        // Pretty-print the page
        //
        if (!StatusOnly)
        DbgPrint("0x%08p:\t%20s\t(%04d.%04d)\t[%16s - %16s])\n",
                 i << PAGE_SHIFT,
                 Consumer,
                 Pfn1->u3.e2.ReferenceCount,
                 Pfn1->u2.ShareCount == LIST_HEAD ? 0xFFFF : Pfn1->u2.ShareCount,
#if MI_TRACE_PFNS
                 MI_USAGE_TEXT[Pfn1->PfnUsage],
                 Pfn1->ProcessName);
#else
                 "Page tracking",
                 "is disabled");
#endif
    }

    /* Summary totals, converted from pages to KB */
    DbgPrint("Active:               %5d pages\t[%6d KB]\n", ActivePages,  (ActivePages    << PAGE_SHIFT) / 1024);
    DbgPrint("Free:                 %5d pages\t[%6d KB]\n", FreePages,    (FreePages      << PAGE_SHIFT) / 1024);
    DbgPrint("-----------------------------------------\n");
#if MI_TRACE_PFNS
    /* OtherPages is reused here as a scratch counter for each bucket */
    OtherPages = UsageBucket[MI_USAGE_BOOT_DRIVER];
    DbgPrint("Boot Images:          %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
    OtherPages = UsageBucket[MI_USAGE_DRIVER_PAGE];
    DbgPrint("System Drivers:       %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
    OtherPages = UsageBucket[MI_USAGE_PFN_DATABASE];
    DbgPrint("PFN Database:         %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
    OtherPages = UsageBucket[MI_USAGE_PAGE_TABLE] + UsageBucket[MI_USAGE_LEGACY_PAGE_DIRECTORY];
    DbgPrint("Page Tables:          %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
    OtherPages = UsageBucket[MI_USAGE_NONPAGED_POOL] + UsageBucket[MI_USAGE_NONPAGED_POOL_EXPANSION];
    DbgPrint("NonPaged Pool:        %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
    OtherPages = UsageBucket[MI_USAGE_PAGED_POOL];
    DbgPrint("Paged Pool:           %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
    OtherPages = UsageBucket[MI_USAGE_KERNEL_STACK] + UsageBucket[MI_USAGE_KERNEL_STACK_EXPANSION];
    DbgPrint("Kernel Stack:         %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
    OtherPages = UsageBucket[MI_USAGE_INIT_MEMORY];
    DbgPrint("Init Memory:          %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
    OtherPages = UsageBucket[MI_USAGE_SECTION];
    DbgPrint("Sections:             %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
    OtherPages = UsageBucket[MI_USAGE_CACHE];
    DbgPrint("Cache:                %5d pages\t[%6d KB]\n", OtherPages,   (OtherPages     << PAGE_SHIFT) / 1024);
#endif
    KeLowerIrql(OldIrql);
}
1578
1579 PPHYSICAL_MEMORY_DESCRIPTOR
1580 NTAPI
1581 INIT_FUNCTION
1582 MmInitializeMemoryLimits(IN PLOADER_PARAMETER_BLOCK LoaderBlock,
1583 IN PBOOLEAN IncludeType)
1584 {
1585 PLIST_ENTRY NextEntry;
1586 ULONG Run = 0, InitialRuns;
1587 PFN_NUMBER NextPage = -1, PageCount = 0;
1588 PPHYSICAL_MEMORY_DESCRIPTOR Buffer, NewBuffer;
1589 PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
1590
1591 //
1592 // Start with the maximum we might need
1593 //
1594 InitialRuns = MiNumberDescriptors;
1595
1596 //
1597 // Allocate the maximum we'll ever need
1598 //
1599 Buffer = ExAllocatePoolWithTag(NonPagedPool,
1600 sizeof(PHYSICAL_MEMORY_DESCRIPTOR) +
1601 sizeof(PHYSICAL_MEMORY_RUN) *
1602 (InitialRuns - 1),
1603 'lMmM');
1604 if (!Buffer) return NULL;
1605
1606 //
1607 // For now that's how many runs we have
1608 //
1609 Buffer->NumberOfRuns = InitialRuns;
1610
1611 //
1612 // Now loop through the descriptors again
1613 //
1614 NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
1615 while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
1616 {
1617 //
1618 // Grab each one, and check if it's one we should include
1619 //
1620 MdBlock = CONTAINING_RECORD(NextEntry,
1621 MEMORY_ALLOCATION_DESCRIPTOR,
1622 ListEntry);
1623 if ((MdBlock->MemoryType < LoaderMaximum) &&
1624 (IncludeType[MdBlock->MemoryType]))
1625 {
1626 //
1627 // Add this to our running total
1628 //
1629 PageCount += MdBlock->PageCount;
1630
1631 //
1632 // Check if the next page is described by the next descriptor
1633 //
1634 if (MdBlock->BasePage == NextPage)
1635 {
1636 //
1637 // Combine it into the same physical run
1638 //
1639 ASSERT(MdBlock->PageCount != 0);
1640 Buffer->Run[Run - 1].PageCount += MdBlock->PageCount;
1641 NextPage += MdBlock->PageCount;
1642 }
1643 else
1644 {
1645 //
1646 // Otherwise just duplicate the descriptor's contents
1647 //
1648 Buffer->Run[Run].BasePage = MdBlock->BasePage;
1649 Buffer->Run[Run].PageCount = MdBlock->PageCount;
1650 NextPage = Buffer->Run[Run].BasePage + Buffer->Run[Run].PageCount;
1651
1652 //
1653 // And in this case, increase the number of runs
1654 //
1655 Run++;
1656 }
1657 }
1658
1659 //
1660 // Try the next descriptor
1661 //
1662 NextEntry = MdBlock->ListEntry.Flink;
1663 }
1664
1665 //
1666 // We should not have been able to go past our initial estimate
1667 //
1668 ASSERT(Run <= Buffer->NumberOfRuns);
1669
1670 //
1671 // Our guess was probably exaggerated...
1672 //
1673 if (InitialRuns > Run)
1674 {
1675 //
1676 // Allocate a more accurately sized buffer
1677 //
1678 NewBuffer = ExAllocatePoolWithTag(NonPagedPool,
1679 sizeof(PHYSICAL_MEMORY_DESCRIPTOR) +
1680 sizeof(PHYSICAL_MEMORY_RUN) *
1681 (Run - 1),
1682 'lMmM');
1683 if (NewBuffer)
1684 {
1685 //
1686 // Copy the old buffer into the new, then free it
1687 //
1688 RtlCopyMemory(NewBuffer->Run,
1689 Buffer->Run,
1690 sizeof(PHYSICAL_MEMORY_RUN) * Run);
1691 ExFreePoolWithTag(Buffer, 'lMmM');
1692
1693 //
1694 // Now use the new buffer
1695 //
1696 Buffer = NewBuffer;
1697 }
1698 }
1699
1700 //
1701 // Write the final numbers, and return it
1702 //
1703 Buffer->NumberOfRuns = Run;
1704 Buffer->NumberOfPages = PageCount;
1705 return Buffer;
1706 }
1707
/*
 * Sizes and initializes the paged pool: computes its virtual size (twice
 * the nonpaged pool maximum, clamped to the available VA window), maps
 * its first PDE, sets up the allocation/end bitmaps, and initializes the
 * pool allocator, special pool, thresholds and the session space map.
 */
VOID
NTAPI
INIT_FUNCTION
MiBuildPagedPool(VOID)
{
    PMMPTE PointerPte;
    PMMPDE PointerPde;
    MMPDE TempPde = ValidKernelPde;
    PFN_NUMBER PageFrameIndex;
    KIRQL OldIrql;
    SIZE_T Size;
    ULONG BitMapSize;
#if (_MI_PAGING_LEVELS >= 3)
    MMPPE TempPpe = ValidKernelPpe;
    PMMPPE PointerPpe;
#elif (_MI_PAGING_LEVELS == 2)
    MMPTE TempPte = ValidKernelPte;

    //
    // Get the page frame number for the system page directory
    //
    PointerPte = MiAddressToPte(PDE_BASE);
    ASSERT(PD_COUNT == 1);
    MmSystemPageDirectory[0] = PFN_FROM_PTE(PointerPte);

    //
    // Allocate a system PTE which will hold a copy of the page directory
    //
    PointerPte = MiReserveSystemPtes(1, SystemPteSpace);
    ASSERT(PointerPte);
    MmSystemPagePtes = MiPteToAddress(PointerPte);

    //
    // Make this system PTE point to the system page directory.
    // It is now essentially double-mapped. This will be used later for lazy
    // evaluation of PDEs accross process switches, similarly to how the Global
    // page directory array in the old ReactOS Mm is used (but in a less hacky
    // way).
    //
    TempPte = ValidKernelPte;
    ASSERT(PD_COUNT == 1);
    TempPte.u.Hard.PageFrameNumber = MmSystemPageDirectory[0];
    MI_WRITE_VALID_PTE(PointerPte, TempPte);
#endif
    //
    // Let's get back to paged pool work: size it up.
    // By default, it should be twice as big as nonpaged pool.
    //
    MmSizeOfPagedPoolInBytes = 2 * MmMaximumNonPagedPoolInBytes;
    if (MmSizeOfPagedPoolInBytes > ((ULONG_PTR)MmNonPagedSystemStart -
                                    (ULONG_PTR)MmPagedPoolStart))
    {
        //
        // On the other hand, we have limited VA space, so make sure that the VA
        // for paged pool doesn't overflow into nonpaged pool VA. Otherwise, set
        // whatever maximum is possible.
        //
        MmSizeOfPagedPoolInBytes = (ULONG_PTR)MmNonPagedSystemStart -
                                   (ULONG_PTR)MmPagedPoolStart;
    }

    //
    // Get the size in pages and make sure paged pool is at least 32MB.
    //
    Size = MmSizeOfPagedPoolInBytes;
    if (Size < MI_MIN_INIT_PAGED_POOLSIZE) Size = MI_MIN_INIT_PAGED_POOLSIZE;
    Size = BYTES_TO_PAGES(Size);

    //
    // Now check how many PTEs will be required for these many pages.
    // (Size becomes the number of page tables needed, rounded up.)
    //
    Size = (Size + (1024 - 1)) / 1024;

    //
    // Recompute the page-aligned size of the paged pool, in bytes and pages.
    //
    MmSizeOfPagedPoolInBytes = Size * PAGE_SIZE * 1024;
    MmSizeOfPagedPoolInPages = MmSizeOfPagedPoolInBytes >> PAGE_SHIFT;

    //
    // Let's be really sure this doesn't overflow into nonpaged system VA
    //
    ASSERT((MmSizeOfPagedPoolInBytes + (ULONG_PTR)MmPagedPoolStart) <=
           (ULONG_PTR)MmNonPagedSystemStart);

    //
    // This is where paged pool ends
    //
    MmPagedPoolEnd = (PVOID)(((ULONG_PTR)MmPagedPoolStart +
                              MmSizeOfPagedPoolInBytes) - 1);

    //
    // Lock the PFN database
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

#if (_MI_PAGING_LEVELS >= 3)
    /* On these systems, there's no double-mapping, so instead, the PPEs
     * are setup to span the entire paged pool area, so there's no need for the
     * system PD */
    for (PointerPpe = MiAddressToPpe(MmPagedPoolStart);
         PointerPpe <= MiAddressToPpe(MmPagedPoolEnd);
         PointerPpe++)
    {
        /* Check if the PPE is already valid */
        if (!PointerPpe->u.Hard.Valid)
        {
            /* It is not, so map a fresh zeroed page */
            TempPpe.u.Hard.PageFrameNumber = MiRemoveZeroPage(0);
            MI_WRITE_VALID_PPE(PointerPpe, TempPpe);
        }
    }
#endif

    //
    // So now get the PDE for paged pool and zero it out
    //
    PointerPde = MiAddressToPde(MmPagedPoolStart);
    RtlZeroMemory(PointerPde,
                  (1 + MiAddressToPde(MmPagedPoolEnd) - PointerPde) * sizeof(MMPDE));

    //
    // Next, get the first and last PTE
    //
    PointerPte = MiAddressToPte(MmPagedPoolStart);
    MmPagedPoolInfo.FirstPteForPagedPool = PointerPte;
    MmPagedPoolInfo.LastPteForPagedPool = MiAddressToPte(MmPagedPoolEnd);

    /* Allocate a page and map the first paged pool PDE */
    MI_SET_USAGE(MI_USAGE_PAGED_POOL);
    MI_SET_PROCESS2("Kernel");
    PageFrameIndex = MiRemoveZeroPage(0);
    TempPde.u.Hard.PageFrameNumber = PageFrameIndex;
    MI_WRITE_VALID_PDE(PointerPde, TempPde);
#if (_MI_PAGING_LEVELS >= 3)
    /* Use the PPE of MmPagedPoolStart that was setup above */
    // Bla = PFN_FROM_PTE(PpeAddress(MmPagedPool...));

    /* Initialize the PFN entry for it */
    MiInitializePfnForOtherProcess(PageFrameIndex,
                                   (PMMPTE)PointerPde,
                                   PFN_FROM_PTE(MiAddressToPpe(MmPagedPoolStart)));
#else
    /* Do it this way */
    // Bla = MmSystemPageDirectory[(PointerPde - (PMMPTE)PDE_BASE) / PDE_COUNT]

    /* Initialize the PFN entry for it */
    MiInitializePfnForOtherProcess(PageFrameIndex,
                                   (PMMPTE)PointerPde,
                                   MmSystemPageDirectory[(PointerPde - (PMMPDE)PDE_BASE) / PDE_COUNT]);
#endif

    //
    // Release the PFN database lock
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    //
    // We only have one PDE mapped for now... at fault time, additional PDEs
    // will be allocated to handle paged pool growth. This is where they'll have
    // to start.
    //
    MmPagedPoolInfo.NextPdeForPagedPoolExpansion = PointerPde + 1;

    //
    // We keep track of each page via a bit, so check how big the bitmap will
    // have to be (make sure to align our page count such that it fits nicely
    // into a 4-byte aligned bitmap.
    //
    // We'll also allocate the bitmap header itself part of the same buffer.
    //
    Size = Size * 1024;
    ASSERT(Size == MmSizeOfPagedPoolInPages);
    BitMapSize = (ULONG)Size;
    Size = sizeof(RTL_BITMAP) + (((Size + 31) / 32) * sizeof(ULONG));

    //
    // Allocate the allocation bitmap, which tells us which regions have not yet
    // been mapped into memory
    //
    MmPagedPoolInfo.PagedPoolAllocationMap = ExAllocatePoolWithTag(NonPagedPool,
                                                                   Size,
                                                                   '  mM');
    ASSERT(MmPagedPoolInfo.PagedPoolAllocationMap);

    //
    // Initialize it such that at first, only the first page's worth of PTEs is
    // marked as allocated (incidentially, the first PDE we allocated earlier).
    //
    RtlInitializeBitMap(MmPagedPoolInfo.PagedPoolAllocationMap,
                        (PULONG)(MmPagedPoolInfo.PagedPoolAllocationMap + 1),
                        BitMapSize);
    RtlSetAllBits(MmPagedPoolInfo.PagedPoolAllocationMap);
    RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap, 0, 1024);

    //
    // We have a second bitmap, which keeps track of where allocations end.
    // Given the allocation bitmap and a base address, we can therefore figure
    // out which page is the last page of that allocation, and thus how big the
    // entire allocation is.
    //
    MmPagedPoolInfo.EndOfPagedPoolBitmap = ExAllocatePoolWithTag(NonPagedPool,
                                                                 Size,
                                                                 '  mM');
    ASSERT(MmPagedPoolInfo.EndOfPagedPoolBitmap);
    RtlInitializeBitMap(MmPagedPoolInfo.EndOfPagedPoolBitmap,
                        (PULONG)(MmPagedPoolInfo.EndOfPagedPoolBitmap + 1),
                        BitMapSize);

    //
    // Since no allocations have been made yet, there are no bits set as the end
    //
    RtlClearAllBits(MmPagedPoolInfo.EndOfPagedPoolBitmap);

    //
    // Initialize paged pool.
    //
    InitializePool(PagedPool, 0);

    /* Initialize special pool */
    MiInitializeSpecialPool();

    /* Default low threshold of 30MB or one fifth of paged pool */
    MiLowPagedPoolThreshold = (30 * _1MB) >> PAGE_SHIFT;
    MiLowPagedPoolThreshold = min(MiLowPagedPoolThreshold, Size / 5);

    /* Default high threshold of 60MB or 25% */
    MiHighPagedPoolThreshold = (60 * _1MB) >> PAGE_SHIFT;
    MiHighPagedPoolThreshold = min(MiHighPagedPoolThreshold, (Size * 2) / 5);
    ASSERT(MiLowPagedPoolThreshold < MiHighPagedPoolThreshold);

    /* Setup the global session space */
    MiInitializeSystemSpaceMap(NULL);
}
1942
1943 VOID
1944 NTAPI
1945 INIT_FUNCTION
1946 MiDbgDumpMemoryDescriptors(VOID)
1947 {
1948 PLIST_ENTRY NextEntry;
1949 PMEMORY_ALLOCATION_DESCRIPTOR Md;
1950 PFN_NUMBER TotalPages = 0;
1951 PCHAR
1952 MemType[] =
1953 {
1954 "ExceptionBlock ",
1955 "SystemBlock ",
1956 "Free ",
1957 "Bad ",
1958 "LoadedProgram ",
1959 "FirmwareTemporary ",
1960 "FirmwarePermanent ",
1961 "OsloaderHeap ",
1962 "OsloaderStack ",
1963 "SystemCode ",
1964 "HalCode ",
1965 "BootDriver ",
1966 "ConsoleInDriver ",
1967 "ConsoleOutDriver ",
1968 "StartupDpcStack ",
1969 "StartupKernelStack",
1970 "StartupPanicStack ",
1971 "StartupPcrPage ",
1972 "StartupPdrPage ",
1973 "RegistryData ",
1974 "MemoryData ",
1975 "NlsData ",
1976 "SpecialMemory ",
1977 "BBTMemory ",
1978 "LoaderReserve ",
1979 "LoaderXIPRom "
1980 };
1981
1982 DPRINT1("Base\t\tLength\t\tType\n");
1983 for (NextEntry = KeLoaderBlock->MemoryDescriptorListHead.Flink;
1984 NextEntry != &KeLoaderBlock->MemoryDescriptorListHead;
1985 NextEntry = NextEntry->Flink)
1986 {
1987 Md = CONTAINING_RECORD(NextEntry, MEMORY_ALLOCATION_DESCRIPTOR, ListEntry);
1988 DPRINT1("%08lX\t%08lX\t%s\n", Md->BasePage, Md->PageCount, MemType[Md->MemoryType]);
1989 TotalPages += Md->PageCount;
1990 }
1991
1992 DPRINT1("Total: %08lX (%d MB)\n", (ULONG)TotalPages, (ULONG)(TotalPages * PAGE_SIZE) / 1024 / 1024);
1993 }
1994
/**
 * MmArmInitSystem - entry point for ARM3 memory manager initialization.
 *
 * Phase 0 does all the work seen here: scans the loader's memory
 * descriptors, fixes the user/kernel address-space split, tunes the
 * system PTE count and allocation fragment, sizes the PFN database,
 * builds the physical memory block and PFN bitmap, classifies the
 * system size (small/medium/large), publishes page counts and product
 * type to SharedUserData, and finally builds paged pool and the loaded
 * module list. Any other Phase value falls through and returns TRUE.
 *
 * Phase:       Initialization phase requested by the executive.
 * LoaderBlock: Loader parameter block describing boot memory layout.
 *
 * Returns TRUE on success; FALSE only if the resident available page
 * count ends up non-positive (system cache working set too big).
 */
BOOLEAN
NTAPI
INIT_FUNCTION
MmArmInitSystem(IN ULONG Phase,
                IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
    ULONG i;
    BOOLEAN IncludeType[LoaderMaximum];  /* per-loader-type "counts as usable RAM" flags */
    PVOID Bitmap;                        /* backing buffer for MiPfnBitMap */
    PPHYSICAL_MEMORY_RUN Run;
    PFN_NUMBER PageCount;

    /* Dump memory descriptors (debug aid, gated by MiDbgEnableMdDump) */
    if (MiDbgEnableMdDump) MiDbgDumpMemoryDescriptors();

    //
    // Instantiate memory that we don't consider RAM/usable
    // We use the same exclusions that Windows does, in order to try to be
    // compatible with WinLDR-style booting
    //
    for (i = 0; i < LoaderMaximum; i++) IncludeType[i] = TRUE;
    IncludeType[LoaderBad] = FALSE;
    IncludeType[LoaderFirmwarePermanent] = FALSE;
    IncludeType[LoaderSpecialMemory] = FALSE;
    IncludeType[LoaderBBTMemory] = FALSE;
    if (Phase == 0)
    {
        /* Count physical pages on the system */
        MiScanMemoryDescriptors(LoaderBlock);

        /* Initialize the phase 0 temporary event */
        KeInitializeEvent(&MiTempEvent, NotificationEvent, FALSE);

        /* Set all the memory notification events to the temporary event for
           now; real per-condition events presumably replace these later --
           TODO confirm where they are re-pointed */
        MiLowMemoryEvent = &MiTempEvent;
        MiHighMemoryEvent = &MiTempEvent;
        MiLowPagedPoolEvent = &MiTempEvent;
        MiHighPagedPoolEvent = &MiTempEvent;
        MiLowNonPagedPoolEvent = &MiTempEvent;
        MiHighNonPagedPoolEvent = &MiTempEvent;

        //
        // Define the basic user vs. kernel address space separation
        //
        MmSystemRangeStart = (PVOID)MI_DEFAULT_SYSTEM_RANGE_START;
        MmUserProbeAddress = (ULONG_PTR)MI_HIGHEST_USER_ADDRESS;
        MmHighestUserAddress = (PVOID)MI_HIGHEST_USER_ADDRESS;

        /* Highest PTE and PDE based on the addresses above */
        MiHighestUserPte = MiAddressToPte(MmHighestUserAddress);
        MiHighestUserPde = MiAddressToPde(MmHighestUserAddress);
#if (_MI_PAGING_LEVELS >= 3)
        /* Extra paging levels (PPE/PXE) only exist on PAE/x64-style MMUs */
        MiHighestUserPpe = MiAddressToPpe(MmHighestUserAddress);
#if (_MI_PAGING_LEVELS >= 4)
        MiHighestUserPxe = MiAddressToPxe(MmHighestUserAddress);
#endif
#endif
        //
        // Get the size of the boot loader's image allocations and then round
        // that region up to a PDE size, so that any PDEs we might create for
        // whatever follows are separate from the PDEs that boot loader might've
        // already created (and later, we can blow all that away if we want to).
        //
        MmBootImageSize = KeLoaderBlock->Extension->LoaderPagesSpanned;
        MmBootImageSize *= PAGE_SIZE;
        MmBootImageSize = (MmBootImageSize + PDE_MAPPED_VA - 1) & ~(PDE_MAPPED_VA - 1);
        ASSERT((MmBootImageSize % PDE_MAPPED_VA) == 0);

        /* Initialize session space address layout */
        MiInitializeSessionSpaceLayout();

        /* Set the based section highest address (8MB below the highest
           user-mode address) */
        MmHighSectionBase = (PVOID)((ULONG_PTR)MmHighestUserAddress - 0x800000);

        /* Loop all 8 standby lists (one per page priority) */
        for (i = 0; i < 8; i++)
        {
            /* Initialize them empty */
            MmStandbyPageListByPriority[i].Total = 0;
            MmStandbyPageListByPriority[i].ListName = StandbyPageList;
            MmStandbyPageListByPriority[i].Flink = MM_EMPTY_LIST;
            MmStandbyPageListByPriority[i].Blink = MM_EMPTY_LIST;
        }

        /* Initialize the user mode image list */
        InitializeListHead(&MmLoadedUserImageList);

        /* Initialize the paged pool mutex and the section commit mutex */
        KeInitializeGuardedMutex(&MmPagedPoolMutex);
        KeInitializeGuardedMutex(&MmSectionCommitMutex);
        KeInitializeGuardedMutex(&MmSectionBasedMutex);

        /* Initialize the Loader Lock */
        KeInitializeMutant(&MmSystemLoadLock, FALSE);

        /* Set the zero page event */
        KeInitializeEvent(&MmZeroingPageEvent, SynchronizationEvent, FALSE);
        MmZeroingPageThreadActive = FALSE;

        /* Initialize the dead stack S-LIST */
        InitializeSListHead(&MmDeadStackSListHead);

        //
        // Tune the number of system PTEs based on physical memory.
        // Check if this is a machine with less than 19MB of RAM
        //
        PageCount = MmNumberOfPhysicalPages;
        if (PageCount < MI_MIN_PAGES_FOR_SYSPTE_TUNING)
        {
            //
            // Use the very minimum of system PTEs
            //
            MmNumberOfSystemPtes = 7000;
        }
        else
        {
            //
            // Use the default, but check if we have more than 32MB of RAM
            //
            MmNumberOfSystemPtes = 11000;
            if (PageCount > MI_MIN_PAGES_FOR_SYSPTE_BOOST)
            {
                //
                // Double the amount of system PTEs
                //
                MmNumberOfSystemPtes <<= 1;
            }
        }

        DPRINT("System PTE count has been tuned to %d (%d bytes)\n",
               MmNumberOfSystemPtes, MmNumberOfSystemPtes * PAGE_SIZE);

        /* Initialize the working set lock */
        ExInitializePushLock(&MmSystemCacheWs.WorkingSetMutex);

        /* Set a provisional commit limit (2GB); recomputed from available
           pages further below once the PFN database is up */
        MmTotalCommitLimit = 2 * _1GB;
        MmTotalCommitLimitMaximum = MmTotalCommitLimit;

        /* Has the allocation fragment been setup (e.g. via registry)? */
        if (!MmAllocationFragment)
        {
            /* Use the default value */
            MmAllocationFragment = MI_ALLOCATION_FRAGMENT;
            if (PageCount < ((256 * _1MB) / PAGE_SIZE))
            {
                /* On memory systems with less than 256MB, divide by 4 */
                MmAllocationFragment = MI_ALLOCATION_FRAGMENT / 4;
            }
            else if (PageCount < (_1GB / PAGE_SIZE))
            {
                /* On systems with less than 1GB, divide by 2 */
                MmAllocationFragment = MI_ALLOCATION_FRAGMENT / 2;
            }
        }
        else
        {
            /* Convert the registry value from 1KB units to bytes, then round
               up to whole pages */
            MmAllocationFragment *= _1KB;
            MmAllocationFragment = ROUND_TO_PAGES(MmAllocationFragment);

            /* Don't let it past the maximum */
            MmAllocationFragment = min(MmAllocationFragment,
                                       MI_MAX_ALLOCATION_FRAGMENT);

            /* Don't let it too small either */
            MmAllocationFragment = max(MmAllocationFragment,
                                       MI_MIN_ALLOCATION_FRAGMENT);
        }

        /* Check for kernel stack size that's too big (MmLargeStackSize is in
           1KB units here when it comes from the registry) */
        if (MmLargeStackSize > (KERNEL_LARGE_STACK_SIZE / _1KB))
        {
            /* Sanitize to default value */
            MmLargeStackSize = KERNEL_LARGE_STACK_SIZE;
        }
        else
        {
            /* Take the registry setting, and convert it into bytes */
            MmLargeStackSize *= _1KB;

            /* Now align it to a page boundary */
            MmLargeStackSize = PAGE_ROUND_UP(MmLargeStackSize);

            /* Sanity checks */
            ASSERT(MmLargeStackSize <= KERNEL_LARGE_STACK_SIZE);
            ASSERT((MmLargeStackSize & (PAGE_SIZE - 1)) == 0);

            /* Make sure it's not too low */
            if (MmLargeStackSize < KERNEL_STACK_SIZE) MmLargeStackSize = KERNEL_STACK_SIZE;
        }

        /* Compute color information (L2 cache-separated paging lists) */
        MiComputeColorInformation();

        // Calculate the number of bytes for the PFN database (one MMPFN per
        // physical page), then add the color tables (free and zeroed lists
        // per color) and convert to pages
        MxPfnAllocation = (MmHighestPhysicalPage + 1) * sizeof(MMPFN);
        MxPfnAllocation += (MmSecondaryColors * sizeof(MMCOLOR_TABLES) * 2);
        MxPfnAllocation >>= PAGE_SHIFT;

        // We have to add one to the count here, because in the process of
        // shifting down to the page size, we actually ended up getting the
        // lower aligned size (so say, 0x5FFFF bytes is now 0x5F pages).
        // Later on, we'll shift this number back into bytes, which would cause
        // us to end up with only 0x5F000 bytes -- when we actually want to have
        // 0x60000 bytes.
        MxPfnAllocation++;

        /* Initialize the platform-specific parts */
        MiInitMachineDependent(LoaderBlock);

        //
        // Build the physical memory block from the loader descriptors,
        // honoring the IncludeType exclusions computed above
        //
        MmPhysicalMemoryBlock = MmInitializeMemoryLimits(LoaderBlock,
                                                         IncludeType);

        //
        // Allocate enough buffer for the PFN bitmap: one bit per physical
        // page frame, rounded up to whole 32-bit words
        //
        Bitmap = ExAllocatePoolWithTag(NonPagedPool,
                                       (((MmHighestPhysicalPage + 1) + 31) / 32) * 4,
                                       ' mM');
        if (!Bitmap)
        {
            //
            // This is critical -- bugcheck rather than continue without it
            //
            KeBugCheckEx(INSTALL_MORE_MEMORY,
                         MmNumberOfPhysicalPages,
                         MmLowestPhysicalPage,
                         MmHighestPhysicalPage,
                         0x101);
        }

        //
        // Initialize it and clear all the bits to begin with
        //
        RtlInitializeBitMap(&MiPfnBitMap,
                            Bitmap,
                            (ULONG)MmHighestPhysicalPage + 1);
        RtlClearAllBits(&MiPfnBitMap);

        //
        // Loop physical memory runs
        //
        for (i = 0; i < MmPhysicalMemoryBlock->NumberOfRuns; i++)
        {
            //
            // Get the run
            //
            Run = &MmPhysicalMemoryBlock->Run[i];
            /* NOTE(review): BasePage << PAGE_SHIFT can truncate for page
               frames above 4GB if PFN_NUMBER is 32-bit -- debug output only */
            DPRINT("PHYSICAL RAM [0x%08p to 0x%08p]\n",
                   Run->BasePage << PAGE_SHIFT,
                   (Run->BasePage + Run->PageCount) << PAGE_SHIFT);

            //
            // Make sure it has pages inside it
            //
            if (Run->PageCount)
            {
                //
                // Set the bits in the PFN bitmap to mark this run as RAM
                //
                RtlSetBits(&MiPfnBitMap, (ULONG)Run->BasePage, (ULONG)Run->PageCount);
            }
        }

        /* Look for large page cache entries that need caching */
        MiSyncCachedRanges();

        /* Loop for HAL Heap I/O device mappings that need coherency tracking */
        MiAddHalIoMappings();

        /* Set the initial resident page count (32 pages held back) */
        MmResidentAvailablePages = MmAvailablePages - 32;

        /* Initialize large page structures on PAE/x64, and MmProcessList on x86 */
        MiInitializeLargePageSupport();

        /* Check if the registry says any drivers should be loaded with large pages */
        MiInitializeDriverLargePageList();

        /* Relocate the boot drivers into system PTE space and fixup their PFNs */
        MiReloadBootLoadedDrivers(LoaderBlock);

        /* FIXME: Call out into Driver Verifier for initialization  */

        /* Classify the system size from the physical page count */
        if (MmNumberOfPhysicalPages <= ((13 * _1MB) / PAGE_SIZE))
        {
            /* Set small system */
            MmSystemSize = MmSmallSystem;
            MmMaximumDeadKernelStacks = 0;
        }
        else if (MmNumberOfPhysicalPages <= ((19 * _1MB) / PAGE_SIZE))
        {
            /* Set small system and add 100 pages for the cache */
            MmSystemSize = MmSmallSystem;
            MmSystemCacheWsMinimum += 100;
            MmMaximumDeadKernelStacks = 2;
        }
        else
        {
            /* Set medium system and add 400 pages for the cache */
            MmSystemSize = MmMediumSystem;
            MmSystemCacheWsMinimum += 400;
            MmMaximumDeadKernelStacks = 5;
        }

        /* Check for less than 24MB */
        if (MmNumberOfPhysicalPages < ((24 * _1MB) / PAGE_SIZE))
        {
            /* No more than 32 pages */
            MmSystemCacheWsMinimum = 32;
        }

        /* Check for more than 32MB */
        if (MmNumberOfPhysicalPages >= ((32 * _1MB) / PAGE_SIZE))
        {
            /* Check for product type being "Wi" for WinNT: the literal
               '\0i\0W' is the UTF-16LE characters 'W','i' packed into a
               ULONG, i.e. the first two wide chars of the product string */
            if (MmProductType == '\0i\0W')
            {
                /* Then this is a large system */
                MmSystemSize = MmLargeSystem;
            }
            else
            {
                /* For servers, we need 64MB to consider this as being large */
                if (MmNumberOfPhysicalPages >= ((64 * _1MB) / PAGE_SIZE))
                {
                    /* Set it as large */
                    MmSystemSize = MmLargeSystem;
                }
            }
        }

        /* Check for more than 33 MB */
        if (MmNumberOfPhysicalPages > ((33 * _1MB) / PAGE_SIZE))
        {
            /* Add another 500 pages to the cache */
            MmSystemCacheWsMinimum += 500;
        }

        /* Now setup the shared user data fields */
        ASSERT(SharedUserData->NumberOfPhysicalPages == 0);
        SharedUserData->NumberOfPhysicalPages = MmNumberOfPhysicalPages;
        /* Zero here presumably means "large pages not available to user
           mode yet" -- TODO confirm against where this is set elsewhere */
        SharedUserData->LargePageMinimum = 0;

        /* Check for workstation ("Wi" for WinNT, packed UTF-16 as above) */
        if (MmProductType == '\0i\0W')
        {
            /* Set Windows NT Workstation product type */
            SharedUserData->NtProductType = NtProductWinNt;
            MmProductType = 0;
        }
        else
        {
            /* Check for LanMan server ("La", packed UTF-16) */
            if (MmProductType == '\0a\0L')
            {
                /* This is a domain controller */
                SharedUserData->NtProductType = NtProductLanManNt;
            }
            else
            {
                /* Otherwise it must be a normal server */
                SharedUserData->NtProductType = NtProductServer;
            }

            /* Set the product type, and make the system more aggressive with low memory */
            MmProductType = 1;
            MmMinimumFreePages = 81;
        }

        /* Update working set tuning parameters (TRUE for workstation,
           since MmProductType was just normalized to 0/1 above) */
        MiAdjustWorkingSetManagerParameters(!MmProductType);

        /* Finetune the page count by removing working set and NP expansion */
        MmResidentAvailablePages -= MiExpansionPoolPagesInitialCharge;
        MmResidentAvailablePages -= MmSystemCacheWsMinimum;
        MmResidentAvailableAtInit = MmResidentAvailablePages;
        /* NOTE(review): the <= 0 test relies on MmResidentAvailablePages
           being a signed type -- verify its declaration */
        if (MmResidentAvailablePages <= 0)
        {
            /* This should not happen */
            DPRINT1("System cache working set too big\n");
            return FALSE;
        }

        /* Initialize the system cache (currently disabled) */
        //MiInitializeSystemCache(MmSystemCacheWsMinimum, MmAvailablePages);

        /* Update the commit limit from actual available pages, holding
           back 1024 pages when there is room */
        MmTotalCommitLimit = MmAvailablePages;
        if (MmTotalCommitLimit > 1024) MmTotalCommitLimit -= 1024;
        MmTotalCommitLimitMaximum = MmTotalCommitLimit;

        /* Size up paged pool and build the shadow system page directory */
        MiBuildPagedPool();

        /* Debugger physical memory support is now ready to be used */
        MmDebugPte = MiAddressToPte(MiDebugMapping);

        /* Initialize the loaded module list */
        MiInitializeLoadedModuleList(LoaderBlock);
    }

    //
    // Always return success for now
    //
    return TRUE;
}
2408
2409 /* EOF */