/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/i386/init.c
 * PURPOSE:         ARM Memory Manager Initialization for x86
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */

/* INCLUDES *******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#define MODULE_INVOLVED_IN_ARM3
#include <mm/ARM3/miarm.h>

/* GLOBALS ********************************************************************/

/* Template PTE and PDE for a kernel page */
/* FIXME: These should be PTE_GLOBAL */
MMPTE ValidKernelPde = {{PTE_VALID|PTE_READWRITE|PTE_DIRTY|PTE_ACCESSED}};
MMPTE ValidKernelPte = {{PTE_VALID|PTE_READWRITE|PTE_DIRTY|PTE_ACCESSED}};

/* The same, but for local (non-global) pages */
MMPTE ValidKernelPdeLocal = {{PTE_VALID|PTE_READWRITE|PTE_DIRTY|PTE_ACCESSED}};
MMPTE ValidKernelPteLocal = {{PTE_VALID|PTE_READWRITE|PTE_DIRTY|PTE_ACCESSED}};

/* Template PDE and PTE for a demand-zero page */
MMPDE DemandZeroPde = {{MM_READWRITE << MM_PTE_SOFTWARE_PROTECTION_BITS}};
MMPTE DemandZeroPte = {{MM_READWRITE << MM_PTE_SOFTWARE_PROTECTION_BITS}};

/* Template PTE for a prototype page */
MMPTE PrototypePte = {{(MM_READWRITE << MM_PTE_SOFTWARE_PROTECTION_BITS) |
                       PTE_PROTOTYPE | (MI_PTE_LOOKUP_NEEDED << PAGE_SHIFT)}};

/* Template PTE for a decommitted page */
MMPTE MmDecommittedPte = {{MM_DECOMMIT << MM_PTE_SOFTWARE_PROTECTION_BITS}};

/* PRIVATE FUNCTIONS **********************************************************/

VOID
NTAPI
INIT_FUNCTION
MiInitializeSessionSpaceLayout(VOID)
{
    //
    // Set the size of session view, pool, and image
    //
    MmSessionSize = MI_SESSION_SIZE;
    MmSessionViewSize = MI_SESSION_VIEW_SIZE;
    MmSessionPoolSize = MI_SESSION_POOL_SIZE;
    MmSessionImageSize = MI_SESSION_IMAGE_SIZE;

    //
    // Set the size of system view
    //
    MmSystemViewSize = MI_SYSTEM_VIEW_SIZE;

    //
    // This is where it all ends
    //
    MiSessionImageEnd = (PVOID)PTE_BASE;

    //
    // This is where we will load Win32k.sys and the video driver
    //
    MiSessionImageStart = (PVOID)((ULONG_PTR)MiSessionImageEnd -
                                  MmSessionImageSize);

    //
    // So the view starts right below the session working set (itself below
    // the image area)
    //
    MiSessionViewStart = (PVOID)((ULONG_PTR)MiSessionImageEnd -
                                 MmSessionImageSize -
                                 MI_SESSION_WORKING_SET_SIZE -
                                 MmSessionViewSize);

    //
    // Session pool follows
    //
    MiSessionPoolEnd = MiSessionViewStart;
    MiSessionPoolStart = (PVOID)((ULONG_PTR)MiSessionPoolEnd -
                                 MmSessionPoolSize);

    //
    // And it all begins here
    //
    MmSessionBase = MiSessionPoolStart;

    //
    // Sanity check that our math is correct
    //
    ASSERT((ULONG_PTR)MmSessionBase + MmSessionSize == PTE_BASE);

    //
    // Session space ends wherever image session space ends
    //
    MiSessionSpaceEnd = MiSessionImageEnd;

    //
    // System view space ends at session space, so now that we know where
    // this is, we can compute the base address of system view space itself.
    //
    MiSystemViewStart = (PVOID)((ULONG_PTR)MmSessionBase -
                                MmSystemViewSize);
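
    //
    // For reference, the layout that the calculations above produce, from
    // highest to lowest address (a summary derived from this function, not
    // additional configuration):
    //
    //   PTE_BASE = MiSessionImageEnd = MiSessionSpaceEnd
    //   MiSessionImageStart      (MmSessionImageSize below the image end)
    //   session working set      (MI_SESSION_WORKING_SET_SIZE)
    //   MiSessionViewStart       (MmSessionViewSize; MiSessionPoolEnd is here)
    //   MiSessionPoolStart = MmSessionBase (MmSessionPoolSize)
    //   MiSystemViewStart        (MmSystemViewSize below MmSessionBase)
    //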

    /* Compute the PTE addresses for all the addresses we carved out */
    MiSessionImagePteStart = MiAddressToPte(MiSessionImageStart);
    MiSessionImagePteEnd = MiAddressToPte(MiSessionImageEnd);
    MiSessionBasePte = MiAddressToPte(MmSessionBase);
    MiSessionSpaceWs = (PVOID)((ULONG_PTR)MiSessionViewStart + MmSessionViewSize);
    MiSessionLastPte = MiAddressToPte(MiSessionSpaceEnd);

    /* Initialize session space */
    MmSessionSpace = (PMM_SESSION_SPACE)((ULONG_PTR)MmSessionBase +
                                         MmSessionSize -
                                         MmSessionImageSize -
                                         MM_ALLOCATION_GRANULARITY);
}

VOID
NTAPI
INIT_FUNCTION
MiComputeNonPagedPoolVa(IN ULONG FreePages)
{
    PFN_NUMBER PoolPages;

    /* Check if this is a machine with less than 256MB of RAM, and no override */
    if ((MmNumberOfPhysicalPages <= MI_MIN_PAGES_FOR_NONPAGED_POOL_TUNING) &&
        !(MmSizeOfNonPagedPoolInBytes))
    {
        /* Force the non paged pool to be 2MB so we can reduce RAM usage */
        MmSizeOfNonPagedPoolInBytes = 2 * _1MB;
    }

    /* Hyperspace ends here */
    MmHyperSpaceEnd = (PVOID)((ULONG_PTR)MmSystemCacheWorkingSetList - 1);

    /* Check if the user gave a ridiculously large nonpaged pool RAM size */
    if ((MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT) > (FreePages * 7 / 8))
    {
        /* More than 7/8ths of RAM was dedicated to nonpaged pool, ignore! */
        MmSizeOfNonPagedPoolInBytes = 0;
    }

    /* Check if no registry setting was set, or if the setting was too low */
    if (MmSizeOfNonPagedPoolInBytes < MmMinimumNonPagedPoolSize)
    {
        /* Start with the minimum (256 KB) and add 32 KB for each MB above 4 */
        MmSizeOfNonPagedPoolInBytes = MmMinimumNonPagedPoolSize;
        MmSizeOfNonPagedPoolInBytes += (FreePages - 1024) / 256 * MmMinAdditionNonPagedPoolPerMb;
    }
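
    /*
     * Hypothetical worked example for the formula above (using the values the
     * comment implies: a 256 KB minimum and 32 KB per MB of RAM above 4 MB):
     * on a machine with 128 MB free (FreePages == 32768), this gives
     * 256 KB + ((32768 - 1024) / 256) * 32 KB = 256 KB + 124 * 32 KB
     * = 4224 KB, roughly 4.1 MB.
     */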
155
156 /* Check if the registy setting or our dynamic calculation was too high */
157 if (MmSizeOfNonPagedPoolInBytes > MI_MAX_INIT_NONPAGED_POOL_SIZE)
158 {
159 /* Set it to the maximum */
160 MmSizeOfNonPagedPoolInBytes = MI_MAX_INIT_NONPAGED_POOL_SIZE;
161 }
162
163 /* Check if a percentage cap was set through the registry */
164 if (MmMaximumNonPagedPoolPercent) UNIMPLEMENTED;
165
166 /* Page-align the nonpaged pool size */
167 MmSizeOfNonPagedPoolInBytes &= ~(PAGE_SIZE - 1);
168
169 /* Now, check if there was a registry size for the maximum size */
170 if (!MmMaximumNonPagedPoolInBytes)
171 {
172 /* Start with the default (1MB) */
173 MmMaximumNonPagedPoolInBytes = MmDefaultMaximumNonPagedPool;
174
175 /* Add space for PFN database */
176 MmMaximumNonPagedPoolInBytes += (ULONG)
177 PAGE_ALIGN((MmHighestPhysicalPage + 1) * sizeof(MMPFN));
178
179 /* Check if the machine has more than 512MB of free RAM */
180 if (FreePages >= 0x1F000)
181 {
182 /* Add 200KB for each MB above 4 */
183 MmMaximumNonPagedPoolInBytes += (FreePages - 1024) / 256 *
184 (MmMaxAdditionNonPagedPoolPerMb / 2);
185 if (MmMaximumNonPagedPoolInBytes < MI_MAX_NONPAGED_POOL_SIZE)
186 {
187 /* Make it at least 128MB since this machine has a lot of RAM */
188 MmMaximumNonPagedPoolInBytes = MI_MAX_NONPAGED_POOL_SIZE;
189 }
190 }
191 else
192 {
193 /* Add 400KB for each MB above 4 */
194 MmMaximumNonPagedPoolInBytes += (FreePages - 1024) / 256 *
195 MmMaxAdditionNonPagedPoolPerMb;
196 }
197 }
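
    /*
     * Hypothetical worked example for the branch above (using the values the
     * comments imply: a 1 MB default maximum and 400 KB per MB of RAM above
     * 4 MB on machines with less than 512 MB free): with 128 MB free
     * (FreePages == 32768), the maximum becomes 1 MB + the PFN database size
     * + 124 * 400 KB, i.e. roughly 50 MB plus the PFN database.
     */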

    /* Make sure there are at least 16 pages + the PFN database available for expansion */
    PoolPages = MmSizeOfNonPagedPoolInBytes + (PAGE_SIZE * 16) +
                ((ULONG)PAGE_ALIGN(MmHighestPhysicalPage + 1) * sizeof(MMPFN));
    if (MmMaximumNonPagedPoolInBytes < PoolPages)
    {
        /* The maximum should be at least high enough to cover all the above */
        MmMaximumNonPagedPoolInBytes = PoolPages;
    }

    /* Systems with 2GB of kernel address space get double the size */
    PoolPages = MI_MAX_NONPAGED_POOL_SIZE * 2;

    /* On the other hand, make sure that PFN + nonpaged pool doesn't get too big */
    if (MmMaximumNonPagedPoolInBytes > PoolPages)
    {
        /* Trim it down to the maximum architectural limit (256MB) */
        MmMaximumNonPagedPoolInBytes = PoolPages;
    }

    /* Check if this is a system with > 128MB of non paged pool */
    if (MmMaximumNonPagedPoolInBytes > MI_MAX_NONPAGED_POOL_SIZE)
    {
        /* Check if the initial size is less than the extra 128MB boost */
        if (MmSizeOfNonPagedPoolInBytes < (MmMaximumNonPagedPoolInBytes -
                                           MI_MAX_NONPAGED_POOL_SIZE))
        {
            /* FIXME: Should check if the initial pool can be expanded */

            /* Assume no expansion is possible and check if the maximum is too large */
            if (MmMaximumNonPagedPoolInBytes > (MmSizeOfNonPagedPoolInBytes +
                                                MI_MAX_NONPAGED_POOL_SIZE))
            {
                /* Set it to the initial value plus the boost */
                MmMaximumNonPagedPoolInBytes = MmSizeOfNonPagedPoolInBytes +
                                               MI_MAX_NONPAGED_POOL_SIZE;
            }
        }
    }
}

NTSTATUS
NTAPI
INIT_FUNCTION
MiInitMachineDependent(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
    PFN_NUMBER PageFrameIndex;
    PMMPTE StartPde, EndPde, PointerPte, LastPte;
    MMPTE TempPde, TempPte;
    PVOID NonPagedPoolExpansionVa;
    SIZE_T NonPagedSystemSize;
    KIRQL OldIrql;
    PMMPFN Pfn1;
    ULONG Flags;

#if defined(_GLOBAL_PAGES_ARE_AWESOME_)

    /* Check for global bit */
    if (KeFeatureBits & KF_GLOBAL_PAGE)
    {
        /* Set it on the template PTE and PDE */
        ValidKernelPte.u.Hard.Global = TRUE;
        ValidKernelPde.u.Hard.Global = TRUE;
    }

#endif

    /* Now templates are ready */
    TempPte = ValidKernelPte;
    TempPde = ValidKernelPde;

    //
    // Set CR3 for the system process
    //
    PointerPte = MiAddressToPde(PDE_BASE);
    PageFrameIndex = PFN_FROM_PTE(PointerPte) << PAGE_SHIFT;
    PsGetCurrentProcess()->Pcb.DirectoryTableBase[0] = PageFrameIndex;
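
    //
    // A note on the lookup above (assuming the usual x86 recursive self-map,
    // where the page directory is mapped at PDE_BASE): MiAddressToPde(PDE_BASE)
    // returns the PDE that maps the page directory itself, so its page frame
    // number is the physical frame of the page directory. Shifting it by
    // PAGE_SHIFT turns that frame number into the physical address that goes
    // into DirectoryTableBase[0] (the CR3 value).
    //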

    //
    // Blow away user-mode
    //
    StartPde = MiAddressToPde(0);
    EndPde = MiAddressToPde(KSEG0_BASE);
    RtlZeroMemory(StartPde, (EndPde - StartPde) * sizeof(MMPTE));

    /* Compute non paged pool limits and size */
    MiComputeNonPagedPoolVa(MiNumberOfFreePages);

    //
    // Now calculate the nonpaged pool expansion VA region
    //
    MmNonPagedPoolStart = (PVOID)((ULONG_PTR)MmNonPagedPoolEnd -
                                  MmMaximumNonPagedPoolInBytes +
                                  MmSizeOfNonPagedPoolInBytes);
    MmNonPagedPoolStart = (PVOID)PAGE_ALIGN(MmNonPagedPoolStart);
    NonPagedPoolExpansionVa = MmNonPagedPoolStart;
    DPRINT("NP Pool has been tuned to: %lu bytes and %lu bytes\n",
           MmSizeOfNonPagedPoolInBytes, MmMaximumNonPagedPoolInBytes);

    //
    // Now calculate the nonpaged system VA region, which includes the
    // nonpaged pool expansion (above) and the system PTEs. Note that it is
    // then aligned to a PDE boundary (4MB).
    //
    NonPagedSystemSize = (MmNumberOfSystemPtes + 1) * PAGE_SIZE;
    MmNonPagedSystemStart = (PVOID)((ULONG_PTR)MmNonPagedPoolStart -
                                    NonPagedSystemSize);
    MmNonPagedSystemStart = (PVOID)((ULONG_PTR)MmNonPagedSystemStart &
                                    ~(PDE_MAPPED_VA - 1));
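
    //
    // For reference, the region being carved out here, from lowest to highest
    // address (a summary derived from the calculations above):
    //
    //   MmNonPagedSystemStart      system PTEs (PDE-aligned)
    //   NonPagedPoolExpansionVa    nonpaged pool expansion area
    //   ...                        (expansion grows up to the end)
    //   MmNonPagedPoolEnd          end of the nonpaged pool region
    //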

    //
    // Don't let it go below the minimum
    //
    if (MmNonPagedSystemStart < (PVOID)0xEB000000)
    {
        //
        // This is a hard-coded limit in the Windows NT address space
        //
        MmNonPagedSystemStart = (PVOID)0xEB000000;

        //
        // Reduce the amount of system PTEs to reach this point
        //
        MmNumberOfSystemPtes = ((ULONG_PTR)MmNonPagedPoolStart -
                                (ULONG_PTR)MmNonPagedSystemStart) >>
                                PAGE_SHIFT;
        MmNumberOfSystemPtes--;
        ASSERT(MmNumberOfSystemPtes > 1000);
    }

    //
    // Check if we are in a situation where the size of the paged pool
    // is so large that it overflows into nonpaged pool
    //
    if (MmSizeOfPagedPoolInBytes >
        ((ULONG_PTR)MmNonPagedSystemStart - (ULONG_PTR)MmPagedPoolStart))
    {
        //
        // We need some recalculations here
        //
        DPRINT1("Paged pool is too big!\n");
    }

    //
    // Normally, the PFN database should start after the loader images.
    // This is already the case in ReactOS, but for now we want to co-exist
    // with the old memory manager, so we'll create a "Shadow PFN Database"
    // instead, and arbitrarily start it at 0xB0000000.
    //
    MmPfnDatabase = (PVOID)0xB0000000;
    ASSERT(((ULONG_PTR)MmPfnDatabase & (PDE_MAPPED_VA - 1)) == 0);

    //
    // Non paged pool comes after the PFN database
    //
    MmNonPagedPoolStart = (PVOID)((ULONG_PTR)MmPfnDatabase +
                                  (MxPfnAllocation << PAGE_SHIFT));

    //
    // Now we actually need to get this many physical pages. Nonpaged pool
    // is actually also physically contiguous (but not the expansion)
    //
    PageFrameIndex = MxGetNextPage(MxPfnAllocation +
                                   (MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT));
    ASSERT(PageFrameIndex != 0);
    DPRINT("PFN DB PA PFN begins at: %lx\n", PageFrameIndex);
    DPRINT("NP PA PFN begins at: %lx\n", PageFrameIndex + MxPfnAllocation);

    /* Convert nonpaged pool size from bytes to pages */
    MmMaximumNonPagedPoolInPages = MmMaximumNonPagedPoolInBytes >> PAGE_SHIFT;

    //
    // Now we need some pages to create the page tables for the NP system VA
    // which includes system PTEs and expansion NP
    //
    StartPde = MiAddressToPde(MmNonPagedSystemStart);
    EndPde = MiAddressToPde((PVOID)((ULONG_PTR)MmNonPagedPoolEnd - 1));
    while (StartPde <= EndPde)
    {
        //
        // Get a page
        //
        TempPde.u.Hard.PageFrameNumber = MxGetNextPage(1);
        MI_WRITE_VALID_PTE(StartPde, TempPde);

        //
        // Zero out the page table
        //
        PointerPte = MiPteToAddress(StartPde);
        RtlZeroMemory(PointerPte, PAGE_SIZE);

        //
        // Next
        //
        StartPde++;
    }
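
    //
    // A note on MiPteToAddress(StartPde) in the loop above (assuming the
    // recursive self-map): because the page directory doubles as the page
    // table for the PTE region, treating a PDE pointer as a PTE pointer and
    // converting it back to an address yields the virtual address of the page
    // table that this PDE maps, which is exactly the page we just made valid
    // and now want to zero.
    //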

    //
    // Now we need pages for the page tables which will map initial NP
    //
    StartPde = MiAddressToPde(MmPfnDatabase);
    EndPde = MiAddressToPde((PVOID)((ULONG_PTR)MmNonPagedPoolStart +
                                    MmSizeOfNonPagedPoolInBytes - 1));
    while (StartPde <= EndPde)
    {
        //
        // Get a page
        //
        TempPde.u.Hard.PageFrameNumber = MxGetNextPage(1);
        MI_WRITE_VALID_PTE(StartPde, TempPde);

        //
        // Zero out the page table
        //
        PointerPte = MiPteToAddress(StartPde);
        RtlZeroMemory(PointerPte, PAGE_SIZE);

        //
        // Next
        //
        StartPde++;
    }

    //
    // Now remember where the expansion starts
    //
    MmNonPagedPoolExpansionStart = NonPagedPoolExpansionVa;

    //
    // Last step is to actually map the nonpaged pool
    //
    PointerPte = MiAddressToPte(MmNonPagedPoolStart);
    LastPte = MiAddressToPte((PVOID)((ULONG_PTR)MmNonPagedPoolStart +
                                     MmSizeOfNonPagedPoolInBytes - 1));
    while (PointerPte <= LastPte)
    {
        //
        // Use one of our contiguous pages
        //
        TempPte.u.Hard.PageFrameNumber = PageFrameIndex++;
        MI_WRITE_VALID_PTE(PointerPte++, TempPte);
    }

    //
    // Sanity check: make sure we have properly defined the system PTE space
    //
    ASSERT(MiAddressToPte(MmNonPagedSystemStart) <
           MiAddressToPte(MmNonPagedPoolExpansionStart));

    /* Now go ahead and initialize the nonpaged pool */
    MiInitializeNonPagedPool();
    MiInitializeNonPagedPoolThresholds();

    /* Map the PFN database pages */
    MiMapPfnDatabase(LoaderBlock);

    /* Initialize the color tables */
    MiInitializeColorTables();

    /* Build the PFN Database */
    MiInitializePfnDatabase(LoaderBlock);
    MmInitializeBalancer(MmAvailablePages, 0);

    //
    // Reset the descriptor back so we can create the correct memory blocks
    //
    *MxFreeDescriptor = MxOldFreeDescriptor;

    //
    // Initialize the nonpaged pool
    //
    InitializePool(NonPagedPool, 0);

    //
    // We PDE-aligned the nonpaged system start VA, so haul some extra PTEs!
    //
    PointerPte = MiAddressToPte(MmNonPagedSystemStart);
    MmNumberOfSystemPtes = MiAddressToPte(MmNonPagedPoolExpansionStart) -
                           PointerPte;
    MmNumberOfSystemPtes--;
    DPRINT("Final System PTE count: %lu (%lu bytes)\n",
           MmNumberOfSystemPtes, MmNumberOfSystemPtes * PAGE_SIZE);

    //
    // Create the system PTE space
    //
    MiInitializeSystemPtes(PointerPte, MmNumberOfSystemPtes, SystemPteSpace);

    /* Get the PDE for hyperspace */
    StartPde = MiAddressToPde(HYPER_SPACE);

    /* Lock PFN database */
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    /* Allocate a page for hyperspace and create it */
    MI_SET_USAGE(MI_USAGE_PAGE_TABLE);
    MI_SET_PROCESS2("Kernel");
    PageFrameIndex = MiRemoveAnyPage(0);
    TempPde = ValidKernelPdeLocal;
    TempPde.u.Hard.PageFrameNumber = PageFrameIndex;
    MI_WRITE_VALID_PTE(StartPde, TempPde);
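
    //
    // Note that this uses ValidKernelPdeLocal, the non-global template,
    // presumably because hyperspace is a per-process region (each process has
    // its own hyperspace page table), so its translation must not be marked
    // global and survive CR3 switches.
    //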

    /* Flush the TLB */
    KeFlushCurrentTb();

    /* Release the lock */
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    //
    // Zero out the page table now
    //
    PointerPte = MiAddressToPte(HYPER_SPACE);
    RtlZeroMemory(PointerPte, PAGE_SIZE);

    //
    // Setup the mapping PTEs
    //
    MmFirstReservedMappingPte = MiAddressToPte(MI_MAPPING_RANGE_START);
    MmLastReservedMappingPte = MiAddressToPte(MI_MAPPING_RANGE_END);
    MmFirstReservedMappingPte->u.Hard.PageFrameNumber = MI_HYPERSPACE_PTES;
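
    //
    // The write just above stores a count, not a frame number: the first
    // reserved mapping PTE is not used as a real mapping; its PageFrameNumber
    // field appears to serve as the count of hyperspace mapping PTEs that are
    // still free (the same trick is used for the zeroing PTEs below).
    //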

    /* Set the working set address */
    MmWorkingSetList = (PVOID)MI_WORKING_SET_LIST;

    //
    // Reserve system PTEs for zeroing PTEs and clear them
    //
    MiFirstReservedZeroingPte = MiReserveSystemPtes(MI_ZERO_PTES,
                                                    SystemPteSpace);
    RtlZeroMemory(MiFirstReservedZeroingPte, MI_ZERO_PTES * sizeof(MMPTE));

    //
    // Set the counter to maximum to boot with
    //
    MiFirstReservedZeroingPte->u.Hard.PageFrameNumber = MI_ZERO_PTES - 1;
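
    //
    // As with the mapping PTEs, the first reserved zeroing PTE is used as a
    // bookkeeping slot: its PageFrameNumber field holds how many of the
    // reserved zeroing PTEs are currently available, so starting it at
    // MI_ZERO_PTES - 1 marks all of the remaining ones as free.
    //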

    /* Lock PFN database */
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    /* Reset the ref/share count so that MmInitializeProcessAddressSpace works */
    Pfn1 = MiGetPfnEntry(PFN_FROM_PTE(MiAddressToPde(PDE_BASE)));
    Pfn1->u2.ShareCount = 0;
    Pfn1->u3.e2.ReferenceCount = 0;

    /* Get a page for the working set list */
    MI_SET_USAGE(MI_USAGE_PAGE_TABLE);
    MI_SET_PROCESS2("Kernel WS List");
    PageFrameIndex = MiRemoveAnyPage(0);
    TempPte = ValidKernelPteLocal;
    TempPte.u.Hard.PageFrameNumber = PageFrameIndex;

    /* Map the working set list */
    PointerPte = MiAddressToPte(MmWorkingSetList);
    MI_WRITE_VALID_PTE(PointerPte, TempPte);

    /* Zero it out, and save the frame index */
    RtlZeroMemory(MiPteToAddress(PointerPte), PAGE_SIZE);
    PsGetCurrentProcess()->WorkingSetPage = PageFrameIndex;

    /* Check for Pentium LOCK errata */
    if (KiI386PentiumLockErrataPresent)
    {
        /* Mark the 1st IDT page as Write-Through to prevent a lockup
           on an F00F instruction.
           See http://www.rcollins.org/Errata/Dec97/F00FBug.html */
        PointerPte = MiAddressToPte(KeGetPcr()->IDT);
        PointerPte->u.Hard.WriteThrough = 1;
    }

    /* Release the lock */
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    /* Initialize the bogus address space */
    Flags = 0;
    MmInitializeProcessAddressSpace(PsGetCurrentProcess(), NULL, NULL, &Flags, NULL);

    /* Make sure the color lists are valid */
    ASSERT(MmFreePagesByColor[0] < (PMMCOLOR_TABLES)PTE_BASE);
    StartPde = MiAddressToPde(MmFreePagesByColor[0]);
    ASSERT(StartPde->u.Hard.Valid == 1);
    PointerPte = MiAddressToPte(MmFreePagesByColor[0]);
    ASSERT(PointerPte->u.Hard.Valid == 1);
    LastPte = MiAddressToPte((ULONG_PTR)&MmFreePagesByColor[1][MmSecondaryColors] - 1);
    ASSERT(LastPte->u.Hard.Valid == 1);

    /* Loop the color list PTEs */
    while (PointerPte <= LastPte)
    {
        /* Get the PFN entry */
        Pfn1 = MiGetPfnEntry(PFN_FROM_PTE(PointerPte));
        if (!Pfn1->u3.e2.ReferenceCount)
        {
            /* Fill it out */
            Pfn1->u4.PteFrame = PFN_FROM_PTE(StartPde);
            Pfn1->PteAddress = PointerPte;
            Pfn1->u2.ShareCount++;
            Pfn1->u3.e2.ReferenceCount = 1;
            Pfn1->u3.e1.PageLocation = ActiveAndValid;
            Pfn1->u3.e1.CacheAttribute = MiCached;
        }

        /* Keep going */
        PointerPte++;
    }

    /* All done */
    return STATUS_SUCCESS;
}

/* EOF */