[NTOSKRNL]: Add more support for session space, including mapping and unmapping views...
[reactos.git] / reactos / ntoskrnl / mm / ARM3 / i386 / init.c
/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/i386/init.c
 * PURPOSE:         ARM Memory Manager Initialization for x86
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */

/* INCLUDES *******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#define MODULE_INVOLVED_IN_ARM3
#include "../../ARM3/miarm.h"

/* GLOBALS ********************************************************************/

/* Template PTE and PDE for a kernel page */
/* FIXME: These should be PTE_GLOBAL */
MMPTE ValidKernelPde = {{PTE_VALID|PTE_READWRITE|PTE_DIRTY|PTE_ACCESSED}};
MMPTE ValidKernelPte = {{PTE_VALID|PTE_READWRITE|PTE_DIRTY|PTE_ACCESSED}};

/* The same, but for local pages */
MMPTE ValidKernelPdeLocal = {{PTE_VALID|PTE_READWRITE|PTE_DIRTY|PTE_ACCESSED}};
MMPTE ValidKernelPteLocal = {{PTE_VALID|PTE_READWRITE|PTE_DIRTY|PTE_ACCESSED}};

/* Template PDE for a demand-zero page */
MMPDE DemandZeroPde = {{MM_READWRITE << MM_PTE_SOFTWARE_PROTECTION_BITS}};
MMPTE DemandZeroPte = {{MM_READWRITE << MM_PTE_SOFTWARE_PROTECTION_BITS}};

/* Template PTE for prototype page */
MMPTE PrototypePte = {{(MM_READWRITE << MM_PTE_SOFTWARE_PROTECTION_BITS) |
                       PTE_PROTOTYPE | (MI_PTE_LOOKUP_NEEDED << PAGE_SHIFT)}};

/* Template PTE for decommitted page */
MMPTE MmDecommittedPte = {{MM_DECOMMIT << MM_PTE_SOFTWARE_PROTECTION_BITS}};

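/*
 * Note: the templates above are not live PTEs by themselves; during the
 * initialization code below they are copied into a local MMPTE/MMPDE, given a
 * page frame number, and stamped into the page tables with MI_WRITE_VALID_PTE.
 * The "Local" variants are meant for mappings that must not be marked global
 * (per-process and per-session ranges); as the FIXME above notes, the kernel
 * templates should eventually gain PTE_GLOBAL once global-page handling is
 * enabled.
 */
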
/* PRIVATE FUNCTIONS **********************************************************/

VOID
NTAPI
INIT_FUNCTION
MiInitializeSessionSpaceLayout(VOID)
{
    //
    // Set the size of session view, pool, and image
    //
    MmSessionSize = MI_SESSION_SIZE;
    MmSessionViewSize = MI_SESSION_VIEW_SIZE;
    MmSessionPoolSize = MI_SESSION_POOL_SIZE;
    MmSessionImageSize = MI_SESSION_IMAGE_SIZE;

    //
    // Set the size of system view
    //
    MmSystemViewSize = MI_SYSTEM_VIEW_SIZE;

    //
    // This is where it all ends
    //
    MiSessionImageEnd = (PVOID)PTE_BASE;

    //
    // This is where we will load Win32k.sys and the video driver
    //
    MiSessionImageStart = (PVOID)((ULONG_PTR)MiSessionImageEnd -
                                  MmSessionImageSize);

    //
    // So the view starts right below the session working set (itself below
    // the image area)
    //
    MiSessionViewStart = (PVOID)((ULONG_PTR)MiSessionImageEnd -
                                 MmSessionImageSize -
                                 MI_SESSION_WORKING_SET_SIZE -
                                 MmSessionViewSize);

    //
    // Session pool follows
    //
    MiSessionPoolEnd = MiSessionViewStart;
    MiSessionPoolStart = (PVOID)((ULONG_PTR)MiSessionPoolEnd -
                                 MmSessionPoolSize);

    //
    // And it all begins here
    //
    MmSessionBase = MiSessionPoolStart;

    //
    // Sanity check that our math is correct
    //
    ASSERT((ULONG_PTR)MmSessionBase + MmSessionSize == PTE_BASE);

    //
    // Session space ends wherever image session space ends
    //
    MiSessionSpaceEnd = MiSessionImageEnd;

    //
    // System view space ends at session space, so now that we know where
    // this is, we can compute the base address of system view space itself.
    //
    MiSystemViewStart = (PVOID)((ULONG_PTR)MmSessionBase -
                                MmSystemViewSize);

    /* Compute the PTE addresses for all the addresses we carved out */
    MiSessionImagePteStart = MiAddressToPte(MiSessionImageStart);
    MiSessionImagePteEnd = MiAddressToPte(MiSessionImageEnd);
    MiSessionBasePte = MiAddressToPte(MmSessionBase);
    MiSessionLastPte = MiAddressToPte(MiSessionSpaceEnd);

    /* Initialize session space */
    MmSessionSpace = (PMM_SESSION_SPACE)((ULONG_PTR)MmSessionBase +
                                         MmSessionSize -
                                         MmSessionImageSize -
                                         MM_ALLOCATION_GRANULARITY);
}
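
/*
 * For reference, the arithmetic above carves the session region out of the
 * space immediately below the PTE self-map, laid out from bottom to top as
 * follows (sizes come from the MI_SESSION_* constants in miarm.h):
 *
 *   MiSystemViewStart    system view space        (MmSystemViewSize)
 *   MmSessionBase        session paged pool       (MmSessionPoolSize)
 *   MiSessionViewStart   session view space       (MmSessionViewSize)
 *                        session working set      (MI_SESSION_WORKING_SET_SIZE)
 *   MiSessionImageStart  session image area       (MmSessionImageSize)
 *   MiSessionImageEnd  = MiSessionSpaceEnd = PTE_BASE
 *
 * This is only a sketch of the layout implied by the code above; the exact
 * addresses depend on the MI_SESSION_* definitions for this build.
 */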

VOID
NTAPI
INIT_FUNCTION
MiComputeNonPagedPoolVa(IN ULONG FreePages)
{
    PFN_NUMBER PoolPages;

    /* Check if this is a machine with less than 256MB of RAM, and no override */
    if ((MmNumberOfPhysicalPages <= MI_MIN_PAGES_FOR_NONPAGED_POOL_TUNING) &&
        !(MmSizeOfNonPagedPoolInBytes))
    {
        /* Force the nonpaged pool to be 2MB so we can reduce RAM usage */
        MmSizeOfNonPagedPoolInBytes = 2 * _1MB;
    }

    /* Hyperspace ends here */
    MmHyperSpaceEnd = (PVOID)((ULONG_PTR)MmSystemCacheWorkingSetList - 1);

    /* Check if the user gave a ridiculously large nonpaged pool RAM size */
    if ((MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT) > (FreePages * 7 / 8))
    {
        /* More than 7/8ths of RAM was dedicated to nonpaged pool, ignore! */
        MmSizeOfNonPagedPoolInBytes = 0;
    }

    /* Check if no registry setting was set, or if the setting was too low */
    if (MmSizeOfNonPagedPoolInBytes < MmMinimumNonPagedPoolSize)
    {
        /* Start with the minimum (256 KB) and add 32 KB for each MB above 4 */
        MmSizeOfNonPagedPoolInBytes = MmMinimumNonPagedPoolSize;
        MmSizeOfNonPagedPoolInBytes += (FreePages - 1024) / 256 * MmMinAdditionNonPagedPoolPerMb;
    }

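    /*
     * Illustrative example (assuming the defaults noted above: a 256 KB
     * minimum plus 32 KB for every MB over 4): on a machine with roughly
     * 128 MB of free RAM, FreePages is about 0x8000, so this works out to
     *   256 KB + ((0x8000 - 1024) / 256) * 32 KB = 256 KB + 124 * 32 KB,
     * i.e. a little over 4 MB of initial nonpaged pool. The real constants
     * are defined elsewhere in ARM3 and can be overridden by the registry,
     * so treat these numbers as a sketch rather than exact values.
     */
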
    /* Check if the registry setting or our dynamic calculation was too high */
    if (MmSizeOfNonPagedPoolInBytes > MI_MAX_INIT_NONPAGED_POOL_SIZE)
    {
        /* Set it to the maximum */
        MmSizeOfNonPagedPoolInBytes = MI_MAX_INIT_NONPAGED_POOL_SIZE;
    }

    /* Check if a percentage cap was set through the registry */
    if (MmMaximumNonPagedPoolPercent) UNIMPLEMENTED;

    /* Page-align the nonpaged pool size */
    MmSizeOfNonPagedPoolInBytes &= ~(PAGE_SIZE - 1);

    /* Now, check if there was a registry size for the maximum size */
    if (!MmMaximumNonPagedPoolInBytes)
    {
        /* Start with the default (1MB) */
        MmMaximumNonPagedPoolInBytes = MmDefaultMaximumNonPagedPool;

        /* Add space for PFN database */
        MmMaximumNonPagedPoolInBytes += (ULONG)
            PAGE_ALIGN((MmHighestPhysicalPage + 1) * sizeof(MMPFN));

        /* Check if the machine has more than 512MB of free RAM */
        if (FreePages >= 0x1F000)
        {
            /* Add 200KB for each MB above 4 */
            MmMaximumNonPagedPoolInBytes += (FreePages - 1024) / 256 *
                                            (MmMaxAdditionNonPagedPoolPerMb / 2);
            if (MmMaximumNonPagedPoolInBytes < MI_MAX_NONPAGED_POOL_SIZE)
            {
                /* Make it at least 128MB since this machine has a lot of RAM */
                MmMaximumNonPagedPoolInBytes = MI_MAX_NONPAGED_POOL_SIZE;
            }
        }
        else
        {
            /* Add 400KB for each MB above 4 */
            MmMaximumNonPagedPoolInBytes += (FreePages - 1024) / 256 *
                                            MmMaxAdditionNonPagedPoolPerMb;
        }
    }
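
    /*
     * Illustrative example (assuming MmDefaultMaximumNonPagedPool is 1 MB and
     * MmMaxAdditionNonPagedPoolPerMb is 400 KB, as the comments above imply):
     * with about 512 MB of free RAM, FreePages is around 0x20000, the first
     * branch is taken, and the maximum works out to roughly
     *   1 MB + PFN database + ((0x20000 - 1024) / 256) * 200 KB ~= 100 MB,
     * which the "at least 128MB" clause then raises to
     * MI_MAX_NONPAGED_POOL_SIZE. These numbers are approximations that depend
     * on the actual constant definitions.
     */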

    /* Make sure there's at least 16 pages + the PFN available for expansion */
    PoolPages = MmSizeOfNonPagedPoolInBytes + (PAGE_SIZE * 16) +
                ((ULONG)PAGE_ALIGN(MmHighestPhysicalPage + 1) * sizeof(MMPFN));
    if (MmMaximumNonPagedPoolInBytes < PoolPages)
    {
        /* The maximum should be at least high enough to cover all the above */
        MmMaximumNonPagedPoolInBytes = PoolPages;
    }

    /* Systems with 2GB of kernel address space get double the size */
    PoolPages = MI_MAX_NONPAGED_POOL_SIZE * 2;

    /* On the other hand, make sure that PFN + nonpaged pool doesn't get too big */
    if (MmMaximumNonPagedPoolInBytes > PoolPages)
    {
        /* Trim it down to the maximum architectural limit (256MB) */
        MmMaximumNonPagedPoolInBytes = PoolPages;
    }

    /* Check if this is a system with > 128MB of nonpaged pool */
    if (MmMaximumNonPagedPoolInBytes > MI_MAX_NONPAGED_POOL_SIZE)
    {
        /* Check if the initial size is less than the extra 128MB boost */
        if (MmSizeOfNonPagedPoolInBytes < (MmMaximumNonPagedPoolInBytes -
                                           MI_MAX_NONPAGED_POOL_SIZE))
        {
            /* FIXME: Should check if the initial pool can be expanded */

            /* Assume no expansion possible, check if the maximum is too large */
            if (MmMaximumNonPagedPoolInBytes > (MmSizeOfNonPagedPoolInBytes +
                                                MI_MAX_NONPAGED_POOL_SIZE))
            {
                /* Set it to the initial value plus the boost */
                MmMaximumNonPagedPoolInBytes = MmSizeOfNonPagedPoolInBytes +
                                               MI_MAX_NONPAGED_POOL_SIZE;
            }
        }
    }
}

NTSTATUS
NTAPI
INIT_FUNCTION
MiInitMachineDependent(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
    PFN_NUMBER PageFrameIndex;
    PMMPTE StartPde, EndPde, PointerPte, LastPte;
    MMPTE TempPde, TempPte;
    PVOID NonPagedPoolExpansionVa;
    KIRQL OldIrql;
    PMMPFN Pfn1;
    ULONG Flags;

    /* Check for global bit */
#if 0
    if (KeFeatureBits & KF_GLOBAL_PAGE)
    {
        /* Set it on the template PTE and PDE */
        ValidKernelPte.u.Hard.Global = TRUE;
        ValidKernelPde.u.Hard.Global = TRUE;
    }
#endif
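
    /*
     * The block above is currently compiled out: once enabled, kernel PTEs
     * and PDEs will carry the global bit on processors that advertise
     * KF_GLOBAL_PAGE, so kernel translations survive CR3 reloads. Until then
     * the templates stay non-global, which is presumably why the FIXME next
     * to the templates at the top of this file is still open.
     */
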
    /* Now templates are ready */
    TempPte = ValidKernelPte;
    TempPde = ValidKernelPde;

    //
    // Set CR3 for the system process
    //
    PointerPte = MiAddressToPde(PDE_BASE);
    PageFrameIndex = PFN_FROM_PTE(PointerPte) << PAGE_SHIFT;
    PsGetCurrentProcess()->Pcb.DirectoryTableBase[0] = PageFrameIndex;

    //
    // Blow away user-mode
    //
    StartPde = MiAddressToPde(0);
    EndPde = MiAddressToPde(KSEG0_BASE);
    RtlZeroMemory(StartPde, (EndPde - StartPde) * sizeof(MMPTE));

    /* Compute non paged pool limits and size */
    MiComputeNonPagedPoolVa(MiNumberOfFreePages);

    //
    // Now calculate the nonpaged pool expansion VA region
    //
    MmNonPagedPoolStart = (PVOID)((ULONG_PTR)MmNonPagedPoolEnd -
                                  MmMaximumNonPagedPoolInBytes +
                                  MmSizeOfNonPagedPoolInBytes);
    MmNonPagedPoolStart = (PVOID)PAGE_ALIGN(MmNonPagedPoolStart);
    NonPagedPoolExpansionVa = MmNonPagedPoolStart;
    DPRINT("NP Pool has been tuned to: %d bytes and %d bytes\n",
           MmSizeOfNonPagedPoolInBytes, MmMaximumNonPagedPoolInBytes);

    //
    // Now calculate the nonpaged system VA region, which includes the
    // nonpaged pool expansion (above) and the system PTEs. Note that it is
    // then aligned to a PDE boundary (4MB).
    //
    MiNonPagedSystemSize = (MmNumberOfSystemPtes + 1) * PAGE_SIZE;
    MmNonPagedSystemStart = (PVOID)((ULONG_PTR)MmNonPagedPoolStart -
                                    MiNonPagedSystemSize);
    MmNonPagedSystemStart = (PVOID)((ULONG_PTR)MmNonPagedSystemStart &
                                    ~(PDE_MAPPED_VA - 1));

    //
    // Don't let it go below the minimum
    //
    if (MmNonPagedSystemStart < (PVOID)0xEB000000)
    {
        //
        // This is a hard-coded limit in the Windows NT address space
        //
        MmNonPagedSystemStart = (PVOID)0xEB000000;

        //
        // Reduce the amount of system PTEs to reach this point
        //
        MmNumberOfSystemPtes = ((ULONG_PTR)MmNonPagedPoolStart -
                                (ULONG_PTR)MmNonPagedSystemStart) >>
                               PAGE_SHIFT;
        MmNumberOfSystemPtes--;
        ASSERT(MmNumberOfSystemPtes > 1000);
    }
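
    /*
     * At this point the region just below MmNonPagedPoolEnd is arranged, from
     * low to high, as: MmNonPagedSystemStart -> system PTEs ->
     * NonPagedPoolExpansionVa (the nonpaged pool expansion area) ->
     * MmNonPagedPoolEnd. This is just a sketch of what the arithmetic above
     * produces, not an exhaustive map of the system address space.
     */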

    //
    // Check if we are in a situation where the size of the paged pool
    // is so large that it overflows into nonpaged pool
    //
    if (MmSizeOfPagedPoolInBytes >
        ((ULONG_PTR)MmNonPagedSystemStart - (ULONG_PTR)MmPagedPoolStart))
    {
        //
        // We need some recalculations here
        //
        DPRINT1("Paged pool is too big!\n");
    }

    //
    // Normally, the PFN database should start after the loader images.
    // This is already the case in ReactOS, but for now we want to co-exist
    // with the old memory manager, so we'll create a "Shadow PFN Database"
    // instead, and arbitrarily start it at 0xB0000000.
    //
    MmPfnDatabase = (PVOID)0xB0000000;
    ASSERT(((ULONG_PTR)MmPfnDatabase & (PDE_MAPPED_VA - 1)) == 0);

    //
    // Non paged pool comes after the PFN database
    //
    MmNonPagedPoolStart = (PVOID)((ULONG_PTR)MmPfnDatabase +
                                  (MxPfnAllocation << PAGE_SHIFT));
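
    /*
     * Note that MmNonPagedPoolStart now refers to the initial, physically
     * contiguous nonpaged pool placed right after the shadow PFN database,
     * while the expansion region computed earlier is still held in
     * NonPagedPoolExpansionVa and is recorded as MmNonPagedPoolExpansionStart
     * further down.
     */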

    //
    // Now we actually need to get this many physical pages. Nonpaged pool
    // is actually also physically contiguous (but not the expansion)
    //
    PageFrameIndex = MxGetNextPage(MxPfnAllocation +
                                   (MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT));
    ASSERT(PageFrameIndex != 0);
    DPRINT("PFN DB PA PFN begins at: %lx\n", PageFrameIndex);
    DPRINT("NP PA PFN begins at: %lx\n", PageFrameIndex + MxPfnAllocation);

    /* Convert nonpaged pool size from bytes to pages */
    MmMaximumNonPagedPoolInPages = MmMaximumNonPagedPoolInBytes >> PAGE_SHIFT;

    //
    // Now we need some pages to create the page tables for the NP system VA
    // which includes system PTEs and expansion NP
    //
    StartPde = MiAddressToPde(MmNonPagedSystemStart);
    EndPde = MiAddressToPde((PVOID)((ULONG_PTR)MmNonPagedPoolEnd - 1));
    while (StartPde <= EndPde)
    {
        //
        // Get a page
        //
        TempPde.u.Hard.PageFrameNumber = MxGetNextPage(1);
        MI_WRITE_VALID_PTE(StartPde, TempPde);

        //
        // Zero out the page table
        //
        PointerPte = MiPteToAddress(StartPde);
        RtlZeroMemory(PointerPte, PAGE_SIZE);

        //
        // Next
        //
        StartPde++;
    }
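
    /*
     * The MiPteToAddress(StartPde) step in the loop above relies on the x86
     * recursive page-directory mapping: since the page directory is mapped
     * onto itself at PDE_BASE, a PDE can be treated as a PTE, and converting
     * it back to an address yields the virtual address (inside the PTE_BASE
     * region) of the page table it maps, which can then simply be zeroed.
     */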

    //
    // Now we need pages for the page tables which will map initial NP
    //
    StartPde = MiAddressToPde(MmPfnDatabase);
    EndPde = MiAddressToPde((PVOID)((ULONG_PTR)MmNonPagedPoolStart +
                                    MmSizeOfNonPagedPoolInBytes - 1));
    while (StartPde <= EndPde)
    {
        //
        // Get a page
        //
        TempPde.u.Hard.PageFrameNumber = MxGetNextPage(1);
        MI_WRITE_VALID_PTE(StartPde, TempPde);

        //
        // Zero out the page table
        //
        PointerPte = MiPteToAddress(StartPde);
        RtlZeroMemory(PointerPte, PAGE_SIZE);

        //
        // Next
        //
        StartPde++;
    }

    //
    // Now remember where the expansion starts
    //
    MmNonPagedPoolExpansionStart = NonPagedPoolExpansionVa;

    //
    // Last step is to actually map the nonpaged pool
    //
    PointerPte = MiAddressToPte(MmNonPagedPoolStart);
    LastPte = MiAddressToPte((PVOID)((ULONG_PTR)MmNonPagedPoolStart +
                                     MmSizeOfNonPagedPoolInBytes - 1));
    while (PointerPte <= LastPte)
    {
        //
        // Use one of our contiguous pages
        //
        TempPte.u.Hard.PageFrameNumber = PageFrameIndex++;
        MI_WRITE_VALID_PTE(PointerPte++, TempPte);
    }

    //
    // Sanity check: make sure we have properly defined the system PTE space
    //
    ASSERT(MiAddressToPte(MmNonPagedSystemStart) <
           MiAddressToPte(MmNonPagedPoolExpansionStart));

    /* Now go ahead and initialize the nonpaged pool */
    MiInitializeNonPagedPool();
    MiInitializeNonPagedPoolThresholds();

    /* Map the PFN database pages */
    MiMapPfnDatabase(LoaderBlock);

    /* Initialize the color tables */
    MiInitializeColorTables();

    /* Build the PFN Database */
    MiInitializePfnDatabase(LoaderBlock);
    MmInitializeBalancer(MmAvailablePages, 0);

    //
    // Reset the descriptor back so we can create the correct memory blocks
    //
    *MxFreeDescriptor = MxOldFreeDescriptor;

    //
    // Initialize the nonpaged pool
    //
    InitializePool(NonPagedPool, 0);

    //
    // We PDE-aligned the nonpaged system start VA, so haul some extra PTEs!
    //
    PointerPte = MiAddressToPte(MmNonPagedSystemStart);
    MmNumberOfSystemPtes = MiAddressToPte(MmNonPagedPoolExpansionStart) -
                           PointerPte;
    MmNumberOfSystemPtes--;
    DPRINT("Final System PTE count: %d (%d bytes)\n",
           MmNumberOfSystemPtes, MmNumberOfSystemPtes * PAGE_SIZE);

    //
    // Create the system PTE space
    //
    MiInitializeSystemPtes(PointerPte, MmNumberOfSystemPtes, SystemPteSpace);

    /* Get the PDE for hyperspace */
    StartPde = MiAddressToPde(HYPER_SPACE);

    /* Lock PFN database */
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    /* Allocate a page for hyperspace and create it */
    MI_SET_USAGE(MI_USAGE_PAGE_TABLE);
    MI_SET_PROCESS2("Kernel");
    PageFrameIndex = MiRemoveAnyPage(0);
    TempPde.u.Hard.PageFrameNumber = PageFrameIndex;
    TempPde.u.Hard.Global = FALSE; // Hyperspace is local!
    MI_WRITE_VALID_PTE(StartPde, TempPde);

    /* Flush the TLB */
    KeFlushCurrentTb();

    /* Release the lock */
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    //
    // Zero out the page table now
    //
    PointerPte = MiAddressToPte(HYPER_SPACE);
    RtlZeroMemory(PointerPte, PAGE_SIZE);

    //
    // Setup the mapping PTEs
    //
    MmFirstReservedMappingPte = MiAddressToPte(MI_MAPPING_RANGE_START);
    MmLastReservedMappingPte = MiAddressToPte(MI_MAPPING_RANGE_END);
    MmFirstReservedMappingPte->u.Hard.PageFrameNumber = MI_HYPERSPACE_PTES;
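
    /*
     * The hyperspace mapping PTEs themselves are still zero/invalid here;
     * storing MI_HYPERSPACE_PTES in the PageFrameNumber field of the first
     * reserved PTE is how the hyperspace mapping code keeps its count of free
     * mapping PTEs, not an actual page mapping.
     */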

    /* Set the working set address */
    MmWorkingSetList = (PVOID)MI_WORKING_SET_LIST;

    //
    // Reserve system PTEs for zeroing PTEs and clear them
    //
    MiFirstReservedZeroingPte = MiReserveSystemPtes(MI_ZERO_PTES,
                                                    SystemPteSpace);
    RtlZeroMemory(MiFirstReservedZeroingPte, MI_ZERO_PTES * sizeof(MMPTE));

    //
    // Set the counter to maximum to boot with
    //
    MiFirstReservedZeroingPte->u.Hard.PageFrameNumber = MI_ZERO_PTES - 1;
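
    /*
     * Same counter-in-a-PTE idea as the hyperspace mapping PTEs above: the
     * zeroing PTEs were just cleared, and the PageFrameNumber field of the
     * first one tracks how many of the MI_ZERO_PTES entries remain free,
     * starting at the maximum.
     */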

    /* Lock PFN database */
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    /* Reset the ref/share count so that MmInitializeProcessAddressSpace works */
    Pfn1 = MiGetPfnEntry(PFN_FROM_PTE(MiAddressToPde(PDE_BASE)));
    Pfn1->u2.ShareCount = 0;
    Pfn1->u3.e2.ReferenceCount = 0;

    /* Get a page for the working set list */
    MI_SET_USAGE(MI_USAGE_PAGE_TABLE);
    MI_SET_PROCESS2("Kernel WS List");
    PageFrameIndex = MiRemoveAnyPage(0);
    TempPte.u.Hard.PageFrameNumber = PageFrameIndex;

    /* Map the working set list */
    PointerPte = MiAddressToPte(MmWorkingSetList);
    MI_WRITE_VALID_PTE(PointerPte, TempPte);

    /* Zero it out, and save the frame index */
    RtlZeroMemory(MiPteToAddress(PointerPte), PAGE_SIZE);
    PsGetCurrentProcess()->WorkingSetPage = PageFrameIndex;

    /* Check for Pentium LOCK errata */
    if (KiI386PentiumLockErrataPresent)
    {
        /* Mark the 1st IDT page as Write-Through to prevent a lockup
           on an F00F instruction.
           See http://www.rcollins.org/Errata/Dec97/F00FBug.html */
        PointerPte = MiAddressToPte(KeGetPcr()->IDT);
        PointerPte->u.Hard.WriteThrough = 1;
    }

    /* Release the lock */
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    /* Initialize the bogus address space */
    Flags = 0;
    MmInitializeProcessAddressSpace(PsGetCurrentProcess(), NULL, NULL, &Flags, NULL);

    /* Make sure the color lists are valid */
    ASSERT(MmFreePagesByColor[0] < (PMMCOLOR_TABLES)PTE_BASE);
    StartPde = MiAddressToPde(MmFreePagesByColor[0]);
    ASSERT(StartPde->u.Hard.Valid == 1);
    PointerPte = MiAddressToPte(MmFreePagesByColor[0]);
    ASSERT(PointerPte->u.Hard.Valid == 1);
    LastPte = MiAddressToPte((ULONG_PTR)&MmFreePagesByColor[1][MmSecondaryColors] - 1);
    ASSERT(LastPte->u.Hard.Valid == 1);

    /* Loop the color list PTEs */
    while (PointerPte <= LastPte)
    {
        /* Get the PFN entry */
        Pfn1 = MiGetPfnEntry(PFN_FROM_PTE(PointerPte));
        if (!Pfn1->u3.e2.ReferenceCount)
        {
            /* Fill it out */
            Pfn1->u4.PteFrame = PFN_FROM_PTE(StartPde);
            Pfn1->PteAddress = PointerPte;
            Pfn1->u2.ShareCount++;
            Pfn1->u3.e2.ReferenceCount = 1;
            Pfn1->u3.e1.PageLocation = ActiveAndValid;
            Pfn1->u3.e1.CacheAttribute = MiCached;
        }

        /* Keep going */
        PointerPte++;
    }
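
    /*
     * The loop above patches up PFN entries for the pages backing the
     * MmFreePagesByColor tables: the color tables were allocated before the
     * PFN database was fully built (MiInitializeColorTables runs first), so
     * any entry still showing a zero reference count is filled in here and
     * marked active, cached, and owned by the page table that maps it.
     */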

    /* All done */
    return STATUS_SUCCESS;
}

/* EOF */