[CMAKE]
[reactos.git] / ntoskrnl / mm / ARM3 / i386 / init.c
1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: BSD - See COPYING.ARM in the top level directory
4 * FILE: ntoskrnl/mm/ARM3/i386/init.c
5 * PURPOSE: ARM Memory Manager Initialization for x86
6 * PROGRAMMERS: ReactOS Portable Systems Group
7 */
8
9 /* INCLUDES *******************************************************************/
10
11 #include <ntoskrnl.h>
12 #define NDEBUG
13 #include <debug.h>
14
#line 15 "ARM³::INIT:X86"
16 #define MODULE_INVOLVED_IN_ARM3
17 #include "../../ARM3/miarm.h"
18
19 /* GLOBALS ********************************************************************/
20
21 /* Template PTE and PDE for a kernel page */
22 MMPTE ValidKernelPde = {.u.Hard.Valid = 1, .u.Hard.Write = 1, .u.Hard.Dirty = 1, .u.Hard.Accessed = 1};
23 MMPTE ValidKernelPte = {.u.Hard.Valid = 1, .u.Hard.Write = 1, .u.Hard.Dirty = 1, .u.Hard.Accessed = 1};
24
25 /* Template PDE for a demand-zero page */
26 MMPDE DemandZeroPde = {.u.Long = (MM_READWRITE << MM_PTE_SOFTWARE_PROTECTION_BITS)};
27 MMPTE DemandZeroPte = {.u.Long = (MM_READWRITE << MM_PTE_SOFTWARE_PROTECTION_BITS)};
28
29 /* Template PTE for prototype page */
30 MMPTE PrototypePte = {.u.Long = (MM_READWRITE << MM_PTE_SOFTWARE_PROTECTION_BITS) | PTE_PROTOTYPE | (MI_PTE_LOOKUP_NEEDED << PAGE_SHIFT)};
31
32 /* PRIVATE FUNCTIONS **********************************************************/
33
34 VOID
35 NTAPI
36 INIT_FUNCTION
37 MiComputeNonPagedPoolVa(IN ULONG FreePages)
38 {
39 IN PFN_NUMBER PoolPages;
40
41 /* Check if this is a machine with less than 256MB of RAM, and no overide */
42 if ((MmNumberOfPhysicalPages <= MI_MIN_PAGES_FOR_NONPAGED_POOL_TUNING) &&
43 !(MmSizeOfNonPagedPoolInBytes))
44 {
45 /* Force the non paged pool to be 2MB so we can reduce RAM usage */
46 MmSizeOfNonPagedPoolInBytes = 2 * _1MB;
47 }
48
49 /* Hyperspace ends here */
50 MmHyperSpaceEnd = (PVOID)((ULONG_PTR)MmSystemCacheWorkingSetList - 1);
51
52 /* Check if the user gave a ridicuously large nonpaged pool RAM size */
53 if ((MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT) > (FreePages * 7 / 8))
54 {
55 /* More than 7/8ths of RAM was dedicated to nonpaged pool, ignore! */
56 MmSizeOfNonPagedPoolInBytes = 0;
57 }
58
59 /* Check if no registry setting was set, or if the setting was too low */
60 if (MmSizeOfNonPagedPoolInBytes < MmMinimumNonPagedPoolSize)
61 {
62 /* Start with the minimum (256 KB) and add 32 KB for each MB above 4 */
63 MmSizeOfNonPagedPoolInBytes = MmMinimumNonPagedPoolSize;
64 MmSizeOfNonPagedPoolInBytes += (FreePages - 1024) / 256 * MmMinAdditionNonPagedPoolPerMb;
65 }
66
67 /* Check if the registy setting or our dynamic calculation was too high */
68 if (MmSizeOfNonPagedPoolInBytes > MI_MAX_INIT_NONPAGED_POOL_SIZE)
69 {
70 /* Set it to the maximum */
71 MmSizeOfNonPagedPoolInBytes = MI_MAX_INIT_NONPAGED_POOL_SIZE;
72 }
73
74 /* Check if a percentage cap was set through the registry */
75 if (MmMaximumNonPagedPoolPercent) UNIMPLEMENTED;
76
77 /* Page-align the nonpaged pool size */
78 MmSizeOfNonPagedPoolInBytes &= ~(PAGE_SIZE - 1);
79
80 /* Now, check if there was a registry size for the maximum size */
81 if (!MmMaximumNonPagedPoolInBytes)
82 {
83 /* Start with the default (1MB) */
84 MmMaximumNonPagedPoolInBytes = MmDefaultMaximumNonPagedPool;
85
86 /* Add space for PFN database */
87 MmMaximumNonPagedPoolInBytes += (ULONG)
88 PAGE_ALIGN((MmHighestPhysicalPage + 1) * sizeof(MMPFN));
89
90 /* Check if the machine has more than 512MB of free RAM */
91 if (FreePages >= 0x1F000)
92 {
93 /* Add 200KB for each MB above 4 */
94 MmMaximumNonPagedPoolInBytes += (FreePages - 1024) / 256 *
95 (MmMaxAdditionNonPagedPoolPerMb / 2);
96 if (MmMaximumNonPagedPoolInBytes < MI_MAX_NONPAGED_POOL_SIZE)
97 {
98 /* Make it at least 128MB since this machine has a lot of RAM */
99 MmMaximumNonPagedPoolInBytes = MI_MAX_NONPAGED_POOL_SIZE;
100 }
101 }
102 else
103 {
104 /* Add 400KB for each MB above 4 */
105 MmMaximumNonPagedPoolInBytes += (FreePages - 1024) / 256 *
106 MmMaxAdditionNonPagedPoolPerMb;
107 }
108 }
109
110 /* Make sure there's at least 16 pages + the PFN available for expansion */
111 PoolPages = MmSizeOfNonPagedPoolInBytes + (PAGE_SIZE * 16) +
112 ((ULONG)PAGE_ALIGN(MmHighestPhysicalPage + 1) * sizeof(MMPFN));
113 if (MmMaximumNonPagedPoolInBytes < PoolPages)
114 {
115 /* The maximum should be at least high enough to cover all the above */
116 MmMaximumNonPagedPoolInBytes = PoolPages;
117 }
118
119 /* Systems with 2GB of kernel address space get double the size */
120 PoolPages = MI_MAX_NONPAGED_POOL_SIZE * 2;
121
122 /* On the other hand, make sure that PFN + nonpaged pool doesn't get too big */
123 if (MmMaximumNonPagedPoolInBytes > PoolPages)
124 {
125 /* Trim it down to the maximum architectural limit (256MB) */
126 MmMaximumNonPagedPoolInBytes = PoolPages;
127 }
128
129 /* Check if this is a system with > 128MB of non paged pool */
130 if (MmMaximumNonPagedPoolInBytes > MI_MAX_NONPAGED_POOL_SIZE)
131 {
132 /* Check if the initial size is less than the extra 128MB boost */
133 if (MmSizeOfNonPagedPoolInBytes < (MmMaximumNonPagedPoolInBytes -
134 MI_MAX_NONPAGED_POOL_SIZE))
135 {
136 /* FIXME: Should check if the initial pool can be expanded */
137
138 /* Assume no expansion possible, check ift he maximum is too large */
139 if (MmMaximumNonPagedPoolInBytes > (MmSizeOfNonPagedPoolInBytes +
140 MI_MAX_NONPAGED_POOL_SIZE))
141 {
142 /* Set it to the initial value plus the boost */
143 MmMaximumNonPagedPoolInBytes = MmSizeOfNonPagedPoolInBytes +
144 MI_MAX_NONPAGED_POOL_SIZE;
145 }
146 }
147 }
148 }
149
150 NTSTATUS
151 NTAPI
152 INIT_FUNCTION
153 MiInitMachineDependent(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
154 {
155 PLIST_ENTRY NextEntry;
156 PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
157 ULONG FreePages = 0;
158 PFN_NUMBER PageFrameIndex;
159 PMMPTE StartPde, EndPde, PointerPte, LastPte;
160 MMPTE TempPde, TempPte;
161 PVOID NonPagedPoolExpansionVa;
162 KIRQL OldIrql;
163 PMMPFN Pfn1;
164 ULONG Flags;
165
166 /* Check for kernel stack size that's too big */
167 if (MmLargeStackSize > (KERNEL_LARGE_STACK_SIZE / _1KB))
168 {
169 /* Sanitize to default value */
170 MmLargeStackSize = KERNEL_LARGE_STACK_SIZE;
171 }
172 else
173 {
174 /* Take the registry setting, and convert it into bytes */
175 MmLargeStackSize *= _1KB;
176
177 /* Now align it to a page boundary */
178 MmLargeStackSize = PAGE_ROUND_UP(MmLargeStackSize);
179
180 /* Sanity checks */
181 ASSERT(MmLargeStackSize <= KERNEL_LARGE_STACK_SIZE);
182 ASSERT((MmLargeStackSize & (PAGE_SIZE - 1)) == 0);
183
184 /* Make sure it's not too low */
185 if (MmLargeStackSize < KERNEL_STACK_SIZE) MmLargeStackSize = KERNEL_STACK_SIZE;
186 }
187
188 /* Check for global bit */
189 #if 0
190 if (KeFeatureBits & KF_GLOBAL_PAGE)
191 {
192 /* Set it on the template PTE and PDE */
193 ValidKernelPte.u.Hard.Global = TRUE;
194 ValidKernelPde.u.Hard.Global = TRUE;
195 }
196 #endif
197 /* Now templates are ready */
198 TempPte = ValidKernelPte;
199 TempPde = ValidKernelPde;
200
201 //
202 // Set CR3 for the system process
203 //
204 PointerPte = MiAddressToPde(PDE_BASE);
205 PageFrameIndex = PFN_FROM_PTE(PointerPte) << PAGE_SHIFT;
206 PsGetCurrentProcess()->Pcb.DirectoryTableBase[0] = PageFrameIndex;
207
208 //
209 // Blow away user-mode
210 //
211 StartPde = MiAddressToPde(0);
212 EndPde = MiAddressToPde(KSEG0_BASE);
213 RtlZeroMemory(StartPde, (EndPde - StartPde) * sizeof(MMPTE));
214
215 //
216 // Loop the memory descriptors
217 //
218 NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
219 while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
220 {
221 //
222 // Get the memory block
223 //
224 MdBlock = CONTAINING_RECORD(NextEntry,
225 MEMORY_ALLOCATION_DESCRIPTOR,
226 ListEntry);
227
228 //
229 // Skip invisible memory
230 //
231 if ((MdBlock->MemoryType != LoaderFirmwarePermanent) &&
232 (MdBlock->MemoryType != LoaderSpecialMemory) &&
233 (MdBlock->MemoryType != LoaderHALCachedMemory) &&
234 (MdBlock->MemoryType != LoaderBBTMemory))
235 {
236 //
237 // Check if BURNMEM was used
238 //
239 if (MdBlock->MemoryType != LoaderBad)
240 {
241 //
242 // Count this in the total of pages
243 //
244 MmNumberOfPhysicalPages += MdBlock->PageCount;
245 }
246
247 //
248 // Check if this is the new lowest page
249 //
250 if (MdBlock->BasePage < MmLowestPhysicalPage)
251 {
252 //
253 // Update the lowest page
254 //
255 MmLowestPhysicalPage = MdBlock->BasePage;
256 }
257
258 //
259 // Check if this is the new highest page
260 //
261 PageFrameIndex = MdBlock->BasePage + MdBlock->PageCount;
262 if (PageFrameIndex > MmHighestPhysicalPage)
263 {
264 //
265 // Update the highest page
266 //
267 MmHighestPhysicalPage = PageFrameIndex - 1;
268 }
269
270 //
271 // Check if this is free memory
272 //
273 if ((MdBlock->MemoryType == LoaderFree) ||
274 (MdBlock->MemoryType == LoaderLoadedProgram) ||
275 (MdBlock->MemoryType == LoaderFirmwareTemporary) ||
276 (MdBlock->MemoryType == LoaderOsloaderStack))
277 {
278 //
279 // Check if this is the largest memory descriptor
280 //
281 if (MdBlock->PageCount > FreePages)
282 {
283 //
284 // For now, it is
285 //
286 MxFreeDescriptor = MdBlock;
287 }
288
289 //
290 // More free pages
291 //
292 FreePages += MdBlock->PageCount;
293 }
294 }
295
296 //
297 // Keep going
298 //
299 NextEntry = MdBlock->ListEntry.Flink;
300 }
301
302 //
303 // Save original values of the free descriptor, since it'll be
304 // altered by early allocations
305 //
306 MxOldFreeDescriptor = *MxFreeDescriptor;
307
308 /* Compute non paged pool limits and size */
309 MiComputeNonPagedPoolVa(FreePages);
310
311 /* Compute color information (L2 cache-separated paging lists) */
312 MiComputeColorInformation();
313
314 //
315 // Calculate the number of bytes for the PFN database
316 // then add the color tables and convert to pages
317 //
318 MxPfnAllocation = (MmHighestPhysicalPage + 1) * sizeof(MMPFN);
319 MxPfnAllocation += (MmSecondaryColors * sizeof(MMCOLOR_TABLES) * 2);
320 MxPfnAllocation >>= PAGE_SHIFT;
321
322 //
323 // We have to add one to the count here, because in the process of
324 // shifting down to the page size, we actually ended up getting the
325 // lower aligned size (so say, 0x5FFFF bytes is now 0x5F pages).
326 // Later on, we'll shift this number back into bytes, which would cause
327 // us to end up with only 0x5F000 bytes -- when we actually want to have
328 // 0x60000 bytes.
329 //
330 MxPfnAllocation++;
331
332 //
333 // Now calculate the nonpaged pool expansion VA region
334 //
335 MmNonPagedPoolStart = (PVOID)((ULONG_PTR)MmNonPagedPoolEnd -
336 MmMaximumNonPagedPoolInBytes +
337 MmSizeOfNonPagedPoolInBytes);
338 MmNonPagedPoolStart = (PVOID)PAGE_ALIGN(MmNonPagedPoolStart);
339 NonPagedPoolExpansionVa = MmNonPagedPoolStart;
340 DPRINT("NP Pool has been tuned to: %d bytes and %d bytes\n",
341 MmSizeOfNonPagedPoolInBytes, MmMaximumNonPagedPoolInBytes);
342
343 //
344 // Now calculate the nonpaged system VA region, which includes the
345 // nonpaged pool expansion (above) and the system PTEs. Note that it is
346 // then aligned to a PDE boundary (4MB).
347 //
348 MmNonPagedSystemStart = (PVOID)((ULONG_PTR)MmNonPagedPoolStart -
349 (MmNumberOfSystemPtes + 1) * PAGE_SIZE);
350 MmNonPagedSystemStart = (PVOID)((ULONG_PTR)MmNonPagedSystemStart &
351 ~(PDE_MAPPED_VA - 1));
352
353 //
354 // Don't let it go below the minimum
355 //
356 if (MmNonPagedSystemStart < (PVOID)0xEB000000)
357 {
358 //
359 // This is a hard-coded limit in the Windows NT address space
360 //
361 MmNonPagedSystemStart = (PVOID)0xEB000000;
362
363 //
364 // Reduce the amount of system PTEs to reach this point
365 //
366 MmNumberOfSystemPtes = ((ULONG_PTR)MmNonPagedPoolStart -
367 (ULONG_PTR)MmNonPagedSystemStart) >>
368 PAGE_SHIFT;
369 MmNumberOfSystemPtes--;
370 ASSERT(MmNumberOfSystemPtes > 1000);
371 }
372
373 //
374 // Check if we are in a situation where the size of the paged pool
375 // is so large that it overflows into nonpaged pool
376 //
377 if (MmSizeOfPagedPoolInBytes >
378 ((ULONG_PTR)MmNonPagedSystemStart - (ULONG_PTR)MmPagedPoolStart))
379 {
380 //
381 // We need some recalculations here
382 //
383 DPRINT1("Paged pool is too big!\n");
384 }
385
386 //
387 // Normally, the PFN database should start after the loader images.
388 // This is already the case in ReactOS, but for now we want to co-exist
389 // with the old memory manager, so we'll create a "Shadow PFN Database"
390 // instead, and arbitrarly start it at 0xB0000000.
391 //
392 MmPfnDatabase = (PVOID)0xB0000000;
393 ASSERT(((ULONG_PTR)MmPfnDatabase & (PDE_MAPPED_VA - 1)) == 0);
394
395 //
396 // Non paged pool comes after the PFN database
397 //
398 MmNonPagedPoolStart = (PVOID)((ULONG_PTR)MmPfnDatabase +
399 (MxPfnAllocation << PAGE_SHIFT));
400
401 //
402 // Now we actually need to get these many physical pages. Nonpaged pool
403 // is actually also physically contiguous (but not the expansion)
404 //
405 PageFrameIndex = MxGetNextPage(MxPfnAllocation +
406 (MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT));
407 ASSERT(PageFrameIndex != 0);
408 DPRINT("PFN DB PA PFN begins at: %lx\n", PageFrameIndex);
409 DPRINT("NP PA PFN begins at: %lx\n", PageFrameIndex + MxPfnAllocation);
410
411 /* Convert nonpaged pool size from bytes to pages */
412 MmMaximumNonPagedPoolInPages = MmMaximumNonPagedPoolInBytes >> PAGE_SHIFT;
413
414 //
415 // Now we need some pages to create the page tables for the NP system VA
416 // which includes system PTEs and expansion NP
417 //
418 StartPde = MiAddressToPde(MmNonPagedSystemStart);
419 EndPde = MiAddressToPde((PVOID)((ULONG_PTR)MmNonPagedPoolEnd - 1));
420 while (StartPde <= EndPde)
421 {
422 //
423 // Get a page
424 //
425 TempPde.u.Hard.PageFrameNumber = MxGetNextPage(1);
426 MI_WRITE_VALID_PTE(StartPde, TempPde);
427
428 //
429 // Zero out the page table
430 //
431 PointerPte = MiPteToAddress(StartPde);
432 RtlZeroMemory(PointerPte, PAGE_SIZE);
433
434 //
435 // Next
436 //
437 StartPde++;
438 }
439
440 //
441 // Now we need pages for the page tables which will map initial NP
442 //
443 StartPde = MiAddressToPde(MmPfnDatabase);
444 EndPde = MiAddressToPde((PVOID)((ULONG_PTR)MmNonPagedPoolStart +
445 MmSizeOfNonPagedPoolInBytes - 1));
446 while (StartPde <= EndPde)
447 {
448 //
449 // Get a page
450 //
451 TempPde.u.Hard.PageFrameNumber = MxGetNextPage(1);
452 MI_WRITE_VALID_PTE(StartPde, TempPde);
453
454 //
455 // Zero out the page table
456 //
457 PointerPte = MiPteToAddress(StartPde);
458 RtlZeroMemory(PointerPte, PAGE_SIZE);
459
460 //
461 // Next
462 //
463 StartPde++;
464 }
465
466 //
467 // Now remember where the expansion starts
468 //
469 MmNonPagedPoolExpansionStart = NonPagedPoolExpansionVa;
470
471 //
472 // Last step is to actually map the nonpaged pool
473 //
474 PointerPte = MiAddressToPte(MmNonPagedPoolStart);
475 LastPte = MiAddressToPte((PVOID)((ULONG_PTR)MmNonPagedPoolStart +
476 MmSizeOfNonPagedPoolInBytes - 1));
477 while (PointerPte <= LastPte)
478 {
479 //
480 // Use one of our contigous pages
481 //
482 TempPte.u.Hard.PageFrameNumber = PageFrameIndex++;
483 MI_WRITE_VALID_PTE(PointerPte++, TempPte);
484 }
485
486 //
487 // Sanity check: make sure we have properly defined the system PTE space
488 //
489 ASSERT(MiAddressToPte(MmNonPagedSystemStart) <
490 MiAddressToPte(MmNonPagedPoolExpansionStart));
491
492 /* Now go ahead and initialize the nonpaged pool */
493 MiInitializeNonPagedPool();
494 MiInitializeNonPagedPoolThresholds();
495
496 /* Map the PFN database pages */
497 MiMapPfnDatabase(LoaderBlock);
498
499 /* Initialize the color tables */
500 MiInitializeColorTables();
501
502 /* Build the PFN Database */
503 MiInitializePfnDatabase(LoaderBlock);
504 MmInitializeBalancer(MmAvailablePages, 0);
505
506 //
507 // Reset the descriptor back so we can create the correct memory blocks
508 //
509 *MxFreeDescriptor = MxOldFreeDescriptor;
510
511 //
512 // Initialize the nonpaged pool
513 //
514 InitializePool(NonPagedPool, 0);
515
516 //
517 // We PDE-aligned the nonpaged system start VA, so haul some extra PTEs!
518 //
519 PointerPte = MiAddressToPte(MmNonPagedSystemStart);
520 MmNumberOfSystemPtes = MiAddressToPte(MmNonPagedPoolExpansionStart) -
521 PointerPte;
522 MmNumberOfSystemPtes--;
523 DPRINT("Final System PTE count: %d (%d bytes)\n",
524 MmNumberOfSystemPtes, MmNumberOfSystemPtes * PAGE_SIZE);
525
526 //
527 // Create the system PTE space
528 //
529 MiInitializeSystemPtes(PointerPte, MmNumberOfSystemPtes, SystemPteSpace);
530
531 /* Get the PDE For hyperspace */
532 StartPde = MiAddressToPde(HYPER_SPACE);
533
534 /* Lock PFN database */
535 OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
536
537 /* Allocate a page for hyperspace and create it */
538 MI_SET_USAGE(MI_USAGE_PAGE_TABLE);
539 MI_SET_PROCESS2("Kernel");
540 PageFrameIndex = MiRemoveAnyPage(0);
541 TempPde.u.Hard.PageFrameNumber = PageFrameIndex;
542 TempPde.u.Hard.Global = FALSE; // Hyperspace is local!
543 MI_WRITE_VALID_PTE(StartPde, TempPde);
544
545 /* Flush the TLB */
546 KeFlushCurrentTb();
547
548 /* Release the lock */
549 KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
550
551 //
552 // Zero out the page table now
553 //
554 PointerPte = MiAddressToPte(HYPER_SPACE);
555 RtlZeroMemory(PointerPte, PAGE_SIZE);
556
557 //
558 // Setup the mapping PTEs
559 //
560 MmFirstReservedMappingPte = MiAddressToPte(MI_MAPPING_RANGE_START);
561 MmLastReservedMappingPte = MiAddressToPte(MI_MAPPING_RANGE_END);
562 MmFirstReservedMappingPte->u.Hard.PageFrameNumber = MI_HYPERSPACE_PTES;
563
564 /* Set the working set address */
565 MmWorkingSetList = (PVOID)MI_WORKING_SET_LIST;
566
567 //
568 // Reserve system PTEs for zeroing PTEs and clear them
569 //
570 MiFirstReservedZeroingPte = MiReserveSystemPtes(MI_ZERO_PTES,
571 SystemPteSpace);
572 RtlZeroMemory(MiFirstReservedZeroingPte, MI_ZERO_PTES * sizeof(MMPTE));
573
574 //
575 // Set the counter to maximum to boot with
576 //
577 MiFirstReservedZeroingPte->u.Hard.PageFrameNumber = MI_ZERO_PTES - 1;
578
579 /* Lock PFN database */
580 OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
581
582 /* Reset the ref/share count so that MmInitializeProcessAddressSpace works */
583 Pfn1 = MiGetPfnEntry(PFN_FROM_PTE(MiAddressToPde(PDE_BASE)));
584 Pfn1->u2.ShareCount = 0;
585 Pfn1->u3.e2.ReferenceCount = 0;
586
587 /* Get a page for the working set list */
588 MI_SET_USAGE(MI_USAGE_PAGE_TABLE);
589 MI_SET_PROCESS2("Kernel WS List");
590 PageFrameIndex = MiRemoveAnyPage(0);
591 TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
592
593 /* Map the working set list */
594 PointerPte = MiAddressToPte(MmWorkingSetList);
595 MI_WRITE_VALID_PTE(PointerPte, TempPte);
596
597 /* Zero it out, and save the frame index */
598 RtlZeroMemory(MiPteToAddress(PointerPte), PAGE_SIZE);
599 PsGetCurrentProcess()->WorkingSetPage = PageFrameIndex;
600
601 /* Check for Pentium LOCK errata */
602 if (KiI386PentiumLockErrataPresent)
603 {
604 /* Mark the 1st IDT page as Write-Through to prevent a lockup
605 on a F00F instruction.
606 See http://www.rcollins.org/Errata/Dec97/F00FBug.html */
607 PointerPte = MiAddressToPte(KeGetPcr()->IDT);
608 PointerPte->u.Hard.WriteThrough = 1;
609 }
610
611 /* Release the lock */
612 KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
613
614 /* Initialize the bogus address space */
615 Flags = 0;
616 MmInitializeProcessAddressSpace(PsGetCurrentProcess(), NULL, NULL, &Flags, NULL);
617
618 /* Make sure the color lists are valid */
619 ASSERT(MmFreePagesByColor[0] < (PMMCOLOR_TABLES)PTE_BASE);
620 StartPde = MiAddressToPde(MmFreePagesByColor[0]);
621 ASSERT(StartPde->u.Hard.Valid == 1);
622 PointerPte = MiAddressToPte(MmFreePagesByColor[0]);
623 ASSERT(PointerPte->u.Hard.Valid == 1);
624 LastPte = MiAddressToPte((ULONG_PTR)&MmFreePagesByColor[1][MmSecondaryColors] - 1);
625 ASSERT(LastPte->u.Hard.Valid == 1);
626
627 /* Loop the color list PTEs */
628 while (PointerPte <= LastPte)
629 {
630 /* Get the PFN entry */
631 Pfn1 = MiGetPfnEntry(PFN_FROM_PTE(PointerPte));
632 if (!Pfn1->u3.e2.ReferenceCount)
633 {
634 /* Fill it out */
635 Pfn1->u4.PteFrame = PFN_FROM_PTE(StartPde);
636 Pfn1->PteAddress = PointerPte;
637 Pfn1->u2.ShareCount++;
638 Pfn1->u3.e2.ReferenceCount = 1;
639 Pfn1->u3.e1.PageLocation = ActiveAndValid;
640 Pfn1->u3.e1.CacheAttribute = MiCached;
641 }
642
643 /* Keep going */
644 PointerPte++;
645 }
646
647 /* All done */
648 return STATUS_SUCCESS;
649 }
650
651 /* EOF */