/*
 * PROJECT: ReactOS Kernel
 * LICENSE: BSD - See COPYING.ARM in the top level directory
 * FILE: ntoskrnl/mm/ARM3/i386/init.c
 * PURPOSE: ARM Memory Manager Initialization for x86
 * PROGRAMMERS: ReactOS Portable Systems Group
 */

/* INCLUDES *******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#line 15 "ARM³::INIT:X86"
#define MODULE_INVOLVED_IN_ARM3
#include "../../ARM3/miarm.h"

/* GLOBALS ********************************************************************/

/* Template PTE and PDE for a kernel page */
MMPTE ValidKernelPde = {.u.Hard.Valid = 1, .u.Hard.Write = 1, .u.Hard.Dirty = 1, .u.Hard.Accessed = 1};
MMPTE ValidKernelPte = {.u.Hard.Valid = 1, .u.Hard.Write = 1, .u.Hard.Dirty = 1, .u.Hard.Accessed = 1};

/* Template PDE for a demand-zero page */
MMPDE DemandZeroPde = {.u.Long = (MM_READWRITE << MM_PTE_SOFTWARE_PROTECTION_BITS)};

/* Template PTE for prototype page */
MMPTE PrototypePte = {.u.Long = (MM_READWRITE << MM_PTE_SOFTWARE_PROTECTION_BITS) | PTE_PROTOTYPE | (MI_PTE_LOOKUP_NEEDED << PAGE_SHIFT)};
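/*
 * NOTE: The prototype template keeps MM_READWRITE in the software protection
 * bits, sets PTE_PROTOTYPE, and stores MI_PTE_LOOKUP_NEEDED in the address
 * field (shifted by PAGE_SHIFT); assuming that constant is the "index unknown"
 * sentinel, a fault on such a PTE means the real prototype PTE address still
 * has to be looked up from the owning segment.
 */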

/* PRIVATE FUNCTIONS **********************************************************/

VOID
NTAPI
MiComputeNonPagedPoolVa(IN ULONG FreePages)
{
    PFN_NUMBER PoolPages;

    /* Check if this is a machine with less than 256MB of RAM, and no override */
    if ((MmNumberOfPhysicalPages <= MI_MIN_PAGES_FOR_NONPAGED_POOL_TUNING) &&
        !(MmSizeOfNonPagedPoolInBytes))
    {
        /* Force the non paged pool to be 2MB so we can reduce RAM usage */
        MmSizeOfNonPagedPoolInBytes = 2 * _1MB;
    }

    /* Hyperspace ends here */
    MmHyperSpaceEnd = (PVOID)((ULONG_PTR)MmSystemCacheWorkingSetList - 1);

    /* Check if the user gave a ridiculously large nonpaged pool RAM size */
    if ((MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT) > (FreePages * 7 / 8))
    {
        /* More than 7/8ths of RAM was dedicated to nonpaged pool, ignore! */
        MmSizeOfNonPagedPoolInBytes = 0;
    }

    /* Check if no registry setting was set, or if the setting was too low */
    if (MmSizeOfNonPagedPoolInBytes < MmMinimumNonPagedPoolSize)
    {
        /* Start with the minimum (256 KB) and add 32 KB for each MB above 4 */
        MmSizeOfNonPagedPoolInBytes = MmMinimumNonPagedPoolSize;
        MmSizeOfNonPagedPoolInBytes += (FreePages - 1024) / 256 * MmMinAdditionNonPagedPoolPerMb;
    }
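    /*
     * Worked example with the documented defaults (256 KB minimum, 32 KB per
     * extra MB): 16384 free pages (64 MB) give (16384 - 1024) / 256 = 60 MB
     * above the first 4 MB, so the initial pool becomes
     * 256 KB + 60 * 32 KB = 2176 KB.
     */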

    /* Check if the registry setting or our dynamic calculation was too high */
    if (MmSizeOfNonPagedPoolInBytes > MI_MAX_INIT_NONPAGED_POOL_SIZE)
    {
        /* Set it to the maximum */
        MmSizeOfNonPagedPoolInBytes = MI_MAX_INIT_NONPAGED_POOL_SIZE;
    }

    /* Check if a percentage cap was set through the registry */
    if (MmMaximumNonPagedPoolPercent) UNIMPLEMENTED;

    /* Page-align the nonpaged pool size */
    MmSizeOfNonPagedPoolInBytes &= ~(PAGE_SIZE - 1);

    /* Now, check if there was a registry size for the maximum size */
    if (!MmMaximumNonPagedPoolInBytes)
    {
        /* Start with the default (1MB) */
        MmMaximumNonPagedPoolInBytes = MmDefaultMaximumNonPagedPool;

        /* Add space for PFN database */
        MmMaximumNonPagedPoolInBytes += (ULONG)
            PAGE_ALIGN((MmHighestPhysicalPage + 1) * sizeof(MMPFN));

        /* Check if the machine has more than 512MB of free RAM */
        if (FreePages >= 0x1F000)
        {
            /* Add 200KB for each MB above 4 */
            MmMaximumNonPagedPoolInBytes += (FreePages - 1024) / 256 *
                                            (MmMaxAdditionNonPagedPoolPerMb / 2);
            if (MmMaximumNonPagedPoolInBytes < MI_MAX_NONPAGED_POOL_SIZE)
            {
                /* Make it at least 128MB since this machine has a lot of RAM */
                MmMaximumNonPagedPoolInBytes = MI_MAX_NONPAGED_POOL_SIZE;
            }
        }
        else
        {
            /* Add 400KB for each MB above 4 */
            MmMaximumNonPagedPoolInBytes += (FreePages - 1024) / 256 *
                                            MmMaxAdditionNonPagedPoolPerMb;
        }
    }
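    /*
     * Worked example with the documented 400 KB per extra MB (small-RAM path):
     * 65536 free pages (256 MB) give (65536 - 1024) / 256 = 252 MB above the
     * first 4 MB, adding roughly 252 * 400 KB = 98 MB on top of the 1 MB
     * default and the PFN database reservation. With 0x1F000 free pages
     * (about 496 MB) or more, the large-RAM path above is taken instead.
     */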

    /* Make sure there's at least 16 pages + the PFN available for expansion */
    PoolPages = MmSizeOfNonPagedPoolInBytes + (PAGE_SIZE * 16) +
                ((ULONG)PAGE_ALIGN(MmHighestPhysicalPage + 1) * sizeof(MMPFN));
    if (MmMaximumNonPagedPoolInBytes < PoolPages)
    {
        /* The maximum should be at least high enough to cover all the above */
        MmMaximumNonPagedPoolInBytes = PoolPages;
    }

    /* Systems with 2GB of kernel address space get double the size */
    PoolPages = MI_MAX_NONPAGED_POOL_SIZE * 2;

    /* On the other hand, make sure that PFN + nonpaged pool doesn't get too big */
    if (MmMaximumNonPagedPoolInBytes > PoolPages)
    {
        /* Trim it down to the maximum architectural limit (256MB) */
        MmMaximumNonPagedPoolInBytes = PoolPages;
    }

    /* Check if this is a system with > 128MB of non paged pool */
    if (MmMaximumNonPagedPoolInBytes > MI_MAX_NONPAGED_POOL_SIZE)
    {
        /* Check if the initial size is less than the extra 128MB boost */
        if (MmSizeOfNonPagedPoolInBytes < (MmMaximumNonPagedPoolInBytes -
                                           MI_MAX_NONPAGED_POOL_SIZE))
        {
            /* FIXME: Should check if the initial pool can be expanded */

            /* Assume no expansion possible, check if the maximum is too large */
            if (MmMaximumNonPagedPoolInBytes > (MmSizeOfNonPagedPoolInBytes +
                                                MI_MAX_NONPAGED_POOL_SIZE))
            {
                /* Set it to the initial value plus the boost */
                MmMaximumNonPagedPoolInBytes = MmSizeOfNonPagedPoolInBytes +
                                               MI_MAX_NONPAGED_POOL_SIZE;
            }
        }
    }
}

NTSTATUS
NTAPI
MiInitMachineDependent(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
    PLIST_ENTRY NextEntry;
    PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
    ULONG FreePages = 0;
    PFN_NUMBER PageFrameIndex;
    PMMPTE StartPde, EndPde, PointerPte, LastPte;
    MMPTE TempPde, TempPte;
    PVOID NonPagedPoolExpansionVa;
    KIRQL OldIrql;

    /* Check for kernel stack size that's too big */
    if (MmLargeStackSize > (KERNEL_LARGE_STACK_SIZE / _1KB))
    {
        /* Sanitize to default value */
        MmLargeStackSize = KERNEL_LARGE_STACK_SIZE;
    }
    else
    {
        /* Take the registry setting, and convert it into bytes */
        MmLargeStackSize *= _1KB;

        /* Now align it to a page boundary */
        MmLargeStackSize = PAGE_ROUND_UP(MmLargeStackSize);

        /* Sanity checks */
        ASSERT(MmLargeStackSize <= KERNEL_LARGE_STACK_SIZE);
        ASSERT((MmLargeStackSize & (PAGE_SIZE - 1)) == 0);

        /* Make sure it's not too low */
        if (MmLargeStackSize < KERNEL_STACK_SIZE) MmLargeStackSize = KERNEL_STACK_SIZE;
    }

    /* Check for global bit */
#if 0
    if (KeFeatureBits & KF_GLOBAL_PAGE)
    {
        /* Set it on the template PTE and PDE */
        ValidKernelPte.u.Hard.Global = TRUE;
        ValidKernelPde.u.Hard.Global = TRUE;
    }
#endif
    /* Now templates are ready */
    TempPte = ValidKernelPte;
    TempPde = ValidKernelPde;

    //
    // Set CR3 for the system process
    //
    PointerPte = MiAddressToPde(PTE_BASE);
    PageFrameIndex = PFN_FROM_PTE(PointerPte) << PAGE_SHIFT;
    PsGetCurrentProcess()->Pcb.DirectoryTableBase[0] = PageFrameIndex;
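    //
    // NOTE: This relies on the x86 recursive (self-map) page directory entry:
    // the PDE that maps PTE_BASE points back at the page directory itself, so
    // its page frame number is the directory's own physical frame. Shifting it
    // left by PAGE_SHIFT yields the physical address that belongs in CR3.
    //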

    //
    // Blow away user-mode
    //
    StartPde = MiAddressToPde(0);
    EndPde = MiAddressToPde(KSEG0_BASE);
    RtlZeroMemory(StartPde, (EndPde - StartPde) * sizeof(MMPTE));
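    //
    // NOTE: Each PDE covers PDE_MAPPED_VA (4MB on non-PAE x86) of address
    // space, so zeroing the PDEs from address 0 up to KSEG0_BASE unmaps every
    // user-mode page table in one pass; the underlying pages are not touched.
    //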

    //
    // Loop the memory descriptors
    //
    NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
    while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
    {
        //
        // Get the memory block
        //
        MdBlock = CONTAINING_RECORD(NextEntry,
                                    MEMORY_ALLOCATION_DESCRIPTOR,
                                    ListEntry);

        //
        // Skip invisible memory
        //
        if ((MdBlock->MemoryType != LoaderFirmwarePermanent) &&
            (MdBlock->MemoryType != LoaderSpecialMemory) &&
            (MdBlock->MemoryType != LoaderHALCachedMemory) &&
            (MdBlock->MemoryType != LoaderBBTMemory))
        {
            //
            // Check if BURNMEM was used
            //
            if (MdBlock->MemoryType != LoaderBad)
            {
                //
                // Count this in the total of pages
                //
                MmNumberOfPhysicalPages += MdBlock->PageCount;
            }

            //
            // Check if this is the new lowest page
            //
            if (MdBlock->BasePage < MmLowestPhysicalPage)
            {
                //
                // Update the lowest page
                //
                MmLowestPhysicalPage = MdBlock->BasePage;
            }

            //
            // Check if this is the new highest page
            //
            PageFrameIndex = MdBlock->BasePage + MdBlock->PageCount;
            if (PageFrameIndex > MmHighestPhysicalPage)
            {
                //
                // Update the highest page
                //
                MmHighestPhysicalPage = PageFrameIndex - 1;
            }

            //
            // Check if this is free memory
            //
            if ((MdBlock->MemoryType == LoaderFree) ||
                (MdBlock->MemoryType == LoaderLoadedProgram) ||
                (MdBlock->MemoryType == LoaderFirmwareTemporary) ||
                (MdBlock->MemoryType == LoaderOsloaderStack))
            {
                //
                // Check if this is the largest memory descriptor
                //
                if (MdBlock->PageCount > FreePages)
                {
                    //
                    // For now, it is
                    //
                    MxFreeDescriptor = MdBlock;
                }

                //
                // More free pages
                //
                FreePages += MdBlock->PageCount;
            }
        }

        //
        // Keep going
        //
        NextEntry = MdBlock->ListEntry.Flink;
    }

    //
    // Save original values of the free descriptor, since it'll be
    // altered by early allocations
    //
    MxOldFreeDescriptor = *MxFreeDescriptor;

    /* Compute non paged pool limits and size */
    MiComputeNonPagedPoolVa(FreePages);

    /* Compute color information (L2 cache-separated paging lists) */
    MiComputeColorInformation();

    //
    // Calculate the number of bytes for the PFN database, double it for ARM3,
    // then add the color tables and convert to pages
    //
    MxPfnAllocation = (MmHighestPhysicalPage + 1) * sizeof(MMPFN);
    //MxPfnAllocation <<= 1;
    MxPfnAllocation += (MmSecondaryColors * sizeof(MMCOLOR_TABLES) * 2);
    MxPfnAllocation >>= PAGE_SHIFT;

    //
    // We have to add one to the count here, because in the process of
    // shifting down to the page size, we actually ended up getting the
    // lower aligned size (so say, 0x5FFFF bytes is now 0x5F pages).
    // Later on, we'll shift this number back into bytes, which would cause
    // us to end up with only 0x5F000 bytes -- when we actually want to have
    // 0x60000 bytes.
    //
    MxPfnAllocation++;

    //
    // Now calculate the nonpaged pool expansion VA region
    //
    MmNonPagedPoolStart = (PVOID)((ULONG_PTR)MmNonPagedPoolEnd -
                                  MmMaximumNonPagedPoolInBytes +
                                  MmSizeOfNonPagedPoolInBytes);
    MmNonPagedPoolStart = (PVOID)PAGE_ALIGN(MmNonPagedPoolStart);
    NonPagedPoolExpansionVa = MmNonPagedPoolStart;
    DPRINT("NP Pool has been tuned to: %d bytes and %d bytes\n",
           MmSizeOfNonPagedPoolInBytes, MmMaximumNonPagedPoolInBytes);
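    //
    // NOTE: At this point MmNonPagedPoolStart only marks the *expansion*
    // region carved out of the top of the range ending at MmNonPagedPoolEnd
    // (its size is the maximum minus the initial size). The initial,
    // physically contiguous pool is relocated lower, right after the PFN
    // database, further below.
    //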

    //
    // Now calculate the nonpaged system VA region, which includes the
    // nonpaged pool expansion (above) and the system PTEs. Note that it is
    // then aligned to a PDE boundary (4MB).
    //
    MmNonPagedSystemStart = (PVOID)((ULONG_PTR)MmNonPagedPoolStart -
                                    (MmNumberOfSystemPtes + 1) * PAGE_SIZE);
    MmNonPagedSystemStart = (PVOID)((ULONG_PTR)MmNonPagedSystemStart &
                                    ~(PDE_MAPPED_VA - 1));

    //
    // Don't let it go below the minimum
    //
    if (MmNonPagedSystemStart < (PVOID)0xEB000000)
    {
        //
        // This is a hard-coded limit in the Windows NT address space
        //
        MmNonPagedSystemStart = (PVOID)0xEB000000;

        //
        // Reduce the amount of system PTEs to reach this point
        //
        MmNumberOfSystemPtes = ((ULONG_PTR)MmNonPagedPoolStart -
                                (ULONG_PTR)MmNonPagedSystemStart) >>
                                PAGE_SHIFT;
        MmNumberOfSystemPtes--;
        ASSERT(MmNumberOfSystemPtes > 1000);
    }

    //
    // Check if we are in a situation where the size of the paged pool
    // is so large that it overflows into nonpaged pool
    //
    if (MmSizeOfPagedPoolInBytes >
        ((ULONG_PTR)MmNonPagedSystemStart - (ULONG_PTR)MmPagedPoolStart))
    {
        //
        // We need some recalculations here
        //
        DPRINT1("Paged pool is too big!\n");
    }

    //
    // Normally, the PFN database should start after the loader images.
    // This is already the case in ReactOS, but for now we want to co-exist
    // with the old memory manager, so we'll create a "Shadow PFN Database"
    // instead, and arbitrarily start it at 0xB0000000.
    //
    MmPfnDatabase = (PVOID)0xB0000000;
    ASSERT(((ULONG_PTR)MmPfnDatabase & (PDE_MAPPED_VA - 1)) == 0);

    //
    // Non paged pool comes after the PFN database
    //
    MmNonPagedPoolStart = (PVOID)((ULONG_PTR)MmPfnDatabase +
                                  (MxPfnAllocation << PAGE_SHIFT));

    //
    // Now we actually need to get this many physical pages. Nonpaged pool
    // is actually also physically contiguous (but not the expansion)
    //
    PageFrameIndex = MxGetNextPage(MxPfnAllocation +
                                   (MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT));
    ASSERT(PageFrameIndex != 0);
    DPRINT("PFN DB PA PFN begins at: %lx\n", PageFrameIndex);
    DPRINT("NP PA PFN begins at: %lx\n", PageFrameIndex + MxPfnAllocation);

    /* Convert nonpaged pool size from bytes to pages */
    MmMaximumNonPagedPoolInPages = MmMaximumNonPagedPoolInBytes >> PAGE_SHIFT;

    //
    // Now we need some pages to create the page tables for the NP system VA
    // which includes system PTEs and expansion NP
    //
    StartPde = MiAddressToPde(MmNonPagedSystemStart);
    EndPde = MiAddressToPde((PVOID)((ULONG_PTR)MmNonPagedPoolEnd - 1));
    while (StartPde <= EndPde)
    {
        //
        // Get a page
        //
        TempPde.u.Hard.PageFrameNumber = MxGetNextPage(1);
        MI_WRITE_VALID_PTE(StartPde, TempPde);

        //
        // Zero out the page table
        //
        PointerPte = MiPteToAddress(StartPde);
        RtlZeroMemory(PointerPte, PAGE_SIZE);

        //
        // Next
        //
        StartPde++;
    }
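    //
    // NOTE: MiPteToAddress(StartPde) works because of the self-map: a PDE is
    // itself a PTE within the page directory page, so asking which address it
    // maps yields the virtual address of the page table that was just made
    // valid, which can then be zeroed directly.
    //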

    //
    // Now we need pages for the page tables which will map initial NP
    //
    StartPde = MiAddressToPde(MmPfnDatabase);
    EndPde = MiAddressToPde((PVOID)((ULONG_PTR)MmNonPagedPoolStart +
                                    MmSizeOfNonPagedPoolInBytes - 1));
    while (StartPde <= EndPde)
    {
        //
        // Get a page
        //
        TempPde.u.Hard.PageFrameNumber = MxGetNextPage(1);
        MI_WRITE_VALID_PTE(StartPde, TempPde);

        //
        // Zero out the page table
        //
        PointerPte = MiPteToAddress(StartPde);
        RtlZeroMemory(PointerPte, PAGE_SIZE);

        //
        // Next
        //
        StartPde++;
    }

    //
    // Now remember where the expansion starts
    //
    MmNonPagedPoolExpansionStart = NonPagedPoolExpansionVa;

    //
    // Last step is to actually map the nonpaged pool
    //
    PointerPte = MiAddressToPte(MmNonPagedPoolStart);
    LastPte = MiAddressToPte((PVOID)((ULONG_PTR)MmNonPagedPoolStart +
                                     MmSizeOfNonPagedPoolInBytes - 1));
    while (PointerPte <= LastPte)
    {
        //
        // Use one of our contiguous pages
        //
        TempPte.u.Hard.PageFrameNumber = PageFrameIndex++;
        MI_WRITE_VALID_PTE(PointerPte++, TempPte);
    }

    //
    // Sanity check: make sure we have properly defined the system PTE space
    //
    ASSERT(MiAddressToPte(MmNonPagedSystemStart) <
           MiAddressToPte(MmNonPagedPoolExpansionStart));

    /* Now go ahead and initialize the nonpaged pool */
    MiInitializeNonPagedPool();
    MiInitializeNonPagedPoolThresholds();

    /* Map the PFN database pages */
    MiMapPfnDatabase(LoaderBlock);

    /* Initialize the color tables */
    MiInitializeColorTables();

    /* Build the PFN Database */
    MiInitializePfnDatabase(LoaderBlock);
    MmInitializeBalancer(MmAvailablePages, 0);

    //
    // Reset the descriptor back so we can create the correct memory blocks
    //
    *MxFreeDescriptor = MxOldFreeDescriptor;

    //
    // Initialize the nonpaged pool
    //
    InitializePool(NonPagedPool, 0);

    //
    // We PDE-aligned the nonpaged system start VA, so haul some extra PTEs!
    //
    PointerPte = MiAddressToPte(MmNonPagedSystemStart);
    MmNumberOfSystemPtes = MiAddressToPte(MmNonPagedPoolExpansionStart) -
                           PointerPte;
    MmNumberOfSystemPtes--;
    DPRINT("Final System PTE count: %d (%d bytes)\n",
           MmNumberOfSystemPtes, MmNumberOfSystemPtes * PAGE_SIZE);
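    //
    // NOTE: Rounding MmNonPagedSystemStart down to a PDE boundary earlier can
    // only have added space, so the PTE count is recomputed here from the
    // actual VA range up to the expansion start (less the one PTE subtracted
    // above) instead of reusing the registry/default value.
    //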

    //
    // Create the system PTE space
    //
    MiInitializeSystemPtes(PointerPte, MmNumberOfSystemPtes, SystemPteSpace);

    /* Get the PDE for hyperspace */
    StartPde = MiAddressToPde(HYPER_SPACE);

    /* Lock PFN database */
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    /* Allocate a page for hyperspace and create it */
    PageFrameIndex = MiRemoveAnyPage(0);
    TempPde.u.Hard.PageFrameNumber = PageFrameIndex;
    TempPde.u.Hard.Global = FALSE; // Hyperspace is local!
    MI_WRITE_VALID_PTE(StartPde, TempPde);

    /* Flush the TLB */
    KeFlushCurrentTb();

    /* Release the lock */
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    //
    // Zero out the page table now
    //
    PointerPte = MiAddressToPte(HYPER_SPACE);
    RtlZeroMemory(PointerPte, PAGE_SIZE);

    //
    // Setup the mapping PTEs
    //
    MmFirstReservedMappingPte = MiAddressToPte(MI_MAPPING_RANGE_START);
    MmLastReservedMappingPte = MiAddressToPte(MI_MAPPING_RANGE_END);
    MmFirstReservedMappingPte->u.Hard.PageFrameNumber = MI_HYPERSPACE_PTES;
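    //
    // NOTE: The first reserved mapping PTE never maps a page itself; its
    // PageFrameNumber field is reused as a counter of how many of the
    // MI_HYPERSPACE_PTES mapping PTEs are still free, a convention the
    // hyperspace mapping code depends on later.
    //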

    //
    // Reserve system PTEs for zeroing PTEs and clear them
    //
    MiFirstReservedZeroingPte = MiReserveSystemPtes(MI_ZERO_PTES,
                                                    SystemPteSpace);
    RtlZeroMemory(MiFirstReservedZeroingPte, MI_ZERO_PTES * sizeof(MMPTE));

    //
    // Set the counter to maximum to boot with
    //
    MiFirstReservedZeroingPte->u.Hard.PageFrameNumber = MI_ZERO_PTES - 1;
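    //
    // NOTE: Same bookkeeping trick as the hyperspace mapping PTEs above: the
    // first zeroing PTE is not used as a mapping, it only tracks how many of
    // the remaining MI_ZERO_PTES - 1 zeroing PTEs are currently available.
    //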

    /* Check for Pentium LOCK errata */
    if (KiI386PentiumLockErrataPresent)
    {
        /* Mark the 1st IDT page as Write-Through to prevent a lockup
           on a F00F instruction.
           See http://www.rcollins.org/Errata/Dec97/F00FBug.html */
        PointerPte = MiAddressToPte(KeGetPcr()->IDT);
        PointerPte->u.Hard.WriteThrough = 1;
    }

    return STATUS_SUCCESS;
}

/* EOF */