[NTOS]: Enable usage of ARM3 paged pool, up until Mm Phase 2.
/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/i386/init.c
 * PURPOSE:         ARM Memory Manager Initialization for x86
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */

/* INCLUDES *******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#line 15 "ARM³::INIT:X86"
#define MODULE_INVOLVED_IN_ARM3
#include "../../ARM3/miarm.h"

/* GLOBALS ********************************************************************/

/* Template PTE and PDE for a kernel page */
MMPTE ValidKernelPde = {.u.Hard.Valid = 1, .u.Hard.Write = 1, .u.Hard.Dirty = 1, .u.Hard.Accessed = 1};
MMPTE ValidKernelPte = {.u.Hard.Valid = 1, .u.Hard.Write = 1, .u.Hard.Dirty = 1, .u.Hard.Accessed = 1};

/* PRIVATE FUNCTIONS **********************************************************/

VOID
NTAPI
MiComputeNonPagedPoolVa(IN ULONG FreePages)
{
    PFN_NUMBER PoolPages;

    /* Check if this is a machine with less than 256MB of RAM, and no override */
    if ((MmNumberOfPhysicalPages <= MI_MIN_PAGES_FOR_NONPAGED_POOL_TUNING) &&
        !(MmSizeOfNonPagedPoolInBytes))
    {
        /* Force the nonpaged pool to be 2MB so we can reduce RAM usage */
        MmSizeOfNonPagedPoolInBytes = 2 * _1MB;
    }

    /* Hyperspace ends here */
    MmHyperSpaceEnd = (PVOID)((ULONG_PTR)MmSystemCacheWorkingSetList - 1);

    /* Check if the user gave a ridiculously large nonpaged pool RAM size */
    if ((MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT) > (FreePages * 7 / 8))
    {
        /* More than 7/8ths of RAM was dedicated to nonpaged pool, ignore! */
        MmSizeOfNonPagedPoolInBytes = 0;
    }

    /* Check if no registry setting was set, or if the setting was too low */
    if (MmSizeOfNonPagedPoolInBytes < MmMinimumNonPagedPoolSize)
    {
        /* Start with the minimum (256 KB) and add 32 KB for each MB above 4 MB */
        MmSizeOfNonPagedPoolInBytes = MmMinimumNonPagedPoolSize;
        MmSizeOfNonPagedPoolInBytes += (FreePages - 1024) / 256 * MmMinAdditionNonPagedPoolPerMb;
    }
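
    /*
     * Illustrative sizing, assuming 4KB pages: with 128MB of free RAM
     * (FreePages = 32768), (32768 - 1024) / 256 = 124 MB above the first 4MB,
     * so the initial pool becomes 256KB + 124 * 32KB, roughly 4.1MB.
     */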

    /* Check if the registry setting or our dynamic calculation was too high */
    if (MmSizeOfNonPagedPoolInBytes > MI_MAX_INIT_NONPAGED_POOL_SIZE)
    {
        /* Set it to the maximum */
        MmSizeOfNonPagedPoolInBytes = MI_MAX_INIT_NONPAGED_POOL_SIZE;
    }

    /* Check if a percentage cap was set through the registry */
    if (MmMaximumNonPagedPoolPercent) UNIMPLEMENTED;

    /* Page-align the nonpaged pool size */
    MmSizeOfNonPagedPoolInBytes &= ~(PAGE_SIZE - 1);

    /* Now, check if there was a registry size for the maximum size */
    if (!MmMaximumNonPagedPoolInBytes)
    {
        /* Start with the default (1MB) */
        MmMaximumNonPagedPoolInBytes = MmDefaultMaximumNonPagedPool;

        /* Add space for PFN database */
        MmMaximumNonPagedPoolInBytes += (ULONG)
            PAGE_ALIGN((MmHighestPhysicalPage + 1) * sizeof(MMPFN));

        /* Check if the machine has more than 512MB of free RAM */
        if (FreePages >= 0x1F000)
        {
            /* Add 200KB for each MB above 4 */
            MmMaximumNonPagedPoolInBytes += (FreePages - 1024) / 256 *
                                            (MmMaxAdditionNonPagedPoolPerMb / 2);
            if (MmMaximumNonPagedPoolInBytes < MI_MAX_NONPAGED_POOL_SIZE)
            {
                /* Make it at least 128MB since this machine has a lot of RAM */
                MmMaximumNonPagedPoolInBytes = MI_MAX_NONPAGED_POOL_SIZE;
            }
        }
        else
        {
            /* Add 400KB for each MB above 4 */
            MmMaximumNonPagedPoolInBytes += (FreePages - 1024) / 256 *
                                            MmMaxAdditionNonPagedPoolPerMb;
        }
    }
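
    /*
     * Illustrative maximum, assuming 4KB pages: with 128MB of free RAM the
     * else-branch above gives roughly 1MB + the PFN database + 124 * 400KB,
     * i.e. about 50MB of maximum nonpaged pool before the caps below apply.
     */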

    /* Make sure there's at least 16 pages + the PFN available for expansion */
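    /* (Note: despite its PFN_NUMBER type, PoolPages holds byte counts in the
       comparisons below, not page counts.) */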
    PoolPages = MmSizeOfNonPagedPoolInBytes + (PAGE_SIZE * 16) +
                ((ULONG)PAGE_ALIGN(MmHighestPhysicalPage + 1) * sizeof(MMPFN));
    if (MmMaximumNonPagedPoolInBytes < PoolPages)
    {
        /* The maximum should be at least high enough to cover all the above */
        MmMaximumNonPagedPoolInBytes = PoolPages;
    }

    /* Systems with 2GB of kernel address space get double the size */
    PoolPages = MI_MAX_NONPAGED_POOL_SIZE * 2;

    /* On the other hand, make sure that PFN + nonpaged pool doesn't get too big */
    if (MmMaximumNonPagedPoolInBytes > PoolPages)
    {
        /* Trim it down to the maximum architectural limit (256MB) */
        MmMaximumNonPagedPoolInBytes = PoolPages;
    }

    /* Check if this is a system with > 128MB of nonpaged pool */
    if (MmMaximumNonPagedPoolInBytes > MI_MAX_NONPAGED_POOL_SIZE)
    {
        /* Check if the initial size is less than the extra 128MB boost */
        if (MmSizeOfNonPagedPoolInBytes < (MmMaximumNonPagedPoolInBytes -
                                           MI_MAX_NONPAGED_POOL_SIZE))
        {
            /* FIXME: Should check if the initial pool can be expanded */

            /* Assume no expansion is possible, check if the maximum is too large */
            if (MmMaximumNonPagedPoolInBytes > (MmSizeOfNonPagedPoolInBytes +
                                                MI_MAX_NONPAGED_POOL_SIZE))
            {
                /* Set it to the initial value plus the boost */
                MmMaximumNonPagedPoolInBytes = MmSizeOfNonPagedPoolInBytes +
                                               MI_MAX_NONPAGED_POOL_SIZE;
            }
        }
    }
}

NTSTATUS
NTAPI
MiInitMachineDependent(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
    PLIST_ENTRY NextEntry;
    PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
    ULONG FreePages = 0;
    PFN_NUMBER PageFrameIndex;
    PMMPTE StartPde, EndPde, PointerPte, LastPte;
    MMPTE TempPde, TempPte;
    PVOID NonPagedPoolExpansionVa;
    ULONG OldCount;
    KIRQL OldIrql;

    /* Check for kernel stack size that's too big */
    if (MmLargeStackSize > (KERNEL_LARGE_STACK_SIZE / _1KB))
    {
        /* Sanitize to default value */
        MmLargeStackSize = KERNEL_LARGE_STACK_SIZE;
    }
    else
    {
        /* Take the registry setting, and convert it into bytes */
        MmLargeStackSize *= _1KB;

        /* Now align it to a page boundary */
        MmLargeStackSize = PAGE_ROUND_UP(MmLargeStackSize);

        /* Sanity checks */
        ASSERT(MmLargeStackSize <= KERNEL_LARGE_STACK_SIZE);
        ASSERT((MmLargeStackSize & (PAGE_SIZE - 1)) == 0);

        /* Make sure it's not too low */
        if (MmLargeStackSize < KERNEL_STACK_SIZE) MmLargeStackSize = KERNEL_STACK_SIZE;
    }

    /* Check for global bit */
#if 0
    if (KeFeatureBits & KF_GLOBAL_PAGE)
    {
        /* Set it on the template PTE and PDE */
        ValidKernelPte.u.Hard.Global = TRUE;
        ValidKernelPde.u.Hard.Global = TRUE;
    }
#endif
    /* Now templates are ready */
    TempPte = ValidKernelPte;
    TempPde = ValidKernelPde;

    //
    // Set CR3 for the system process
    //
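    // Note: MiAddressToPde(PTE_BASE) is the page directory's recursive self-map
    // entry, so its PFN is that of the page directory itself; shifting it left by
    // PAGE_SHIFT yields the physical address stored in DirectoryTableBase[0] (CR3).
    //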
    PointerPte = MiAddressToPde(PTE_BASE);
    PageFrameIndex = PFN_FROM_PTE(PointerPte) << PAGE_SHIFT;
    PsGetCurrentProcess()->Pcb.DirectoryTableBase[0] = PageFrameIndex;

    //
    // Blow away user-mode
    //
    StartPde = MiAddressToPde(0);
    EndPde = MiAddressToPde(KSEG0_BASE);
    RtlZeroMemory(StartPde, (EndPde - StartPde) * sizeof(MMPTE));

    //
    // Loop the memory descriptors
    //
    NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
    while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
    {
        //
        // Get the memory block
        //
        MdBlock = CONTAINING_RECORD(NextEntry,
                                    MEMORY_ALLOCATION_DESCRIPTOR,
                                    ListEntry);

        //
        // Skip invisible memory
        //
        if ((MdBlock->MemoryType != LoaderFirmwarePermanent) &&
            (MdBlock->MemoryType != LoaderSpecialMemory) &&
            (MdBlock->MemoryType != LoaderHALCachedMemory) &&
            (MdBlock->MemoryType != LoaderBBTMemory))
        {
            //
            // Check if BURNMEM was used
            //
            if (MdBlock->MemoryType != LoaderBad)
            {
                //
                // Count this in the total of pages
                //
                MmNumberOfPhysicalPages += MdBlock->PageCount;
            }

            //
            // Check if this is the new lowest page
            //
            if (MdBlock->BasePage < MmLowestPhysicalPage)
            {
                //
                // Update the lowest page
                //
                MmLowestPhysicalPage = MdBlock->BasePage;
            }

            //
            // Check if this is the new highest page
            //
            PageFrameIndex = MdBlock->BasePage + MdBlock->PageCount;
            if (PageFrameIndex > MmHighestPhysicalPage)
            {
                //
                // Update the highest page
                //
                MmHighestPhysicalPage = PageFrameIndex - 1;
            }

            //
            // Check if this is free memory
            //
            if ((MdBlock->MemoryType == LoaderFree) ||
                (MdBlock->MemoryType == LoaderLoadedProgram) ||
                (MdBlock->MemoryType == LoaderFirmwareTemporary) ||
                (MdBlock->MemoryType == LoaderOsloaderStack))
            {
                //
                // Check if this is the largest memory descriptor
                //
                if (MdBlock->PageCount > FreePages)
                {
                    //
                    // For now, it is
                    //
                    MxFreeDescriptor = MdBlock;
                }

                //
                // More free pages
                //
                FreePages += MdBlock->PageCount;
            }
        }

        //
        // Keep going
        //
        NextEntry = MdBlock->ListEntry.Flink;
    }

    //
    // Save original values of the free descriptor, since it'll be
    // altered by early allocations
    //
    MxOldFreeDescriptor = *MxFreeDescriptor;

    /* Compute non paged pool limits and size */
    MiComputeNonPagedPoolVa(FreePages);

    /* Compute color information (L2 cache-separated paging lists) */
    MiComputeColorInformation();

    //
    // Calculate the number of bytes for the PFN database, then add the color
    // tables and convert to pages (the ARM3 doubling below is currently disabled)
    //
    MxPfnAllocation = (MmHighestPhysicalPage + 1) * sizeof(MMPFN);
    //MxPfnAllocation <<= 1;
    MxPfnAllocation += (MmSecondaryColors * sizeof(MMCOLOR_TABLES) * 2);
    MxPfnAllocation >>= PAGE_SHIFT;

    //
    // We have to add one to the count here, because in the process of
    // shifting down to the page size, we actually ended up getting the
    // lower aligned size (so say, 0x5FFFF bytes is now 0x5F pages).
    // Later on, we'll shift this number back into bytes, which would cause
    // us to end up with only 0x5F000 bytes -- when we actually want to have
    // 0x60000 bytes.
    //
    MxPfnAllocation++;
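
    //
    // (The "* 2" above reserves a color table per secondary color for each of the
    // two page lists tracked by color -- presumably the free and zeroed page lists
    // that MiInitializeColorTables sets up later.)
    //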

    //
    // Now calculate the nonpaged pool expansion VA region
    //
    MmNonPagedPoolStart = (PVOID)((ULONG_PTR)MmNonPagedPoolEnd -
                                  MmMaximumNonPagedPoolInBytes +
                                  MmSizeOfNonPagedPoolInBytes);
    MmNonPagedPoolStart = (PVOID)PAGE_ALIGN(MmNonPagedPoolStart);
    NonPagedPoolExpansionVa = MmNonPagedPoolStart;
    DPRINT("NP Pool has been tuned to: %d bytes (initial) and %d bytes (maximum)\n",
           MmSizeOfNonPagedPoolInBytes, MmMaximumNonPagedPoolInBytes);
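
    //
    // (The VA computed above is only used for the expansion region, which runs
    // from here up to MmNonPagedPoolEnd; the initial, physically contiguous pool
    // is re-based right after the PFN database further below.)
    //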

    //
    // Now calculate the nonpaged system VA region, which includes the
    // nonpaged pool expansion (above) and the system PTEs. Note that it is
    // then aligned to a PDE boundary (4MB).
    //
    MmNonPagedSystemStart = (PVOID)((ULONG_PTR)MmNonPagedPoolStart -
                                    (MmNumberOfSystemPtes + 1) * PAGE_SIZE);
    MmNonPagedSystemStart = (PVOID)((ULONG_PTR)MmNonPagedSystemStart &
                                    ~(PDE_MAPPED_VA - 1));

    //
    // Don't let it go below the minimum
    //
    if (MmNonPagedSystemStart < (PVOID)0xEB000000)
    {
        //
        // This is a hard-coded limit in the Windows NT address space
        //
        MmNonPagedSystemStart = (PVOID)0xEB000000;

        //
        // Reduce the amount of system PTEs to reach this point
        //
        MmNumberOfSystemPtes = ((ULONG_PTR)MmNonPagedPoolStart -
                                (ULONG_PTR)MmNonPagedSystemStart) >>
                               PAGE_SHIFT;
        MmNumberOfSystemPtes--;
        ASSERT(MmNumberOfSystemPtes > 1000);
    }

    //
    // Check if we are in a situation where the size of the paged pool
    // is so large that it overflows into nonpaged pool
    //
    if (MmSizeOfPagedPoolInBytes >
        ((ULONG_PTR)MmNonPagedSystemStart - (ULONG_PTR)MmPagedPoolStart))
    {
        //
        // We need some recalculations here
        //
        DPRINT1("Paged pool is too big!\n");
    }

    //
    // Normally, the PFN database should start after the loader images.
    // This is already the case in ReactOS, but for now we want to co-exist
    // with the old memory manager, so we'll create a "Shadow PFN Database"
    // instead, and arbitrarily start it at 0xB0000000.
    //
    MmPfnDatabase = (PVOID)0xB0000000;
    ASSERT(((ULONG_PTR)MmPfnDatabase & (PDE_MAPPED_VA - 1)) == 0);

    //
    // Nonpaged pool comes after the PFN database
    //
    MmNonPagedPoolStart = (PVOID)((ULONG_PTR)MmPfnDatabase +
                                  (MxPfnAllocation << PAGE_SHIFT));

    //
    // Now we actually need to get this many physical pages. The initial nonpaged
    // pool is also physically contiguous (the expansion is not)
    //
    PageFrameIndex = MxGetNextPage(MxPfnAllocation +
                                   (MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT));
    ASSERT(PageFrameIndex != 0);
    DPRINT("PFN DB PA PFN begins at: %lx\n", PageFrameIndex);
    DPRINT("NP PA PFN begins at: %lx\n", PageFrameIndex + MxPfnAllocation);
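
    //
    // (These pages are carved out of the largest free descriptor, MxFreeDescriptor,
    // which is why its original value was saved in MxOldFreeDescriptor above and is
    // restored before the memory blocks are created further below.)
    //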

    /* Convert nonpaged pool size from bytes to pages */
    MmMaximumNonPagedPoolInPages = MmMaximumNonPagedPoolInBytes >> PAGE_SHIFT;

    //
    // Now we need some pages to create the page tables for the nonpaged system
    // VA region, which includes the system PTEs and the nonpaged pool expansion
    //
    StartPde = MiAddressToPde(MmNonPagedSystemStart);
    EndPde = MiAddressToPde((PVOID)((ULONG_PTR)MmNonPagedPoolEnd - 1));
    while (StartPde <= EndPde)
    {
        //
        // Sanity check
        //
        ASSERT(StartPde->u.Hard.Valid == 0);

        //
        // Get a page
        //
        TempPde.u.Hard.PageFrameNumber = MxGetNextPage(1);
        ASSERT(TempPde.u.Hard.Valid == 1);
        *StartPde = TempPde;

        //
        // Zero out the page table
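        // (MiPteToAddress on a PDE address goes through the recursive self-map, so
        // it returns the VA at which the newly mapped page table can be written)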
        //
        PointerPte = MiPteToAddress(StartPde);
        RtlZeroMemory(PointerPte, PAGE_SIZE);

        //
        // Next
        //
        StartPde++;
    }

    //
    // Now we need pages for the page tables which will map the initial nonpaged pool
    //
    StartPde = MiAddressToPde(MmPfnDatabase);
    EndPde = MiAddressToPde((PVOID)((ULONG_PTR)MmNonPagedPoolStart +
                                    MmSizeOfNonPagedPoolInBytes - 1));
    while (StartPde <= EndPde)
    {
        //
        // Sanity check
        //
        ASSERT(StartPde->u.Hard.Valid == 0);

        //
        // Get a page
        //
        TempPde.u.Hard.PageFrameNumber = MxGetNextPage(1);
        ASSERT(TempPde.u.Hard.Valid == 1);
        *StartPde = TempPde;

        //
        // Zero out the page table
        //
        PointerPte = MiPteToAddress(StartPde);
        RtlZeroMemory(PointerPte, PAGE_SIZE);

        //
        // Next
        //
        StartPde++;
    }

    //
    // Now remember where the expansion starts
    //
    MmNonPagedPoolExpansionStart = NonPagedPoolExpansionVa;

    //
    // Last step is to actually map the nonpaged pool
    //
    PointerPte = MiAddressToPte(MmNonPagedPoolStart);
    LastPte = MiAddressToPte((PVOID)((ULONG_PTR)MmNonPagedPoolStart +
                                     MmSizeOfNonPagedPoolInBytes - 1));
    while (PointerPte <= LastPte)
    {
        //
        // Use one of our contiguous pages
        //
        TempPte.u.Hard.PageFrameNumber = PageFrameIndex++;
        ASSERT(PointerPte->u.Hard.Valid == 0);
        ASSERT(TempPte.u.Hard.Valid == 1);
        *PointerPte++ = TempPte;
    }

    //
    // Sanity check: make sure we have properly defined the system PTE space
    //
    ASSERT(MiAddressToPte(MmNonPagedSystemStart) <
           MiAddressToPte(MmNonPagedPoolExpansionStart));

    /* Now go ahead and initialize the nonpaged pool */
    MiInitializeNonPagedPool();
    MiInitializeNonPagedPoolThresholds();

    /* Map the PFN database pages */
    MiMapPfnDatabase(LoaderBlock);

    /* Initialize the color tables */
    MiInitializeColorTables();

    /* ReactOS Stuff */
    extern KEVENT ZeroPageThreadEvent;
    KeInitializeEvent(&ZeroPageThreadEvent, NotificationEvent, TRUE);

    /* Build the PFN Database */
    MiInitializePfnDatabase(LoaderBlock);
    MmInitializeBalancer(MmAvailablePages, 0);

    //
    // Reset the descriptor back so we can create the correct memory blocks
    //
    *MxFreeDescriptor = MxOldFreeDescriptor;

    //
    // Initialize the nonpaged pool
    //
    InitializePool(NonPagedPool, 0);

    //
    // We PDE-aligned the nonpaged system start VA, so haul some extra PTEs!
    //
    PointerPte = MiAddressToPte(MmNonPagedSystemStart);
    OldCount = MmNumberOfSystemPtes;
    MmNumberOfSystemPtes = MiAddressToPte(MmNonPagedPoolExpansionStart) -
                           PointerPte;
    MmNumberOfSystemPtes--;
    DPRINT("Final System PTE count: %d (%d bytes)\n",
           MmNumberOfSystemPtes, MmNumberOfSystemPtes * PAGE_SIZE);

    //
    // Create the system PTE space
    //
    MiInitializeSystemPtes(PointerPte, MmNumberOfSystemPtes, SystemPteSpace);

    /* Get the PDE for hyperspace */
    StartPde = MiAddressToPde(HYPER_SPACE);

    /* Lock the PFN database */
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    /* Allocate a page for hyperspace and create it */
    PageFrameIndex = MiRemoveAnyPage(0);
    TempPde.u.Hard.PageFrameNumber = PageFrameIndex;
    TempPde.u.Hard.Global = FALSE; // Hyperspace is local!
    ASSERT(StartPde->u.Hard.Valid == 0);
    ASSERT(TempPde.u.Hard.Valid == 1);
    *StartPde = TempPde;

    /* Flush the TLB */
    KeFlushCurrentTb();

    /* Release the lock */
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    //
    // Zero out the page table now
    //
    PointerPte = MiAddressToPte(HYPER_SPACE);
    RtlZeroMemory(PointerPte, PAGE_SIZE);
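
    //
    // (The table can only be zeroed after the PDE above is made valid, since the
    // page table's VA is reached through the recursive self-map.)
    //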

    //
    // Setup the mapping PTEs
    //
    MmFirstReservedMappingPte = MiAddressToPte(MI_MAPPING_RANGE_START);
    MmLastReservedMappingPte = MiAddressToPte(MI_MAPPING_RANGE_END);
    MmFirstReservedMappingPte->u.Hard.PageFrameNumber = MI_HYPERSPACE_PTES;
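
    //
    // (The first reserved mapping PTE is not used for an actual mapping: its
    // PageFrameNumber field serves as a counter of hyperspace mapping PTEs that
    // are still available, initialized here to MI_HYPERSPACE_PTES.)
    //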

    //
    // Reserve system PTEs for zeroing PTEs and clear them
    //
    MiFirstReservedZeroingPte = MiReserveSystemPtes(MI_ZERO_PTES,
                                                    SystemPteSpace);
    RtlZeroMemory(MiFirstReservedZeroingPte, MI_ZERO_PTES * sizeof(MMPTE));

    //
    // Set the counter to maximum to boot with
    //
    MiFirstReservedZeroingPte->u.Hard.PageFrameNumber = MI_ZERO_PTES - 1;

    return STATUS_SUCCESS;
}

/* EOF */