[NTOS]: Add support for handling a very specific type of user-fault on ARM3 memory...
/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/i386/init.c
 * PURPOSE:         ARM Memory Manager Initialization for x86
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */

/* INCLUDES *******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#line 15 "ARM³::INIT:X86"
#define MODULE_INVOLVED_IN_ARM3
#include "../../ARM3/miarm.h"

/* GLOBALS ********************************************************************/

/* Template PTE and PDE for a kernel page */
MMPTE ValidKernelPde = {.u.Hard.Valid = 1, .u.Hard.Write = 1, .u.Hard.Dirty = 1, .u.Hard.Accessed = 1};
MMPTE ValidKernelPte = {.u.Hard.Valid = 1, .u.Hard.Write = 1, .u.Hard.Dirty = 1, .u.Hard.Accessed = 1};

/* Template PDE for a demand-zero page */
MMPDE DemandZeroPde = {.u.Long = (MM_READWRITE << MM_PTE_SOFTWARE_PROTECTION_BITS)};

/* PRIVATE FUNCTIONS **********************************************************/

VOID
NTAPI
MiComputeNonPagedPoolVa(IN ULONG FreePages)
{
    PFN_NUMBER PoolPages;

    /* Check if this is a machine with less than 256MB of RAM, and no override */
    if ((MmNumberOfPhysicalPages <= MI_MIN_PAGES_FOR_NONPAGED_POOL_TUNING) &&
        !(MmSizeOfNonPagedPoolInBytes))
    {
        /* Force the non paged pool to be 2MB so we can reduce RAM usage */
        MmSizeOfNonPagedPoolInBytes = 2 * _1MB;
    }

    /* Hyperspace ends here */
    MmHyperSpaceEnd = (PVOID)((ULONG_PTR)MmSystemCacheWorkingSetList - 1);

    /* Check if the user gave a ridiculously large nonpaged pool RAM size */
    if ((MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT) > (FreePages * 7 / 8))
    {
        /* More than 7/8ths of RAM was dedicated to nonpaged pool, ignore! */
        MmSizeOfNonPagedPoolInBytes = 0;
    }

    /* Check if no registry setting was set, or if the setting was too low */
    if (MmSizeOfNonPagedPoolInBytes < MmMinimumNonPagedPoolSize)
    {
        /* Start with the minimum (256 KB) and add 32 KB for each MB above 4 */
        MmSizeOfNonPagedPoolInBytes = MmMinimumNonPagedPoolSize;
        MmSizeOfNonPagedPoolInBytes += (FreePages - 1024) / 256 * MmMinAdditionNonPagedPoolPerMb;
    }
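
    /*
     * Worked example (illustrative, not part of the original source): with
     * 4 KB pages and the 256 KB / 32 KB values quoted above, a machine with
     * 64 MB of free RAM has FreePages = 16384, so the initial pool becomes
     * 256 KB + ((16384 - 1024) / 256) * 32 KB = 256 KB + 60 * 32 KB = 2176 KB.
     */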

    /* Check if the registry setting or our dynamic calculation was too high */
    if (MmSizeOfNonPagedPoolInBytes > MI_MAX_INIT_NONPAGED_POOL_SIZE)
    {
        /* Set it to the maximum */
        MmSizeOfNonPagedPoolInBytes = MI_MAX_INIT_NONPAGED_POOL_SIZE;
    }

    /* Check if a percentage cap was set through the registry */
    if (MmMaximumNonPagedPoolPercent) UNIMPLEMENTED;

    /* Page-align the nonpaged pool size */
    MmSizeOfNonPagedPoolInBytes &= ~(PAGE_SIZE - 1);

    /* Now, check if there was a registry size for the maximum size */
    if (!MmMaximumNonPagedPoolInBytes)
    {
        /* Start with the default (1MB) */
        MmMaximumNonPagedPoolInBytes = MmDefaultMaximumNonPagedPool;

        /* Add space for PFN database */
        MmMaximumNonPagedPoolInBytes += (ULONG)
            PAGE_ALIGN((MmHighestPhysicalPage + 1) * sizeof(MMPFN));

        /* Check if the machine has more than 512MB of free RAM */
        if (FreePages >= 0x1F000)
        {
            /* Add 200KB for each MB above 4 */
            MmMaximumNonPagedPoolInBytes += (FreePages - 1024) / 256 *
                                            (MmMaxAdditionNonPagedPoolPerMb / 2);
            if (MmMaximumNonPagedPoolInBytes < MI_MAX_NONPAGED_POOL_SIZE)
            {
                /* Make it at least 128MB since this machine has a lot of RAM */
                MmMaximumNonPagedPoolInBytes = MI_MAX_NONPAGED_POOL_SIZE;
            }
        }
        else
        {
            /* Add 400KB for each MB above 4 */
            MmMaximumNonPagedPoolInBytes += (FreePages - 1024) / 256 *
                                            MmMaxAdditionNonPagedPoolPerMb;
        }
    }
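
    /*
     * Illustrative numbers (not from the original source, assuming 4 KB pages):
     * the 0x1F000-page threshold above is 126976 pages, i.e. roughly 496 MB of
     * free RAM. Below it, a 256 MB machine (FreePages around 65536) would get
     * about 1 MB + PFN space + ((65536 - 1024) / 256) * 400 KB, roughly 100 MB
     * of maximum nonpaged pool; above it, the per-MB addition is halved but the
     * result is raised to at least MI_MAX_NONPAGED_POOL_SIZE (128 MB).
     */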

    /* Make sure there's at least 16 pages + the PFN available for expansion */
    PoolPages = MmSizeOfNonPagedPoolInBytes + (PAGE_SIZE * 16) +
                ((ULONG)PAGE_ALIGN(MmHighestPhysicalPage + 1) * sizeof(MMPFN));
    if (MmMaximumNonPagedPoolInBytes < PoolPages)
    {
        /* The maximum should be at least high enough to cover all the above */
        MmMaximumNonPagedPoolInBytes = PoolPages;
    }

    /* Systems with 2GB of kernel address space get double the size */
    PoolPages = MI_MAX_NONPAGED_POOL_SIZE * 2;

    /* On the other hand, make sure that PFN + nonpaged pool doesn't get too big */
    if (MmMaximumNonPagedPoolInBytes > PoolPages)
    {
        /* Trim it down to the maximum architectural limit (256MB) */
        MmMaximumNonPagedPoolInBytes = PoolPages;
    }

    /* Check if this is a system with > 128MB of non paged pool */
    if (MmMaximumNonPagedPoolInBytes > MI_MAX_NONPAGED_POOL_SIZE)
    {
        /* Check if the initial size is less than the extra 128MB boost */
        if (MmSizeOfNonPagedPoolInBytes < (MmMaximumNonPagedPoolInBytes -
                                           MI_MAX_NONPAGED_POOL_SIZE))
        {
            /* FIXME: Should check if the initial pool can be expanded */

            /* Assume no expansion possible, check if the maximum is too large */
            if (MmMaximumNonPagedPoolInBytes > (MmSizeOfNonPagedPoolInBytes +
                                                MI_MAX_NONPAGED_POOL_SIZE))
            {
                /* Set it to the initial value plus the boost */
                MmMaximumNonPagedPoolInBytes = MmSizeOfNonPagedPoolInBytes +
                                               MI_MAX_NONPAGED_POOL_SIZE;
            }
        }
    }
}

NTSTATUS
NTAPI
MiInitMachineDependent(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
{
    PLIST_ENTRY NextEntry;
    PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
    ULONG FreePages = 0;
    PFN_NUMBER PageFrameIndex;
    PMMPTE StartPde, EndPde, PointerPte, LastPte;
    MMPTE TempPde, TempPte;
    PVOID NonPagedPoolExpansionVa;
    ULONG OldCount;
    KIRQL OldIrql;

    /* Check for kernel stack size that's too big */
    if (MmLargeStackSize > (KERNEL_LARGE_STACK_SIZE / _1KB))
    {
        /* Sanitize to default value */
        MmLargeStackSize = KERNEL_LARGE_STACK_SIZE;
    }
    else
    {
        /* Take the registry setting, and convert it into bytes */
        MmLargeStackSize *= _1KB;

        /* Now align it to a page boundary */
        MmLargeStackSize = PAGE_ROUND_UP(MmLargeStackSize);

        /* Sanity checks */
        ASSERT(MmLargeStackSize <= KERNEL_LARGE_STACK_SIZE);
        ASSERT((MmLargeStackSize & (PAGE_SIZE - 1)) == 0);

        /* Make sure it's not too low */
        if (MmLargeStackSize < KERNEL_STACK_SIZE) MmLargeStackSize = KERNEL_STACK_SIZE;
    }
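
    /*
     * Illustrative example (not from the original source): MmLargeStackSize
     * arrives from the registry in KB. Assuming the usual x86 values of
     * KERNEL_LARGE_STACK_SIZE = 60 KB and KERNEL_STACK_SIZE = 12 KB, a registry
     * value of 80 exceeds 60 and is reset to the full 60 KB, while a value of
     * 16 becomes 16 KB (already page-aligned and above the 12 KB floor).
     */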

    /* Check for global bit */
#if 0
    if (KeFeatureBits & KF_GLOBAL_PAGE)
    {
        /* Set it on the template PTE and PDE */
        ValidKernelPte.u.Hard.Global = TRUE;
        ValidKernelPde.u.Hard.Global = TRUE;
    }
#endif
    /* Now templates are ready */
    TempPte = ValidKernelPte;
    TempPde = ValidKernelPde;

    //
    // Set CR3 for the system process
    //
    PointerPte = MiAddressToPde(PTE_BASE);
    PageFrameIndex = PFN_FROM_PTE(PointerPte) << PAGE_SHIFT;
    PsGetCurrentProcess()->Pcb.DirectoryTableBase[0] = PageFrameIndex;
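
    //
    // Note (explanatory, not in the original source): on the non-PAE x86
    // layout the PDE for PTE_BASE is the recursive self-map entry, so its
    // page frame number is the page directory itself. Shifting that PFN left
    // by PAGE_SHIFT yields the physical address of the page directory, which
    // is exactly what CR3 (DirectoryTableBase[0]) must hold.
    //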

    //
    // Blow away user-mode
    //
    StartPde = MiAddressToPde(0);
    EndPde = MiAddressToPde(KSEG0_BASE);
    RtlZeroMemory(StartPde, (EndPde - StartPde) * sizeof(MMPTE));
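
    //
    // Note (explanatory, not in the original source): this wipes every PDE
    // below KSEG0_BASE, i.e. the user-mode half of the address space that the
    // boot loader may still have mapped. With 4 MB per PDE and a 2 GB user
    // space that is 512 directory entries, or 2 KB of zeroes on non-PAE x86.
    //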

    //
    // Loop the memory descriptors
    //
    NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
    while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
    {
        //
        // Get the memory block
        //
        MdBlock = CONTAINING_RECORD(NextEntry,
                                    MEMORY_ALLOCATION_DESCRIPTOR,
                                    ListEntry);

        //
        // Skip invisible memory
        //
        if ((MdBlock->MemoryType != LoaderFirmwarePermanent) &&
            (MdBlock->MemoryType != LoaderSpecialMemory) &&
            (MdBlock->MemoryType != LoaderHALCachedMemory) &&
            (MdBlock->MemoryType != LoaderBBTMemory))
        {
            //
            // Check if BURNMEM was used
            //
            if (MdBlock->MemoryType != LoaderBad)
            {
                //
                // Count this in the total of pages
                //
                MmNumberOfPhysicalPages += MdBlock->PageCount;
            }

            //
            // Check if this is the new lowest page
            //
            if (MdBlock->BasePage < MmLowestPhysicalPage)
            {
                //
                // Update the lowest page
                //
                MmLowestPhysicalPage = MdBlock->BasePage;
            }

            //
            // Check if this is the new highest page
            //
            PageFrameIndex = MdBlock->BasePage + MdBlock->PageCount;
            if (PageFrameIndex > MmHighestPhysicalPage)
            {
                //
                // Update the highest page
                //
                MmHighestPhysicalPage = PageFrameIndex - 1;
            }

            //
            // Check if this is free memory
            //
            if ((MdBlock->MemoryType == LoaderFree) ||
                (MdBlock->MemoryType == LoaderLoadedProgram) ||
                (MdBlock->MemoryType == LoaderFirmwareTemporary) ||
                (MdBlock->MemoryType == LoaderOsloaderStack))
            {
                //
                // Check if this is the largest memory descriptor
                //
                if (MdBlock->PageCount > FreePages)
                {
                    //
                    // For now, it is
                    //
                    MxFreeDescriptor = MdBlock;
                }

                //
                // More free pages
                //
                FreePages += MdBlock->PageCount;
            }
        }

        //
        // Keep going
        //
        NextEntry = MdBlock->ListEntry.Flink;
    }

    //
    // Save original values of the free descriptor, since it'll be
    // altered by early allocations
    //
    MxOldFreeDescriptor = *MxFreeDescriptor;

    /* Compute non paged pool limits and size */
    MiComputeNonPagedPoolVa(FreePages);

    /* Compute color information (L2 cache-separated paging lists) */
    MiComputeColorInformation();

    //
    // Calculate the number of bytes for the PFN database, double it for ARM3,
    // then add the color tables and convert to pages
    //
    MxPfnAllocation = (MmHighestPhysicalPage + 1) * sizeof(MMPFN);
    //MxPfnAllocation <<= 1;
    MxPfnAllocation += (MmSecondaryColors * sizeof(MMCOLOR_TABLES) * 2);
    MxPfnAllocation >>= PAGE_SHIFT;

    //
    // We have to add one to the count here, because in the process of
    // shifting down to the page size, we actually ended up getting the
    // lower aligned size (so say, 0x5FFFF bytes is now 0x5F pages).
    // Later on, we'll shift this number back into bytes, which would cause
    // us to end up with only 0x5F000 bytes -- when we actually want to have
    // 0x60000 bytes.
    //
    MxPfnAllocation++;
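
    //
    // Worked example (illustrative, not in the original source, assuming 4 KB
    // pages): on a 128 MB machine MmHighestPhysicalPage is 0x7FFF, so the PFN
    // entries need 0x8000 * sizeof(MMPFN) bytes, plus two MMCOLOR_TABLES
    // arrays of MmSecondaryColors entries each; shifting right by PAGE_SHIFT
    // truncates to whole pages, and the increment above restores the page
    // lost to that truncation.
    //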

    //
    // Now calculate the nonpaged pool expansion VA region
    //
    MmNonPagedPoolStart = (PVOID)((ULONG_PTR)MmNonPagedPoolEnd -
                                  MmMaximumNonPagedPoolInBytes +
                                  MmSizeOfNonPagedPoolInBytes);
    MmNonPagedPoolStart = (PVOID)PAGE_ALIGN(MmNonPagedPoolStart);
    NonPagedPoolExpansionVa = MmNonPagedPoolStart;
    DPRINT("NP Pool has been tuned to: %d bytes and %d bytes\n",
           MmSizeOfNonPagedPoolInBytes, MmMaximumNonPagedPoolInBytes);
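
    //
    // Note (explanatory, not in the original source): the value computed above
    // is the start of the *expansion* region, which spans the last
    // (MmMaximumNonPagedPoolInBytes - MmSizeOfNonPagedPoolInBytes) bytes below
    // MmNonPagedPoolEnd. It is saved in NonPagedPoolExpansionVa because
    // MmNonPagedPoolStart is recomputed further down, once the initial
    // (physically contiguous) pool is placed after the PFN database.
    //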

    //
    // Now calculate the nonpaged system VA region, which includes the
    // nonpaged pool expansion (above) and the system PTEs. Note that it is
    // then aligned to a PDE boundary (4MB).
    //
    MmNonPagedSystemStart = (PVOID)((ULONG_PTR)MmNonPagedPoolStart -
                                    (MmNumberOfSystemPtes + 1) * PAGE_SIZE);
    MmNonPagedSystemStart = (PVOID)((ULONG_PTR)MmNonPagedSystemStart &
                                    ~(PDE_MAPPED_VA - 1));

    //
    // Don't let it go below the minimum
    //
    if (MmNonPagedSystemStart < (PVOID)0xEB000000)
    {
        //
        // This is a hard-coded limit in the Windows NT address space
        //
        MmNonPagedSystemStart = (PVOID)0xEB000000;

        //
        // Reduce the amount of system PTEs to reach this point
        //
        MmNumberOfSystemPtes = ((ULONG_PTR)MmNonPagedPoolStart -
                                (ULONG_PTR)MmNonPagedSystemStart) >>
                                PAGE_SHIFT;
        MmNumberOfSystemPtes--;
        ASSERT(MmNumberOfSystemPtes > 1000);
    }

    //
    // Check if we are in a situation where the size of the paged pool
    // is so large that it overflows into nonpaged pool
    //
    if (MmSizeOfPagedPoolInBytes >
        ((ULONG_PTR)MmNonPagedSystemStart - (ULONG_PTR)MmPagedPoolStart))
    {
        //
        // We need some recalculations here
        //
        DPRINT1("Paged pool is too big!\n");
    }

    //
    // Normally, the PFN database should start after the loader images.
    // This is already the case in ReactOS, but for now we want to co-exist
    // with the old memory manager, so we'll create a "Shadow PFN Database"
    // instead, and arbitrarily start it at 0xB0000000.
    //
    MmPfnDatabase = (PVOID)0xB0000000;
    ASSERT(((ULONG_PTR)MmPfnDatabase & (PDE_MAPPED_VA - 1)) == 0);

    //
    // Non paged pool comes after the PFN database
    //
    MmNonPagedPoolStart = (PVOID)((ULONG_PTR)MmPfnDatabase +
                                  (MxPfnAllocation << PAGE_SHIFT));

    //
    // Now we actually need to get this many physical pages. Nonpaged pool
    // is actually also physically contiguous (but not the expansion)
    //
    PageFrameIndex = MxGetNextPage(MxPfnAllocation +
                                   (MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT));
    ASSERT(PageFrameIndex != 0);
    DPRINT("PFN DB PA PFN begins at: %lx\n", PageFrameIndex);
    DPRINT("NP PA PFN begins at: %lx\n", PageFrameIndex + MxPfnAllocation);

    /* Convert nonpaged pool size from bytes to pages */
    MmMaximumNonPagedPoolInPages = MmMaximumNonPagedPoolInBytes >> PAGE_SHIFT;

    //
    // Now we need some pages to create the page tables for the NP system VA
    // which includes system PTEs and expansion NP
    //
    StartPde = MiAddressToPde(MmNonPagedSystemStart);
    EndPde = MiAddressToPde((PVOID)((ULONG_PTR)MmNonPagedPoolEnd - 1));
    while (StartPde <= EndPde)
    {
        //
        // Get a page
        //
        TempPde.u.Hard.PageFrameNumber = MxGetNextPage(1);
        MI_WRITE_VALID_PTE(StartPde, TempPde);

        //
        // Zero out the page table
        //
        PointerPte = MiPteToAddress(StartPde);
        RtlZeroMemory(PointerPte, PAGE_SIZE);

        //
        // Next
        //
        StartPde++;
    }
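
    //
    // Note (explanatory, not in the original source): MiPteToAddress() applied
    // to a PDE returns the virtual address of the page table that the PDE
    // maps, courtesy of the recursive self-map, so the RtlZeroMemory above
    // clears the freshly allocated page table through its own mapping.
    //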

    //
    // Now we need pages for the page tables which will map initial NP
    //
    StartPde = MiAddressToPde(MmPfnDatabase);
    EndPde = MiAddressToPde((PVOID)((ULONG_PTR)MmNonPagedPoolStart +
                                    MmSizeOfNonPagedPoolInBytes - 1));
    while (StartPde <= EndPde)
    {
        //
        // Get a page
        //
        TempPde.u.Hard.PageFrameNumber = MxGetNextPage(1);
        MI_WRITE_VALID_PTE(StartPde, TempPde);

        //
        // Zero out the page table
        //
        PointerPte = MiPteToAddress(StartPde);
        RtlZeroMemory(PointerPte, PAGE_SIZE);

        //
        // Next
        //
        StartPde++;
    }

    //
    // Now remember where the expansion starts
    //
    MmNonPagedPoolExpansionStart = NonPagedPoolExpansionVa;

    //
    // Last step is to actually map the nonpaged pool
    //
    PointerPte = MiAddressToPte(MmNonPagedPoolStart);
    LastPte = MiAddressToPte((PVOID)((ULONG_PTR)MmNonPagedPoolStart +
                                     MmSizeOfNonPagedPoolInBytes - 1));
    while (PointerPte <= LastPte)
    {
        //
        // Use one of our contiguous pages
        //
        TempPte.u.Hard.PageFrameNumber = PageFrameIndex++;
        MI_WRITE_VALID_PTE(PointerPte++, TempPte);
    }

    //
    // Sanity check: make sure we have properly defined the system PTE space
    //
    ASSERT(MiAddressToPte(MmNonPagedSystemStart) <
           MiAddressToPte(MmNonPagedPoolExpansionStart));

    /* Now go ahead and initialize the nonpaged pool */
    MiInitializeNonPagedPool();
    MiInitializeNonPagedPoolThresholds();

    /* Map the PFN database pages */
    MiMapPfnDatabase(LoaderBlock);

    /* Initialize the color tables */
    MiInitializeColorTables();

    /* ReactOS Stuff */
    extern KEVENT ZeroPageThreadEvent;
    KeInitializeEvent(&ZeroPageThreadEvent, NotificationEvent, TRUE);

    /* Build the PFN Database */
    MiInitializePfnDatabase(LoaderBlock);
    MmInitializeBalancer(MmAvailablePages, 0);

    //
    // Reset the descriptor back so we can create the correct memory blocks
    //
    *MxFreeDescriptor = MxOldFreeDescriptor;

    //
    // Initialize the nonpaged pool
    //
    InitializePool(NonPagedPool, 0);

    //
    // We PDE-aligned the nonpaged system start VA, so haul some extra PTEs!
    //
    PointerPte = MiAddressToPte(MmNonPagedSystemStart);
    OldCount = MmNumberOfSystemPtes;
    MmNumberOfSystemPtes = MiAddressToPte(MmNonPagedPoolExpansionStart) -
                           PointerPte;
    MmNumberOfSystemPtes--;
    DPRINT("Final System PTE count: %d (%d bytes)\n",
           MmNumberOfSystemPtes, MmNumberOfSystemPtes * PAGE_SIZE);

    //
    // Create the system PTE space
    //
    MiInitializeSystemPtes(PointerPte, MmNumberOfSystemPtes, SystemPteSpace);

    /* Get the PDE for hyperspace */
    StartPde = MiAddressToPde(HYPER_SPACE);

    /* Lock PFN database */
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    /* Allocate a page for hyperspace and create it */
    PageFrameIndex = MiRemoveAnyPage(0);
    TempPde.u.Hard.PageFrameNumber = PageFrameIndex;
    TempPde.u.Hard.Global = FALSE; // Hyperspace is local!
    MI_WRITE_VALID_PTE(StartPde, TempPde);

    /* Flush the TLB */
    KeFlushCurrentTb();

    /* Release the lock */
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    //
    // Zero out the page table now
    //
    PointerPte = MiAddressToPte(HYPER_SPACE);
    RtlZeroMemory(PointerPte, PAGE_SIZE);

    //
    // Setup the mapping PTEs
    //
    MmFirstReservedMappingPte = MiAddressToPte(MI_MAPPING_RANGE_START);
    MmLastReservedMappingPte = MiAddressToPte(MI_MAPPING_RANGE_END);
    MmFirstReservedMappingPte->u.Hard.PageFrameNumber = MI_HYPERSPACE_PTES;
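
    //
    // Note (explanatory, not in the original source): the hyperspace mapping
    // PTEs are not mapped yet; the PageFrameNumber field of the first reserved
    // mapping PTE is (re)used here as a counter of how many of the
    // MI_HYPERSPACE_PTES mapping slots are still available.
    //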

    //
    // Reserve system PTEs for zeroing PTEs and clear them
    //
    MiFirstReservedZeroingPte = MiReserveSystemPtes(MI_ZERO_PTES,
                                                    SystemPteSpace);
    RtlZeroMemory(MiFirstReservedZeroingPte, MI_ZERO_PTES * sizeof(MMPTE));

    //
    // Set the counter to maximum to boot with
    //
    MiFirstReservedZeroingPte->u.Hard.PageFrameNumber = MI_ZERO_PTES - 1;
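
    //
    // Note (explanatory, not in the original source): as with the hyperspace
    // mapping PTEs above, the first zeroing PTE is not a real mapping; its
    // PageFrameNumber field serves as the count of zeroing PTEs currently
    // available to the zero-page mapping code.
    //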

    return STATUS_SUCCESS;
}

/* EOF */