Hopefully fail to break anything in the process of syncing with trunk (r47786)
[reactos.git] / ntoskrnl / mm / freelist.c
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/mm/freelist.c
5 * PURPOSE: Handle the list of free physical pages
6 *
7 * PROGRAMMERS: David Welch (welch@cwcom.net)
8 * Robert Bergkvist
9 */
10
11 /* INCLUDES ****************************************************************/
12
13 #include <ntoskrnl.h>
14 #define NDEBUG
15 #include <debug.h>
16
17 #if defined (ALLOC_PRAGMA)
18 #pragma alloc_text(INIT, MmInitializePageList)
19 #endif
20
21 #define MODULE_INVOLVED_IN_ARM3
22 #include "ARM3/miarm.h"
23
24 /* GLOBALS ****************************************************************/
25
26 //
27 //
28 // ReactOS to NT Physical Page Descriptor Entry Legacy Mapping Definitions
29 //
30 // REACTOS NT
31 //
32 #define RmapListHead AweReferenceCount
33 #define PHYSICAL_PAGE MMPFN
34 #define PPHYSICAL_PAGE PMMPFN
35
36 PPHYSICAL_PAGE MmPfnDatabase;
37
38 PFN_NUMBER MmAvailablePages;
39 PFN_NUMBER MmResidentAvailablePages;
40 PFN_NUMBER MmResidentAvailableAtInit;
41
42 SIZE_T MmTotalCommitLimit;
43 SIZE_T MmTotalCommittedPages;
44 SIZE_T MmSharedCommit;
45 SIZE_T MmDriverCommit;
46 SIZE_T MmProcessCommit;
47 SIZE_T MmPagedPoolCommit;
48 SIZE_T MmPeakCommitment;
49 SIZE_T MmtotalCommitLimitMaximum;
50
51 KEVENT ZeroPageThreadEvent;
52 static BOOLEAN ZeroPageThreadShouldTerminate = FALSE;
53 static RTL_BITMAP MiUserPfnBitMap;
54
55 /* FUNCTIONS *************************************************************/
56
57 VOID
58 NTAPI
59 MiInitializeUserPfnBitmap(VOID)
60 {
61 PVOID Bitmap;
62
63 /* Allocate enough buffer for the PFN bitmap and align it on 32-bits */
64 Bitmap = ExAllocatePoolWithTag(NonPagedPool,
65 (((MmHighestPhysicalPage + 1) + 31) / 32) * 4,
66 ' mM');
67 ASSERT(Bitmap);
68
69 /* Initialize it and clear all the bits to begin with */
70 RtlInitializeBitMap(&MiUserPfnBitMap,
71 Bitmap,
72 MmHighestPhysicalPage + 1);
73 RtlClearAllBits(&MiUserPfnBitMap);
74 }
75
76 PFN_TYPE
77 NTAPI
78 MmGetLRUFirstUserPage(VOID)
79 {
80 ULONG Position;
81 KIRQL OldIrql;
82
83 /* Find the first user page */
84 OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
85 Position = RtlFindSetBits(&MiUserPfnBitMap, 1, 0);
86 KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
87 if (Position == 0xFFFFFFFF) return 0;
88
89 /* Return it */
90 return Position;
91 }
92
93 VOID
94 NTAPI
95 MmInsertLRULastUserPage(PFN_TYPE Pfn)
96 {
97 KIRQL OldIrql;
98
99 /* Set the page as a user page */
100 OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
101 RtlSetBit(&MiUserPfnBitMap, Pfn);
102 KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
103 }
104
105 PFN_TYPE
106 NTAPI
107 MmGetLRUNextUserPage(PFN_TYPE PreviousPfn)
108 {
109 ULONG Position;
110 KIRQL OldIrql;
111
112 /* Find the next user page */
113 OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
114 Position = RtlFindSetBits(&MiUserPfnBitMap, 1, PreviousPfn + 1);
115 KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
116 if (Position == 0xFFFFFFFF) return 0;
117
118 /* Return it */
119 return Position;
120 }
121
VOID
NTAPI
MmRemoveLRUUserPage(PFN_TYPE Page)
{
    /*
     * Unset the page as a user page.
     * NOTE(review): unlike MmInsertLRULastUserPage, no PFN lock is taken
     * here -- presumably callers already hold it; verify at call sites.
     */
    RtlClearBit(&MiUserPfnBitMap, Page);
}
129
130 BOOLEAN
131 NTAPI
132 MiIsPfnFree(IN PMMPFN Pfn1)
133 {
134 /* Must be a free or zero page, with no references, linked */
135 return ((Pfn1->u3.e1.PageLocation <= StandbyPageList) &&
136 (Pfn1->u1.Flink) &&
137 (Pfn1->u2.Blink) &&
138 !(Pfn1->u3.e2.ReferenceCount));
139 }
140
141 BOOLEAN
142 NTAPI
143 MiIsPfnInUse(IN PMMPFN Pfn1)
144 {
145 /* Standby list or higher, unlinked, and with references */
146 return !MiIsPfnFree(Pfn1);
147 }
148
/*
 * Allocate physical pages straight off the zeroed/free lists and build an
 * MDL describing them. May return fewer pages than requested (down to 1);
 * callers must check Mdl->ByteCount. Returns NULL if no MDL or no pages
 * could be obtained.
 *
 * NOTE(review): the CacheAttribute and MdlFlags parameters are accepted
 * but never read in this implementation -- confirm whether that is
 * intentional at this revision.
 */
PMDL
NTAPI
MiAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress,
                      IN PHYSICAL_ADDRESS HighAddress,
                      IN PHYSICAL_ADDRESS SkipBytes,
                      IN SIZE_T TotalBytes,
                      IN MI_PFN_CACHE_ATTRIBUTE CacheAttribute,
                      IN ULONG MdlFlags)
{
    PMDL Mdl;
    PFN_NUMBER PageCount, LowPage, HighPage, SkipPages, PagesFound = 0, Page;
    PPFN_NUMBER MdlPage, LastMdlPage;
    KIRQL OldIrql;
    PPHYSICAL_PAGE Pfn1;
    INT LookForZeroedPages;
    ASSERT (KeGetCurrentIrql() <= APC_LEVEL);

    //
    // Convert the low address into a PFN
    //
    LowPage = (PFN_NUMBER)(LowAddress.QuadPart >> PAGE_SHIFT);

    //
    // Convert, and normalize, the high address into a PFN
    //
    HighPage = (PFN_NUMBER)(HighAddress.QuadPart >> PAGE_SHIFT);
    if (HighPage > MmHighestPhysicalPage) HighPage = MmHighestPhysicalPage;

    //
    // Validate skipbytes and convert them into pages
    // (SkipBytes must be page-aligned; SkipPages is otherwise unused here)
    //
    if (BYTE_OFFSET(SkipBytes.LowPart)) return NULL;
    SkipPages = (PFN_NUMBER)(SkipBytes.QuadPart >> PAGE_SHIFT);

    //
    // Now compute the number of pages the MDL will cover
    //
    PageCount = (PFN_NUMBER)ADDRESS_AND_SIZE_TO_SPAN_PAGES(0, TotalBytes);
    do
    {
        //
        // Try creating an MDL for these many pages
        //
        Mdl = MmCreateMdl(NULL, NULL, PageCount << PAGE_SHIFT);
        if (Mdl) break;

        //
        // This function is not required to return the amount of pages requested
        // In fact, it can return as little as 1 page, and callers are supposed
        // to deal with this scenario. So re-attempt the allocation with less
        // pages than before (shrinking by 1/16 each try), and see if it worked
        // this time.
        //
        PageCount -= (PageCount >> 4);
    } while (PageCount);

    //
    // Wow, not even a single page was around!
    //
    if (!Mdl) return NULL;

    //
    // This is where the page array starts....
    //
    MdlPage = (PPFN_NUMBER)(Mdl + 1);

    //
    // Lock the PFN database
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    //
    // Are we looking for any pages, without discriminating?
    //
    if ((LowPage == 0) && (HighPage == MmHighestPhysicalPage))
    {
        //
        // Well then, let's go shopping
        //
        while (PagesFound < PageCount)
        {
            //
            // Do we have zeroed pages?
            //
            if (MmZeroedPageListHead.Total)
            {
                //
                // Grab a zero page
                //
                Pfn1 = MiRemoveHeadList(&MmZeroedPageListHead);
            }
            else if (MmFreePageListHead.Total)
            {
                //
                // Nope, grab an unzeroed page
                //
                Pfn1 = MiRemoveHeadList(&MmFreePageListHead);
            }
            else
            {
                //
                // This is not good... hopefully we have at least SOME pages
                //
                ASSERT(PagesFound);
                break;
            }

            //
            // Make sure it's really free
            //
            ASSERT(MiIsPfnInUse(Pfn1) == FALSE);
            ASSERT(Pfn1->u3.e2.ReferenceCount == 0);

            //
            // Allocate it and mark it as a one-page allocation
            //
            Pfn1->u3.e1.StartOfAllocation = 1;
            Pfn1->u3.e1.EndOfAllocation = 1;
            Pfn1->u3.e2.ReferenceCount = 1;

            //
            // Decrease available pages
            //
            MmAvailablePages--;

            //
            // Save it into the MDL
            //
            *MdlPage++ = MiGetPfnEntryIndex(Pfn1);
            PagesFound++;
        }
    }
    else
    {
        //
        // You want specific range of pages. We'll do this in two runs:
        // first pass takes only zeroed pages, second pass takes any free page.
        //
        for (LookForZeroedPages = 1; LookForZeroedPages >= 0; LookForZeroedPages--)
        {
            //
            // Scan the range you specified.
            // NOTE(review): the loop condition excludes HighPage itself;
            // confirm whether the high bound is meant to be inclusive.
            //
            for (Page = LowPage; Page < HighPage; Page++)
            {
                //
                // Get the PFN entry for this page
                //
                Pfn1 = MiGetPfnEntry(Page);
                ASSERT(Pfn1);

                //
                // Make sure it's free and if this is our first pass, zeroed
                //
                if (MiIsPfnInUse(Pfn1)) continue;
                if ((Pfn1->u3.e1.PageLocation == ZeroedPageList) != LookForZeroedPages) continue;

                //
                // Sanity checks
                //
                ASSERT(Pfn1->u3.e2.ReferenceCount == 0);

                //
                // Now setup the page and mark it
                //
                Pfn1->u3.e2.ReferenceCount = 1;
                Pfn1->u3.e1.StartOfAllocation = 1;
                Pfn1->u3.e1.EndOfAllocation = 1;

                //
                // Decrease available pages
                //
                MmAvailablePages--;

                //
                // Save this page into the MDL
                //
                *MdlPage++ = Page;
                if (++PagesFound == PageCount) break;
            }

            //
            // If the first pass was enough, don't keep going, otherwise, go again
            //
            if (PagesFound == PageCount) break;
        }
    }

    //
    // Now release the PFN lock
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    //
    // We might've found less pages, but not more ;-)
    //
    if (PagesFound != PageCount) ASSERT(PagesFound < PageCount);
    if (!PagesFound)
    {
        //
        // If we didn't find any pages at all, fail
        //
        DPRINT1("NO MDL PAGES!\n");
        ExFreePool(Mdl);
        return NULL;
    }

    //
    // Write out how many pages we found
    //
    Mdl->ByteCount = (ULONG)(PagesFound << PAGE_SHIFT);

    //
    // Terminate the MDL array if there's certain missing pages
    // (a -1 sentinel marks the end of the valid entries)
    //
    if (PagesFound != PageCount) *MdlPage = -1;

    //
    // Now go back and loop over all the MDL pages, zeroing any that did not
    // come off the zeroed list. This runs WITHOUT the PFN lock held.
    //
    MdlPage = (PPFN_NUMBER)(Mdl + 1);
    LastMdlPage = MdlPage + PagesFound;
    while (MdlPage < LastMdlPage)
    {
        //
        // Check if we've reached the end
        //
        Page = *MdlPage++;
        if (Page == (PFN_NUMBER)-1) break;

        //
        // Get the PFN entry for the page and check if we should zero it out
        //
        Pfn1 = MiGetPfnEntry(Page);
        ASSERT(Pfn1);
        if (Pfn1->u3.e1.PageLocation != ZeroedPageList) MiZeroPage(Page);
        Pfn1->u3.e1.PageLocation = ActiveAndValid;
    }

    //
    // We're done, mark the pages as locked (should we lock them, though???)
    //
    Mdl->Process = NULL;
    Mdl->MdlFlags |= MDL_PAGES_LOCKED;
    return Mdl;
}
393
394 VOID
395 NTAPI
396 MmDumpPfnDatabase(VOID)
397 {
398 ULONG i;
399 PPHYSICAL_PAGE Pfn1;
400 PCHAR State = "????", Type = "Unknown";
401 KIRQL OldIrql;
402 ULONG Totals[5] = {0}, FreePages = 0;
403
404 KeRaiseIrql(HIGH_LEVEL, &OldIrql);
405
406 //
407 // Loop the PFN database
408 //
409 for (i = 0; i <= MmHighestPhysicalPage; i++)
410 {
411 Pfn1 = MiGetPfnEntry(i);
412 if (!Pfn1) continue;
413
414 //
415 // Get the type
416 //
417 if (MiIsPfnInUse(Pfn1))
418 {
419 State = "Used";
420 }
421 else
422 {
423 State = "Free";
424 Type = "Free";
425 FreePages++;
426 break;
427 }
428
429 //
430 // Pretty-print the page
431 //
432 DbgPrint("0x%08p:\t%04s\t%20s\t(%02d) [%08p])\n",
433 i << PAGE_SHIFT,
434 State,
435 Type,
436 Pfn1->u3.e2.ReferenceCount,
437 Pfn1->RmapListHead);
438 }
439
440 DbgPrint("Nonpaged Pool: %d pages\t[%d KB]\n", Totals[MC_NPPOOL], (Totals[MC_NPPOOL] << PAGE_SHIFT) / 1024);
441 DbgPrint("Paged Pool: %d pages\t[%d KB]\n", Totals[MC_PPOOL], (Totals[MC_PPOOL] << PAGE_SHIFT) / 1024);
442 DbgPrint("File System Cache: %d pages\t[%d KB]\n", Totals[MC_CACHE], (Totals[MC_CACHE] << PAGE_SHIFT) / 1024);
443 DbgPrint("Process Working Set: %d pages\t[%d KB]\n", Totals[MC_USER], (Totals[MC_USER] << PAGE_SHIFT) / 1024);
444 DbgPrint("System: %d pages\t[%d KB]\n", Totals[MC_SYSTEM], (Totals[MC_SYSTEM] << PAGE_SHIFT) / 1024);
445 DbgPrint("Free: %d pages\t[%d KB]\n", FreePages, (FreePages << PAGE_SHIFT) / 1024);
446
447 KeLowerIrql(OldIrql);
448 }
449
450 VOID
451 NTAPI
452 MmSetRmapListHeadPage(PFN_TYPE Pfn, struct _MM_RMAP_ENTRY* ListHead)
453 {
454 KIRQL oldIrql;
455
456 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
457 MiGetPfnEntry(Pfn)->RmapListHead = (LONG)ListHead;
458 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
459 }
460
461 struct _MM_RMAP_ENTRY*
462 NTAPI
463 MmGetRmapListHeadPage(PFN_TYPE Pfn)
464 {
465 KIRQL oldIrql;
466 struct _MM_RMAP_ENTRY* ListHead;
467
468 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
469 ListHead = (struct _MM_RMAP_ENTRY*)MiGetPfnEntry(Pfn)->RmapListHead;
470 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
471
472 return(ListHead);
473 }
474
475 VOID
476 NTAPI
477 MmSetSavedSwapEntryPage(PFN_TYPE Pfn, SWAPENTRY SwapEntry)
478 {
479 KIRQL oldIrql;
480
481 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
482 MiGetPfnEntry(Pfn)->u1.WsIndex = SwapEntry;
483 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
484 }
485
486 SWAPENTRY
487 NTAPI
488 MmGetSavedSwapEntryPage(PFN_TYPE Pfn)
489 {
490 SWAPENTRY SwapEntry;
491 KIRQL oldIrql;
492
493 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
494 SwapEntry = MiGetPfnEntry(Pfn)->u1.WsIndex;
495 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
496
497 return(SwapEntry);
498 }
499
500 VOID
501 NTAPI
502 MmReferencePage(PFN_TYPE Pfn)
503 {
504 PPHYSICAL_PAGE Page;
505
506 DPRINT("MmReferencePage(PysicalAddress %x)\n", Pfn << PAGE_SHIFT);
507
508 if (Pfn == 0 || Pfn > MmHighestPhysicalPage)
509 {
510 return;
511 }
512
513 Page = MiGetPfnEntry(Pfn);
514 ASSERT(Page);
515
516 Page->u3.e2.ReferenceCount++;
517 }
518
519 ULONG
520 NTAPI
521 MmGetReferenceCountPage(PFN_TYPE Pfn)
522 {
523 KIRQL oldIrql;
524 ULONG RCount;
525 PPHYSICAL_PAGE Page;
526
527 DPRINT("MmGetReferenceCountPage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
528
529 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
530 Page = MiGetPfnEntry(Pfn);
531 ASSERT(Page);
532
533 RCount = Page->u3.e2.ReferenceCount;
534
535 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
536 return(RCount);
537 }
538
BOOLEAN
NTAPI
MmIsPageInUse(PFN_TYPE Pfn)
{
    /* Thin wrapper: look up the PFN entry and test whether it is in use */
    return MiIsPfnInUse(MiGetPfnEntry(Pfn));
}
545
VOID
NTAPI
MiSetConsumer(IN PFN_TYPE Pfn,
              IN ULONG Type)
{
    /*
     * Mark the page as active and valid. The consumer Type parameter is
     * currently ignored by this implementation.
     */
    MiGetPfnEntry(Pfn)->u3.e1.PageLocation = ActiveAndValid;
}
553
/*
 * Drop one reference on the given physical page. When the count hits
 * zero the page is returned to the free list, and the zero-page thread
 * is woken once enough free pages have accumulated.
 *
 * NOTE(review): no PFN lock is taken and there is no underflow guard on
 * the decrement -- presumably callers hold the lock and guarantee a
 * non-zero count; verify at call sites.
 */
VOID
NTAPI
MmDereferencePage(PFN_TYPE Pfn)
{
    PPHYSICAL_PAGE Page;

    DPRINT("MmDereferencePage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);

    Page = MiGetPfnEntry(Pfn);
    ASSERT(Page);

    Page->u3.e2.ReferenceCount--;
    if (Page->u3.e2.ReferenceCount == 0)
    {
        /* Last reference gone: page goes back on the (dirty) free list */
        MmAvailablePages++;
        Page->u3.e1.PageLocation = FreePageList;
        MiInsertInListTail(&MmFreePageListHead, Page);
        /* Kick the zero-page thread once more than 8 free pages are queued
           and it isn't already signaled */
        if (MmFreePageListHead.Total > 8 && 0 == KeReadStateEvent(&ZeroPageThreadEvent))
        {
            KeSetEvent(&ZeroPageThreadEvent, IO_NO_INCREMENT, FALSE);
        }
    }
}
577
578 PFN_TYPE
579 NTAPI
580 MmAllocPage(ULONG Type)
581 {
582 PFN_TYPE PfnOffset;
583 PPHYSICAL_PAGE PageDescriptor;
584 BOOLEAN NeedClear = FALSE;
585
586 DPRINT("MmAllocPage()\n");
587
588 if (MmZeroedPageListHead.Total == 0)
589 {
590 if (MmFreePageListHead.Total == 0)
591 {
592 /* Check if this allocation is for the PFN DB itself */
593 if (MmNumberOfPhysicalPages == 0)
594 {
595 ASSERT(FALSE);
596 }
597
598 DPRINT1("MmAllocPage(): Out of memory\n");
599 return 0;
600 }
601 PageDescriptor = MiRemoveHeadList(&MmFreePageListHead);
602
603 NeedClear = TRUE;
604 }
605 else
606 {
607 PageDescriptor = MiRemoveHeadList(&MmZeroedPageListHead);
608 }
609
610 PageDescriptor->u3.e2.ReferenceCount = 1;
611
612 MmAvailablePages--;
613
614 PfnOffset = MiGetPfnEntryIndex(PageDescriptor);
615 if ((NeedClear) && (Type != MC_SYSTEM))
616 {
617 MiZeroPage(PfnOffset);
618 }
619
620 PageDescriptor->u3.e1.PageLocation = ActiveAndValid;
621 return PfnOffset;
622 }
623
624 NTSTATUS
625 NTAPI
626 MiZeroPage(PFN_TYPE Page)
627 {
628 KIRQL Irql;
629 PVOID TempAddress;
630
631 Irql = KeRaiseIrqlToDpcLevel();
632 TempAddress = MiMapPageToZeroInHyperSpace(Page);
633 if (TempAddress == NULL)
634 {
635 return(STATUS_NO_MEMORY);
636 }
637 memset(TempAddress, 0, PAGE_SIZE);
638 MiUnmapPagesInZeroSpace(TempAddress, 1);
639 KeLowerIrql(Irql);
640 return(STATUS_SUCCESS);
641 }
642
/*
 * System thread that drains the dirty free list: it waits on
 * ZeroPageThreadEvent, zeroes each free page, and moves it to the
 * zeroed list. Runs forever unless ZeroPageThreadShouldTerminate is set.
 */
NTSTATUS
NTAPI
MmZeroPageThreadMain(PVOID Ignored)
{
    NTSTATUS Status;
    KIRQL oldIrql;
    PPHYSICAL_PAGE PageDescriptor;
    PFN_TYPE Pfn;
    ULONG Count;

    /* Free initial kernel memory */
    //MiFreeInitMemory();

    /* Set our priority to 0 (lowest), so we only run when nothing else does */
    KeGetCurrentThread()->BasePriority = 0;
    KeSetPriorityThread(KeGetCurrentThread(), 0);

    while(1)
    {
        /* Sleep until MmDereferencePage signals that free pages piled up */
        Status = KeWaitForSingleObject(&ZeroPageThreadEvent,
                                       0,
                                       KernelMode,
                                       FALSE,
                                       NULL);

        if (ZeroPageThreadShouldTerminate)
        {
            DPRINT1("ZeroPageThread: Terminating\n");
            return STATUS_SUCCESS;
        }
        Count = 0;
        oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
        while (MmFreePageListHead.Total)
        {
            PageDescriptor = MiRemoveHeadList(&MmFreePageListHead);
            /* We set the page to used, because MmCreateVirtualMapping failed with unused pages */
            /* Drop the PFN lock while zeroing: MiZeroPage raises to DPC level
               and maps hyperspace, which must not happen under the lock */
            KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
            Pfn = MiGetPfnEntryIndex(PageDescriptor);
            Status = MiZeroPage(Pfn);

            oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
            if (NT_SUCCESS(Status))
            {
                /* Page is clean now: file it on the zeroed list */
                MiInsertZeroListAtBack(Pfn);
                Count++;
            }
            else
            {
                /* Zeroing failed: put the page back on the dirty free list */
                MiInsertInListTail(&MmFreePageListHead, PageDescriptor);
                PageDescriptor->u3.e1.PageLocation = FreePageList;
            }

        }
        DPRINT("Zeroed %d pages.\n", Count);
        /* Reset the event before releasing the lock so a concurrent
           KeSetEvent is not lost between drain and reset */
        KeResetEvent(&ZeroPageThreadEvent);
        KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
    }

    /* Not reached: the loop above only exits via the return inside it */
    return STATUS_SUCCESS;
}
703
704 /* EOF */