Sync to trunk (r44371)
[reactos.git] / reactos / ntoskrnl / mm / freelist.c
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/mm/freelist.c
5 * PURPOSE: Handle the list of free physical pages
6 *
7 * PROGRAMMERS: David Welch (welch@cwcom.net)
8 * Robert Bergkvist
9 */
10
11 /* INCLUDES ****************************************************************/
12
13 #include <ntoskrnl.h>
14 #define NDEBUG
15 #include <debug.h>
16
17 #if defined (ALLOC_PRAGMA)
18 #pragma alloc_text(INIT, MmInitializePageList)
19 #endif
20
21 #define MODULE_INVOLVED_IN_ARM3
22 #include "ARM3/miarm.h"
23
24 /* TYPES *******************************************************************/
25
26 #define MM_PHYSICAL_PAGE_FREE (0x1)
27 #define MM_PHYSICAL_PAGE_USED (0x2)
28
29 /* GLOBALS ****************************************************************/
30
31 //
32 //
33 // ReactOS to NT Physical Page Descriptor Entry Legacy Mapping Definitions
34 //
35
/* Legacy ReactOS view of a PFN database entry, overlaid on the NT MMPFN
 * structure (the C_ASSERT below guarantees both have the same size).
 * The offset comments describe the intended 32-bit layout — NOTE(review):
 * they look approximate; verify against the actual MMPFN layout. */
typedef union
{
    MMPFN Pfn;

    struct
    {
        LIST_ENTRY ListEntry;            // 0x000
        ULONG_PTR RmapListHead;          // 0x008
        USHORT ReferenceCount;           // 0x00C
        struct                           // 0x00E
        {
            USHORT _unused1:1;
            USHORT StartOfAllocation:1;  // first page of a contiguous allocation
            USHORT EndOfAllocation:1;    // last page of a contiguous allocation
            USHORT Zero:1;               // page contents known to be zero
            USHORT LockCount:4;
            USHORT Consumer:3;           // MC_* consumer owning the page
            USHORT _unused2:1;
            USHORT Type:2;               // MM_PHYSICAL_PAGE_FREE / _USED
            USHORT _unused3:1;
            USHORT _unused4:1;
        } Flags;
        LONG MapCount;                   // 0x010
        ULONG_PTR SavedSwapEntry;        // 0x018
    };
} PHYSICAL_PAGE, *PPHYSICAL_PAGE;
62
C_ASSERT(sizeof(PHYSICAL_PAGE) == sizeof(MMPFN));

/* Shadow the real PFN accessors with legacy-typed wrappers. A function-like
 * macro is not expanded recursively, so each wrapper still calls the real
 * MiGetPfnEntry/MiGetPfnEntryIndex while casting to/from the legacy types. */
#define MiGetPfnEntry(Pfn) ((PPHYSICAL_PAGE)MiGetPfnEntry(Pfn))
#define MiGetPfnEntryIndex(x) MiGetPfnEntryIndex((struct _MMPFN*)x)
/* Let code below say Page->LockCount for the bitfield inside Flags */
#define LockCount Flags.LockCount

PMMPFN MmPfnDatabase;
/* All code below sees the database as an array of legacy PHYSICAL_PAGEs */
#define MmPfnDatabase ((PPHYSICAL_PAGE)MmPfnDatabase)

#define MMPFN PHYSICAL_PAGE
#define PMMPFN PPHYSICAL_PAGE

ULONG MmAvailablePages;
ULONG MmResidentAvailablePages;

SIZE_T MmTotalCommitLimit;
SIZE_T MmTotalCommittedPages;
SIZE_T MmSharedCommit;
SIZE_T MmDriverCommit;
SIZE_T MmProcessCommit;
SIZE_T MmPagedPoolCommit;
SIZE_T MmPeakCommitment;
/* NOTE(review): lowercase 't' deviates from the Mm naming convention;
 * renaming would affect external references, so it is left as-is. */
SIZE_T MmtotalCommitLimitMaximum;

MMPFNLIST MmZeroedPageListHead;
MMPFNLIST MmFreePageListHead;
MMPFNLIST MmStandbyPageListHead;
MMPFNLIST MmModifiedPageListHead;
MMPFNLIST MmModifiedNoWritePageListHead;

/* List of pages allocated to the MC_USER Consumer */
static LIST_ENTRY UserPageListHead;
/* List of pages zeroed by the ZPW (MmZeroPageThreadMain) */
static LIST_ENTRY FreeZeroedPageListHead;
/* List of free pages, filled by MmGetReferenceCountPage and
 * and MmInitializePageList */
static LIST_ENTRY FreeUnzeroedPageListHead;

/* Signaled to wake the zero-page worker thread */
static KEVENT ZeroPageThreadEvent;
static BOOLEAN ZeroPageThreadShouldTerminate = FALSE;

/* Number of entries currently on FreeUnzeroedPageListHead */
static ULONG UnzeroedPageCount = 0;
106 /* FUNCTIONS *************************************************************/
107
108 PFN_NUMBER
109 NTAPI
110 MmGetLRUFirstUserPage(VOID)
111 {
112 PLIST_ENTRY NextListEntry;
113 PHYSICAL_PAGE* PageDescriptor;
114 KIRQL oldIrql;
115
116 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
117 NextListEntry = UserPageListHead.Flink;
118 if (NextListEntry == &UserPageListHead)
119 {
120 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
121 return 0;
122 }
123 PageDescriptor = CONTAINING_RECORD(NextListEntry, PHYSICAL_PAGE, ListEntry);
124 ASSERT_PFN(&PageDescriptor->Pfn);
125 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
126 return PageDescriptor - MmPfnDatabase;
127 }
128
129 VOID
130 NTAPI
131 MmInsertLRULastUserPage(PFN_NUMBER Pfn)
132 {
133 KIRQL oldIrql;
134 PPHYSICAL_PAGE Page;
135
136 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
137 Page = MiGetPfnEntry(Pfn);
138 ASSERT(Page);
139 ASSERT(Page->Flags.Type == MM_PHYSICAL_PAGE_USED);
140 ASSERT(Page->Flags.Consumer == MC_USER);
141 InsertTailList(&UserPageListHead, &Page->ListEntry);
142 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
143 }
144
145 PFN_NUMBER
146 NTAPI
147 MmGetLRUNextUserPage(PFN_NUMBER PreviousPfn)
148 {
149 PLIST_ENTRY NextListEntry;
150 PHYSICAL_PAGE* PageDescriptor;
151 KIRQL oldIrql;
152 PPHYSICAL_PAGE Page;
153
154 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
155 Page = MiGetPfnEntry(PreviousPfn);
156 ASSERT(Page);
157 ASSERT(Page->Flags.Type == MM_PHYSICAL_PAGE_USED);
158 ASSERT(Page->Flags.Consumer == MC_USER);
159 NextListEntry = (PLIST_ENTRY)Page->ListEntry.Flink;
160 if (NextListEntry == &UserPageListHead)
161 {
162 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
163 return 0;
164 }
165 PageDescriptor = CONTAINING_RECORD(NextListEntry, PHYSICAL_PAGE, ListEntry);
166 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
167 return PageDescriptor - MmPfnDatabase;
168 }
169
170 VOID
171 NTAPI
172 MmRemoveLRUUserPage(PFN_NUMBER Page)
173 {
174 RemoveEntryList(&MiGetPfnEntry(Page)->ListEntry);
175 }
176
/**
 * MiFindContiguousPages
 *
 * Scans MmPhysicalMemoryBlock for a run of SizeInPages consecutive free
 * pages inside [LowestPfn, HighestPfn], optionally honoring a BoundaryPfn
 * alignment restriction. The first scan runs without the PFN lock; the
 * candidate run is then revalidated and claimed under the lock (type used,
 * consumer MC_NPPOOL, refcount 1), with any unzeroed pages zeroed on the
 * spot. Returns the first PFN of the run, or 0 if no run was found.
 *
 * NOTE(review): the CacheType parameter is not used in this body.
 */
PFN_NUMBER
NTAPI
MiFindContiguousPages(IN PFN_NUMBER LowestPfn,
                      IN PFN_NUMBER HighestPfn,
                      IN PFN_NUMBER BoundaryPfn,
                      IN PFN_NUMBER SizeInPages,
                      IN MEMORY_CACHING_TYPE CacheType)
{
    PFN_NUMBER Page, PageCount, LastPage, Length, BoundaryMask;
    ULONG i = 0;
    PMMPFN Pfn1, EndPfn;
    KIRQL OldIrql;
    PAGED_CODE ();
    ASSERT(SizeInPages != 0);

    //
    // Convert the boundary PFN into an alignment mask
    //
    BoundaryMask = ~(BoundaryPfn - 1);

    //
    // Loop all the physical memory blocks
    //
    do
    {
        //
        // Capture the base page and length of this memory block
        //
        Page = MmPhysicalMemoryBlock->Run[i].BasePage;
        PageCount = MmPhysicalMemoryBlock->Run[i].PageCount;

        //
        // Check how far this memory block will go
        //
        LastPage = Page + PageCount;

        //
        // Trim it down to only the PFNs we're actually interested in
        //
        if ((LastPage - 1) > HighestPfn) LastPage = HighestPfn + 1;
        if (Page < LowestPfn) Page = LowestPfn;

        //
        // Skip this run if it's empty or fails to contain all the pages we need
        //
        if (!(PageCount) || ((Page + SizeInPages) > LastPage)) continue;

        //
        // Now scan all the relevant PFNs in this run (lock-free first pass;
        // the result is reconfirmed under the PFN lock below)
        //
        Length = 0;
        for (Pfn1 = MiGetPfnEntry(Page); Page < LastPage; Page++, Pfn1++)
        {
            //
            // If this PFN is in use, ignore it
            //
            if (Pfn1->Flags.Type != MM_PHYSICAL_PAGE_FREE) continue;

            //
            // If we haven't chosen a start PFN yet and the caller specified an
            // alignment, make sure the page matches the alignment restriction
            //
            if ((!(Length) && (BoundaryPfn)) &&
                (((Page ^ (Page + SizeInPages - 1)) & BoundaryMask)))
            {
                //
                // It does not, so bail out
                //
                continue;
            }

            //
            // Increase the number of valid pages, and check if we have enough
            //
            if (++Length == SizeInPages)
            {
                //
                // It appears we've amassed enough legitimate pages, rollback
                // to the first page of the candidate run
                //
                Pfn1 -= (Length - 1);
                Page -= (Length - 1);

                //
                // Acquire the PFN lock
                //
                OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
                do
                {
                    //
                    // Things might've changed for us. Is the page still free?
                    //
                    if (Pfn1->Flags.Type != MM_PHYSICAL_PAGE_FREE) break;

                    //
                    // So far so good. Is this the last confirmed valid page?
                    //
                    if (!--Length)
                    {
                        //
                        // Sanity check that we didn't go out of bounds
                        //
                        ASSERT(i != MmPhysicalMemoryBlock->NumberOfRuns);

                        //
                        // Loop until all PFN entries have been processed
                        // (walking backwards from the last page of the run)
                        //
                        EndPfn = Pfn1 - SizeInPages + 1;
                        do
                        {
                            //
                            // If this was an unzeroed page, there are now less
                            //
                            if (Pfn1->Flags.Zero == 0) UnzeroedPageCount--;

                            //
                            // One less free page
                            //
                            MmAvailablePages--;

                            //
                            // This PFN is now a used page, set it up
                            //
                            RemoveEntryList(&Pfn1->ListEntry);
                            Pfn1->Flags.Type = MM_PHYSICAL_PAGE_USED;
                            Pfn1->Flags.Consumer = MC_NPPOOL;
                            Pfn1->ReferenceCount = 1;
                            Pfn1->LockCount = 0;
                            Pfn1->SavedSwapEntry = 0;

                            //
                            // Check if it was already zeroed
                            //
                            if (Pfn1->Flags.Zero == 0)
                            {
                                //
                                // It wasn't, so zero it
                                //
                                MiZeroPage(MiGetPfnEntryIndex(Pfn1));
                            }

                            //
                            // Check if this is the last PFN, otherwise go on
                            //
                            if (Pfn1 == EndPfn) break;
                            Pfn1--;
                        } while (TRUE);

                        //
                        // Mark the first and last PFN so we can find them later
                        //
                        Pfn1->Flags.StartOfAllocation = 1;
                        (Pfn1 + SizeInPages - 1)->Flags.EndOfAllocation = 1;

                        //
                        // Now it's safe to let go of the PFN lock
                        //
                        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

                        //
                        // Quick sanity check that the last PFN is consistent
                        //
                        EndPfn = Pfn1 + SizeInPages;
                        ASSERT(EndPfn == MiGetPfnEntry(Page + 1));

                        //
                        // Compute the first page, and make sure it's consistent
                        //
                        Page -= SizeInPages - 1;
                        ASSERT(Pfn1 == MiGetPfnEntry(Page));
                        ASSERT(Page != 0);
                        return Page;
                    }

                    //
                    // Keep going. The purpose of this loop is to reconfirm that
                    // after acquiring the PFN lock these pages are still usable
                    //
                    Pfn1++;
                    Page++;
                } while (TRUE);

                //
                // If we got here, something changed while we hadn't acquired
                // the PFN lock yet, so we'll have to restart
                //
                KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
                Length = 0;
            }
        }
    } while (++i != MmPhysicalMemoryBlock->NumberOfRuns);

    //
    // And if we get here, it means no suitable physical memory runs were found
    //
    return 0;
}
373
/**
 * MiAllocatePagesForMdl
 *
 * Builds an MDL describing up to TotalBytes worth of physical pages taken
 * from the PFN range [LowAddress, HighAddress]. May legitimately return
 * fewer pages than requested (callers must cope); returns NULL if no MDL
 * could be created or no pages were found at all. Claimed pages become
 * MC_NPPOOL, used, refcount 1, and are zeroed before return.
 *
 * NOTE(review): SkipPages is computed but never used below, and
 * CacheAttribute / MdlFlags are accepted but never read -- confirm intended.
 * NOTE(review): the ranged scan uses "Page < HighPage", which excludes
 * HighPage itself -- verify whether the upper bound should be inclusive.
 */
PMDL
NTAPI
MiAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress,
                      IN PHYSICAL_ADDRESS HighAddress,
                      IN PHYSICAL_ADDRESS SkipBytes,
                      IN SIZE_T TotalBytes,
                      IN MI_PFN_CACHE_ATTRIBUTE CacheAttribute,
                      IN ULONG MdlFlags)
{
    PMDL Mdl;
    PFN_NUMBER PageCount, LowPage, HighPage, SkipPages, PagesFound = 0, Page;
    PPFN_NUMBER MdlPage, LastMdlPage;
    KIRQL OldIrql;
    PLIST_ENTRY ListEntry;
    PPHYSICAL_PAGE Pfn1;
    INT LookForZeroedPages;
    ASSERT (KeGetCurrentIrql() <= APC_LEVEL);

    //
    // Convert the low address into a PFN
    //
    LowPage = (PFN_NUMBER)(LowAddress.QuadPart >> PAGE_SHIFT);

    //
    // Convert, and normalize, the high address into a PFN
    //
    HighPage = (PFN_NUMBER)(HighAddress.QuadPart >> PAGE_SHIFT);
    if (HighPage > MmHighestPhysicalPage) HighPage = MmHighestPhysicalPage;

    //
    // Validate skipbytes and convert them into pages
    //
    if (BYTE_OFFSET(SkipBytes.LowPart)) return NULL;
    SkipPages = (PFN_NUMBER)(SkipBytes.QuadPart >> PAGE_SHIFT);

    //
    // Now compute the number of pages the MDL will cover
    //
    PageCount = (PFN_NUMBER)ADDRESS_AND_SIZE_TO_SPAN_PAGES(0, TotalBytes);
    do
    {
        //
        // Try creating an MDL for these many pages
        //
        Mdl = MmCreateMdl(NULL, NULL, PageCount << PAGE_SHIFT);
        if (Mdl) break;

        //
        // This function is not required to return the amount of pages requested
        // In fact, it can return as little as 1 page, and callers are supposed
        // to deal with this scenario. So re-attempt the allocation with less
        // pages than before, and see if it worked this time.
        //
        PageCount -= (PageCount >> 4);
    } while (PageCount);

    //
    // Wow, not even a single page was around!
    //
    if (!Mdl) return NULL;

    //
    // This is where the page array starts....
    //
    MdlPage = (PPFN_NUMBER)(Mdl + 1);

    //
    // Lock the PFN database
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    //
    // Are we looking for any pages, without discriminating?
    //
    if ((LowPage == 0) && (HighPage == MmHighestPhysicalPage))
    {
        //
        // Well then, let's go shopping
        //
        while (PagesFound < PageCount)
        {
            //
            // Do we have zeroed pages?
            //
            if (!IsListEmpty(&FreeZeroedPageListHead))
            {
                //
                // Grab a zero page
                //
                ListEntry = RemoveTailList(&FreeZeroedPageListHead);
            }
            else if (!IsListEmpty(&FreeUnzeroedPageListHead))
            {
                //
                // Nope, grab an unzeroed page
                //
                ListEntry = RemoveTailList(&FreeUnzeroedPageListHead);
                UnzeroedPageCount--;
            }
            else
            {
                //
                // This is not good... hopefully we have at least SOME pages
                //
                ASSERT(PagesFound);
                break;
            }

            //
            // Get the PFN entry for this page
            //
            Pfn1 = CONTAINING_RECORD(ListEntry, PHYSICAL_PAGE, ListEntry);

            //
            // Make sure it's really free
            //
            ASSERT(Pfn1->Flags.Type == MM_PHYSICAL_PAGE_FREE);
            ASSERT(Pfn1->ReferenceCount == 0);

            //
            // Allocate it and mark it
            //
            Pfn1->Flags.Type = MM_PHYSICAL_PAGE_USED;
            Pfn1->Flags.Consumer = MC_NPPOOL;
            Pfn1->Flags.StartOfAllocation = 1;
            Pfn1->Flags.EndOfAllocation = 1;
            Pfn1->ReferenceCount = 1;
            Pfn1->LockCount = 0;
            Pfn1->SavedSwapEntry = 0;

            //
            // Decrease available pages
            //
            MmAvailablePages--;

            //
            // Save it into the MDL
            //
            *MdlPage++ = MiGetPfnEntryIndex(Pfn1);
            PagesFound++;
        }
    }
    else
    {
        //
        // You want specific range of pages. We'll do this in two runs
        // (first pass prefers zeroed pages, second pass takes unzeroed ones)
        //
        for (LookForZeroedPages = 1; LookForZeroedPages >= 0; LookForZeroedPages--)
        {
            //
            // Scan the range you specified
            //
            for (Page = LowPage; Page < HighPage; Page++)
            {
                //
                // Get the PFN entry for this page
                //
                Pfn1 = MiGetPfnEntry(Page);
                ASSERT(Pfn1);

                //
                // Make sure it's free and if this is our first pass, zeroed
                //
                if (Pfn1->Flags.Type != MM_PHYSICAL_PAGE_FREE) continue;
                if (Pfn1->Flags.Zero != LookForZeroedPages) continue;

                //
                // Sanity checks
                //
                ASSERT(Pfn1->ReferenceCount == 0);

                //
                // Now setup the page and mark it
                //
                Pfn1->Flags.Type = MM_PHYSICAL_PAGE_USED;
                Pfn1->Flags.Consumer = MC_NPPOOL;
                Pfn1->ReferenceCount = 1;
                Pfn1->Flags.StartOfAllocation = 1;
                Pfn1->Flags.EndOfAllocation = 1;
                Pfn1->LockCount = 0;
                Pfn1->SavedSwapEntry = 0;

                //
                // If this page was unzeroed, we've consumed such a page
                //
                if (!Pfn1->Flags.Zero) UnzeroedPageCount--;

                //
                // Decrease available pages
                //
                MmAvailablePages--;

                //
                // Save this page into the MDL
                //
                *MdlPage++ = Page;
                if (++PagesFound == PageCount) break;
            }

            //
            // If the first pass was enough, don't keep going, otherwise, go again
            //
            if (PagesFound == PageCount) break;
        }
    }

    //
    // Now release the PFN count
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    //
    // We might've found less pages, but not more ;-)
    //
    if (PagesFound != PageCount) ASSERT(PagesFound < PageCount);
    if (!PagesFound)
    {
        //
        // If we didn't find any pages at all, fail
        //
        DPRINT1("NO MDL PAGES!\n");
        ExFreePool(Mdl);
        return NULL;
    }

    //
    // Write out how many pages we found
    //
    Mdl->ByteCount = (ULONG)(PagesFound << PAGE_SHIFT);

    //
    // Terminate the MDL array if there's certain missing pages
    //
    if (PagesFound != PageCount) *MdlPage = -1;

    //
    // Now go back and loop over all the MDL pages
    //
    MdlPage = (PPFN_NUMBER)(Mdl + 1);
    LastMdlPage = MdlPage + PagesFound;
    while (MdlPage < LastMdlPage)
    {
        //
        // Check if we've reached the end
        //
        Page = *MdlPage;
        if (Page == (PFN_NUMBER)-1) break;

        //
        // Get the PFN entry for the page and check if we should zero it out
        //
        Pfn1 = MiGetPfnEntry(Page);
        ASSERT(Pfn1);
        if (Pfn1->Flags.Zero == 0) MiZeroPage(Page);
    }

    //
    // We're done, mark the pages as locked (should we lock them, though???)
    //
    Mdl->Process = NULL;
    Mdl->MdlFlags |= MDL_PAGES_LOCKED;
    return Mdl;
}
637
/**
 * MmDumpPfnDatabase
 *
 * Debug helper: raises to HIGH_LEVEL and prints every PFN database entry
 * (state, consumer, reference/lock counts, rmap head), followed by
 * per-consumer totals.
 *
 * NOTE(review): neither switch has a default case, so for an entry whose
 * Consumer/Type matches no case the State/Consumer strings retain the
 * values from the previous iteration (initially "????"/"Unknown").
 */
VOID
NTAPI
MmDumpPfnDatabase(VOID)
{
    ULONG i;
    PPHYSICAL_PAGE Pfn1;
    PCHAR State = "????", Consumer = "Unknown";
    KIRQL OldIrql;
    ULONG Totals[5] = {0}, FreePages = 0;

    /* Block everything, including interrupts, while dumping */
    KeRaiseIrql(HIGH_LEVEL, &OldIrql);

    //
    // Loop the PFN database
    //
    for (i = 0; i <= MmHighestPhysicalPage; i++)
    {
        Pfn1 = MiGetPfnEntry(i);
        if (!Pfn1) continue;

        //
        // Get the consumer
        //
        switch (Pfn1->Flags.Consumer)
        {
            case MC_NPPOOL:

                Consumer = "Nonpaged Pool";
                break;

            case MC_PPOOL:

                Consumer = "Paged Pool";
                break;

            case MC_CACHE:

                Consumer = "File System Cache";
                break;

            case MC_USER:

                Consumer = "Process Working Set";
                break;

            case MC_SYSTEM:

                Consumer = "System";
                break;
        }

        //
        // Get the type
        //
        switch (Pfn1->Flags.Type)
        {
            case MM_PHYSICAL_PAGE_USED:

                State = "Used";
                Totals[Pfn1->Flags.Consumer]++;
                break;

            case MM_PHYSICAL_PAGE_FREE:

                State = "Free";
                Consumer = "Free";
                FreePages++;
                break;
        }

        //
        // Pretty-print the page
        // NOTE(review): %p is used with non-pointer arguments here -- debug
        // output only, but the format specifiers do not match the types.
        //
        DbgPrint("0x%08p:\t%04s\t%20s\t(%02d.%02d) [%08p])\n",
                 i << PAGE_SHIFT,
                 State,
                 Consumer,
                 Pfn1->ReferenceCount,
                 Pfn1->LockCount,
                 Pfn1->RmapListHead);
    }

    DbgPrint("Nonpaged Pool: %d pages\t[%d KB]\n", Totals[MC_NPPOOL], (Totals[MC_NPPOOL] << PAGE_SHIFT) / 1024);
    DbgPrint("Paged Pool: %d pages\t[%d KB]\n", Totals[MC_PPOOL], (Totals[MC_PPOOL] << PAGE_SHIFT) / 1024);
    DbgPrint("File System Cache: %d pages\t[%d KB]\n", Totals[MC_CACHE], (Totals[MC_CACHE] << PAGE_SHIFT) / 1024);
    DbgPrint("Process Working Set: %d pages\t[%d KB]\n", Totals[MC_USER], (Totals[MC_USER] << PAGE_SHIFT) / 1024);
    DbgPrint("System: %d pages\t[%d KB]\n", Totals[MC_SYSTEM], (Totals[MC_SYSTEM] << PAGE_SHIFT) / 1024);
    DbgPrint("Free: %d pages\t[%d KB]\n", FreePages, (FreePages << PAGE_SHIFT) / 1024);

    KeLowerIrql(OldIrql);
}
729
/**
 * MmInitializePageList
 *
 * INIT-time routine (see the alloc_text pragma at the top of this file):
 * builds the legacy page lists from the loader's memory descriptor list.
 * Usable memory types become free/unzeroed pages; everything else is
 * stamped as used MC_NPPOOL memory. Finally the pages that hold the PFN
 * database itself are marked used, the zero-page-thread event is created,
 * and the balancer is initialized with the resulting counts.
 */
VOID
NTAPI
MmInitializePageList(VOID)
{
    ULONG i;
    PHYSICAL_PAGE UsedPage;
    PMEMORY_ALLOCATION_DESCRIPTOR Md;
    PLIST_ENTRY NextEntry;
    ULONG NrSystemPages = 0;

    /* Initialize the page lists */
    InitializeListHead(&UserPageListHead);
    InitializeListHead(&FreeUnzeroedPageListHead);
    InitializeListHead(&FreeZeroedPageListHead);

    /* This is what a used page looks like */
    RtlZeroMemory(&UsedPage, sizeof(UsedPage));
    UsedPage.Flags.Type = MM_PHYSICAL_PAGE_USED;
    UsedPage.Flags.Consumer = MC_NPPOOL;
    UsedPage.ReferenceCount = 1;

    /* Loop the memory descriptors */
    for (NextEntry = KeLoaderBlock->MemoryDescriptorListHead.Flink;
         NextEntry != &KeLoaderBlock->MemoryDescriptorListHead;
         NextEntry = NextEntry->Flink)
    {
        /* Get the descriptor */
        Md = CONTAINING_RECORD(NextEntry,
                               MEMORY_ALLOCATION_DESCRIPTOR,
                               ListEntry);

        /* Skip bad memory */
        if ((Md->MemoryType == LoaderFirmwarePermanent) ||
            (Md->MemoryType == LoaderBBTMemory) ||
            (Md->MemoryType == LoaderSpecialMemory) ||
            (Md->MemoryType == LoaderBad))
        {
            //
            // We do not build PFN entries for this
            //
            continue;
        }
        else if ((Md->MemoryType == LoaderFree) ||
                 (Md->MemoryType == LoaderLoadedProgram) ||
                 (Md->MemoryType == LoaderFirmwareTemporary) ||
                 (Md->MemoryType == LoaderOsloaderStack))
        {
            /* Loop every page part of the block */
            for (i = 0; i < Md->PageCount; i++)
            {
                /* Mark it as a free page */
                MmPfnDatabase[Md->BasePage + i].Flags.Type = MM_PHYSICAL_PAGE_FREE;
                InsertTailList(&FreeUnzeroedPageListHead,
                               &MmPfnDatabase[Md->BasePage + i].ListEntry);
                UnzeroedPageCount++;
                MmAvailablePages++;
            }
        }
        else
        {
            /* Loop every page part of the block */
            for (i = 0; i < Md->PageCount; i++)
            {
                /* Everything else is used memory */
                MmPfnDatabase[Md->BasePage + i] = UsedPage;
                NrSystemPages++;
            }
        }
    }

    /* Finally handle the pages describing the PFN database themselves */
    for (i = MxOldFreeDescriptor.BasePage; i < MxFreeDescriptor->BasePage; i++)
    {
        /* Ensure this page was not added previously */
        ASSERT(MmPfnDatabase[i].Flags.Type == 0);

        /* Mark it as used kernel memory */
        MmPfnDatabase[i] = UsedPage;
        NrSystemPages++;
    }

    /* Event starts signaled so the zero-page worker runs once at boot */
    KeInitializeEvent(&ZeroPageThreadEvent, NotificationEvent, TRUE);
    DPRINT("Pages: %x %x\n", MmAvailablePages, NrSystemPages);
    MmInitializeBalancer(MmAvailablePages, NrSystemPages);
}
815
816 VOID
817 NTAPI
818 MmSetRmapListHeadPage(PFN_NUMBER Pfn, struct _MM_RMAP_ENTRY* ListHead)
819 {
820 KIRQL oldIrql;
821
822 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
823 MiGetPfnEntry(Pfn)->RmapListHead = (LONG_PTR)ListHead;
824 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
825 }
826
827 struct _MM_RMAP_ENTRY*
828 NTAPI
829 MmGetRmapListHeadPage(PFN_NUMBER Pfn)
830 {
831 KIRQL oldIrql;
832 struct _MM_RMAP_ENTRY* ListHead;
833
834 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
835 ListHead = (struct _MM_RMAP_ENTRY*)MiGetPfnEntry(Pfn)->RmapListHead;
836 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
837
838 return(ListHead);
839 }
840
841 VOID
842 NTAPI
843 MmSetSavedSwapEntryPage(PFN_NUMBER Pfn, SWAPENTRY SwapEntry)
844 {
845 KIRQL oldIrql;
846
847 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
848 MiGetPfnEntry(Pfn)->SavedSwapEntry = SwapEntry;
849 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
850 }
851
852 SWAPENTRY
853 NTAPI
854 MmGetSavedSwapEntryPage(PFN_NUMBER Pfn)
855 {
856 SWAPENTRY SwapEntry;
857 KIRQL oldIrql;
858
859 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
860 SwapEntry = MiGetPfnEntry(Pfn)->SavedSwapEntry;
861 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
862
863 return(SwapEntry);
864 }
865
866 VOID
867 NTAPI
868 MmReferencePage(PFN_NUMBER Pfn)
869 {
870 PPHYSICAL_PAGE Page;
871
872 DPRINT("MmReferencePage(PysicalAddress %x)\n", Pfn << PAGE_SHIFT);
873
874 if (Pfn == 0 || Pfn > MmHighestPhysicalPage)
875 {
876 return;
877 }
878
879 Page = MiGetPfnEntry(Pfn);
880 ASSERT(Page);
881 if (Page->Flags.Type != MM_PHYSICAL_PAGE_USED)
882 {
883 DPRINT1("Referencing non-used page\n");
884 KeBugCheck(MEMORY_MANAGEMENT);
885 }
886
887 Page->ReferenceCount++;
888 }
889
890 ULONG
891 NTAPI
892 MmGetReferenceCountPage(PFN_NUMBER Pfn)
893 {
894 KIRQL oldIrql;
895 ULONG RCount;
896 PPHYSICAL_PAGE Page;
897
898 DPRINT("MmGetReferenceCountPage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
899
900 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
901 Page = MiGetPfnEntry(Pfn);
902 ASSERT(Page);
903 if (Page->Flags.Type != MM_PHYSICAL_PAGE_USED)
904 {
905 DPRINT1("Getting reference count for free page\n");
906 KeBugCheck(MEMORY_MANAGEMENT);
907 }
908
909 RCount = Page->ReferenceCount;
910
911 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
912 return(RCount);
913 }
914
915 BOOLEAN
916 NTAPI
917 MmIsPageInUse(PFN_NUMBER Pfn)
918 {
919
920 DPRINT("MmIsPageInUse(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
921
922 return (MiGetPfnEntry(Pfn)->Flags.Type == MM_PHYSICAL_PAGE_USED);
923 }
924
925 VOID
926 NTAPI
927 MmDereferencePage(PFN_NUMBER Pfn)
928 {
929 PPHYSICAL_PAGE Page;
930
931 DPRINT("MmDereferencePage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
932
933 Page = MiGetPfnEntry(Pfn);
934 ASSERT(Page);
935
936 if (Page->Flags.Type != MM_PHYSICAL_PAGE_USED)
937 {
938 DPRINT1("Dereferencing free page\n");
939 KeBugCheck(MEMORY_MANAGEMENT);
940 }
941 if (Page->ReferenceCount == 0)
942 {
943 DPRINT1("Derefrencing page with reference count 0\n");
944 KeBugCheck(MEMORY_MANAGEMENT);
945 }
946
947 Page->ReferenceCount--;
948 if (Page->ReferenceCount == 0)
949 {
950 MmAvailablePages++;
951 if (Page->Flags.Consumer == MC_USER) RemoveEntryList(&Page->ListEntry);
952 if (Page->RmapListHead != (LONG_PTR)NULL)
953 {
954 DPRINT1("Freeing page with rmap entries.\n");
955 KeBugCheck(MEMORY_MANAGEMENT);
956 }
957 if (Page->LockCount > 0)
958 {
959 DPRINT1("Freeing locked page\n");
960 KeBugCheck(MEMORY_MANAGEMENT);
961 }
962 if (Page->SavedSwapEntry != 0)
963 {
964 DPRINT1("Freeing page with swap entry.\n");
965 KeBugCheck(MEMORY_MANAGEMENT);
966 }
967 if (Page->Flags.Type != MM_PHYSICAL_PAGE_USED)
968 {
969 DPRINT1("Freeing page with flags %x\n",
970 Page->Flags.Type);
971 KeBugCheck(MEMORY_MANAGEMENT);
972 }
973 Page->Flags.Type = MM_PHYSICAL_PAGE_FREE;
974 Page->Flags.Consumer = MC_MAXIMUM;
975 InsertTailList(&FreeUnzeroedPageListHead,
976 &Page->ListEntry);
977 UnzeroedPageCount++;
978 if (UnzeroedPageCount > 8 && 0 == KeReadStateEvent(&ZeroPageThreadEvent))
979 {
980 KeSetEvent(&ZeroPageThreadEvent, IO_NO_INCREMENT, FALSE);
981 }
982 }
983 }
984
985 ULONG
986 NTAPI
987 MmGetLockCountPage(PFN_NUMBER Pfn)
988 {
989 KIRQL oldIrql;
990 ULONG CurrentLockCount;
991 PPHYSICAL_PAGE Page;
992
993 DPRINT("MmGetLockCountPage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
994
995 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
996
997 Page = MiGetPfnEntry(Pfn);
998 ASSERT(Page);
999 if (Page->Flags.Type != MM_PHYSICAL_PAGE_USED)
1000 {
1001 DPRINT1("Getting lock count for free page\n");
1002 KeBugCheck(MEMORY_MANAGEMENT);
1003 }
1004
1005 CurrentLockCount = Page->LockCount;
1006 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
1007
1008 return(CurrentLockCount);
1009 }
1010
1011 VOID
1012 NTAPI
1013 MmLockPage(PFN_NUMBER Pfn)
1014 {
1015 PPHYSICAL_PAGE Page;
1016
1017 DPRINT("MmLockPage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
1018
1019 Page = MiGetPfnEntry(Pfn);
1020 ASSERT(Page);
1021 if (Page->Flags.Type != MM_PHYSICAL_PAGE_USED)
1022 {
1023 DPRINT1("Locking free page\n");
1024 KeBugCheck(MEMORY_MANAGEMENT);
1025 }
1026
1027 Page->LockCount++;
1028 }
1029
1030 VOID
1031 NTAPI
1032 MmUnlockPage(PFN_NUMBER Pfn)
1033 {
1034 PPHYSICAL_PAGE Page;
1035
1036 DPRINT("MmUnlockPage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
1037
1038 Page = MiGetPfnEntry(Pfn);
1039 ASSERT(Page);
1040 if (Page->Flags.Type != MM_PHYSICAL_PAGE_USED)
1041 {
1042 DPRINT1("Unlocking free page\n");
1043 KeBugCheck(MEMORY_MANAGEMENT);
1044 }
1045
1046 Page->LockCount--;
1047 }
1048
/**
 * MmAllocPage
 *
 * Allocates a single physical page for the given consumer (MC_*),
 * preferring an already-zeroed page. An unzeroed page is zeroed on the
 * spot unless the consumer is MC_SYSTEM. SwapEntry is stashed in the new
 * descriptor. Returns the PFN, or 0 when both free lists are empty.
 *
 * NOTE(review): the free lists are manipulated without acquiring the PFN
 * lock in this body -- presumably callers serialize; confirm at call sites.
 */
PFN_NUMBER
NTAPI
MmAllocPage(ULONG Consumer, SWAPENTRY SwapEntry)
{
    PFN_NUMBER PfnOffset;
    PLIST_ENTRY ListEntry;
    PPHYSICAL_PAGE PageDescriptor;
    BOOLEAN NeedClear = FALSE;

    DPRINT("MmAllocPage()\n");

    if (IsListEmpty(&FreeZeroedPageListHead))
    {
        if (IsListEmpty(&FreeUnzeroedPageListHead))
        {
            /* Check if this allocation is for the PFN DB itself */
            if (MmNumberOfPhysicalPages == 0)
            {
                ASSERT(FALSE);
            }

            DPRINT1("MmAllocPage(): Out of memory\n");
            return 0;
        }
        ListEntry = RemoveTailList(&FreeUnzeroedPageListHead);
        UnzeroedPageCount--;

        PageDescriptor = CONTAINING_RECORD(ListEntry, PHYSICAL_PAGE, ListEntry);

        /* Page came off the unzeroed list; remember to clear it below */
        NeedClear = TRUE;
    }
    else
    {
        ListEntry = RemoveTailList(&FreeZeroedPageListHead);

        PageDescriptor = CONTAINING_RECORD(ListEntry, PHYSICAL_PAGE, ListEntry);
    }

    /* Sanity: a page off the free lists must really be free and unreferenced */
    if (PageDescriptor->Flags.Type != MM_PHYSICAL_PAGE_FREE)
    {
        DPRINT1("Got non-free page from freelist\n");
        KeBugCheck(MEMORY_MANAGEMENT);
    }
    if (PageDescriptor->ReferenceCount != 0)
    {
        DPRINT1("%d\n", PageDescriptor->ReferenceCount);
        KeBugCheck(MEMORY_MANAGEMENT);
    }

    /* Claim the page for the requested consumer */
    PageDescriptor->Flags.Type = MM_PHYSICAL_PAGE_USED;
    PageDescriptor->Flags.Consumer = Consumer;
    PageDescriptor->ReferenceCount = 1;
    PageDescriptor->LockCount = 0;
    PageDescriptor->SavedSwapEntry = SwapEntry;

    MmAvailablePages--;

    /* Descriptor position in the database is the PFN */
    PfnOffset = PageDescriptor - MmPfnDatabase;
    if ((NeedClear) && (Consumer != MC_SYSTEM))
    {
        MiZeroPage(PfnOffset);
    }
    return PfnOffset;
}
1112
1113 NTSTATUS
1114 NTAPI
1115 MiZeroPage(PFN_NUMBER Page)
1116 {
1117 KIRQL Irql;
1118 PVOID TempAddress;
1119
1120 Irql = KeRaiseIrqlToDpcLevel();
1121 TempAddress = MiMapPageToZeroInHyperSpace(Page);
1122 if (TempAddress == NULL)
1123 {
1124 return(STATUS_NO_MEMORY);
1125 }
1126 memset(TempAddress, 0, PAGE_SIZE);
1127 MiUnmapPagesInZeroSpace(TempAddress, 1);
1128 KeLowerIrql(Irql);
1129 return(STATUS_SUCCESS);
1130 }
1131
/**
 * MmZeroPageThreadMain
 *
 * Zero-page worker (ZPW) thread entry point. Runs at base priority 0,
 * waits on ZeroPageThreadEvent, then drains FreeUnzeroedPageListHead:
 * each page is zeroed via MiZeroPage (with the PFN lock dropped around
 * the actual zeroing) and moved to FreeZeroedPageListHead on success, or
 * put back on the unzeroed list on failure. Exits when
 * ZeroPageThreadShouldTerminate is set.
 */
NTSTATUS
NTAPI
MmZeroPageThreadMain(PVOID Ignored)
{
    NTSTATUS Status;
    KIRQL oldIrql;
    PLIST_ENTRY ListEntry;
    PPHYSICAL_PAGE PageDescriptor;
    PFN_NUMBER Pfn;
    ULONG Count;

    /* Free initial kernel memory */
    //MiFreeInitMemory();

    /* Set our priority to 0 */
    KeGetCurrentThread()->BasePriority = 0;
    KeSetPriorityThread(KeGetCurrentThread(), 0);

    while(1)
    {
        Status = KeWaitForSingleObject(&ZeroPageThreadEvent,
                                       0,
                                       KernelMode,
                                       FALSE,
                                       NULL);
        if (!NT_SUCCESS(Status))
        {
            DPRINT1("ZeroPageThread: Wait failed\n");
            KeBugCheck(MEMORY_MANAGEMENT);
        }

        if (ZeroPageThreadShouldTerminate)
        {
            DPRINT1("ZeroPageThread: Terminating\n");
            return STATUS_SUCCESS;
        }
        Count = 0;
        oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
        while (!IsListEmpty(&FreeUnzeroedPageListHead))
        {
            ListEntry = RemoveTailList(&FreeUnzeroedPageListHead);
            UnzeroedPageCount--;
            PageDescriptor = CONTAINING_RECORD(ListEntry, PHYSICAL_PAGE, ListEntry);
            /* We set the page to used, because MmCreateVirtualMapping failed with unused pages */
            PageDescriptor->Flags.Type = MM_PHYSICAL_PAGE_USED;
            /* Drop the PFN lock while the page is actually being zeroed */
            KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
            Pfn = PageDescriptor - MmPfnDatabase;
            Status = MiZeroPage(Pfn);

            /* Reacquire the lock and return the page to the proper list */
            oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
            PageDescriptor->Flags.Zero = 1;
            PageDescriptor->Flags.Type = MM_PHYSICAL_PAGE_FREE;
            if (NT_SUCCESS(Status))
            {
                InsertHeadList(&FreeZeroedPageListHead, ListEntry);
                Count++;
            }
            else
            {
                /* Zeroing failed; put the page back where it came from */
                InsertHeadList(&FreeUnzeroedPageListHead, ListEntry);
                UnzeroedPageCount++;
            }

        }
        DPRINT("Zeroed %d pages.\n", Count);
        /* Reset the notification event while still holding the PFN lock */
        KeResetEvent(&ZeroPageThreadEvent);
        KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
    }

    return STATUS_SUCCESS;
}
1203
1204 /* EOF */