e32dcb2d1f102b4d4cdc6ef72499665520aada64
[reactos.git] / reactos / ntoskrnl / mm / freelist.c
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/mm/freelist.c
5 * PURPOSE: Handle the list of free physical pages
6 *
7 * PROGRAMMERS: David Welch (welch@cwcom.net)
8 * Robert Bergkvist
9 */
10
11 /* INCLUDES ****************************************************************/
12
13 #include <ntoskrnl.h>
14 #define NDEBUG
15 #include <debug.h>
16
17 #if defined (ALLOC_PRAGMA)
18 #pragma alloc_text(INIT, MmInitializePageList)
19 #endif
20
21 #define MODULE_INVOLVED_IN_ARM3
22 #include "ARM3/miarm.h"
23
24 /* TYPES *******************************************************************/
25
26 #define MM_PHYSICAL_PAGE_FREE (0x1)
27 #define MM_PHYSICAL_PAGE_USED (0x2)
28
29 /* GLOBALS ****************************************************************/
30
31 //
32 //
33 // ReactOS to NT Physical Page Descriptor Entry Legacy Mapping Definitions
34 //
35 // REACTOS NT
36 //
37 #define Consumer PageLocation
38 #define Type CacheAttribute
39 #define Zero PrototypePte
40 #define LockCount u3.e1.PageColor
41 #define RmapListHead AweReferenceCount
42 #define SavedSwapEntry u4.EntireFrame
43 #define Flags u3.e1
44 #define ReferenceCount u3.ReferenceCount
45 #define RemoveEntryList(x) RemoveEntryList((PLIST_ENTRY)x)
46 #define InsertTailList(x, y) InsertTailList(x, (PLIST_ENTRY)y)
47 #define ListEntry u1
48 #define PHYSICAL_PAGE MMPFN
49 #define PPHYSICAL_PAGE PMMPFN
50
51 /* The first array contains ReactOS PFNs, the second contains ARM3 PFNs */
52 PPHYSICAL_PAGE MmPfnDatabase[2];
53
54 ULONG MmAvailablePages;
55 ULONG MmResidentAvailablePages;
56
57 SIZE_T MmTotalCommitLimit;
58 SIZE_T MmTotalCommittedPages;
59 SIZE_T MmSharedCommit;
60 SIZE_T MmDriverCommit;
61 SIZE_T MmProcessCommit;
62 SIZE_T MmPagedPoolCommit;
63 SIZE_T MmPeakCommitment;
64 SIZE_T MmtotalCommitLimitMaximum;
65
66 MMPFNLIST MmZeroedPageListHead;
67 MMPFNLIST MmFreePageListHead;
68 MMPFNLIST MmStandbyPageListHead;
69 MMPFNLIST MmModifiedPageListHead;
70 MMPFNLIST MmModifiedNoWritePageListHead;
71
72 /* List of pages allocated to the MC_USER Consumer */
73 static LIST_ENTRY UserPageListHead;
74 /* List of pages zeroed by the ZPW (MmZeroPageThreadMain) */
75 static LIST_ENTRY FreeZeroedPageListHead;
/* List of free pages, filled by MmDereferencePage and
 * MmInitializePageList */
78 static LIST_ENTRY FreeUnzeroedPageListHead;
79
80 static KEVENT ZeroPageThreadEvent;
81 static BOOLEAN ZeroPageThreadShouldTerminate = FALSE;
82
83 static ULONG UnzeroedPageCount = 0;
84
85 /* FUNCTIONS *************************************************************/
86
87 PFN_TYPE
88 NTAPI
89 MmGetLRUFirstUserPage(VOID)
90 {
91 PLIST_ENTRY NextListEntry;
92 PHYSICAL_PAGE* PageDescriptor;
93 KIRQL oldIrql;
94
95 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
96 NextListEntry = UserPageListHead.Flink;
97 if (NextListEntry == &UserPageListHead)
98 {
99 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
100 return 0;
101 }
102 PageDescriptor = CONTAINING_RECORD(NextListEntry, PHYSICAL_PAGE, ListEntry);
103 ASSERT_PFN(PageDescriptor);
104 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
105 return PageDescriptor - MmPfnDatabase[0];
106 }
107
108 VOID
109 NTAPI
110 MmInsertLRULastUserPage(PFN_TYPE Pfn)
111 {
112 KIRQL oldIrql;
113 PPHYSICAL_PAGE Page;
114
115 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
116 Page = MiGetPfnEntry(Pfn);
117 ASSERT(Page);
118 ASSERT(Page->Flags.Type == MM_PHYSICAL_PAGE_USED);
119 ASSERT(Page->Flags.Consumer == MC_USER);
120 InsertTailList(&UserPageListHead, &Page->ListEntry);
121 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
122 }
123
124 PFN_TYPE
125 NTAPI
126 MmGetLRUNextUserPage(PFN_TYPE PreviousPfn)
127 {
128 PLIST_ENTRY NextListEntry;
129 PHYSICAL_PAGE* PageDescriptor;
130 KIRQL oldIrql;
131 PPHYSICAL_PAGE Page;
132
133 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
134 Page = MiGetPfnEntry(PreviousPfn);
135 ASSERT(Page);
136 ASSERT(Page->Flags.Type == MM_PHYSICAL_PAGE_USED);
137 ASSERT(Page->Flags.Consumer == MC_USER);
138 NextListEntry = (PLIST_ENTRY)Page->ListEntry.Flink;
139 if (NextListEntry == &UserPageListHead)
140 {
141 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
142 return 0;
143 }
144 PageDescriptor = CONTAINING_RECORD(NextListEntry, PHYSICAL_PAGE, ListEntry);
145 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
146 return PageDescriptor - MmPfnDatabase[0];
147 }
148
149 VOID
150 NTAPI
151 MmRemoveLRUUserPage(PFN_TYPE Page)
152 {
153 RemoveEntryList(&MiGetPfnEntry(Page)->ListEntry);
154 }
155
/*
 * Scan the physical memory runs for a contiguous range of free pages.
 *
 * LowestPfn/HighestPfn: inclusive PFN bounds for the search.
 * BoundaryPfn: power-of-two boundary the range must not cross
 *              (0 means no boundary restriction).
 * SizeInPages: number of contiguous pages required (must be non-zero).
 * CacheType:   requested caching type — note it is not referenced in
 *              this implementation.
 *
 * Returns the first PFN of the claimed range, or 0 if no suitable run
 * was found. On success the pages are pulled off the free lists under
 * the PFN lock, marked used (MC_NPPOOL, refcount 1), zeroed where
 * needed, and the first/last entries are tagged with
 * StartOfAllocation/EndOfAllocation.
 *
 * The scan is optimistic: candidate ranges are found without the PFN
 * lock, then every page is re-validated under the lock before being
 * claimed; if validation fails the scan restarts within the run.
 */
PFN_NUMBER
NTAPI
MiFindContiguousPages(IN PFN_NUMBER LowestPfn,
                      IN PFN_NUMBER HighestPfn,
                      IN PFN_NUMBER BoundaryPfn,
                      IN PFN_NUMBER SizeInPages,
                      IN MEMORY_CACHING_TYPE CacheType)
{
    PFN_NUMBER Page, PageCount, LastPage, Length, BoundaryMask;
    ULONG i = 0;
    PMMPFN Pfn1, EndPfn;
    KIRQL OldIrql;
    PAGED_CODE ();
    ASSERT(SizeInPages != 0);

    //
    // Convert the boundary PFN into an alignment mask
    //
    BoundaryMask = ~(BoundaryPfn - 1);

    //
    // Loop all the physical memory blocks
    //
    do
    {
        //
        // Capture the base page and length of this memory block
        //
        Page = MmPhysicalMemoryBlock->Run[i].BasePage;
        PageCount = MmPhysicalMemoryBlock->Run[i].PageCount;

        //
        // Check how far this memory block will go
        //
        LastPage = Page + PageCount;

        //
        // Trim it down to only the PFNs we're actually interested in
        //
        if ((LastPage - 1) > HighestPfn) LastPage = HighestPfn + 1;
        if (Page < LowestPfn) Page = LowestPfn;

        //
        // Skip this run if it's empty or fails to contain all the pages we need
        //
        if (!(PageCount) || ((Page + SizeInPages) > LastPage)) continue;

        //
        // Now scan all the relevant PFNs in this run
        //
        Length = 0;
        for (Pfn1 = MiGetPfnEntry(Page); Page < LastPage; Page++, Pfn1++)
        {
            //
            // If this PFN is in use, ignore it
            //
            if (Pfn1->Flags.Type != MM_PHYSICAL_PAGE_FREE) continue;

            //
            // If we haven't chosen a start PFN yet and the caller specified an
            // alignment, make sure the page matches the alignment restriction
            //
            if ((!(Length) && (BoundaryPfn)) &&
                (((Page ^ (Page + SizeInPages - 1)) & BoundaryMask)))
            {
                //
                // It does not, so bail out
                //
                continue;
            }

            //
            // Increase the number of valid pages, and check if we have enough
            //
            if (++Length == SizeInPages)
            {
                //
                // It appears we've amassed enough legitimate pages, rollback
                //
                Pfn1 -= (Length - 1);
                Page -= (Length - 1);

                //
                // Acquire the PFN lock
                //
                OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
                do
                {
                    //
                    // Things might've changed for us. Is the page still free?
                    //
                    if (Pfn1->Flags.Type != MM_PHYSICAL_PAGE_FREE) break;

                    //
                    // So far so good. Is this the last confirmed valid page?
                    //
                    if (!--Length)
                    {
                        //
                        // Sanity check that we didn't go out of bounds
                        //
                        ASSERT(i != MmPhysicalMemoryBlock->NumberOfRuns);

                        //
                        // Loop until all PFN entries have been processed
                        // (walking backwards from the last confirmed page)
                        //
                        EndPfn = Pfn1 - SizeInPages + 1;
                        do
                        {
                            //
                            // If this was an unzeroed page, there are now less
                            //
                            if (Pfn1->Flags.Zero == 0) UnzeroedPageCount--;

                            //
                            // One less free page
                            //
                            MmAvailablePages--;

                            //
                            // This PFN is now a used page, set it up
                            //
                            RemoveEntryList(&Pfn1->ListEntry);
                            Pfn1->Flags.Type = MM_PHYSICAL_PAGE_USED;
                            Pfn1->Flags.Consumer = MC_NPPOOL;
                            Pfn1->ReferenceCount = 1;
                            Pfn1->LockCount = 0;
                            Pfn1->SavedSwapEntry = 0;

                            //
                            // Check if it was already zeroed
                            //
                            if (Pfn1->Flags.Zero == 0)
                            {
                                //
                                // It wasn't, so zero it
                                //
                                MiZeroPage(MiGetPfnEntryIndex(Pfn1));
                            }

                            //
                            // Check if this is the last PFN, otherwise go on
                            //
                            if (Pfn1 == EndPfn) break;
                            Pfn1--;
                        } while (TRUE);

                        //
                        // Mark the first and last PFN so we can find them later
                        //
                        Pfn1->Flags.StartOfAllocation = 1;
                        (Pfn1 + SizeInPages - 1)->Flags.EndOfAllocation = 1;

                        //
                        // Now it's safe to let go of the PFN lock
                        //
                        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

                        //
                        // Quick sanity check that the last PFN is consistent
                        //
                        EndPfn = Pfn1 + SizeInPages;
                        ASSERT(EndPfn == MiGetPfnEntry(Page + 1));

                        //
                        // Compute the first page, and make sure it's consistent
                        //
                        Page -= SizeInPages - 1;
                        ASSERT(Pfn1 == MiGetPfnEntry(Page));
                        ASSERT(Page != 0);
                        return Page;
                    }

                    //
                    // Keep going. The purpose of this loop is to reconfirm that
                    // after acquiring the PFN lock these pages are still usable
                    //
                    Pfn1++;
                    Page++;
                } while (TRUE);

                //
                // If we got here, something changed while we hadn't acquired
                // the PFN lock yet, so we'll have to restart
                //
                KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
                Length = 0;
            }
        }
    } while (++i != MmPhysicalMemoryBlock->NumberOfRuns);

    //
    // And if we get here, it means no suitable physical memory runs were found
    //
    return 0;
}
352
/*
 * Allocate physical pages in the [LowAddress, HighAddress] range and
 * return them described by an MDL.
 *
 * SkipBytes must be page-aligned (NULL is returned otherwise); it is
 * converted to SkipPages but not otherwise used by this implementation,
 * and CacheAttribute/MdlFlags are likewise unreferenced here.
 *
 * May return fewer pages than TotalBytes covers — callers must check
 * Mdl->ByteCount. Returns NULL if no MDL could be created or no pages
 * at all were found. All returned pages are zeroed (pages taken
 * unzeroed are cleared after the PFN lock is dropped).
 */
PMDL
NTAPI
MiAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress,
                      IN PHYSICAL_ADDRESS HighAddress,
                      IN PHYSICAL_ADDRESS SkipBytes,
                      IN SIZE_T TotalBytes,
                      IN MI_PFN_CACHE_ATTRIBUTE CacheAttribute,
                      IN ULONG MdlFlags)
{
    PMDL Mdl;
    PFN_NUMBER PageCount, LowPage, HighPage, SkipPages, PagesFound = 0, Page;
    PPFN_NUMBER MdlPage, LastMdlPage;
    KIRQL OldIrql;
    PLIST_ENTRY ListEntry;
    PPHYSICAL_PAGE Pfn1;
    INT LookForZeroedPages;
    ASSERT (KeGetCurrentIrql() <= APC_LEVEL);

    //
    // Convert the low address into a PFN
    //
    LowPage = (PFN_NUMBER)(LowAddress.QuadPart >> PAGE_SHIFT);

    //
    // Convert, and normalize, the high address into a PFN
    //
    HighPage = (PFN_NUMBER)(HighAddress.QuadPart >> PAGE_SHIFT);
    if (HighPage > MmHighestPhysicalPage) HighPage = MmHighestPhysicalPage;

    //
    // Validate skipbytes and convert them into pages
    //
    if (BYTE_OFFSET(SkipBytes.LowPart)) return NULL;
    SkipPages = (PFN_NUMBER)(SkipBytes.QuadPart >> PAGE_SHIFT);

    //
    // Now compute the number of pages the MDL will cover
    //
    PageCount = (PFN_NUMBER)ADDRESS_AND_SIZE_TO_SPAN_PAGES(0, TotalBytes);
    do
    {
        //
        // Try creating an MDL for these many pages
        //
        Mdl = MmCreateMdl(NULL, NULL, PageCount << PAGE_SHIFT);
        if (Mdl) break;

        //
        // This function is not required to return the amount of pages requested
        // In fact, it can return as little as 1 page, and callers are supposed
        // to deal with this scenario. So re-attempt the allocation with less
        // pages than before, and see if it worked this time.
        //
        PageCount -= (PageCount >> 4);
    } while (PageCount);

    //
    // Wow, not even a single page was around!
    //
    if (!Mdl) return NULL;

    //
    // This is where the page array starts....
    //
    MdlPage = (PPFN_NUMBER)(Mdl + 1);

    //
    // Lock the PFN database
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    //
    // Are we looking for any pages, without discriminating?
    //
    if ((LowPage == 0) && (HighPage == MmHighestPhysicalPage))
    {
        //
        // Well then, let's go shopping
        //
        while (PagesFound < PageCount)
        {
            //
            // Do we have zeroed pages?
            //
            if (!IsListEmpty(&FreeZeroedPageListHead))
            {
                //
                // Grab a zero page
                //
                ListEntry = RemoveTailList(&FreeZeroedPageListHead);
            }
            else if (!IsListEmpty(&FreeUnzeroedPageListHead))
            {
                //
                // Nope, grab an unzeroed page
                //
                ListEntry = RemoveTailList(&FreeUnzeroedPageListHead);
                UnzeroedPageCount--;
            }
            else
            {
                //
                // This is not good... hopefully we have at least SOME pages
                //
                ASSERT(PagesFound);
                break;
            }

            //
            // Get the PFN entry for this page
            //
            Pfn1 = CONTAINING_RECORD(ListEntry, PHYSICAL_PAGE, ListEntry);

            //
            // Make sure it's really free
            //
            ASSERT(Pfn1->Flags.Type == MM_PHYSICAL_PAGE_FREE);
            ASSERT(Pfn1->ReferenceCount == 0);

            //
            // Allocate it and mark it
            //
            Pfn1->Flags.Type = MM_PHYSICAL_PAGE_USED;
            Pfn1->Flags.Consumer = MC_NPPOOL;
            Pfn1->Flags.StartOfAllocation = 1;
            Pfn1->Flags.EndOfAllocation = 1;
            Pfn1->ReferenceCount = 1;
            Pfn1->LockCount = 0;
            Pfn1->SavedSwapEntry = 0;

            //
            // Decrease available pages
            //
            MmAvailablePages--;

            //
            // Save it into the MDL
            //
            *MdlPage++ = MiGetPfnEntryIndex(Pfn1);
            PagesFound++;
        }
    }
    else
    {
        //
        // You want specific range of pages. We'll do this in two runs
        // (first pass takes zeroed pages, second pass unzeroed ones)
        //
        for (LookForZeroedPages = 1; LookForZeroedPages >= 0; LookForZeroedPages--)
        {
            //
            // Scan the range you specified
            // NOTE(review): the loop bound excludes HighPage itself —
            // confirm whether the upper bound should be inclusive
            //
            for (Page = LowPage; Page < HighPage; Page++)
            {
                //
                // Get the PFN entry for this page
                //
                Pfn1 = MiGetPfnEntry(Page);
                ASSERT(Pfn1);

                //
                // Make sure it's free and if this is our first pass, zeroed
                //
                if (Pfn1->Flags.Type != MM_PHYSICAL_PAGE_FREE) continue;
                if (Pfn1->Flags.Zero != LookForZeroedPages) continue;

                //
                // Sanity checks
                //
                ASSERT(Pfn1->ReferenceCount == 0);

                //
                // Now setup the page and mark it
                //
                Pfn1->Flags.Type = MM_PHYSICAL_PAGE_USED;
                Pfn1->Flags.Consumer = MC_NPPOOL;
                Pfn1->ReferenceCount = 1;
                Pfn1->Flags.StartOfAllocation = 1;
                Pfn1->Flags.EndOfAllocation = 1;
                Pfn1->LockCount = 0;
                Pfn1->SavedSwapEntry = 0;

                //
                // If this page was unzeroed, we've consumed such a page
                //
                if (!Pfn1->Flags.Zero) UnzeroedPageCount--;

                //
                // Decrease available pages
                //
                MmAvailablePages--;

                //
                // Save this page into the MDL
                //
                *MdlPage++ = Page;
                if (++PagesFound == PageCount) break;
            }

            //
            // If the first pass was enough, don't keep going, otherwise, go again
            //
            if (PagesFound == PageCount) break;
        }
    }

    //
    // Now release the PFN lock
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    //
    // We might've found less pages, but not more ;-)
    //
    if (PagesFound != PageCount) ASSERT(PagesFound < PageCount);
    if (!PagesFound)
    {
        //
        // If we didn't find any pages at all, fail
        //
        DPRINT1("NO MDL PAGES!\n");
        ExFreePool(Mdl);
        return NULL;
    }

    //
    // Write out how many pages we found
    //
    Mdl->ByteCount = (ULONG)(PagesFound << PAGE_SHIFT);

    //
    // Terminate the MDL array if there's certain missing pages
    //
    if (PagesFound != PageCount) *MdlPage = -1;

    //
    // Now go back and loop over all the MDL pages
    //
    MdlPage = (PPFN_NUMBER)(Mdl + 1);
    LastMdlPage = MdlPage + PagesFound;
    while (MdlPage < LastMdlPage)
    {
        //
        // Check if we've reached the end
        //
        Page = *MdlPage++;
        if (Page == (PFN_NUMBER)-1) break;

        //
        // Get the PFN entry for the page and check if we should zero it out
        //
        Pfn1 = MiGetPfnEntry(Page);
        ASSERT(Pfn1);
        if (Pfn1->Flags.Zero == 0) MiZeroPage(Page);
    }

    //
    // We're done, mark the pages as locked (should we lock them, though???)
    //
    Mdl->Process = NULL;
    Mdl->MdlFlags |= MDL_PAGES_LOCKED;
    return Mdl;
}
616
/*
 * Debug helper: print one line per PFN database entry, then per-consumer
 * totals. Raises to HIGH_LEVEL for the duration so the database stays
 * stable while being dumped.
 *
 * Note: the consumer switch below has no default case, so an entry with
 * an unrecognized consumer value keeps the string from the previous
 * iteration ("Unknown" on the first one).
 */
VOID
NTAPI
MmDumpPfnDatabase(VOID)
{
    ULONG i;
    PPHYSICAL_PAGE Pfn1;
    PCHAR State = "????", Consumer = "Unknown";
    KIRQL OldIrql;
    ULONG Totals[5] = {0}, FreePages = 0;

    KeRaiseIrql(HIGH_LEVEL, &OldIrql);

    //
    // Loop the PFN database
    //
    for (i = 0; i <= MmHighestPhysicalPage; i++)
    {
        Pfn1 = MiGetPfnEntry(i);
        if (!Pfn1) continue;

        //
        // Get the consumer
        //
        switch (Pfn1->Flags.Consumer)
        {
            case MC_NPPOOL:

                Consumer = "Nonpaged Pool";
                break;

            case MC_PPOOL:

                Consumer = "Paged Pool";
                break;

            case MC_CACHE:

                Consumer = "File System Cache";
                break;

            case MC_USER:

                Consumer = "Process Working Set";
                break;

            case MC_SYSTEM:

                Consumer = "System";
                break;
        }

        //
        // Get the type
        //
        switch (Pfn1->Flags.Type)
        {
            case MM_PHYSICAL_PAGE_USED:

                State = "Used";
                Totals[Pfn1->Flags.Consumer]++;
                break;

            case MM_PHYSICAL_PAGE_FREE:

                State = "Free";
                Consumer = "Free";
                FreePages++;
                break;
        }

        //
        // Pretty-print the page
        //
        DbgPrint("0x%08p:\t%04s\t%20s\t(%02d.%02d) [%08p])\n",
                 i << PAGE_SHIFT,
                 State,
                 Consumer,
                 Pfn1->ReferenceCount,
                 Pfn1->LockCount,
                 Pfn1->RmapListHead);
    }

    DbgPrint("Nonpaged Pool: %d pages\t[%d KB]\n", Totals[MC_NPPOOL], (Totals[MC_NPPOOL] << PAGE_SHIFT) / 1024);
    DbgPrint("Paged Pool: %d pages\t[%d KB]\n", Totals[MC_PPOOL], (Totals[MC_PPOOL] << PAGE_SHIFT) / 1024);
    DbgPrint("File System Cache: %d pages\t[%d KB]\n", Totals[MC_CACHE], (Totals[MC_CACHE] << PAGE_SHIFT) / 1024);
    DbgPrint("Process Working Set: %d pages\t[%d KB]\n", Totals[MC_USER], (Totals[MC_USER] << PAGE_SHIFT) / 1024);
    DbgPrint("System: %d pages\t[%d KB]\n", Totals[MC_SYSTEM], (Totals[MC_SYSTEM] << PAGE_SHIFT) / 1024);
    DbgPrint("Free: %d pages\t[%d KB]\n", FreePages, (FreePages << PAGE_SHIFT) / 1024);

    KeLowerIrql(OldIrql);
}
708
/*
 * INIT: build the legacy (ReactOS) half of the PFN database from the
 * loader's memory descriptor list. Free/reusable descriptor ranges go
 * onto the unzeroed free list; everything else is marked as used
 * MC_NPPOOL memory, as are the pages that back the PFN database itself
 * (the range the database allocation consumed from MxFreeDescriptor).
 * Finally hands the page counts to the balancer.
 */
VOID
NTAPI
MmInitializePageList(VOID)
{
    ULONG i;
    PHYSICAL_PAGE UsedPage;
    PMEMORY_ALLOCATION_DESCRIPTOR Md;
    PLIST_ENTRY NextEntry;
    ULONG NrSystemPages = 0;

    /* Initialize the page lists */
    InitializeListHead(&UserPageListHead);
    InitializeListHead(&FreeUnzeroedPageListHead);
    InitializeListHead(&FreeZeroedPageListHead);

    /* This is what a used page looks like */
    RtlZeroMemory(&UsedPage, sizeof(UsedPage));
    UsedPage.Flags.Type = MM_PHYSICAL_PAGE_USED;
    UsedPage.Flags.Consumer = MC_NPPOOL;
    UsedPage.ReferenceCount = 1;

    /* Loop the memory descriptors */
    for (NextEntry = KeLoaderBlock->MemoryDescriptorListHead.Flink;
         NextEntry != &KeLoaderBlock->MemoryDescriptorListHead;
         NextEntry = NextEntry->Flink)
    {
/* The ListEntry macro (mapped to u1 above) must not rewrite the real
 * ListEntry field of MEMORY_ALLOCATION_DESCRIPTOR, so lift it here */
#undef ListEntry
        /* Get the descriptor */
        Md = CONTAINING_RECORD(NextEntry,
                               MEMORY_ALLOCATION_DESCRIPTOR,
                               ListEntry);
/* Restore the legacy-mapping macro for the PFN entries below */
#define ListEntry u1

        /* Skip bad memory */
        if ((Md->MemoryType == LoaderFirmwarePermanent) ||
            (Md->MemoryType == LoaderBBTMemory) ||
            (Md->MemoryType == LoaderSpecialMemory) ||
            (Md->MemoryType == LoaderBad))
        {
            //
            // We do not build PFN entries for this
            //
            continue;
        }
        else if ((Md->MemoryType == LoaderFree) ||
                 (Md->MemoryType == LoaderLoadedProgram) ||
                 (Md->MemoryType == LoaderFirmwareTemporary) ||
                 (Md->MemoryType == LoaderOsloaderStack))
        {
            /* Loop every page part of the block */
            for (i = 0; i < Md->PageCount; i++)
            {
                /* Mark it as a free page */
                MmPfnDatabase[0][Md->BasePage + i].Flags.Type = MM_PHYSICAL_PAGE_FREE;
                InsertTailList(&FreeUnzeroedPageListHead,
                               &MmPfnDatabase[0][Md->BasePage + i].ListEntry);
                UnzeroedPageCount++;
                MmAvailablePages++;
            }
        }
        else
        {
            /* Loop every page part of the block */
            for (i = 0; i < Md->PageCount; i++)
            {
                /* Everything else is used memory */
                MmPfnDatabase[0][Md->BasePage + i] = UsedPage;
                NrSystemPages++;
            }
        }
    }

    /* Finally handle the pages describing the PFN database themselves */
    for (i = MxOldFreeDescriptor.BasePage; i < MxFreeDescriptor->BasePage; i++)
    {
        /* Ensure this page was not added previously */
        ASSERT(MmPfnDatabase[0][i].Flags.Type == 0);

        /* Mark it as used kernel memory */
        MmPfnDatabase[0][i] = UsedPage;
        NrSystemPages++;
    }

    /* Signaled: the zero-page thread starts out with work to do */
    KeInitializeEvent(&ZeroPageThreadEvent, NotificationEvent, TRUE);
    DPRINT("Pages: %x %x\n", MmAvailablePages, NrSystemPages);
    MmInitializeBalancer(MmAvailablePages, NrSystemPages);
}
796
/*
 * Record the head of the reverse-mapping (rmap) chain in the page's PFN
 * entry, under the PFN lock.
 * NOTE(review): the pointer is stored through a (LONG) cast into the
 * RmapListHead field (mapped to AweReferenceCount above) — this would
 * truncate on a 64-bit target; confirm the field's width before porting.
 */
VOID
NTAPI
MmSetRmapListHeadPage(PFN_TYPE Pfn, struct _MM_RMAP_ENTRY* ListHead)
{
    KIRQL oldIrql;

    oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    MiGetPfnEntry(Pfn)->RmapListHead = (LONG)ListHead;
    KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
}
807
/*
 * Read back the head of the page's reverse-mapping chain, under the PFN
 * lock. Counterpart to MmSetRmapListHeadPage; the stored LONG value is
 * cast back to a pointer (see the truncation note on the setter).
 */
struct _MM_RMAP_ENTRY*
NTAPI
MmGetRmapListHeadPage(PFN_TYPE Pfn)
{
    KIRQL oldIrql;
    struct _MM_RMAP_ENTRY* ListHead;

    oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    ListHead = (struct _MM_RMAP_ENTRY*)MiGetPfnEntry(Pfn)->RmapListHead;
    KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);

    return(ListHead);
}
821
822 VOID
823 NTAPI
824 MmSetSavedSwapEntryPage(PFN_TYPE Pfn, SWAPENTRY SwapEntry)
825 {
826 KIRQL oldIrql;
827
828 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
829 MiGetPfnEntry(Pfn)->SavedSwapEntry = SwapEntry;
830 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
831 }
832
833 SWAPENTRY
834 NTAPI
835 MmGetSavedSwapEntryPage(PFN_TYPE Pfn)
836 {
837 SWAPENTRY SwapEntry;
838 KIRQL oldIrql;
839
840 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
841 SwapEntry = MiGetPfnEntry(Pfn)->SavedSwapEntry;
842 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
843
844 return(SwapEntry);
845 }
846
/*
 * Increment the reference count of a used physical page.
 * PFN 0 and PFNs above MmHighestPhysicalPage are silently ignored;
 * referencing a page that is not in the used state bugchecks.
 * NOTE(review): the increment happens without the PFN lock —
 * presumably callers hold it or serialize otherwise; confirm at the
 * call sites.
 */
VOID
NTAPI
MmReferencePage(PFN_TYPE Pfn)
{
    PPHYSICAL_PAGE Page;

    DPRINT("MmReferencePage(PysicalAddress %x)\n", Pfn << PAGE_SHIFT);

    /* Out-of-range PFNs are ignored rather than treated as errors */
    if (Pfn == 0 || Pfn > MmHighestPhysicalPage)
    {
        return;
    }

    Page = MiGetPfnEntry(Pfn);
    ASSERT(Page);
    if (Page->Flags.Type != MM_PHYSICAL_PAGE_USED)
    {
        DPRINT1("Referencing non-used page\n");
        KeBugCheck(MEMORY_MANAGEMENT);
    }

    Page->ReferenceCount++;
}
870
871 ULONG
872 NTAPI
873 MmGetReferenceCountPage(PFN_TYPE Pfn)
874 {
875 KIRQL oldIrql;
876 ULONG RCount;
877 PPHYSICAL_PAGE Page;
878
879 DPRINT("MmGetReferenceCountPage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
880
881 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
882 Page = MiGetPfnEntry(Pfn);
883 ASSERT(Page);
884 if (Page->Flags.Type != MM_PHYSICAL_PAGE_USED)
885 {
886 DPRINT1("Getting reference count for free page\n");
887 KeBugCheck(MEMORY_MANAGEMENT);
888 }
889
890 RCount = Page->ReferenceCount;
891
892 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
893 return(RCount);
894 }
895
896 BOOLEAN
897 NTAPI
898 MmIsPageInUse(PFN_TYPE Pfn)
899 {
900
901 DPRINT("MmIsPageInUse(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
902
903 return (MiGetPfnEntry(Pfn)->Flags.Type == MM_PHYSICAL_PAGE_USED);
904 }
905
/*
 * Drop one reference from a used page. When the count reaches zero the
 * page is sanity-checked (no rmap entries, not locked, no saved swap
 * entry), marked free, placed on the unzeroed free list, and the
 * zero-page thread is woken once more than 8 unzeroed pages have
 * accumulated. Bugchecks on dereferencing a free page or a page whose
 * count is already zero.
 * NOTE(review): runs without the PFN lock — presumably callers hold it
 * or serialize otherwise; confirm at the call sites.
 */
VOID
NTAPI
MmDereferencePage(PFN_TYPE Pfn)
{
    PPHYSICAL_PAGE Page;

    DPRINT("MmDereferencePage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);

    Page = MiGetPfnEntry(Pfn);
    ASSERT(Page);

    if (Page->Flags.Type != MM_PHYSICAL_PAGE_USED)
    {
        DPRINT1("Dereferencing free page\n");
        KeBugCheck(MEMORY_MANAGEMENT);
    }
    if (Page->ReferenceCount == 0)
    {
        DPRINT1("Derefrencing page with reference count 0\n");
        KeBugCheck(MEMORY_MANAGEMENT);
    }

    Page->ReferenceCount--;
    if (Page->ReferenceCount == 0)
    {
        MmAvailablePages++;
        /* User pages still sit on the LRU list; unlink before freeing */
        if (Page->Flags.Consumer == MC_USER) RemoveEntryList(&Page->ListEntry);
        if (Page->RmapListHead != (LONG)NULL)
        {
            DPRINT1("Freeing page with rmap entries.\n");
            KeBugCheck(MEMORY_MANAGEMENT);
        }
        if (Page->LockCount > 0)
        {
            DPRINT1("Freeing locked page\n");
            KeBugCheck(MEMORY_MANAGEMENT);
        }
        if (Page->SavedSwapEntry != 0)
        {
            DPRINT1("Freeing page with swap entry.\n");
            KeBugCheck(MEMORY_MANAGEMENT);
        }
        if (Page->Flags.Type != MM_PHYSICAL_PAGE_USED)
        {
            DPRINT1("Freeing page with flags %x\n",
                    Page->Flags.Type);
            KeBugCheck(MEMORY_MANAGEMENT);
        }
        Page->Flags.Type = MM_PHYSICAL_PAGE_FREE;
        Page->Flags.Consumer = MC_MAXIMUM;
        InsertTailList(&FreeUnzeroedPageListHead,
                       &Page->ListEntry);
        UnzeroedPageCount++;
        /* Wake the zero-page thread once a batch of work has built up */
        if (UnzeroedPageCount > 8 && 0 == KeReadStateEvent(&ZeroPageThreadEvent))
        {
            KeSetEvent(&ZeroPageThreadEvent, IO_NO_INCREMENT, FALSE);
        }
    }
}
965
966 ULONG
967 NTAPI
968 MmGetLockCountPage(PFN_TYPE Pfn)
969 {
970 KIRQL oldIrql;
971 ULONG CurrentLockCount;
972 PPHYSICAL_PAGE Page;
973
974 DPRINT("MmGetLockCountPage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
975
976 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
977
978 Page = MiGetPfnEntry(Pfn);
979 ASSERT(Page);
980 if (Page->Flags.Type != MM_PHYSICAL_PAGE_USED)
981 {
982 DPRINT1("Getting lock count for free page\n");
983 KeBugCheck(MEMORY_MANAGEMENT);
984 }
985
986 CurrentLockCount = Page->LockCount;
987 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
988
989 return(CurrentLockCount);
990 }
991
992 VOID
993 NTAPI
994 MmLockPage(PFN_TYPE Pfn)
995 {
996 PPHYSICAL_PAGE Page;
997
998 DPRINT("MmLockPage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
999
1000 Page = MiGetPfnEntry(Pfn);
1001 ASSERT(Page);
1002 if (Page->Flags.Type != MM_PHYSICAL_PAGE_USED)
1003 {
1004 DPRINT1("Locking free page\n");
1005 KeBugCheck(MEMORY_MANAGEMENT);
1006 }
1007
1008 Page->LockCount++;
1009 }
1010
1011 VOID
1012 NTAPI
1013 MmUnlockPage(PFN_TYPE Pfn)
1014 {
1015 PPHYSICAL_PAGE Page;
1016
1017 DPRINT("MmUnlockPage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
1018
1019 Page = MiGetPfnEntry(Pfn);
1020 ASSERT(Page);
1021 if (Page->Flags.Type != MM_PHYSICAL_PAGE_USED)
1022 {
1023 DPRINT1("Unlocking free page\n");
1024 KeBugCheck(MEMORY_MANAGEMENT);
1025 }
1026
1027 Page->LockCount--;
1028 }
1029
/*
 * Allocate a single physical page for the given consumer, tagging it
 * with SwapEntry. Prefers the zeroed free list; a page taken from the
 * unzeroed list is zeroed here unless the consumer is MC_SYSTEM.
 * Returns the PFN, or 0 when both free lists are empty.
 * NOTE(review): the free lists are manipulated without the PFN lock —
 * presumably callers serialize; confirm at the call sites.
 */
PFN_TYPE
NTAPI
MmAllocPage(ULONG Consumer, SWAPENTRY SwapEntry)
{
    PFN_TYPE PfnOffset;
    PLIST_ENTRY ListEntry;
    PPHYSICAL_PAGE PageDescriptor;
    BOOLEAN NeedClear = FALSE;

    DPRINT("MmAllocPage()\n");

    if (IsListEmpty(&FreeZeroedPageListHead))
    {
        if (IsListEmpty(&FreeUnzeroedPageListHead))
        {
            /* Check if this allocation is for the PFN DB itself */
            if (MmNumberOfPhysicalPages == 0)
            {
                ASSERT(FALSE);
            }

            DPRINT1("MmAllocPage(): Out of memory\n");
            return 0;
        }
        ListEntry = RemoveTailList(&FreeUnzeroedPageListHead);
        UnzeroedPageCount--;

        PageDescriptor = CONTAINING_RECORD(ListEntry, PHYSICAL_PAGE, ListEntry);

        /* Remember that this page still holds stale data */
        NeedClear = TRUE;
    }
    else
    {
        ListEntry = RemoveTailList(&FreeZeroedPageListHead);

        PageDescriptor = CONTAINING_RECORD(ListEntry, PHYSICAL_PAGE, ListEntry);
    }

    if (PageDescriptor->Flags.Type != MM_PHYSICAL_PAGE_FREE)
    {
        DPRINT1("Got non-free page from freelist\n");
        KeBugCheck(MEMORY_MANAGEMENT);
    }
    if (PageDescriptor->ReferenceCount != 0)
    {
        DPRINT1("%d\n", PageDescriptor->ReferenceCount);
        KeBugCheck(MEMORY_MANAGEMENT);
    }
    /* Set up the descriptor for its new owner */
    PageDescriptor->Flags.Type = MM_PHYSICAL_PAGE_USED;
    PageDescriptor->Flags.Consumer = Consumer;
    PageDescriptor->ReferenceCount = 1;
    PageDescriptor->LockCount = 0;
    PageDescriptor->SavedSwapEntry = SwapEntry;

    MmAvailablePages--;

    /* The PFN is the descriptor's offset within the legacy database */
    PfnOffset = PageDescriptor - MmPfnDatabase[0];
    if ((NeedClear) && (Consumer != MC_SYSTEM))
    {
        MiZeroPage(PfnOffset);
    }
    return PfnOffset;
}
1093
1094 NTSTATUS
1095 NTAPI
1096 MiZeroPage(PFN_TYPE Page)
1097 {
1098 KIRQL Irql;
1099 PVOID TempAddress;
1100
1101 Irql = KeRaiseIrqlToDpcLevel();
1102 TempAddress = MiMapPageToZeroInHyperSpace(Page);
1103 if (TempAddress == NULL)
1104 {
1105 return(STATUS_NO_MEMORY);
1106 }
1107 memset(TempAddress, 0, PAGE_SIZE);
1108 MiUnmapPagesInZeroSpace(TempAddress, 1);
1109 KeLowerIrql(Irql);
1110 return(STATUS_SUCCESS);
1111 }
1112
/*
 * Zero Page Worker (ZPW) thread entry point. Runs at the lowest
 * priority, waits on ZeroPageThreadEvent, then drains the unzeroed
 * free list: each page is temporarily marked used (so other code
 * leaves it alone while the PFN lock is dropped), zeroed via
 * MiZeroPage, then returned to the zeroed list — or back to the
 * unzeroed list if zeroing failed. Exits with STATUS_SUCCESS when
 * ZeroPageThreadShouldTerminate is set.
 */
NTSTATUS
NTAPI
MmZeroPageThreadMain(PVOID Ignored)
{
    NTSTATUS Status;
    KIRQL oldIrql;
    PLIST_ENTRY ListEntry;
    PPHYSICAL_PAGE PageDescriptor;
    PFN_TYPE Pfn;
    ULONG Count;

    /* Free initial kernel memory */
    //MiFreeInitMemory();

    /* Set our priority to 0 */
    KeGetCurrentThread()->BasePriority = 0;
    KeSetPriorityThread(KeGetCurrentThread(), 0);

    while(1)
    {
        Status = KeWaitForSingleObject(&ZeroPageThreadEvent,
                                       0,
                                       KernelMode,
                                       FALSE,
                                       NULL);
        if (!NT_SUCCESS(Status))
        {
            DPRINT1("ZeroPageThread: Wait failed\n");
            KeBugCheck(MEMORY_MANAGEMENT);
        }

        if (ZeroPageThreadShouldTerminate)
        {
            DPRINT1("ZeroPageThread: Terminating\n");
            return STATUS_SUCCESS;
        }
        Count = 0;
        oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
        while (!IsListEmpty(&FreeUnzeroedPageListHead))
        {
            ListEntry = RemoveTailList(&FreeUnzeroedPageListHead);
            UnzeroedPageCount--;
            PageDescriptor = CONTAINING_RECORD(ListEntry, PHYSICAL_PAGE, ListEntry);
            /* We set the page to used, because MmCreateVirtualMapping failed with unused pages */
            PageDescriptor->Flags.Type = MM_PHYSICAL_PAGE_USED;
            /* Drop the lock while zeroing — MiZeroPage raises IRQL and
             * maps hyperspace, which must not happen under the PFN lock */
            KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
            Pfn = PageDescriptor - MmPfnDatabase[0];
            Status = MiZeroPage(Pfn);

            oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
            PageDescriptor->Flags.Zero = 1;
            PageDescriptor->Flags.Type = MM_PHYSICAL_PAGE_FREE;
            if (NT_SUCCESS(Status))
            {
                InsertHeadList(&FreeZeroedPageListHead, ListEntry);
                Count++;
            }
            else
            {
                /* Zeroing failed — put the page back where it came from */
                InsertHeadList(&FreeUnzeroedPageListHead, ListEntry);
                UnzeroedPageCount++;
            }

        }
        DPRINT("Zeroed %d pages.\n", Count);
        KeResetEvent(&ZeroPageThreadEvent);
        KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
    }

    return STATUS_SUCCESS;
}
1184
1185 /* EOF */