Hopefully create a branch and not destroy the svn repository.
[reactos.git] / ntoskrnl / mm / freelist.c
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/mm/freelist.c
5 * PURPOSE: Handle the list of free physical pages
6 *
7 * PROGRAMMERS: David Welch (welch@cwcom.net)
8 * Robert Bergkvist
9 */
10
11 /* INCLUDES ****************************************************************/
12
13 #include <ntoskrnl.h>
14 #define NDEBUG
15 #include <debug.h>
16
17 #if defined (ALLOC_PRAGMA)
18 #pragma alloc_text(INIT, MmInitializePageList)
19 #endif
20
21 #define MODULE_INVOLVED_IN_ARM3
22 #include "ARM3/miarm.h"
23
24 /* TYPES *******************************************************************/
25
/* Legacy page-state values stored in the (remapped) Type field below */
#define MM_PHYSICAL_PAGE_FREE    (0x1)
#define MM_PHYSICAL_PAGE_USED    (0x2)

/* GLOBALS ****************************************************************/

//
//
// ReactOS to NT Physical Page Descriptor Entry Legacy Mapping Definitions
//
//        REACTOS                 NT
//
// These macros transparently rename the legacy ReactOS PFN fields onto the
// NT-style MMPFN structure members, so the old code below compiles against
// the new descriptor layout. NOTE: RemoveEntryList/InsertTailList are
// redefined to insert the required PLIST_ENTRY casts.
#define Consumer PageLocation
#define Type CacheAttribute
#define Zero PrototypePte
#define LockCount u3.e1.PageColor
#define RmapListHead AweReferenceCount
#define SavedSwapEntry u4.EntireFrame
#define Flags u3.e1
#define ReferenceCount u3.ReferenceCount
#define RemoveEntryList(x) RemoveEntryList((PLIST_ENTRY)x)
#define InsertTailList(x, y) InsertTailList(x, (PLIST_ENTRY)y)
#define ListEntry u1
#define PHYSICAL_PAGE MMPFN
#define PPHYSICAL_PAGE PMMPFN

/* Base of the PFN database: one PHYSICAL_PAGE entry per physical page frame */
PPHYSICAL_PAGE MmPfnDatabase;

/* Count of pages currently on the free/zeroed lists (see MmAllocPage etc.) */
ULONG MmAvailablePages;
ULONG MmResidentAvailablePages;

/* Commit accounting counters */
SIZE_T MmTotalCommitLimit;
SIZE_T MmTotalCommittedPages;
SIZE_T MmSharedCommit;
SIZE_T MmDriverCommit;
SIZE_T MmProcessCommit;
SIZE_T MmPagedPoolCommit;
SIZE_T MmPeakCommitment;
/* NOTE(review): lowercase 't' in the name looks like a typo for
   "MmTotalCommitLimitMaximum" -- renaming would break external references,
   so it is kept as-is. */
SIZE_T MmtotalCommitLimitMaximum;

/* NT-style page list heads (not yet used by the legacy code below) */
MMPFNLIST MmZeroedPageListHead;
MMPFNLIST MmFreePageListHead;
MMPFNLIST MmStandbyPageListHead;
MMPFNLIST MmModifiedPageListHead;
MMPFNLIST MmModifiedNoWritePageListHead;

/* List of pages allocated to the MC_USER Consumer */
static LIST_ENTRY UserPageListHead;
/* List of pages zeroed by the ZPW (MmZeroPageThreadMain) */
static LIST_ENTRY FreeZeroedPageListHead;
/* List of free pages, filled by MmGetReferenceCountPage and
 * and MmInitializePageList */
static LIST_ENTRY FreeUnzeroedPageListHead;

/* Signaled to wake the zero-page worker thread */
static KEVENT ZeroPageThreadEvent;
static BOOLEAN ZeroPageThreadShouldTerminate = FALSE;

/* Number of entries currently on FreeUnzeroedPageListHead */
static ULONG UnzeroedPageCount = 0;
84 /* FUNCTIONS *************************************************************/
85
86 PFN_TYPE
87 NTAPI
88 MmGetLRUFirstUserPage(VOID)
89 {
90 PLIST_ENTRY NextListEntry;
91 PHYSICAL_PAGE* PageDescriptor;
92 KIRQL oldIrql;
93
94 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
95 NextListEntry = UserPageListHead.Flink;
96 if (NextListEntry == &UserPageListHead)
97 {
98 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
99 return 0;
100 }
101 PageDescriptor = CONTAINING_RECORD(NextListEntry, PHYSICAL_PAGE, ListEntry);
102 ASSERT_PFN(PageDescriptor);
103 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
104 return PageDescriptor - MmPfnDatabase;
105 }
106
107 VOID
108 NTAPI
109 MmInsertLRULastUserPage(PFN_TYPE Pfn)
110 {
111 KIRQL oldIrql;
112 PPHYSICAL_PAGE Page;
113
114 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
115 Page = MiGetPfnEntry(Pfn);
116 ASSERT(Page);
117 ASSERT(Page->Flags.Type == MM_PHYSICAL_PAGE_USED);
118 ASSERT(Page->Flags.Consumer == MC_USER);
119 InsertTailList(&UserPageListHead, &Page->ListEntry);
120 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
121 }
122
123 PFN_TYPE
124 NTAPI
125 MmGetLRUNextUserPage(PFN_TYPE PreviousPfn)
126 {
127 PLIST_ENTRY NextListEntry;
128 PHYSICAL_PAGE* PageDescriptor;
129 KIRQL oldIrql;
130 PPHYSICAL_PAGE Page;
131
132 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
133 Page = MiGetPfnEntry(PreviousPfn);
134 ASSERT(Page);
135 ASSERT(Page->Flags.Type == MM_PHYSICAL_PAGE_USED);
136 ASSERT(Page->Flags.Consumer == MC_USER);
137 NextListEntry = (PLIST_ENTRY)Page->ListEntry.Flink;
138 if (NextListEntry == &UserPageListHead)
139 {
140 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
141 return 0;
142 }
143 PageDescriptor = CONTAINING_RECORD(NextListEntry, PHYSICAL_PAGE, ListEntry);
144 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
145 return PageDescriptor - MmPfnDatabase;
146 }
147
VOID
NTAPI
MmRemoveLRUUserPage(PFN_TYPE Page)
{
    /* Unlink the page's entry from the MC_USER LRU list.
       NOTE(review): unlike the other LRU helpers above, no PFN lock is
       acquired here -- presumably callers already hold it; confirm. */
    RemoveEntryList(&MiGetPfnEntry(Page)->ListEntry);
}
154
/*
 * Scan the physical memory runs for SizeInPages contiguous free pages within
 * [LowestPfn, HighestPfn], optionally aligned so the range does not cross a
 * BoundaryPfn-aligned boundary. On success the pages are removed from the
 * free lists, marked used (MC_NPPOOL), zeroed if needed, and the first PFN
 * is returned; returns 0 if no suitable run exists.
 *
 * The scan is done in two phases: a lock-free pre-scan to find a candidate
 * range, then a re-verification of every candidate page under the PFN lock
 * (pages may have been grabbed in between). CacheType is currently unused.
 */
PFN_NUMBER
NTAPI
MiFindContiguousPages(IN PFN_NUMBER LowestPfn,
                      IN PFN_NUMBER HighestPfn,
                      IN PFN_NUMBER BoundaryPfn,
                      IN PFN_NUMBER SizeInPages,
                      IN MEMORY_CACHING_TYPE CacheType)
{
    PFN_NUMBER Page, PageCount, LastPage, Length, BoundaryMask;
    ULONG i = 0;
    PMMPFN Pfn1, EndPfn;
    KIRQL OldIrql;
    PAGED_CODE ();
    ASSERT(SizeInPages != 0);

    //
    // Convert the boundary PFN into an alignment mask
    //
    BoundaryMask = ~(BoundaryPfn - 1);

    //
    // Loop all the physical memory blocks
    //
    do
    {
        //
        // Capture the base page and length of this memory block
        //
        Page = MmPhysicalMemoryBlock->Run[i].BasePage;
        PageCount = MmPhysicalMemoryBlock->Run[i].PageCount;

        //
        // Check how far this memory block will go
        //
        LastPage = Page + PageCount;

        //
        // Trim it down to only the PFNs we're actually interested in
        //
        if ((LastPage - 1) > HighestPfn) LastPage = HighestPfn + 1;
        if (Page < LowestPfn) Page = LowestPfn;

        //
        // Skip this run if it's empty or fails to contain all the pages we need
        //
        if (!(PageCount) || ((Page + SizeInPages) > LastPage)) continue;

        //
        // Now scan all the relevant PFNs in this run
        //
        Length = 0;
        for (Pfn1 = MiGetPfnEntry(Page); Page < LastPage; Page++, Pfn1++)
        {
            //
            // If this PFN is in use, ignore it
            //
            if (Pfn1->Flags.Type != MM_PHYSICAL_PAGE_FREE) continue;

            //
            // If we haven't chosen a start PFN yet and the caller specified an
            // alignment, make sure the page matches the alignment restriction
            //
            if ((!(Length) && (BoundaryPfn)) &&
                (((Page ^ (Page + SizeInPages - 1)) & BoundaryMask)))
            {
                //
                // It does not, so bail out
                //
                continue;
            }

            //
            // Increase the number of valid pages, and check if we have enough
            //
            if (++Length == SizeInPages)
            {
                //
                // It appears we've amassed enough legitimate pages, rollback
                //
                Pfn1 -= (Length - 1);
                Page -= (Length - 1);

                //
                // Acquire the PFN lock
                //
                OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
                do
                {
                    //
                    // Things might've changed for us. Is the page still free?
                    //
                    if (Pfn1->Flags.Type != MM_PHYSICAL_PAGE_FREE) break;

                    //
                    // So far so good. Is this the last confirmed valid page?
                    //
                    if (!--Length)
                    {
                        //
                        // Sanity check that we didn't go out of bounds
                        //
                        ASSERT(i != MmPhysicalMemoryBlock->NumberOfRuns);

                        //
                        // Loop until all PFN entries have been processed
                        // (walking BACKWARD from the last confirmed page)
                        //
                        EndPfn = Pfn1 - SizeInPages + 1;
                        do
                        {
                            //
                            // If this was an unzeroed page, there are now less
                            //
                            if (Pfn1->Flags.Zero == 0) UnzeroedPageCount--;

                            //
                            // One less free page
                            //
                            MmAvailablePages--;

                            //
                            // This PFN is now a used page, set it up
                            //
                            RemoveEntryList(&Pfn1->ListEntry);
                            Pfn1->Flags.Type = MM_PHYSICAL_PAGE_USED;
                            Pfn1->Flags.Consumer = MC_NPPOOL;
                            Pfn1->ReferenceCount = 1;
                            Pfn1->LockCount = 0;
                            Pfn1->SavedSwapEntry = 0;

                            //
                            // Check if it was already zeroed
                            //
                            if (Pfn1->Flags.Zero == 0)
                            {
                                //
                                // It wasn't, so zero it
                                //
                                MiZeroPage(MiGetPfnEntryIndex(Pfn1));
                            }

                            //
                            // Check if this is the last PFN, otherwise go on
                            //
                            if (Pfn1 == EndPfn) break;
                            Pfn1--;
                        } while (TRUE);

                        //
                        // Mark the first and last PFN so we can find them later
                        //
                        Pfn1->Flags.StartOfAllocation = 1;
                        (Pfn1 + SizeInPages - 1)->Flags.EndOfAllocation = 1;

                        //
                        // Now it's safe to let go of the PFN lock
                        //
                        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

                        //
                        // Quick sanity check that the last PFN is consistent
                        //
                        EndPfn = Pfn1 + SizeInPages;
                        ASSERT(EndPfn == MiGetPfnEntry(Page + 1));

                        //
                        // Compute the first page, and make sure it's consistent
                        //
                        Page -= SizeInPages - 1;
                        ASSERT(Pfn1 == MiGetPfnEntry(Page));
                        ASSERT(Page != 0);
                        return Page;
                    }

                    //
                    // Keep going. The purpose of this loop is to reconfirm that
                    // after acquiring the PFN lock these pages are still usable
                    //
                    Pfn1++;
                    Page++;
                } while (TRUE);

                //
                // If we got here, something changed while we hadn't acquired
                // the PFN lock yet, so we'll have to restart
                //
                KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
                Length = 0;
            }
        }
    } while (++i != MmPhysicalMemoryBlock->NumberOfRuns);

    //
    // And if we get here, it means no suitable physical memory runs were found
    //
    return 0;
}
351
/*
 * Allocate up to TotalBytes worth of physical pages (possibly fewer -- callers
 * must handle partial results) within [LowAddress, HighAddress], and return
 * them described by an MDL. Pages are taken from the zeroed list first, then
 * from the unzeroed list; any unzeroed pages are zeroed before returning.
 * Returns NULL if no MDL or no pages could be obtained.
 *
 * NOTE(review): SkipBytes must be page-aligned but is otherwise unused by the
 * page scan; CacheAttribute and MdlFlags are currently ignored -- confirm
 * against callers.
 */
PMDL
NTAPI
MiAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress,
                      IN PHYSICAL_ADDRESS HighAddress,
                      IN PHYSICAL_ADDRESS SkipBytes,
                      IN SIZE_T TotalBytes,
                      IN MI_PFN_CACHE_ATTRIBUTE CacheAttribute,
                      IN ULONG MdlFlags)
{
    PMDL Mdl;
    PFN_NUMBER PageCount, LowPage, HighPage, SkipPages, PagesFound = 0, Page;
    PPFN_NUMBER MdlPage, LastMdlPage;
    KIRQL OldIrql;
    PLIST_ENTRY ListEntry;
    PPHYSICAL_PAGE Pfn1;
    INT LookForZeroedPages;
    ASSERT (KeGetCurrentIrql() <= APC_LEVEL);

    //
    // Convert the low address into a PFN
    //
    LowPage = (PFN_NUMBER)(LowAddress.QuadPart >> PAGE_SHIFT);

    //
    // Convert, and normalize, the high address into a PFN
    //
    HighPage = (PFN_NUMBER)(HighAddress.QuadPart >> PAGE_SHIFT);
    if (HighPage > MmHighestPhysicalPage) HighPage = MmHighestPhysicalPage;

    //
    // Validate skipbytes and convert them into pages
    //
    if (BYTE_OFFSET(SkipBytes.LowPart)) return NULL;
    SkipPages = (PFN_NUMBER)(SkipBytes.QuadPart >> PAGE_SHIFT);

    //
    // Now compute the number of pages the MDL will cover
    //
    PageCount = (PFN_NUMBER)ADDRESS_AND_SIZE_TO_SPAN_PAGES(0, TotalBytes);
    do
    {
        //
        // Try creating an MDL for these many pages
        //
        Mdl = MmCreateMdl(NULL, NULL, PageCount << PAGE_SHIFT);
        if (Mdl) break;

        //
        // This function is not required to return the amount of pages requested
        // In fact, it can return as little as 1 page, and callers are supposed
        // to deal with this scenario. So re-attempt the allocation with less
        // pages than before, and see if it worked this time.
        //
        PageCount -= (PageCount >> 4);
    } while (PageCount);

    //
    // Wow, not even a single page was around!
    //
    if (!Mdl) return NULL;

    //
    // This is where the page array starts....
    //
    MdlPage = (PPFN_NUMBER)(Mdl + 1);

    //
    // Lock the PFN database
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    //
    // Are we looking for any pages, without discriminating?
    //
    if ((LowPage == 0) && (HighPage == MmHighestPhysicalPage))
    {
        //
        // Well then, let's go shopping
        //
        while (PagesFound < PageCount)
        {
            //
            // Do we have zeroed pages?
            //
            if (!IsListEmpty(&FreeZeroedPageListHead))
            {
                //
                // Grab a zero page
                //
                ListEntry = RemoveTailList(&FreeZeroedPageListHead);
            }
            else if (!IsListEmpty(&FreeUnzeroedPageListHead))
            {
                //
                // Nope, grab an unzeroed page
                //
                ListEntry = RemoveTailList(&FreeUnzeroedPageListHead);
                UnzeroedPageCount--;
            }
            else
            {
                //
                // This is not good... hopefully we have at least SOME pages
                //
                ASSERT(PagesFound);
                break;
            }

            //
            // Get the PFN entry for this page
            //
            Pfn1 = CONTAINING_RECORD(ListEntry, PHYSICAL_PAGE, ListEntry);

            //
            // Make sure it's really free
            //
            ASSERT(Pfn1->Flags.Type == MM_PHYSICAL_PAGE_FREE);
            ASSERT(Pfn1->ReferenceCount == 0);

            //
            // Allocate it and mark it
            //
            Pfn1->Flags.Type = MM_PHYSICAL_PAGE_USED;
            Pfn1->Flags.Consumer = MC_NPPOOL;
            Pfn1->Flags.StartOfAllocation = 1;
            Pfn1->Flags.EndOfAllocation = 1;
            Pfn1->ReferenceCount = 1;
            Pfn1->LockCount = 0;
            Pfn1->SavedSwapEntry = 0;

            //
            // Decrease available pages
            //
            MmAvailablePages--;

            //
            // Save it into the MDL
            //
            *MdlPage++ = MiGetPfnEntryIndex(Pfn1);
            PagesFound++;
        }
    }
    else
    {
        //
        // You want specific range of pages. We'll do this in two runs
        //
        for (LookForZeroedPages = 1; LookForZeroedPages >= 0; LookForZeroedPages--)
        {
            //
            // Scan the range you specified
            // NOTE(review): the scan is exclusive of HighPage itself -- confirm
            // this off-by-one is intended.
            //
            for (Page = LowPage; Page < HighPage; Page++)
            {
                //
                // Get the PFN entry for this page
                //
                Pfn1 = MiGetPfnEntry(Page);
                ASSERT(Pfn1);

                //
                // Make sure it's free and if this is our first pass, zeroed
                //
                if (Pfn1->Flags.Type != MM_PHYSICAL_PAGE_FREE) continue;
                if (Pfn1->Flags.Zero != LookForZeroedPages) continue;

                //
                // Sanity checks
                //
                ASSERT(Pfn1->ReferenceCount == 0);

                //
                // Now setup the page and mark it
                //
                Pfn1->Flags.Type = MM_PHYSICAL_PAGE_USED;
                Pfn1->Flags.Consumer = MC_NPPOOL;
                Pfn1->ReferenceCount = 1;
                Pfn1->Flags.StartOfAllocation = 1;
                Pfn1->Flags.EndOfAllocation = 1;
                Pfn1->LockCount = 0;
                Pfn1->SavedSwapEntry = 0;

                //
                // If this page was unzeroed, we've consumed such a page
                //
                if (!Pfn1->Flags.Zero) UnzeroedPageCount--;

                //
                // Decrease available pages
                //
                MmAvailablePages--;

                //
                // Save this page into the MDL
                //
                *MdlPage++ = Page;
                if (++PagesFound == PageCount) break;
            }

            //
            // If the first pass was enough, don't keep going, otherwise, go again
            //
            if (PagesFound == PageCount) break;
        }
    }

    //
    // Now release the PFN lock
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    //
    // We might've found less pages, but not more ;-)
    //
    if (PagesFound != PageCount) ASSERT(PagesFound < PageCount);
    if (!PagesFound)
    {
        //
        // If we didn't find any pages at all, fail
        //
        DPRINT1("NO MDL PAGES!\n");
        ExFreePool(Mdl);
        return NULL;
    }

    //
    // Write out how many pages we found
    //
    Mdl->ByteCount = (ULONG)(PagesFound << PAGE_SHIFT);

    //
    // Terminate the MDL array if there's certain missing pages
    //
    if (PagesFound != PageCount) *MdlPage = -1;

    //
    // Now go back and loop over all the MDL pages
    //
    MdlPage = (PPFN_NUMBER)(Mdl + 1);
    LastMdlPage = MdlPage + PagesFound;
    while (MdlPage < LastMdlPage)
    {
        //
        // Check if we've reached the end
        //
        Page = *MdlPage++;
        if (Page == (PFN_NUMBER)-1) break;

        //
        // Get the PFN entry for the page and check if we should zero it out
        //
        Pfn1 = MiGetPfnEntry(Page);
        ASSERT(Pfn1);
        if (Pfn1->Flags.Zero == 0) MiZeroPage(Page);
    }

    //
    // We're done, mark the pages as locked (should we lock them, though???)
    //
    Mdl->Process = NULL;
    Mdl->MdlFlags |= MDL_PAGES_LOCKED;
    return Mdl;
}
615
/*
 * Debug helper: walk the whole PFN database at HIGH_LEVEL (so nothing can
 * change underneath us) and print a line per page plus per-consumer totals.
 * NOTE(review): the consumer switch has no default case, so an entry with an
 * unrecognized Consumer value reuses the string from the previous iteration
 * ("Unknown" only for the very first) -- confirm this is acceptable for a
 * debug dump.
 */
VOID
NTAPI
MmDumpPfnDatabase(VOID)
{
    ULONG i;
    PPHYSICAL_PAGE Pfn1;
    PCHAR State = "????", Consumer = "Unknown";
    KIRQL OldIrql;
    ULONG Totals[5] = {0}, FreePages = 0;

    KeRaiseIrql(HIGH_LEVEL, &OldIrql);

    //
    // Loop the PFN database
    //
    for (i = 0; i <= MmHighestPhysicalPage; i++)
    {
        Pfn1 = MiGetPfnEntry(i);
        if (!Pfn1) continue;

        //
        // Get the consumer
        //
        switch (Pfn1->Flags.Consumer)
        {
            case MC_NPPOOL:

                Consumer = "Nonpaged Pool";
                break;

            case MC_PPOOL:

                Consumer = "Paged Pool";
                break;

            case MC_CACHE:

                Consumer = "File System Cache";
                break;

            case MC_USER:

                Consumer = "Process Working Set";
                break;

            case MC_SYSTEM:

                Consumer = "System";
                break;
        }

        //
        // Get the type
        //
        switch (Pfn1->Flags.Type)
        {
            case MM_PHYSICAL_PAGE_USED:

                State = "Used";
                Totals[Pfn1->Flags.Consumer]++;
                break;

            case MM_PHYSICAL_PAGE_FREE:

                State = "Free";
                Consumer = "Free";
                FreePages++;
                break;
        }

        //
        // Pretty-print the page
        //
        DbgPrint("0x%08p:\t%04s\t%20s\t(%02d.%02d) [%08p])\n",
                 i << PAGE_SHIFT,
                 State,
                 Consumer,
                 Pfn1->ReferenceCount,
                 Pfn1->LockCount,
                 Pfn1->RmapListHead);
    }

    DbgPrint("Nonpaged Pool:       %d pages\t[%d KB]\n", Totals[MC_NPPOOL], (Totals[MC_NPPOOL] << PAGE_SHIFT) / 1024);
    DbgPrint("Paged Pool:          %d pages\t[%d KB]\n", Totals[MC_PPOOL],  (Totals[MC_PPOOL]  << PAGE_SHIFT) / 1024);
    DbgPrint("File System Cache:   %d pages\t[%d KB]\n", Totals[MC_CACHE],  (Totals[MC_CACHE]  << PAGE_SHIFT) / 1024);
    DbgPrint("Process Working Set: %d pages\t[%d KB]\n", Totals[MC_USER],   (Totals[MC_USER]   << PAGE_SHIFT) / 1024);
    DbgPrint("System:              %d pages\t[%d KB]\n", Totals[MC_SYSTEM], (Totals[MC_SYSTEM] << PAGE_SHIFT) / 1024);
    DbgPrint("Free:                %d pages\t[%d KB]\n", FreePages,         (FreePages         << PAGE_SHIFT) / 1024);

    KeLowerIrql(OldIrql);
}
707
/*
 * Build the initial free/used state of the PFN database from the loader's
 * memory descriptor list, then account for the pages occupied by the PFN
 * database itself, and hand the counts over to the balancer.
 *
 * NOTE: the "ListEntry" macro (mapped to u1 above) must be temporarily
 * undefined around the CONTAINING_RECORD on MEMORY_ALLOCATION_DESCRIPTOR,
 * whose real field is named ListEntry.
 */
VOID
NTAPI
MmInitializePageList(VOID)
{
    ULONG i;
    PHYSICAL_PAGE UsedPage;
    PMEMORY_ALLOCATION_DESCRIPTOR Md;
    PLIST_ENTRY NextEntry;
    ULONG NrSystemPages = 0;

    /* Initialize the page lists */
    InitializeListHead(&UserPageListHead);
    InitializeListHead(&FreeUnzeroedPageListHead);
    InitializeListHead(&FreeZeroedPageListHead);

    /* This is what a used page looks like */
    RtlZeroMemory(&UsedPage, sizeof(UsedPage));
    UsedPage.Flags.Type = MM_PHYSICAL_PAGE_USED;
    UsedPage.Flags.Consumer = MC_NPPOOL;
    UsedPage.ReferenceCount = 1;

    /* Loop the memory descriptors */
    for (NextEntry = KeLoaderBlock->MemoryDescriptorListHead.Flink;
         NextEntry != &KeLoaderBlock->MemoryDescriptorListHead;
         NextEntry = NextEntry->Flink)
    {
#undef ListEntry
        /* Get the descriptor */
        Md = CONTAINING_RECORD(NextEntry,
                               MEMORY_ALLOCATION_DESCRIPTOR,
                               ListEntry);
#define ListEntry u1

        /* Skip bad memory */
        if ((Md->MemoryType == LoaderFirmwarePermanent) ||
            (Md->MemoryType == LoaderBBTMemory) ||
            (Md->MemoryType == LoaderSpecialMemory) ||
            (Md->MemoryType == LoaderBad))
        {
            //
            // We do not build PFN entries for this
            //
            continue;
        }
        else if ((Md->MemoryType == LoaderFree) ||
                 (Md->MemoryType == LoaderLoadedProgram) ||
                 (Md->MemoryType == LoaderFirmwareTemporary) ||
                 (Md->MemoryType == LoaderOsloaderStack))
        {
            /* Loop every page part of the block */
            for (i = 0; i < Md->PageCount; i++)
            {
                /* Mark it as a free page */
                MmPfnDatabase[Md->BasePage + i].Flags.Type = MM_PHYSICAL_PAGE_FREE;
                InsertTailList(&FreeUnzeroedPageListHead,
                               &MmPfnDatabase[Md->BasePage + i].ListEntry);
                UnzeroedPageCount++;
                MmAvailablePages++;
            }
        }
        else
        {
            /* Loop every page part of the block */
            for (i = 0; i < Md->PageCount; i++)
            {
                /* Everything else is used memory */
                MmPfnDatabase[Md->BasePage + i] = UsedPage;
                NrSystemPages++;
            }
        }
    }

    /* Finally handle the pages describing the PFN database themselves */
    for (i = MxOldFreeDescriptor.BasePage; i < MxFreeDescriptor->BasePage; i++)
    {
        /* Ensure this page was not added previously */
        ASSERT(MmPfnDatabase[i].Flags.Type == 0);

        /* Mark it as used kernel memory */
        MmPfnDatabase[i] = UsedPage;
        NrSystemPages++;
    }

    KeInitializeEvent(&ZeroPageThreadEvent, NotificationEvent, TRUE);
    DPRINT("Pages: %x %x\n", MmAvailablePages, NrSystemPages);
    MmInitializeBalancer(MmAvailablePages, NrSystemPages);
}
795
796 VOID
797 NTAPI
798 MmSetRmapListHeadPage(PFN_TYPE Pfn, struct _MM_RMAP_ENTRY* ListHead)
799 {
800 KIRQL oldIrql;
801
802 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
803 MiGetPfnEntry(Pfn)->RmapListHead = (LONG)ListHead;
804 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
805 }
806
807 struct _MM_RMAP_ENTRY*
808 NTAPI
809 MmGetRmapListHeadPage(PFN_TYPE Pfn)
810 {
811 KIRQL oldIrql;
812 struct _MM_RMAP_ENTRY* ListHead;
813
814 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
815 ListHead = (struct _MM_RMAP_ENTRY*)MiGetPfnEntry(Pfn)->RmapListHead;
816 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
817
818 return(ListHead);
819 }
820
821 VOID
822 NTAPI
823 MmSetSavedSwapEntryPage(PFN_TYPE Pfn, SWAPENTRY SwapEntry)
824 {
825 KIRQL oldIrql;
826
827 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
828 MiGetPfnEntry(Pfn)->SavedSwapEntry = SwapEntry;
829 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
830 }
831
832 SWAPENTRY
833 NTAPI
834 MmGetSavedSwapEntryPage(PFN_TYPE Pfn)
835 {
836 SWAPENTRY SwapEntry;
837 KIRQL oldIrql;
838
839 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
840 SwapEntry = MiGetPfnEntry(Pfn)->SavedSwapEntry;
841 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
842
843 return(SwapEntry);
844 }
845
846 VOID
847 NTAPI
848 MmReferencePage(PFN_TYPE Pfn)
849 {
850 PPHYSICAL_PAGE Page;
851
852 DPRINT("MmReferencePage(PysicalAddress %x)\n", Pfn << PAGE_SHIFT);
853
854 if (Pfn == 0 || Pfn > MmHighestPhysicalPage)
855 {
856 return;
857 }
858
859 Page = MiGetPfnEntry(Pfn);
860 ASSERT(Page);
861 if (Page->Flags.Type != MM_PHYSICAL_PAGE_USED)
862 {
863 DPRINT1("Referencing non-used page\n");
864 KeBugCheck(MEMORY_MANAGEMENT);
865 }
866
867 Page->ReferenceCount++;
868 }
869
870 ULONG
871 NTAPI
872 MmGetReferenceCountPage(PFN_TYPE Pfn)
873 {
874 KIRQL oldIrql;
875 ULONG RCount;
876 PPHYSICAL_PAGE Page;
877
878 DPRINT("MmGetReferenceCountPage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
879
880 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
881 Page = MiGetPfnEntry(Pfn);
882 ASSERT(Page);
883 if (Page->Flags.Type != MM_PHYSICAL_PAGE_USED)
884 {
885 DPRINT1("Getting reference count for free page\n");
886 KeBugCheck(MEMORY_MANAGEMENT);
887 }
888
889 RCount = Page->ReferenceCount;
890
891 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
892 return(RCount);
893 }
894
895 BOOLEAN
896 NTAPI
897 MmIsPageInUse(PFN_TYPE Pfn)
898 {
899
900 DPRINT("MmIsPageInUse(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
901
902 return (MiGetPfnEntry(Pfn)->Flags.Type == MM_PHYSICAL_PAGE_USED);
903 }
904
/*
 * Drop one reference from a used page. When the count reaches zero the page
 * must carry no rmap entries, locks, or swap entry; it is then moved to the
 * unzeroed free list and the zero-page thread is poked once enough unzeroed
 * pages accumulate. Any inconsistency bugchecks.
 * NOTE(review): no PFN lock is taken here -- presumably the caller holds it;
 * confirm against call sites.
 */
VOID
NTAPI
MmDereferencePage(PFN_TYPE Pfn)
{
    PPHYSICAL_PAGE Page;

    DPRINT("MmDereferencePage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);

    Page = MiGetPfnEntry(Pfn);
    ASSERT(Page);

    if (Page->Flags.Type != MM_PHYSICAL_PAGE_USED)
    {
        DPRINT1("Dereferencing free page\n");
        KeBugCheck(MEMORY_MANAGEMENT);
    }
    if (Page->ReferenceCount == 0)
    {
        DPRINT1("Derefrencing page with reference count 0\n");
        KeBugCheck(MEMORY_MANAGEMENT);
    }

    Page->ReferenceCount--;
    if (Page->ReferenceCount == 0)
    {
        /* Last reference gone: page is being freed */
        MmAvailablePages++;
        /* User pages also come off the LRU list */
        if (Page->Flags.Consumer == MC_USER) RemoveEntryList(&Page->ListEntry);
        if (Page->RmapListHead != (LONG)NULL)
        {
            DPRINT1("Freeing page with rmap entries.\n");
            KeBugCheck(MEMORY_MANAGEMENT);
        }
        if (Page->LockCount > 0)
        {
            DPRINT1("Freeing locked page\n");
            KeBugCheck(MEMORY_MANAGEMENT);
        }
        if (Page->SavedSwapEntry != 0)
        {
            DPRINT1("Freeing page with swap entry.\n");
            KeBugCheck(MEMORY_MANAGEMENT);
        }
        if (Page->Flags.Type != MM_PHYSICAL_PAGE_USED)
        {
            DPRINT1("Freeing page with flags %x\n",
                    Page->Flags.Type);
            KeBugCheck(MEMORY_MANAGEMENT);
        }
        Page->Flags.Type = MM_PHYSICAL_PAGE_FREE;
        Page->Flags.Consumer = MC_MAXIMUM;
        InsertTailList(&FreeUnzeroedPageListHead,
                       &Page->ListEntry);
        UnzeroedPageCount++;
        /* Wake the zero-page thread once a small backlog builds up */
        if (UnzeroedPageCount > 8 && 0 == KeReadStateEvent(&ZeroPageThreadEvent))
        {
            KeSetEvent(&ZeroPageThreadEvent, IO_NO_INCREMENT, FALSE);
        }
    }
}
964
965 ULONG
966 NTAPI
967 MmGetLockCountPage(PFN_TYPE Pfn)
968 {
969 KIRQL oldIrql;
970 ULONG CurrentLockCount;
971 PPHYSICAL_PAGE Page;
972
973 DPRINT("MmGetLockCountPage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
974
975 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
976
977 Page = MiGetPfnEntry(Pfn);
978 ASSERT(Page);
979 if (Page->Flags.Type != MM_PHYSICAL_PAGE_USED)
980 {
981 DPRINT1("Getting lock count for free page\n");
982 KeBugCheck(MEMORY_MANAGEMENT);
983 }
984
985 CurrentLockCount = Page->LockCount;
986 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
987
988 return(CurrentLockCount);
989 }
990
991 VOID
992 NTAPI
993 MmLockPage(PFN_TYPE Pfn)
994 {
995 PPHYSICAL_PAGE Page;
996
997 DPRINT("MmLockPage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
998
999 Page = MiGetPfnEntry(Pfn);
1000 ASSERT(Page);
1001 if (Page->Flags.Type != MM_PHYSICAL_PAGE_USED)
1002 {
1003 DPRINT1("Locking free page\n");
1004 KeBugCheck(MEMORY_MANAGEMENT);
1005 }
1006
1007 Page->LockCount++;
1008 }
1009
1010 VOID
1011 NTAPI
1012 MmUnlockPage(PFN_TYPE Pfn)
1013 {
1014 PPHYSICAL_PAGE Page;
1015
1016 DPRINT("MmUnlockPage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
1017
1018 Page = MiGetPfnEntry(Pfn);
1019 ASSERT(Page);
1020 if (Page->Flags.Type != MM_PHYSICAL_PAGE_USED)
1021 {
1022 DPRINT1("Unlocking free page\n");
1023 KeBugCheck(MEMORY_MANAGEMENT);
1024 }
1025
1026 Page->LockCount--;
1027 }
1028
/*
 * Allocate one physical page for the given consumer, preferring the zeroed
 * list and falling back to the unzeroed list (zeroing on the spot unless the
 * consumer is MC_SYSTEM). Returns the PFN, or 0 when out of memory.
 * NOTE(review): no PFN lock is taken around the list manipulation here --
 * presumably callers hold it; confirm against call sites.
 */
PFN_TYPE
NTAPI
MmAllocPage(ULONG Consumer, SWAPENTRY SwapEntry)
{
    PFN_TYPE PfnOffset;
    PLIST_ENTRY ListEntry;
    PPHYSICAL_PAGE PageDescriptor;
    BOOLEAN NeedClear = FALSE;

    DPRINT("MmAllocPage()\n");

    if (IsListEmpty(&FreeZeroedPageListHead))
    {
        if (IsListEmpty(&FreeUnzeroedPageListHead))
        {
            /* Check if this allocation is for the PFN DB itself */
            if (MmNumberOfPhysicalPages == 0)
            {
                ASSERT(FALSE);
            }

            DPRINT1("MmAllocPage(): Out of memory\n");
            return 0;
        }
        /* Take an unzeroed page; it will need clearing before use */
        ListEntry = RemoveTailList(&FreeUnzeroedPageListHead);
        UnzeroedPageCount--;

        PageDescriptor = CONTAINING_RECORD(ListEntry, PHYSICAL_PAGE, ListEntry);

        NeedClear = TRUE;
    }
    else
    {
        /* A pre-zeroed page is available */
        ListEntry = RemoveTailList(&FreeZeroedPageListHead);

        PageDescriptor = CONTAINING_RECORD(ListEntry, PHYSICAL_PAGE, ListEntry);
    }

    /* Sanity: the free list must only hold free, unreferenced pages */
    if (PageDescriptor->Flags.Type != MM_PHYSICAL_PAGE_FREE)
    {
        DPRINT1("Got non-free page from freelist\n");
        KeBugCheck(MEMORY_MANAGEMENT);
    }
    if (PageDescriptor->ReferenceCount != 0)
    {
        DPRINT1("%d\n", PageDescriptor->ReferenceCount);
        KeBugCheck(MEMORY_MANAGEMENT);
    }
    /* Hand the page to the consumer with a single reference */
    PageDescriptor->Flags.Type = MM_PHYSICAL_PAGE_USED;
    PageDescriptor->Flags.Consumer = Consumer;
    PageDescriptor->ReferenceCount = 1;
    PageDescriptor->LockCount = 0;
    PageDescriptor->SavedSwapEntry = SwapEntry;

    MmAvailablePages--;

    PfnOffset = PageDescriptor - MmPfnDatabase;
    if ((NeedClear) && (Consumer != MC_SYSTEM))
    {
        MiZeroPage(PfnOffset);
    }
    return PfnOffset;
}
1092
1093 NTSTATUS
1094 NTAPI
1095 MiZeroPage(PFN_TYPE Page)
1096 {
1097 KIRQL Irql;
1098 PVOID TempAddress;
1099
1100 Irql = KeRaiseIrqlToDpcLevel();
1101 TempAddress = MiMapPageToZeroInHyperSpace(Page);
1102 if (TempAddress == NULL)
1103 {
1104 return(STATUS_NO_MEMORY);
1105 }
1106 memset(TempAddress, 0, PAGE_SIZE);
1107 MiUnmapPagesInZeroSpace(TempAddress, 1);
1108 KeLowerIrql(Irql);
1109 return(STATUS_SUCCESS);
1110 }
1111
/*
 * Zero page worker thread (ZPW). Sleeps on ZeroPageThreadEvent; when woken it
 * drains the unzeroed free list, zeroing each page and moving it to the
 * zeroed list. The PFN lock is dropped around the actual MiZeroPage call and
 * re-acquired afterwards, so list state is re-checked each iteration.
 * Runs until ZeroPageThreadShouldTerminate is set.
 */
NTSTATUS
NTAPI
MmZeroPageThreadMain(PVOID Ignored)
{
    NTSTATUS Status;
    KIRQL oldIrql;
    PLIST_ENTRY ListEntry;
    PPHYSICAL_PAGE PageDescriptor;
    PFN_TYPE Pfn;
    ULONG Count;

    /* Free initial kernel memory */
    //MiFreeInitMemory();

    /* Set our priority to 0 */
    KeGetCurrentThread()->BasePriority = 0;
    KeSetPriorityThread(KeGetCurrentThread(), 0);

    while(1)
    {
        Status = KeWaitForSingleObject(&ZeroPageThreadEvent,
                                       0,
                                       KernelMode,
                                       FALSE,
                                       NULL);
        if (!NT_SUCCESS(Status))
        {
            DPRINT1("ZeroPageThread: Wait failed\n");
            KeBugCheck(MEMORY_MANAGEMENT);
        }

        if (ZeroPageThreadShouldTerminate)
        {
            DPRINT1("ZeroPageThread: Terminating\n");
            return STATUS_SUCCESS;
        }
        Count = 0;
        oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
        while (!IsListEmpty(&FreeUnzeroedPageListHead))
        {
            ListEntry = RemoveTailList(&FreeUnzeroedPageListHead);
            UnzeroedPageCount--;
            PageDescriptor = CONTAINING_RECORD(ListEntry, PHYSICAL_PAGE, ListEntry);
            /* We set the page to used, because MmCreateVirtualMapping failed with unused pages */
            PageDescriptor->Flags.Type = MM_PHYSICAL_PAGE_USED;
            /* Drop the lock while zeroing; the page is marked used so
               nobody else can grab it in the meantime */
            KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
            Pfn = PageDescriptor - MmPfnDatabase;
            Status = MiZeroPage(Pfn);

            oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
            PageDescriptor->Flags.Zero = 1;
            PageDescriptor->Flags.Type = MM_PHYSICAL_PAGE_FREE;
            if (NT_SUCCESS(Status))
            {
                InsertHeadList(&FreeZeroedPageListHead, ListEntry);
                Count++;
            }
            else
            {
                /* Zeroing failed: put the page back on the unzeroed list */
                InsertHeadList(&FreeUnzeroedPageListHead, ListEntry);
                UnzeroedPageCount++;
            }

        }
        DPRINT("Zeroed %d pages.\n", Count);
        KeResetEvent(&ZeroPageThreadEvent);
        KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
    }

    return STATUS_SUCCESS;
}
1183
1184 /* EOF */