63f47408313dbeb2477adec708c006da00db862d
[reactos.git] / reactos / ntoskrnl / mm / freelist.c
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/mm/freelist.c
5 * PURPOSE: Handle the list of free physical pages
6 *
7 * PROGRAMMERS: David Welch (welch@cwcom.net)
8 * Robert Bergkvist
9 */
10
11 /* INCLUDES ****************************************************************/
12
13 #include <ntoskrnl.h>
14 #define NDEBUG
15 #include <debug.h>
16
17 #if defined (ALLOC_PRAGMA)
18 #pragma alloc_text(INIT, MmInitializePageList)
19 #endif
20
21 #define MODULE_INVOLVED_IN_ARM3
22 #include "ARM3/miarm.h"
23
24 /* GLOBALS ****************************************************************/
25
26 //
27 //
28 // ReactOS to NT Physical Page Descriptor Entry Legacy Mapping Definitions
29 //
30 // REACTOS NT
31 //
32 #define Consumer u3.e1.PageColor
33 #define RmapListHead AweReferenceCount
34 #define SavedSwapEntry u4.EntireFrame
35 #define RemoveEntryList(x) RemoveEntryList((PLIST_ENTRY)x)
36 #define InsertTailList(x, y) InsertTailList(x, (PLIST_ENTRY)y)
37 #define ListEntry u1
38 #define PHYSICAL_PAGE MMPFN
39 #define PPHYSICAL_PAGE PMMPFN
40
41 /* The first array contains ReactOS PFNs, the second contains ARM3 PFNs */
42 PPHYSICAL_PAGE MmPfnDatabase[2];
43
44 ULONG MmAvailablePages;
45 ULONG MmResidentAvailablePages;
46
47 SIZE_T MmTotalCommitLimit;
48 SIZE_T MmTotalCommittedPages;
49 SIZE_T MmSharedCommit;
50 SIZE_T MmDriverCommit;
51 SIZE_T MmProcessCommit;
52 SIZE_T MmPagedPoolCommit;
53 SIZE_T MmPeakCommitment;
54 SIZE_T MmtotalCommitLimitMaximum;
55
56 MMPFNLIST MmZeroedPageListHead;
57 MMPFNLIST MmFreePageListHead;
58 MMPFNLIST MmStandbyPageListHead;
59 MMPFNLIST MmModifiedPageListHead;
60 MMPFNLIST MmModifiedNoWritePageListHead;
61
62 /* List of pages allocated to the MC_USER Consumer */
63 static LIST_ENTRY UserPageListHead;
64 /* List of pages zeroed by the ZPW (MmZeroPageThreadMain) */
65 static LIST_ENTRY FreeZeroedPageListHead;
/* List of free pages, filled by MmDereferencePage and
 * MmInitializePageList */
static LIST_ENTRY FreeUnzeroedPageListHead;
69
70 static KEVENT ZeroPageThreadEvent;
71 static BOOLEAN ZeroPageThreadShouldTerminate = FALSE;
72
73 static ULONG UnzeroedPageCount = 0;
74
75 /* FUNCTIONS *************************************************************/
76
77 static RTL_BITMAP MiUserPfnBitMap;
78
79 /* FUNCTIONS *************************************************************/
80
81 VOID
82 NTAPI
83 MiInitializeUserPfnBitmap(VOID)
84 {
85 PVOID Bitmap;
86
87 /* Allocate enough buffer for the PFN bitmap and align it on 32-bits */
88 Bitmap = ExAllocatePoolWithTag(NonPagedPool,
89 (((MmHighestPhysicalPage + 1) + 31) / 32) * 4,
90 ' mM');
91 ASSERT(Bitmap);
92
93 /* Initialize it and clear all the bits to begin with */
94 RtlInitializeBitMap(&MiUserPfnBitMap,
95 Bitmap,
96 MmHighestPhysicalPage + 1);
97 RtlClearAllBits(&MiUserPfnBitMap);
98 }
99
100 PFN_TYPE
101 NTAPI
102 MmGetLRUFirstUserPage(VOID)
103 {
104 ULONG Position;
105 KIRQL OldIrql;
106
107 /* Find the first user page */
108 OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
109 Position = RtlFindSetBits(&MiUserPfnBitMap, 1, 0);
110 KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
111 if (Position == 0xFFFFFFFF) return 0;
112
113 /* Return it */
114 return Position;
115 }
116
117 VOID
118 NTAPI
119 MmInsertLRULastUserPage(PFN_TYPE Pfn)
120 {
121 KIRQL OldIrql;
122
123 /* Set the page as a user page */
124 OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
125 RtlSetBit(&MiUserPfnBitMap, Pfn);
126 KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
127 }
128
129 PFN_TYPE
130 NTAPI
131 MmGetLRUNextUserPage(PFN_TYPE PreviousPfn)
132 {
133 ULONG Position;
134 KIRQL OldIrql;
135
136 /* Find the next user page */
137 OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
138 Position = RtlFindSetBits(&MiUserPfnBitMap, 1, PreviousPfn + 1);
139 KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
140 if (Position == 0xFFFFFFFF) return 0;
141
142 /* Return it */
143 return Position;
144 }
145
VOID
NTAPI
MmRemoveLRUUserPage(PFN_TYPE Page)
{
    /* Unset the page as a user page */
    /* NOTE(review): unlike MmInsertLRULastUserPage and the LRU lookup
     * routines above, this clears the bit WITHOUT taking the PFN queued
     * spinlock -- presumably callers already hold it (taking it here
     * would deadlock if so); verify against call sites. */
    RtlClearBit(&MiUserPfnBitMap, Page);
}
153
154 BOOLEAN
155 NTAPI
156 MiIsPfnInUse(IN PMMPFN Pfn1)
157 {
158 return ((Pfn1->u3.e1.PageLocation != FreePageList) &&
159 (Pfn1->u3.e1.PageLocation != ZeroedPageList));
160 }
161
162 PFN_NUMBER
163 NTAPI
164 MiFindContiguousPages(IN PFN_NUMBER LowestPfn,
165 IN PFN_NUMBER HighestPfn,
166 IN PFN_NUMBER BoundaryPfn,
167 IN PFN_NUMBER SizeInPages,
168 IN MEMORY_CACHING_TYPE CacheType)
169 {
170 PFN_NUMBER Page, PageCount, LastPage, Length, BoundaryMask;
171 ULONG i = 0;
172 PMMPFN Pfn1, EndPfn;
173 KIRQL OldIrql;
174 PAGED_CODE ();
175 ASSERT(SizeInPages != 0);
176
177 //
178 // Convert the boundary PFN into an alignment mask
179 //
180 BoundaryMask = ~(BoundaryPfn - 1);
181
182 //
183 // Loop all the physical memory blocks
184 //
185 do
186 {
187 //
188 // Capture the base page and length of this memory block
189 //
190 Page = MmPhysicalMemoryBlock->Run[i].BasePage;
191 PageCount = MmPhysicalMemoryBlock->Run[i].PageCount;
192
193 //
194 // Check how far this memory block will go
195 //
196 LastPage = Page + PageCount;
197
198 //
199 // Trim it down to only the PFNs we're actually interested in
200 //
201 if ((LastPage - 1) > HighestPfn) LastPage = HighestPfn + 1;
202 if (Page < LowestPfn) Page = LowestPfn;
203
204 //
205 // Skip this run if it's empty or fails to contain all the pages we need
206 //
207 if (!(PageCount) || ((Page + SizeInPages) > LastPage)) continue;
208
209 //
210 // Now scan all the relevant PFNs in this run
211 //
212 Length = 0;
213 for (Pfn1 = MiGetPfnEntry(Page); Page < LastPage; Page++, Pfn1++)
214 {
215 //
216 // If this PFN is in use, ignore it
217 //
218 if (MiIsPfnInUse(Pfn1)) continue;
219
220 //
221 // If we haven't chosen a start PFN yet and the caller specified an
222 // alignment, make sure the page matches the alignment restriction
223 //
224 if ((!(Length) && (BoundaryPfn)) &&
225 (((Page ^ (Page + SizeInPages - 1)) & BoundaryMask)))
226 {
227 //
228 // It does not, so bail out
229 //
230 continue;
231 }
232
233 //
234 // Increase the number of valid pages, and check if we have enough
235 //
236 if (++Length == SizeInPages)
237 {
238 //
239 // It appears we've amassed enough legitimate pages, rollback
240 //
241 Pfn1 -= (Length - 1);
242 Page -= (Length - 1);
243
244 //
245 // Acquire the PFN lock
246 //
247 OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
248 do
249 {
250 //
251 // Things might've changed for us. Is the page still free?
252 //
253 if (MiIsPfnInUse(Pfn1)) break;
254
255 //
256 // So far so good. Is this the last confirmed valid page?
257 //
258 if (!--Length)
259 {
260 //
261 // Sanity check that we didn't go out of bounds
262 //
263 ASSERT(i != MmPhysicalMemoryBlock->NumberOfRuns);
264
265 //
266 // Loop until all PFN entries have been processed
267 //
268 EndPfn = Pfn1 - SizeInPages + 1;
269 do
270 {
271 //
272 // If this was an unzeroed page, there are now less
273 //
274 if (Pfn1->u3.e1.PageLocation == ZeroedPageList) UnzeroedPageCount--;
275
276 //
277 // One less free page
278 //
279 MmAvailablePages--;
280
281 //
282 // This PFN is now a used page, set it up
283 //
284 RemoveEntryList(&Pfn1->ListEntry);
285 Pfn1->Consumer = MC_NPPOOL;
286 Pfn1->u3.e2.ReferenceCount = 1;
287 Pfn1->SavedSwapEntry = 0;
288
289 //
290 // Check if it was already zeroed
291 //
292 if (Pfn1->u3.e1.PageLocation != ZeroedPageList)
293 {
294 //
295 // It wasn't, so zero it
296 //
297 MiZeroPage(MiGetPfnEntryIndex(Pfn1));
298 }
299
300 //
301 // Mark it in use
302 //
303 Pfn1->u3.e1.PageLocation = ActiveAndValid;
304
305 //
306 // Check if this is the last PFN, otherwise go on
307 //
308 if (Pfn1 == EndPfn) break;
309 Pfn1--;
310 } while (TRUE);
311
312 //
313 // Mark the first and last PFN so we can find them later
314 //
315 Pfn1->u3.e1.StartOfAllocation = 1;
316 (Pfn1 + SizeInPages - 1)->u3.e1.EndOfAllocation = 1;
317
318 //
319 // Now it's safe to let go of the PFN lock
320 //
321 KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
322
323 //
324 // Quick sanity check that the last PFN is consistent
325 //
326 EndPfn = Pfn1 + SizeInPages;
327 ASSERT(EndPfn == MiGetPfnEntry(Page + 1));
328
329 //
330 // Compute the first page, and make sure it's consistent
331 //
332 Page -= SizeInPages - 1;
333 ASSERT(Pfn1 == MiGetPfnEntry(Page));
334 ASSERT(Page != 0);
335 return Page;
336 }
337
338 //
339 // Keep going. The purpose of this loop is to reconfirm that
340 // after acquiring the PFN lock these pages are still usable
341 //
342 Pfn1++;
343 Page++;
344 } while (TRUE);
345
346 //
347 // If we got here, something changed while we hadn't acquired
348 // the PFN lock yet, so we'll have to restart
349 //
350 KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
351 Length = 0;
352 }
353 }
354 } while (++i != MmPhysicalMemoryBlock->NumberOfRuns);
355
356 //
357 // And if we get here, it means no suitable physical memory runs were found
358 //
359 return 0;
360 }
361
362 PMDL
363 NTAPI
364 MiAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress,
365 IN PHYSICAL_ADDRESS HighAddress,
366 IN PHYSICAL_ADDRESS SkipBytes,
367 IN SIZE_T TotalBytes,
368 IN MI_PFN_CACHE_ATTRIBUTE CacheAttribute,
369 IN ULONG MdlFlags)
370 {
371 PMDL Mdl;
372 PFN_NUMBER PageCount, LowPage, HighPage, SkipPages, PagesFound = 0, Page;
373 PPFN_NUMBER MdlPage, LastMdlPage;
374 KIRQL OldIrql;
375 PLIST_ENTRY ListEntry;
376 PPHYSICAL_PAGE Pfn1;
377 INT LookForZeroedPages;
378 ASSERT (KeGetCurrentIrql() <= APC_LEVEL);
379
380 //
381 // Convert the low address into a PFN
382 //
383 LowPage = (PFN_NUMBER)(LowAddress.QuadPart >> PAGE_SHIFT);
384
385 //
386 // Convert, and normalize, the high address into a PFN
387 //
388 HighPage = (PFN_NUMBER)(HighAddress.QuadPart >> PAGE_SHIFT);
389 if (HighPage > MmHighestPhysicalPage) HighPage = MmHighestPhysicalPage;
390
391 //
392 // Validate skipbytes and convert them into pages
393 //
394 if (BYTE_OFFSET(SkipBytes.LowPart)) return NULL;
395 SkipPages = (PFN_NUMBER)(SkipBytes.QuadPart >> PAGE_SHIFT);
396
397 //
398 // Now compute the number of pages the MDL will cover
399 //
400 PageCount = (PFN_NUMBER)ADDRESS_AND_SIZE_TO_SPAN_PAGES(0, TotalBytes);
401 do
402 {
403 //
404 // Try creating an MDL for these many pages
405 //
406 Mdl = MmCreateMdl(NULL, NULL, PageCount << PAGE_SHIFT);
407 if (Mdl) break;
408
409 //
410 // This function is not required to return the amount of pages requested
411 // In fact, it can return as little as 1 page, and callers are supposed
412 // to deal with this scenario. So re-attempt the allocation with less
413 // pages than before, and see if it worked this time.
414 //
415 PageCount -= (PageCount >> 4);
416 } while (PageCount);
417
418 //
419 // Wow, not even a single page was around!
420 //
421 if (!Mdl) return NULL;
422
423 //
424 // This is where the page array starts....
425 //
426 MdlPage = (PPFN_NUMBER)(Mdl + 1);
427
428 //
429 // Lock the PFN database
430 //
431 OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
432
433 //
434 // Are we looking for any pages, without discriminating?
435 //
436 if ((LowPage == 0) && (HighPage == MmHighestPhysicalPage))
437 {
438 //
439 // Well then, let's go shopping
440 //
441 while (PagesFound < PageCount)
442 {
443 //
444 // Do we have zeroed pages?
445 //
446 if (!IsListEmpty(&FreeZeroedPageListHead))
447 {
448 //
449 // Grab a zero page
450 //
451 ListEntry = RemoveTailList(&FreeZeroedPageListHead);
452 }
453 else if (!IsListEmpty(&FreeUnzeroedPageListHead))
454 {
455 //
456 // Nope, grab an unzeroed page
457 //
458 ListEntry = RemoveTailList(&FreeUnzeroedPageListHead);
459 UnzeroedPageCount--;
460 }
461 else
462 {
463 //
464 // This is not good... hopefully we have at least SOME pages
465 //
466 ASSERT(PagesFound);
467 break;
468 }
469
470 //
471 // Get the PFN entry for this page
472 //
473 Pfn1 = CONTAINING_RECORD(ListEntry, PHYSICAL_PAGE, ListEntry);
474
475 //
476 // Make sure it's really free
477 //
478 ASSERT(MiIsPfnInUse(Pfn1) == FALSE);
479 ASSERT(Pfn1->u3.e2.ReferenceCount == 0);
480
481 //
482 // Allocate it and mark it
483 //
484 Pfn1->Consumer = MC_NPPOOL;
485 Pfn1->u3.e1.StartOfAllocation = 1;
486 Pfn1->u3.e1.EndOfAllocation = 1;
487 Pfn1->u3.e2.ReferenceCount = 1;
488 Pfn1->u3.e1.PageLocation = ActiveAndValid;
489 Pfn1->SavedSwapEntry = 0;
490
491 //
492 // Decrease available pages
493 //
494 MmAvailablePages--;
495
496 //
497 // Save it into the MDL
498 //
499 *MdlPage++ = MiGetPfnEntryIndex(Pfn1);
500 PagesFound++;
501 }
502 }
503 else
504 {
505 //
506 // You want specific range of pages. We'll do this in two runs
507 //
508 for (LookForZeroedPages = 1; LookForZeroedPages >= 0; LookForZeroedPages--)
509 {
510 //
511 // Scan the range you specified
512 //
513 for (Page = LowPage; Page < HighPage; Page++)
514 {
515 //
516 // Get the PFN entry for this page
517 //
518 Pfn1 = MiGetPfnEntry(Page);
519 ASSERT(Pfn1);
520
521 //
522 // Make sure it's free and if this is our first pass, zeroed
523 //
524 if (!MiIsPfnInUse(Pfn1)) continue;
525 if ((Pfn1->u3.e1.PageLocation == ZeroedPageList) != LookForZeroedPages) continue;
526
527 //
528 // Sanity checks
529 //
530 ASSERT(Pfn1->u3.e2.ReferenceCount == 0);
531
532 //
533 // Now setup the page and mark it
534 //
535 Pfn1->Consumer = MC_NPPOOL;
536 Pfn1->u3.e2.ReferenceCount = 1;
537 Pfn1->u3.e1.StartOfAllocation = 1;
538 Pfn1->u3.e1.EndOfAllocation = 1;
539 Pfn1->SavedSwapEntry = 0;
540
541 //
542 // If this page was unzeroed, we've consumed such a page
543 //
544 if (Pfn1->u3.e1.PageLocation != ZeroedPageList) UnzeroedPageCount--;
545
546 //
547 // Decrease available pages
548 //
549 MmAvailablePages--;
550
551 //
552 // Save this page into the MDL
553 //
554 *MdlPage++ = Page;
555 if (++PagesFound == PageCount) break;
556 }
557
558 //
559 // If the first pass was enough, don't keep going, otherwise, go again
560 //
561 if (PagesFound == PageCount) break;
562 }
563 }
564
565 //
566 // Now release the PFN count
567 //
568 KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
569
570 //
571 // We might've found less pages, but not more ;-)
572 //
573 if (PagesFound != PageCount) ASSERT(PagesFound < PageCount);
574 if (!PagesFound)
575 {
576 //
577 // If we didn' tfind any pages at all, fail
578 //
579 DPRINT1("NO MDL PAGES!\n");
580 ExFreePool(Mdl);
581 return NULL;
582 }
583
584 //
585 // Write out how many pages we found
586 //
587 Mdl->ByteCount = (ULONG)(PagesFound << PAGE_SHIFT);
588
589 //
590 // Terminate the MDL array if there's certain missing pages
591 //
592 if (PagesFound != PageCount) *MdlPage = -1;
593
594 //
595 // Now go back and loop over all the MDL pages
596 //
597 MdlPage = (PPFN_NUMBER)(Mdl + 1);
598 LastMdlPage = MdlPage + PagesFound;
599 while (MdlPage < LastMdlPage)
600 {
601 //
602 // Check if we've reached the end
603 //
604 Page = *MdlPage++;
605 if (Page == (PFN_NUMBER)-1) break;
606
607 //
608 // Get the PFN entry for the page and check if we should zero it out
609 //
610 Pfn1 = MiGetPfnEntry(Page);
611 ASSERT(Pfn1);
612 if (Pfn1->u3.e1.PageLocation != ZeroedPageList) MiZeroPage(Page);
613 Pfn1->u3.e1.PageLocation = ActiveAndValid;
614 }
615
616 //
617 // We're done, mark the pages as locked (should we lock them, though???)
618 //
619 Mdl->Process = NULL;
620 Mdl->MdlFlags |= MDL_PAGES_LOCKED;
621 return Mdl;
622 }
623
624 VOID
625 NTAPI
626 MmDumpPfnDatabase(VOID)
627 {
628 ULONG i;
629 PPHYSICAL_PAGE Pfn1;
630 PCHAR State = "????", Type = "Unknown";
631 KIRQL OldIrql;
632 ULONG Totals[5] = {0}, FreePages = 0;
633
634 KeRaiseIrql(HIGH_LEVEL, &OldIrql);
635
636 //
637 // Loop the PFN database
638 //
639 for (i = 0; i <= MmHighestPhysicalPage; i++)
640 {
641 Pfn1 = MiGetPfnEntry(i);
642 if (!Pfn1) continue;
643
644 //
645 // Get the consumer
646 //
647 switch (Pfn1->Consumer)
648 {
649 case MC_NPPOOL:
650
651 Type = "Nonpaged Pool";
652 break;
653
654 case MC_PPOOL:
655
656 Type = "Paged Pool";
657 break;
658
659 case MC_CACHE:
660
661 Type = "File System Cache";
662 break;
663
664 case MC_USER:
665
666 Type = "Process Working Set";
667 break;
668
669 case MC_SYSTEM:
670
671 Type = "System";
672 break;
673 }
674
675 //
676 // Get the type
677 //
678 if (MiIsPfnInUse(Pfn1))
679 {
680 State = "Used";
681 Totals[Pfn1->Consumer]++;
682 }
683 else
684 {
685 State = "Free";
686 Type = "Free";
687 FreePages++;
688 break;
689 }
690
691 //
692 // Pretty-print the page
693 //
694 DbgPrint("0x%08p:\t%04s\t%20s\t(%02d) [%08p])\n",
695 i << PAGE_SHIFT,
696 State,
697 Type,
698 Pfn1->u3.e2.ReferenceCount,
699 Pfn1->RmapListHead);
700 }
701
702 DbgPrint("Nonpaged Pool: %d pages\t[%d KB]\n", Totals[MC_NPPOOL], (Totals[MC_NPPOOL] << PAGE_SHIFT) / 1024);
703 DbgPrint("Paged Pool: %d pages\t[%d KB]\n", Totals[MC_PPOOL], (Totals[MC_PPOOL] << PAGE_SHIFT) / 1024);
704 DbgPrint("File System Cache: %d pages\t[%d KB]\n", Totals[MC_CACHE], (Totals[MC_CACHE] << PAGE_SHIFT) / 1024);
705 DbgPrint("Process Working Set: %d pages\t[%d KB]\n", Totals[MC_USER], (Totals[MC_USER] << PAGE_SHIFT) / 1024);
706 DbgPrint("System: %d pages\t[%d KB]\n", Totals[MC_SYSTEM], (Totals[MC_SYSTEM] << PAGE_SHIFT) / 1024);
707 DbgPrint("Free: %d pages\t[%d KB]\n", FreePages, (FreePages << PAGE_SHIFT) / 1024);
708
709 KeLowerIrql(OldIrql);
710 }
711
/*
 * Builds the initial page lists from the loader's memory descriptor list:
 * free/reusable descriptors go on the unzeroed free list, everything else
 * is marked as used system memory, and finally the pages backing the PFN
 * database itself are marked used. Called once at boot.
 */
VOID
NTAPI
MmInitializePageList(VOID)
{
    ULONG i;
    PHYSICAL_PAGE UsedPage;
    PMEMORY_ALLOCATION_DESCRIPTOR Md;
    PLIST_ENTRY NextEntry;
    ULONG NrSystemPages = 0;

    /* Initialize the page lists */
    InitializeListHead(&UserPageListHead);
    InitializeListHead(&FreeUnzeroedPageListHead);
    InitializeListHead(&FreeZeroedPageListHead);

    /* This is what a used page looks like (template copied below) */
    RtlZeroMemory(&UsedPage, sizeof(UsedPage));
    UsedPage.Consumer = MC_NPPOOL;
    UsedPage.u3.e1.PageLocation = ActiveAndValid;
    UsedPage.u3.e2.ReferenceCount = 1;

    /* Loop the memory descriptors */
    for (NextEntry = KeLoaderBlock->MemoryDescriptorListHead.Flink;
         NextEntry != &KeLoaderBlock->MemoryDescriptorListHead;
         NextEntry = NextEntry->Flink)
    {
/* "ListEntry" is #defined to "u1" at the top of this file for MMPFN
 * access; undo that so CONTAINING_RECORD names the loader descriptor's
 * real ListEntry field, then restore the mapping right after. */
#undef ListEntry
        /* Get the descriptor */
        Md = CONTAINING_RECORD(NextEntry,
                               MEMORY_ALLOCATION_DESCRIPTOR,
                               ListEntry);
#define ListEntry u1

        /* Skip bad memory */
        if ((Md->MemoryType == LoaderFirmwarePermanent) ||
            (Md->MemoryType == LoaderBBTMemory) ||
            (Md->MemoryType == LoaderSpecialMemory) ||
            (Md->MemoryType == LoaderBad))
        {
            //
            // We do not build PFN entries for this
            //
            continue;
        }
        else if ((Md->MemoryType == LoaderFree) ||
                 (Md->MemoryType == LoaderLoadedProgram) ||
                 (Md->MemoryType == LoaderFirmwareTemporary) ||
                 (Md->MemoryType == LoaderOsloaderStack))
        {
            /* Loop every page part of the block */
            for (i = 0; i < Md->PageCount; i++)
            {
                /* Mark it as a free (not yet zeroed) page */
                MmPfnDatabase[0][Md->BasePage + i].u3.e1.PageLocation = FreePageList;
                InsertTailList(&FreeUnzeroedPageListHead,
                               &MmPfnDatabase[0][Md->BasePage + i].ListEntry);
                UnzeroedPageCount++;
                MmAvailablePages++;
            }
        }
        else
        {
            /* Loop every page part of the block */
            for (i = 0; i < Md->PageCount; i++)
            {
                /* Everything else is used memory */
                MmPfnDatabase[0][Md->BasePage + i] = UsedPage;
                NrSystemPages++;
            }
        }
    }

    /* Finally handle the pages describing the PFN database themselves */
    for (i = MxOldFreeDescriptor.BasePage; i < MxFreeDescriptor->BasePage; i++)
    {
        /* Ensure this page was not added previously */
        ASSERT(MmPfnDatabase[0][i].Consumer == 0);

        /* Mark it as used kernel memory */
        MmPfnDatabase[0][i] = UsedPage;
        NrSystemPages++;
    }

    /* The zero page thread's event starts signalled so it runs once */
    KeInitializeEvent(&ZeroPageThreadEvent, NotificationEvent, TRUE);
    DPRINT("Pages: %x %x\n", MmAvailablePages, NrSystemPages);
    MmInitializeBalancer(MmAvailablePages, NrSystemPages);
}
799
/* Store the head of the rmap (reverse-mapping) chain for this PFN, under
 * the PFN queued spinlock. */
VOID
NTAPI
MmSetRmapListHeadPage(PFN_TYPE Pfn, struct _MM_RMAP_ENTRY* ListHead)
{
    KIRQL oldIrql;

    oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    /* NOTE(review): RmapListHead maps to the LONG AweReferenceCount field
     * (see the legacy-mapping #defines at the top of this file), so the
     * pointer is truncated on 64-bit builds -- confirm 32-bit-only use. */
    MiGetPfnEntry(Pfn)->RmapListHead = (LONG)ListHead;
    KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
}
810
811 struct _MM_RMAP_ENTRY*
812 NTAPI
813 MmGetRmapListHeadPage(PFN_TYPE Pfn)
814 {
815 KIRQL oldIrql;
816 struct _MM_RMAP_ENTRY* ListHead;
817
818 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
819 ListHead = (struct _MM_RMAP_ENTRY*)MiGetPfnEntry(Pfn)->RmapListHead;
820 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
821
822 return(ListHead);
823 }
824
825 VOID
826 NTAPI
827 MmSetSavedSwapEntryPage(PFN_TYPE Pfn, SWAPENTRY SwapEntry)
828 {
829 KIRQL oldIrql;
830
831 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
832 MiGetPfnEntry(Pfn)->SavedSwapEntry = SwapEntry;
833 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
834 }
835
836 SWAPENTRY
837 NTAPI
838 MmGetSavedSwapEntryPage(PFN_TYPE Pfn)
839 {
840 SWAPENTRY SwapEntry;
841 KIRQL oldIrql;
842
843 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
844 SwapEntry = MiGetPfnEntry(Pfn)->SavedSwapEntry;
845 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
846
847 return(SwapEntry);
848 }
849
/* Take an extra reference on the given physical page. PFN 0 and PFNs
 * beyond the highest physical page are silently ignored. */
VOID
NTAPI
MmReferencePage(PFN_TYPE Pfn)
{
    PPHYSICAL_PAGE Page;

    DPRINT("MmReferencePage(PysicalAddress %x)\n", Pfn << PAGE_SHIFT);

    /* Silently ignore PFN 0 and out-of-range PFNs */
    if (Pfn == 0 || Pfn > MmHighestPhysicalPage)
    {
        return;
    }

    Page = MiGetPfnEntry(Pfn);
    ASSERT(Page);

    /* NOTE(review): the increment is done without the PFN lock --
     * presumably callers serialize access; verify against call sites. */
    Page->u3.e2.ReferenceCount++;
}
868
869 ULONG
870 NTAPI
871 MmGetReferenceCountPage(PFN_TYPE Pfn)
872 {
873 KIRQL oldIrql;
874 ULONG RCount;
875 PPHYSICAL_PAGE Page;
876
877 DPRINT("MmGetReferenceCountPage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
878
879 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
880 Page = MiGetPfnEntry(Pfn);
881 ASSERT(Page);
882
883 RCount = Page->u3.e2.ReferenceCount;
884
885 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
886 return(RCount);
887 }
888
889 BOOLEAN
890 NTAPI
891 MmIsPageInUse(PFN_TYPE Pfn)
892 {
893 return MiIsPfnInUse(MiGetPfnEntry(Pfn));
894 }
895
896 VOID
897 NTAPI
898 MiSetConsumer(IN PFN_TYPE Pfn,
899 IN ULONG Type)
900 {
901 MiGetPfnEntry(Pfn)->Consumer = Type;
902 MiGetPfnEntry(Pfn)->u3.e1.PageLocation = ActiveAndValid;
903 }
904
/* Drop one reference on the page; on the last reference the page is put
 * back on the unzeroed free list and the zero page thread may be woken. */
VOID
NTAPI
MmDereferencePage(PFN_TYPE Pfn)
{
    PPHYSICAL_PAGE Page;

    DPRINT("MmDereferencePage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);

    Page = MiGetPfnEntry(Pfn);
    ASSERT(Page);

    /* NOTE(review): the counters and free list are touched without the
     * PFN lock -- presumably the caller holds it; verify. */
    Page->u3.e2.ReferenceCount--;
    if (Page->u3.e2.ReferenceCount == 0)
    {
        MmAvailablePages++;
        Page->u3.e1.PageLocation = FreePageList;
        InsertTailList(&FreeUnzeroedPageListHead,
                       &Page->ListEntry);
        UnzeroedPageCount++;
        /* Wake the zero page thread once enough unzeroed pages pile up
         * and its event is not already signalled */
        if (UnzeroedPageCount > 8 && 0 == KeReadStateEvent(&ZeroPageThreadEvent))
        {
            KeSetEvent(&ZeroPageThreadEvent, IO_NO_INCREMENT, FALSE);
        }
    }
}
930
/*
 * Allocate one physical page for the given consumer (MC_*) and stamp it
 * with SwapEntry. Prefers an already-zeroed page; otherwise takes an
 * unzeroed page and zeroes it (except for MC_SYSTEM). Returns the PFN,
 * or 0 when both free lists are empty.
 *
 * NOTE(review): the free lists and counters are manipulated without the
 * PFN lock -- presumably callers serialize; verify.
 */
PFN_TYPE
NTAPI
MmAllocPage(ULONG Type, SWAPENTRY SwapEntry)
{
    PFN_TYPE PfnOffset;
    PLIST_ENTRY ListEntry;
    PPHYSICAL_PAGE PageDescriptor;
    BOOLEAN NeedClear = FALSE;

    DPRINT("MmAllocPage()\n");

    if (IsListEmpty(&FreeZeroedPageListHead))
    {
        if (IsListEmpty(&FreeUnzeroedPageListHead))
        {
            /* Check if this allocation is for the PFN DB itself */
            if (MmNumberOfPhysicalPages == 0)
            {
                ASSERT(FALSE);
            }

            DPRINT1("MmAllocPage(): Out of memory\n");
            return 0;
        }
        /* No zeroed pages: take an unzeroed one and clear it below */
        ListEntry = RemoveTailList(&FreeUnzeroedPageListHead);
        UnzeroedPageCount--;

        PageDescriptor = CONTAINING_RECORD(ListEntry, PHYSICAL_PAGE, ListEntry);

        NeedClear = TRUE;
    }
    else
    {
        ListEntry = RemoveTailList(&FreeZeroedPageListHead);

        PageDescriptor = CONTAINING_RECORD(ListEntry, PHYSICAL_PAGE, ListEntry);
    }

    /* Hand the descriptor to its new consumer */
    PageDescriptor->Consumer = Type;
    PageDescriptor->u3.e2.ReferenceCount = 1;
    PageDescriptor->SavedSwapEntry = SwapEntry;

    MmAvailablePages--;

    /* The PFN is the descriptor's index in the ReactOS PFN database */
    PfnOffset = PageDescriptor - MmPfnDatabase[0];
    /* MC_SYSTEM gets the page as-is; every other consumer gets it zeroed */
    if ((NeedClear) && (Type != MC_SYSTEM))
    {
        MiZeroPage(PfnOffset);
    }

    PageDescriptor->u3.e1.PageLocation = ActiveAndValid;
    return PfnOffset;
}
984
985 NTSTATUS
986 NTAPI
987 MiZeroPage(PFN_TYPE Page)
988 {
989 KIRQL Irql;
990 PVOID TempAddress;
991
992 Irql = KeRaiseIrqlToDpcLevel();
993 TempAddress = MiMapPageToZeroInHyperSpace(Page);
994 if (TempAddress == NULL)
995 {
996 return(STATUS_NO_MEMORY);
997 }
998 memset(TempAddress, 0, PAGE_SIZE);
999 MiUnmapPagesInZeroSpace(TempAddress, 1);
1000 KeLowerIrql(Irql);
1001 return(STATUS_SUCCESS);
1002 }
1003
/*
 * Zero page worker thread: sleeps on ZeroPageThreadEvent (signalled by
 * MmDereferencePage once enough unzeroed pages pile up), then drains
 * FreeUnzeroedPageListHead into FreeZeroedPageListHead. Runs until
 * ZeroPageThreadShouldTerminate is set.
 */
NTSTATUS
NTAPI
MmZeroPageThreadMain(PVOID Ignored)
{
    NTSTATUS Status;
    KIRQL oldIrql;
    PLIST_ENTRY ListEntry;
    PPHYSICAL_PAGE PageDescriptor;
    PFN_TYPE Pfn;
    ULONG Count;

    /* Free initial kernel memory */
    //MiFreeInitMemory();

    /* Set our priority to 0 (lowest, so we only run when idle-ish) */
    KeGetCurrentThread()->BasePriority = 0;
    KeSetPriorityThread(KeGetCurrentThread(), 0);

    while(1)
    {
        Status = KeWaitForSingleObject(&ZeroPageThreadEvent,
                                       0,
                                       KernelMode,
                                       FALSE,
                                       NULL);

        if (ZeroPageThreadShouldTerminate)
        {
            DPRINT1("ZeroPageThread: Terminating\n");
            return STATUS_SUCCESS;
        }
        Count = 0;
        oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
        while (!IsListEmpty(&FreeUnzeroedPageListHead))
        {
            ListEntry = RemoveTailList(&FreeUnzeroedPageListHead);
            UnzeroedPageCount--;
            PageDescriptor = CONTAINING_RECORD(ListEntry, PHYSICAL_PAGE, ListEntry);
            /* We set the page to used, because MmCreateVirtualMapping failed with unused pages */
            /* The PFN lock is dropped while the page is actually zeroed,
             * then re-acquired to requeue the page */
            KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
            Pfn = PageDescriptor - MmPfnDatabase[0];
            Status = MiZeroPage(Pfn);

            oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
            PageDescriptor->u3.e1.PageLocation = ZeroedPageList;
            if (NT_SUCCESS(Status))
            {
                InsertHeadList(&FreeZeroedPageListHead, ListEntry);
                Count++;
            }
            else
            {
                /* Zeroing failed: put the page back on the unzeroed list.
                 * NOTE(review): PageLocation was already set to
                 * ZeroedPageList above even on this failure path -- verify
                 * that is intended. */
                InsertHeadList(&FreeUnzeroedPageListHead, ListEntry);
                UnzeroedPageCount++;
            }

        }
        DPRINT("Zeroed %d pages.\n", Count);
        /* NOTE(review): the event is reset while still holding the PFN
         * queued spinlock -- confirm the ordering is intended. */
        KeResetEvent(&ZeroPageThreadEvent);
        KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
    }

    /* Unreachable: the loop only exits via the return above */
    return STATUS_SUCCESS;
}
1068
1069 /* EOF */