4eec29d95cafedb8ca2e39fb647e08d08f76e8e7
[reactos.git] / reactos / ntoskrnl / mm / freelist.c
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/mm/freelist.c
5 * PURPOSE: Handle the list of free physical pages
6 *
7 * PROGRAMMERS: David Welch (welch@cwcom.net)
8 * Robert Bergkvist
9 */
10
11 /* INCLUDES ****************************************************************/
12
13 #include <ntoskrnl.h>
14 #define NDEBUG
15 #include <debug.h>
16
17 #if defined (ALLOC_PRAGMA)
18 #pragma alloc_text(INIT, MmInitializePageList)
19 #endif
20
21 #define MODULE_INVOLVED_IN_ARM3
22 #include "ARM3/miarm.h"
23
24 /* GLOBALS ****************************************************************/
25
26 //
27 //
28 // ReactOS to NT Physical Page Descriptor Entry Legacy Mapping Definitions
29 //
30 // REACTOS NT
31 //
32 #define RmapListHead AweReferenceCount
33 #define SavedSwapEntry u4.EntireFrame
34 #define RemoveEntryList(x) RemoveEntryList((PLIST_ENTRY)x)
35 #define InsertTailList(x, y) InsertTailList(x, (PLIST_ENTRY)y)
36 #define ListEntry u1
37 #define PHYSICAL_PAGE MMPFN
38 #define PPHYSICAL_PAGE PMMPFN
39
/* The first array contains ReactOS PFNs, the second contains ARM3 PFNs */
PPHYSICAL_PAGE MmPfnDatabase[2];

/* Pages currently sitting on the free/zeroed lists, and the resident-
 * available counter exposed to the balancer */
ULONG MmAvailablePages;
ULONG MmResidentAvailablePages;

/* System-wide commit accounting */
SIZE_T MmTotalCommitLimit;
SIZE_T MmTotalCommittedPages;
SIZE_T MmSharedCommit;
SIZE_T MmDriverCommit;
SIZE_T MmProcessCommit;
SIZE_T MmPagedPoolCommit;
SIZE_T MmPeakCommitment;
/* NOTE(review): lowercase 't' looks like a typo for MmTotalCommitLimitMaximum;
 * renaming would break any external references, so it is only flagged here */
SIZE_T MmtotalCommitLimitMaximum;

/* NT-style page list heads (not all of them are populated by this legacy
 * free-list implementation) */
MMPFNLIST MmZeroedPageListHead;
MMPFNLIST MmFreePageListHead;
MMPFNLIST MmStandbyPageListHead;
MMPFNLIST MmModifiedPageListHead;
MMPFNLIST MmModifiedNoWritePageListHead;

/* List of pages zeroed by the ZPW (MmZeroPageThreadMain) */
static LIST_ENTRY FreeZeroedPageListHead;
/* List of free-but-dirty pages, filled by MmDereferencePage and
 * MmInitializePageList and drained by the zero-page thread */
static LIST_ENTRY FreeUnzeroedPageListHead;

/* Signaled when enough unzeroed pages have accumulated for the ZPW to run */
static KEVENT ZeroPageThreadEvent;
static BOOLEAN ZeroPageThreadShouldTerminate = FALSE;

/* Number of entries currently on FreeUnzeroedPageListHead */
static ULONG UnzeroedPageCount = 0;

/* FUNCTIONS *************************************************************/

/* Bitmap with one bit set for every PFN that backs a user-mode page */
static RTL_BITMAP MiUserPfnBitMap;
76 /* FUNCTIONS *************************************************************/
77
VOID
NTAPI
MiInitializeUserPfnBitmap(VOID)
{
    /* Allocate and zero the bitmap that tracks which PFNs back user-mode
     * pages (consumed by the MmGetLRU*/MmInsertLRU* routines below). */
    PVOID Bitmap;

    /* Allocate enough buffer for the PFN bitmap and align it on 32-bits */
    Bitmap = ExAllocatePoolWithTag(NonPagedPool,
                                   (((MmHighestPhysicalPage + 1) + 31) / 32) * 4,
                                   ' mM');
    /* NOTE(review): ASSERT compiles out on free builds (NDEBUG), so an
     * allocation failure here would surface as a NULL dereference in
     * RtlClearAllBits below -- confirm init-time allocation cannot fail */
    ASSERT(Bitmap);

    /* Initialize it and clear all the bits to begin with */
    RtlInitializeBitMap(&MiUserPfnBitMap,
                        Bitmap,
                        MmHighestPhysicalPage + 1);
    RtlClearAllBits(&MiUserPfnBitMap);
}
96
97 PFN_TYPE
98 NTAPI
99 MmGetLRUFirstUserPage(VOID)
100 {
101 ULONG Position;
102 KIRQL OldIrql;
103
104 /* Find the first user page */
105 OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
106 Position = RtlFindSetBits(&MiUserPfnBitMap, 1, 0);
107 KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
108 if (Position == 0xFFFFFFFF) return 0;
109
110 /* Return it */
111 return Position;
112 }
113
114 VOID
115 NTAPI
116 MmInsertLRULastUserPage(PFN_TYPE Pfn)
117 {
118 KIRQL OldIrql;
119
120 /* Set the page as a user page */
121 OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
122 RtlSetBit(&MiUserPfnBitMap, Pfn);
123 KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
124 }
125
126 PFN_TYPE
127 NTAPI
128 MmGetLRUNextUserPage(PFN_TYPE PreviousPfn)
129 {
130 ULONG Position;
131 KIRQL OldIrql;
132
133 /* Find the next user page */
134 OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
135 Position = RtlFindSetBits(&MiUserPfnBitMap, 1, PreviousPfn + 1);
136 KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
137 if (Position == 0xFFFFFFFF) return 0;
138
139 /* Return it */
140 return Position;
141 }
142
VOID
NTAPI
MmRemoveLRUUserPage(PFN_TYPE Page)
{
    /* Unset the page as a user page.
     * NOTE(review): unlike MmInsertLRULastUserPage and the MmGetLRU*
     * routines above, this clears the shared bitmap WITHOUT taking the
     * PFN lock -- presumably the callers already hold it; verify, since
     * adding the lock here would deadlock if they do, and omitting it
     * races with the locked readers/writers if they don't. */
    RtlClearBit(&MiUserPfnBitMap, Page);
}
150
151 BOOLEAN
152 NTAPI
153 MiIsPfnInUse(IN PMMPFN Pfn1)
154 {
155 return ((Pfn1->u3.e1.PageLocation != FreePageList) &&
156 (Pfn1->u3.e1.PageLocation != ZeroedPageList));
157 }
158
159 PFN_NUMBER
160 NTAPI
161 MiFindContiguousPages(IN PFN_NUMBER LowestPfn,
162 IN PFN_NUMBER HighestPfn,
163 IN PFN_NUMBER BoundaryPfn,
164 IN PFN_NUMBER SizeInPages,
165 IN MEMORY_CACHING_TYPE CacheType)
166 {
167 PFN_NUMBER Page, PageCount, LastPage, Length, BoundaryMask;
168 ULONG i = 0;
169 PMMPFN Pfn1, EndPfn;
170 KIRQL OldIrql;
171 PAGED_CODE ();
172 ASSERT(SizeInPages != 0);
173
174 //
175 // Convert the boundary PFN into an alignment mask
176 //
177 BoundaryMask = ~(BoundaryPfn - 1);
178
179 //
180 // Loop all the physical memory blocks
181 //
182 do
183 {
184 //
185 // Capture the base page and length of this memory block
186 //
187 Page = MmPhysicalMemoryBlock->Run[i].BasePage;
188 PageCount = MmPhysicalMemoryBlock->Run[i].PageCount;
189
190 //
191 // Check how far this memory block will go
192 //
193 LastPage = Page + PageCount;
194
195 //
196 // Trim it down to only the PFNs we're actually interested in
197 //
198 if ((LastPage - 1) > HighestPfn) LastPage = HighestPfn + 1;
199 if (Page < LowestPfn) Page = LowestPfn;
200
201 //
202 // Skip this run if it's empty or fails to contain all the pages we need
203 //
204 if (!(PageCount) || ((Page + SizeInPages) > LastPage)) continue;
205
206 //
207 // Now scan all the relevant PFNs in this run
208 //
209 Length = 0;
210 for (Pfn1 = MiGetPfnEntry(Page); Page < LastPage; Page++, Pfn1++)
211 {
212 //
213 // If this PFN is in use, ignore it
214 //
215 if (MiIsPfnInUse(Pfn1)) continue;
216
217 //
218 // If we haven't chosen a start PFN yet and the caller specified an
219 // alignment, make sure the page matches the alignment restriction
220 //
221 if ((!(Length) && (BoundaryPfn)) &&
222 (((Page ^ (Page + SizeInPages - 1)) & BoundaryMask)))
223 {
224 //
225 // It does not, so bail out
226 //
227 continue;
228 }
229
230 //
231 // Increase the number of valid pages, and check if we have enough
232 //
233 if (++Length == SizeInPages)
234 {
235 //
236 // It appears we've amassed enough legitimate pages, rollback
237 //
238 Pfn1 -= (Length - 1);
239 Page -= (Length - 1);
240
241 //
242 // Acquire the PFN lock
243 //
244 OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
245 do
246 {
247 //
248 // Things might've changed for us. Is the page still free?
249 //
250 if (MiIsPfnInUse(Pfn1)) break;
251
252 //
253 // So far so good. Is this the last confirmed valid page?
254 //
255 if (!--Length)
256 {
257 //
258 // Sanity check that we didn't go out of bounds
259 //
260 ASSERT(i != MmPhysicalMemoryBlock->NumberOfRuns);
261
262 //
263 // Loop until all PFN entries have been processed
264 //
265 EndPfn = Pfn1 - SizeInPages + 1;
266 do
267 {
268 //
269 // If this was an unzeroed page, there are now less
270 //
271 if (Pfn1->u3.e1.PageLocation == ZeroedPageList) UnzeroedPageCount--;
272
273 //
274 // One less free page
275 //
276 MmAvailablePages--;
277
278 //
279 // This PFN is now a used page, set it up
280 //
281 RemoveEntryList(&Pfn1->ListEntry);
282 Pfn1->u3.e2.ReferenceCount = 1;
283 Pfn1->SavedSwapEntry = 0;
284
285 //
286 // Check if it was already zeroed
287 //
288 if (Pfn1->u3.e1.PageLocation != ZeroedPageList)
289 {
290 //
291 // It wasn't, so zero it
292 //
293 MiZeroPage(MiGetPfnEntryIndex(Pfn1));
294 }
295
296 //
297 // Mark it in use
298 //
299 Pfn1->u3.e1.PageLocation = ActiveAndValid;
300
301 //
302 // Check if this is the last PFN, otherwise go on
303 //
304 if (Pfn1 == EndPfn) break;
305 Pfn1--;
306 } while (TRUE);
307
308 //
309 // Mark the first and last PFN so we can find them later
310 //
311 Pfn1->u3.e1.StartOfAllocation = 1;
312 (Pfn1 + SizeInPages - 1)->u3.e1.EndOfAllocation = 1;
313
314 //
315 // Now it's safe to let go of the PFN lock
316 //
317 KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
318
319 //
320 // Quick sanity check that the last PFN is consistent
321 //
322 EndPfn = Pfn1 + SizeInPages;
323 ASSERT(EndPfn == MiGetPfnEntry(Page + 1));
324
325 //
326 // Compute the first page, and make sure it's consistent
327 //
328 Page -= SizeInPages - 1;
329 ASSERT(Pfn1 == MiGetPfnEntry(Page));
330 ASSERT(Page != 0);
331 return Page;
332 }
333
334 //
335 // Keep going. The purpose of this loop is to reconfirm that
336 // after acquiring the PFN lock these pages are still usable
337 //
338 Pfn1++;
339 Page++;
340 } while (TRUE);
341
342 //
343 // If we got here, something changed while we hadn't acquired
344 // the PFN lock yet, so we'll have to restart
345 //
346 KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
347 Length = 0;
348 }
349 }
350 } while (++i != MmPhysicalMemoryBlock->NumberOfRuns);
351
352 //
353 // And if we get here, it means no suitable physical memory runs were found
354 //
355 return 0;
356 }
357
/*
 * Builds an MDL describing up to TotalBytes worth of free physical pages
 * within [LowAddress, HighAddress]. May return fewer pages than requested
 * (callers must cope). Returns NULL if no MDL or no pages were available.
 * NOTE(review): CacheAttribute, MdlFlags and (beyond validation) SkipBytes
 * are currently unused by this implementation.
 */
PMDL
NTAPI
MiAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress,
                      IN PHYSICAL_ADDRESS HighAddress,
                      IN PHYSICAL_ADDRESS SkipBytes,
                      IN SIZE_T TotalBytes,
                      IN MI_PFN_CACHE_ATTRIBUTE CacheAttribute,
                      IN ULONG MdlFlags)
{
    PMDL Mdl;
    PFN_NUMBER PageCount, LowPage, HighPage, SkipPages, PagesFound = 0, Page;
    PPFN_NUMBER MdlPage, LastMdlPage;
    KIRQL OldIrql;
    PLIST_ENTRY ListEntry;
    PPHYSICAL_PAGE Pfn1;
    INT LookForZeroedPages;
    ASSERT (KeGetCurrentIrql() <= APC_LEVEL);

    //
    // Convert the low address into a PFN
    //
    LowPage = (PFN_NUMBER)(LowAddress.QuadPart >> PAGE_SHIFT);

    //
    // Convert, and normalize, the high address into a PFN
    //
    HighPage = (PFN_NUMBER)(HighAddress.QuadPart >> PAGE_SHIFT);
    if (HighPage > MmHighestPhysicalPage) HighPage = MmHighestPhysicalPage;

    //
    // Validate skipbytes and convert them into pages
    // (must be page-aligned; SkipPages is not otherwise used below)
    //
    if (BYTE_OFFSET(SkipBytes.LowPart)) return NULL;
    SkipPages = (PFN_NUMBER)(SkipBytes.QuadPart >> PAGE_SHIFT);

    //
    // Now compute the number of pages the MDL will cover
    //
    PageCount = (PFN_NUMBER)ADDRESS_AND_SIZE_TO_SPAN_PAGES(0, TotalBytes);
    do
    {
        //
        // Try creating an MDL for these many pages
        //
        Mdl = MmCreateMdl(NULL, NULL, PageCount << PAGE_SHIFT);
        if (Mdl) break;

        //
        // This function is not required to return the amount of pages requested
        // In fact, it can return as little as 1 page, and callers are supposed
        // to deal with this scenario. So re-attempt the allocation with less
        // pages than before, and see if it worked this time.
        //
        PageCount -= (PageCount >> 4);
    } while (PageCount);

    //
    // Wow, not even a single page was around!
    //
    if (!Mdl) return NULL;

    //
    // This is where the page array starts....
    //
    MdlPage = (PPFN_NUMBER)(Mdl + 1);

    //
    // Lock the PFN database
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    //
    // Are we looking for any pages, without discriminating?
    //
    if ((LowPage == 0) && (HighPage == MmHighestPhysicalPage))
    {
        //
        // Well then, let's go shopping
        //
        while (PagesFound < PageCount)
        {
            //
            // Do we have zeroed pages?
            //
            if (!IsListEmpty(&FreeZeroedPageListHead))
            {
                //
                // Grab a zero page
                //
                ListEntry = RemoveTailList(&FreeZeroedPageListHead);
            }
            else if (!IsListEmpty(&FreeUnzeroedPageListHead))
            {
                //
                // Nope, grab an unzeroed page
                //
                ListEntry = RemoveTailList(&FreeUnzeroedPageListHead);
                UnzeroedPageCount--;
            }
            else
            {
                //
                // This is not good... hopefully we have at least SOME pages
                //
                ASSERT(PagesFound);
                break;
            }

            //
            // Get the PFN entry for this page
            //
            Pfn1 = CONTAINING_RECORD(ListEntry, PHYSICAL_PAGE, ListEntry);

            //
            // Make sure it's really free
            //
            ASSERT(MiIsPfnInUse(Pfn1) == FALSE);
            ASSERT(Pfn1->u3.e2.ReferenceCount == 0);

            //
            // Allocate it and mark it
            // (PageLocation is deliberately left as-is here; the zeroing
            // loop at the bottom of this function reads it to decide
            // whether the page still needs to be zeroed)
            //
            Pfn1->u3.e1.StartOfAllocation = 1;
            Pfn1->u3.e1.EndOfAllocation = 1;
            Pfn1->u3.e2.ReferenceCount = 1;
            Pfn1->u3.e1.PageLocation = ActiveAndValid;
            Pfn1->SavedSwapEntry = 0;

            //
            // Decrease available pages
            //
            MmAvailablePages--;

            //
            // Save it into the MDL
            //
            *MdlPage++ = MiGetPfnEntryIndex(Pfn1);
            PagesFound++;
        }
    }
    else
    {
        //
        // You want specific range of pages. We'll do this in two runs:
        // first pass takes only zeroed pages, second pass takes the rest
        //
        for (LookForZeroedPages = 1; LookForZeroedPages >= 0; LookForZeroedPages--)
        {
            //
            // Scan the range you specified
            // NOTE(review): '<' excludes HighPage itself even though the
            // normalization above treats it as inclusive -- confirm intent
            //
            for (Page = LowPage; Page < HighPage; Page++)
            {
                //
                // Get the PFN entry for this page
                //
                Pfn1 = MiGetPfnEntry(Page);
                ASSERT(Pfn1);

                //
                // Make sure it's free and if this is our first pass, zeroed
                //
                if (MiIsPfnInUse(Pfn1)) continue;
                if ((Pfn1->u3.e1.PageLocation == ZeroedPageList) != LookForZeroedPages) continue;

                //
                // Sanity checks
                //
                ASSERT(Pfn1->u3.e2.ReferenceCount == 0);

                //
                // Now setup the page and mark it
                //
                Pfn1->u3.e2.ReferenceCount = 1;
                Pfn1->u3.e1.StartOfAllocation = 1;
                Pfn1->u3.e1.EndOfAllocation = 1;
                Pfn1->SavedSwapEntry = 0;

                //
                // If this page was unzeroed, we've consumed such a page
                //
                if (Pfn1->u3.e1.PageLocation != ZeroedPageList) UnzeroedPageCount--;

                //
                // Decrease available pages
                //
                MmAvailablePages--;

                //
                // Save this page into the MDL
                //
                *MdlPage++ = Page;
                if (++PagesFound == PageCount) break;
            }

            //
            // If the first pass was enough, don't keep going, otherwise, go again
            //
            if (PagesFound == PageCount) break;
        }
    }

    //
    // Now release the PFN count
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    //
    // We might've found less pages, but not more ;-)
    //
    if (PagesFound != PageCount) ASSERT(PagesFound < PageCount);
    if (!PagesFound)
    {
        //
        // If we didn't find any pages at all, fail
        //
        DPRINT1("NO MDL PAGES!\n");
        ExFreePool(Mdl);
        return NULL;
    }

    //
    // Write out how many pages we found
    //
    Mdl->ByteCount = (ULONG)(PagesFound << PAGE_SHIFT);

    //
    // Terminate the MDL array if there's certain missing pages
    //
    if (PagesFound != PageCount) *MdlPage = -1;

    //
    // Now go back and loop over all the MDL pages
    //
    MdlPage = (PPFN_NUMBER)(Mdl + 1);
    LastMdlPage = MdlPage + PagesFound;
    while (MdlPage < LastMdlPage)
    {
        //
        // Check if we've reached the end
        //
        Page = *MdlPage++;
        if (Page == (PFN_NUMBER)-1) break;

        //
        // Get the PFN entry for the page and check if we should zero it out
        //
        Pfn1 = MiGetPfnEntry(Page);
        ASSERT(Pfn1);
        if (Pfn1->u3.e1.PageLocation != ZeroedPageList) MiZeroPage(Page);
        Pfn1->u3.e1.PageLocation = ActiveAndValid;
    }

    //
    // We're done, mark the pages as locked (should we lock them, though???)
    //
    Mdl->Process = NULL;
    Mdl->MdlFlags |= MDL_PAGES_LOCKED;
    return Mdl;
}
617
618 VOID
619 NTAPI
620 MmDumpPfnDatabase(VOID)
621 {
622 ULONG i;
623 PPHYSICAL_PAGE Pfn1;
624 PCHAR State = "????", Type = "Unknown";
625 KIRQL OldIrql;
626 ULONG Totals[5] = {0}, FreePages = 0;
627
628 KeRaiseIrql(HIGH_LEVEL, &OldIrql);
629
630 //
631 // Loop the PFN database
632 //
633 for (i = 0; i <= MmHighestPhysicalPage; i++)
634 {
635 Pfn1 = MiGetPfnEntry(i);
636 if (!Pfn1) continue;
637
638 //
639 // Get the type
640 //
641 if (MiIsPfnInUse(Pfn1))
642 {
643 State = "Used";
644 }
645 else
646 {
647 State = "Free";
648 Type = "Free";
649 FreePages++;
650 break;
651 }
652
653 //
654 // Pretty-print the page
655 //
656 DbgPrint("0x%08p:\t%04s\t%20s\t(%02d) [%08p])\n",
657 i << PAGE_SHIFT,
658 State,
659 Type,
660 Pfn1->u3.e2.ReferenceCount,
661 Pfn1->RmapListHead);
662 }
663
664 DbgPrint("Nonpaged Pool: %d pages\t[%d KB]\n", Totals[MC_NPPOOL], (Totals[MC_NPPOOL] << PAGE_SHIFT) / 1024);
665 DbgPrint("Paged Pool: %d pages\t[%d KB]\n", Totals[MC_PPOOL], (Totals[MC_PPOOL] << PAGE_SHIFT) / 1024);
666 DbgPrint("File System Cache: %d pages\t[%d KB]\n", Totals[MC_CACHE], (Totals[MC_CACHE] << PAGE_SHIFT) / 1024);
667 DbgPrint("Process Working Set: %d pages\t[%d KB]\n", Totals[MC_USER], (Totals[MC_USER] << PAGE_SHIFT) / 1024);
668 DbgPrint("System: %d pages\t[%d KB]\n", Totals[MC_SYSTEM], (Totals[MC_SYSTEM] << PAGE_SHIFT) / 1024);
669 DbgPrint("Free: %d pages\t[%d KB]\n", FreePages, (FreePages << PAGE_SHIFT) / 1024);
670
671 KeLowerIrql(OldIrql);
672 }
673
/*
 * Builds the initial ReactOS PFN database state from the loader's memory
 * descriptor list: free/reclaimable ranges go on the unzeroed free list,
 * everything else is marked as an in-use system page. Finally the pages
 * occupied by the PFN database itself are marked used, and the balancer
 * is initialized with the resulting counts.
 */
VOID
NTAPI
MmInitializePageList(VOID)
{
    ULONG i;
    PHYSICAL_PAGE UsedPage;
    PMEMORY_ALLOCATION_DESCRIPTOR Md;
    PLIST_ENTRY NextEntry;
    ULONG NrSystemPages = 0;

    /* Initialize the page lists */
    InitializeListHead(&FreeUnzeroedPageListHead);
    InitializeListHead(&FreeZeroedPageListHead);

    /* This is what a used page looks like */
    RtlZeroMemory(&UsedPage, sizeof(UsedPage));
    UsedPage.u3.e1.PageLocation = ActiveAndValid;
    UsedPage.u3.e2.ReferenceCount = 1;

    /* Loop the memory descriptors */
    for (NextEntry = KeLoaderBlock->MemoryDescriptorListHead.Flink;
         NextEntry != &KeLoaderBlock->MemoryDescriptorListHead;
         NextEntry = NextEntry->Flink)
    {
#undef ListEntry
        /* Get the descriptor. The legacy 'ListEntry' macro at the top of
         * this file maps the MMPFN field name to 'u1', so it must be undone
         * here to name the real ListEntry member of
         * MEMORY_ALLOCATION_DESCRIPTOR, then restored below. */
        Md = CONTAINING_RECORD(NextEntry,
                               MEMORY_ALLOCATION_DESCRIPTOR,
                               ListEntry);
#define ListEntry u1

        /* Skip bad memory */
        if ((Md->MemoryType == LoaderFirmwarePermanent) ||
            (Md->MemoryType == LoaderBBTMemory) ||
            (Md->MemoryType == LoaderSpecialMemory) ||
            (Md->MemoryType == LoaderBad))
        {
            //
            // We do not build PFN entries for this
            //
            continue;
        }
        else if ((Md->MemoryType == LoaderFree) ||
                 (Md->MemoryType == LoaderLoadedProgram) ||
                 (Md->MemoryType == LoaderFirmwareTemporary) ||
                 (Md->MemoryType == LoaderOsloaderStack))
        {
            /* Loop every page part of the block */
            for (i = 0; i < Md->PageCount; i++)
            {
                /* Mark it as a free (but not yet zeroed) page */
                MmPfnDatabase[0][Md->BasePage + i].u3.e1.PageLocation = FreePageList;
                InsertTailList(&FreeUnzeroedPageListHead,
                               &MmPfnDatabase[0][Md->BasePage + i].ListEntry);
                UnzeroedPageCount++;
                MmAvailablePages++;
            }
        }
        else
        {
            /* Loop every page part of the block */
            for (i = 0; i < Md->PageCount; i++)
            {
                /* Everything else is used memory */
                MmPfnDatabase[0][Md->BasePage + i] = UsedPage;
                NrSystemPages++;
            }
        }
    }

    /* Finally handle the pages describing the PFN database themselves
     * (the range consumed from the free descriptor during ARM3 init) */
    for (i = MxOldFreeDescriptor.BasePage; i < MxFreeDescriptor->BasePage; i++)
    {
        /* Mark it as used kernel memory */
        MmPfnDatabase[0][i] = UsedPage;
        NrSystemPages++;
    }

    /* The event starts signaled so the zero-page thread runs once at boot */
    KeInitializeEvent(&ZeroPageThreadEvent, NotificationEvent, TRUE);
    DPRINT("Pages: %x %x\n", MmAvailablePages, NrSystemPages);
    MmInitializeBalancer(MmAvailablePages, NrSystemPages);
}
756
VOID
NTAPI
MmSetRmapListHeadPage(PFN_TYPE Pfn, struct _MM_RMAP_ENTRY* ListHead)
{
    /* Store the head of the rmap (reverse-mapping) chain for this page,
     * serialized by the PFN lock.
     * NOTE(review): RmapListHead is a macro for the LONG AweReferenceCount
     * field, so the (LONG) cast truncates the pointer on a 64-bit build --
     * acceptable only for the 32-bit targets this legacy code supports. */
    KIRQL oldIrql;

    oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    MiGetPfnEntry(Pfn)->RmapListHead = (LONG)ListHead;
    KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
}
767
768 struct _MM_RMAP_ENTRY*
769 NTAPI
770 MmGetRmapListHeadPage(PFN_TYPE Pfn)
771 {
772 KIRQL oldIrql;
773 struct _MM_RMAP_ENTRY* ListHead;
774
775 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
776 ListHead = (struct _MM_RMAP_ENTRY*)MiGetPfnEntry(Pfn)->RmapListHead;
777 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
778
779 return(ListHead);
780 }
781
782 VOID
783 NTAPI
784 MmSetSavedSwapEntryPage(PFN_TYPE Pfn, SWAPENTRY SwapEntry)
785 {
786 KIRQL oldIrql;
787
788 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
789 MiGetPfnEntry(Pfn)->SavedSwapEntry = SwapEntry;
790 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
791 }
792
793 SWAPENTRY
794 NTAPI
795 MmGetSavedSwapEntryPage(PFN_TYPE Pfn)
796 {
797 SWAPENTRY SwapEntry;
798 KIRQL oldIrql;
799
800 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
801 SwapEntry = MiGetPfnEntry(Pfn)->SavedSwapEntry;
802 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
803
804 return(SwapEntry);
805 }
806
807 VOID
808 NTAPI
809 MmReferencePage(PFN_TYPE Pfn)
810 {
811 PPHYSICAL_PAGE Page;
812
813 DPRINT("MmReferencePage(PysicalAddress %x)\n", Pfn << PAGE_SHIFT);
814
815 if (Pfn == 0 || Pfn > MmHighestPhysicalPage)
816 {
817 return;
818 }
819
820 Page = MiGetPfnEntry(Pfn);
821 ASSERT(Page);
822
823 Page->u3.e2.ReferenceCount++;
824 }
825
826 ULONG
827 NTAPI
828 MmGetReferenceCountPage(PFN_TYPE Pfn)
829 {
830 KIRQL oldIrql;
831 ULONG RCount;
832 PPHYSICAL_PAGE Page;
833
834 DPRINT("MmGetReferenceCountPage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
835
836 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
837 Page = MiGetPfnEntry(Pfn);
838 ASSERT(Page);
839
840 RCount = Page->u3.e2.ReferenceCount;
841
842 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
843 return(RCount);
844 }
845
846 BOOLEAN
847 NTAPI
848 MmIsPageInUse(PFN_TYPE Pfn)
849 {
850 return MiIsPfnInUse(MiGetPfnEntry(Pfn));
851 }
852
VOID
NTAPI
MiSetConsumer(IN PFN_TYPE Pfn,
              IN ULONG Type)
{
    /* Mark the page active and valid.
     * NOTE(review): the consumer 'Type' is ignored -- every consumer
     * collapses to ActiveAndValid here, so per-consumer accounting
     * (e.g. the Totals[] in MmDumpPfnDatabase) never happens. */
    MiGetPfnEntry(Pfn)->u3.e1.PageLocation = ActiveAndValid;
}
860
VOID
NTAPI
MmDereferencePage(PFN_TYPE Pfn)
{
    /* Drop one reference from a physical page; when the count hits zero the
     * page is returned to the unzeroed free list and the zero-page thread is
     * woken once enough dirty pages (> 8) have piled up.
     * NOTE(review): the free list and UnzeroedPageCount are touched without
     * taking the PFN lock -- presumably the caller holds it; verify. */
    PPHYSICAL_PAGE Page;

    DPRINT("MmDereferencePage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);

    Page = MiGetPfnEntry(Pfn);
    ASSERT(Page);

    Page->u3.e2.ReferenceCount--;
    if (Page->u3.e2.ReferenceCount == 0)
    {
        /* Last reference gone: page becomes free (but dirty) */
        MmAvailablePages++;
        Page->u3.e1.PageLocation = FreePageList;
        InsertTailList(&FreeUnzeroedPageListHead,
                       &Page->ListEntry);
        UnzeroedPageCount++;
        /* Kick the zero-page thread if it is idle and there is work */
        if (UnzeroedPageCount > 8 && 0 == KeReadStateEvent(&ZeroPageThreadEvent))
        {
            KeSetEvent(&ZeroPageThreadEvent, IO_NO_INCREMENT, FALSE);
        }
    }
}
886
/*
 * Allocates a single physical page for the given consumer type, preferring
 * an already-zeroed page. Unzeroed pages are zeroed on the spot unless the
 * consumer is MC_SYSTEM. SwapEntry is stored in the new page's PFN entry.
 * Returns the PFN, or 0 when no free pages exist.
 * NOTE(review): the free lists are manipulated without taking the PFN
 * lock -- presumably the caller holds it; verify.
 */
PFN_TYPE
NTAPI
MmAllocPage(ULONG Type, SWAPENTRY SwapEntry)
{
    PFN_TYPE PfnOffset;
    PLIST_ENTRY ListEntry;
    PPHYSICAL_PAGE PageDescriptor;
    BOOLEAN NeedClear = FALSE;

    DPRINT("MmAllocPage()\n");

    if (IsListEmpty(&FreeZeroedPageListHead))
    {
        if (IsListEmpty(&FreeUnzeroedPageListHead))
        {
            /* Check if this allocation is for the PFN DB itself */
            if (MmNumberOfPhysicalPages == 0)
            {
                /* Should never run dry while bootstrapping the database */
                ASSERT(FALSE);
            }

            DPRINT1("MmAllocPage(): Out of memory\n");
            return 0;
        }
        /* Fall back to a dirty page; it must be zeroed below */
        ListEntry = RemoveTailList(&FreeUnzeroedPageListHead);
        UnzeroedPageCount--;

        PageDescriptor = CONTAINING_RECORD(ListEntry, PHYSICAL_PAGE, ListEntry);

        NeedClear = TRUE;
    }
    else
    {
        /* Best case: an already-zeroed page is available */
        ListEntry = RemoveTailList(&FreeZeroedPageListHead);

        PageDescriptor = CONTAINING_RECORD(ListEntry, PHYSICAL_PAGE, ListEntry);
    }

    PageDescriptor->u3.e2.ReferenceCount = 1;
    PageDescriptor->SavedSwapEntry = SwapEntry;

    MmAvailablePages--;

    /* Pointer arithmetic against the ReactOS PFN array yields the PFN */
    PfnOffset = PageDescriptor - MmPfnDatabase[0];
    /* MC_SYSTEM consumers get the page as-is, everyone else gets zeroes */
    if ((NeedClear) && (Type != MC_SYSTEM))
    {
        MiZeroPage(PfnOffset);
    }

    PageDescriptor->u3.e1.PageLocation = ActiveAndValid;
    return PfnOffset;
}
939
940 NTSTATUS
941 NTAPI
942 MiZeroPage(PFN_TYPE Page)
943 {
944 KIRQL Irql;
945 PVOID TempAddress;
946
947 Irql = KeRaiseIrqlToDpcLevel();
948 TempAddress = MiMapPageToZeroInHyperSpace(Page);
949 if (TempAddress == NULL)
950 {
951 return(STATUS_NO_MEMORY);
952 }
953 memset(TempAddress, 0, PAGE_SIZE);
954 MiUnmapPagesInZeroSpace(TempAddress, 1);
955 KeLowerIrql(Irql);
956 return(STATUS_SUCCESS);
957 }
958
959 NTSTATUS
960 NTAPI
961 MmZeroPageThreadMain(PVOID Ignored)
962 {
963 NTSTATUS Status;
964 KIRQL oldIrql;
965 PLIST_ENTRY ListEntry;
966 PPHYSICAL_PAGE PageDescriptor;
967 PFN_TYPE Pfn;
968 ULONG Count;
969
970 /* Free initial kernel memory */
971 //MiFreeInitMemory();
972
973 /* Set our priority to 0 */
974 KeGetCurrentThread()->BasePriority = 0;
975 KeSetPriorityThread(KeGetCurrentThread(), 0);
976
977 while(1)
978 {
979 Status = KeWaitForSingleObject(&ZeroPageThreadEvent,
980 0,
981 KernelMode,
982 FALSE,
983 NULL);
984
985 if (ZeroPageThreadShouldTerminate)
986 {
987 DPRINT1("ZeroPageThread: Terminating\n");
988 return STATUS_SUCCESS;
989 }
990 Count = 0;
991 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
992 while (!IsListEmpty(&FreeUnzeroedPageListHead))
993 {
994 ListEntry = RemoveTailList(&FreeUnzeroedPageListHead);
995 UnzeroedPageCount--;
996 PageDescriptor = CONTAINING_RECORD(ListEntry, PHYSICAL_PAGE, ListEntry);
997 /* We set the page to used, because MmCreateVirtualMapping failed with unused pages */
998 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
999 Pfn = PageDescriptor - MmPfnDatabase[0];
1000 Status = MiZeroPage(Pfn);
1001
1002 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
1003 PageDescriptor->u3.e1.PageLocation = ZeroedPageList;
1004 if (NT_SUCCESS(Status))
1005 {
1006 InsertHeadList(&FreeZeroedPageListHead, ListEntry);
1007 Count++;
1008 }
1009 else
1010 {
1011 InsertHeadList(&FreeUnzeroedPageListHead, ListEntry);
1012 UnzeroedPageCount++;
1013 }
1014
1015 }
1016 DPRINT("Zeroed %d pages.\n", Count);
1017 KeResetEvent(&ZeroPageThreadEvent);
1018 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
1019 }
1020
1021 return STATUS_SUCCESS;
1022 }
1023
1024 /* EOF */