Merge trunk head (46467)
[reactos.git] / reactos / ntoskrnl / mm / freelist.c
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/mm/freelist.c
5 * PURPOSE: Handle the list of free physical pages
6 *
7 * PROGRAMMERS: David Welch (welch@cwcom.net)
8 * Robert Bergkvist
9 */
10
11 /* INCLUDES ****************************************************************/
12
13 #include <ntoskrnl.h>
14 #define NDEBUG
15 #include <debug.h>
16
17 #if defined (ALLOC_PRAGMA)
18 #pragma alloc_text(INIT, MmInitializePageList)
19 #endif
20
21 #define MODULE_INVOLVED_IN_ARM3
22 #include "ARM3/miarm.h"
23
24 /* GLOBALS ****************************************************************/
25
26 //
27 //
28 // ReactOS to NT Physical Page Descriptor Entry Legacy Mapping Definitions
29 //
//
// Legacy-mapping union: overlays the old ReactOS PHYSICAL_PAGE field layout
// on top of the NT-style MMPFN entry so that legacy code can keep using its
// historical field names while both views share the same storage. The
// C_ASSERT below this typedef enforces that the two layouts are the same size.
//
typedef union
{
MMPFN;// Pfn;   // anonymous NT PFN entry (MS extension) -- primary view

struct
{
    LIST_ENTRY ListEntry;             // 0x000
    ULONG_PTR RmapListHead;           // 0x008
    USHORT ReferenceCount;            // 0x00C
    struct                            // 0x00E (assuming 32-bit packing -- TODO confirm; was garbled "0x00$")
    {
        USHORT _unused1:1;
        USHORT StartOfAllocation:1;   // first page of a contiguous allocation
        USHORT EndOfAllocation:1;     // last page of a contiguous allocation
        USHORT Zero:1;
        USHORT LockCount:4;
        USHORT Consumer:3;
        USHORT _unused2:1;
        USHORT Type:2;
        USHORT _unused3:1;
        USHORT _unused4:1;
    } Flags;
    LONG MapCount;                    // 0x10
    ULONG_PTR SavedSwapEntry;         // 0x018
};
} PHYSICAL_PAGE, *PPHYSICAL_PAGE;
56
/* Both views of a PFN entry must occupy identical storage */
C_ASSERT(sizeof(PHYSICAL_PAGE) == sizeof(MMPFN));

//#define MiGetPfnEntry(Pfn) ((PPHYSICAL_PAGE)MiGetPfnEntry(Pfn))
/* Self-referential macro: safe because a macro name is not re-expanded
   inside its own expansion; this just inserts a cast for legacy callers */
#define MiGetPfnEntryIndex(x) MiGetPfnEntryIndex((struct _MMPFN*)x)
/* CAUTION: token-level alias -- any use of "LockCount" below this point
   silently becomes "Flags.LockCount" */
#define LockCount Flags.LockCount

/* The first array contains ReactOS PFNs, the second contains ARM3 PFNs */
PMMPFN MmPfnDatabase[2];
/* Same self-expansion trick: re-types the array for legacy PHYSICAL_PAGE users */
#define MmPfnDatabase ((PPHYSICAL_PAGE*)MmPfnDatabase)

//#define MMPFN PHYSICAL_PAGE
//#define PMMPFN PPHYSICAL_PAGE

/* The first array contains ReactOS PFNs, the second contains ARM3 PFNs */
//PPHYSICAL_PAGE MmPfnDatabase[2];

/* Counts of currently-free and resident-available physical pages */
PFN_NUMBER MmAvailablePages;
PFN_NUMBER MmResidentAvailablePages;

/* System-wide commit accounting (exported Mm globals) */
SIZE_T MmTotalCommitLimit;
SIZE_T MmTotalCommittedPages;
SIZE_T MmSharedCommit;
SIZE_T MmDriverCommit;
SIZE_T MmProcessCommit;
SIZE_T MmPagedPoolCommit;
SIZE_T MmPeakCommitment;
SIZE_T MmtotalCommitLimitMaximum;

/* Zero-page thread control: signaled when free pages are worth zeroing */
static KEVENT ZeroPageThreadEvent;
static BOOLEAN ZeroPageThreadShouldTerminate = FALSE;
/* One bit per PFN: set = page belongs to a user-mode working set (LRU) */
static RTL_BITMAP MiUserPfnBitMap;
89 /* FUNCTIONS *************************************************************/
90
91 VOID
92 NTAPI
93 MiInitializeUserPfnBitmap(VOID)
94 {
95 PVOID Bitmap;
96
97 /* Allocate enough buffer for the PFN bitmap and align it on 32-bits */
98 Bitmap = ExAllocatePoolWithTag(NonPagedPool,
99 (((MmHighestPhysicalPage + 1) + 31) / 32) * 4,
100 ' mM');
101 ASSERT(Bitmap);
102
103 /* Initialize it and clear all the bits to begin with */
104 RtlInitializeBitMap(&MiUserPfnBitMap,
105 Bitmap,
106 MmHighestPhysicalPage + 1);
107 RtlClearAllBits(&MiUserPfnBitMap);
108 }
109
110 PFN_NUMBER
111 NTAPI
112 MmGetLRUFirstUserPage(VOID)
113 {
114 ULONG Position;
115 KIRQL OldIrql;
116
117 /* Find the first user page */
118 OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
119 Position = RtlFindSetBits(&MiUserPfnBitMap, 1, 0);
120 KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
121 if (Position == 0xFFFFFFFF) return 0;
122
123 /* Return it */
124 return Position;
125 }
126
127 VOID
128 NTAPI
129 MmInsertLRULastUserPage(PFN_NUMBER Pfn)
130 {
131 KIRQL OldIrql;
132
133 /* Set the page as a user page */
134 OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
135 RtlSetBit(&MiUserPfnBitMap, Pfn);
136 KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
137 }
138
139 PFN_NUMBER
140 NTAPI
141 MmGetLRUNextUserPage(PFN_NUMBER PreviousPfn)
142 {
143 ULONG Position;
144 KIRQL OldIrql;
145
146 /* Find the next user page */
147 OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
148 Position = RtlFindSetBits(&MiUserPfnBitMap, 1, PreviousPfn + 1);
149 KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
150 if (Position == 0xFFFFFFFF) return 0;
151
152 /* Return it */
153 return Position;
154 }
155
/* Clears the given PFN's user-page bit in the LRU bitmap. */
VOID
NTAPI
MmRemoveLRUUserPage(PFN_NUMBER Page)
{
    /* Unset the page as a user page */
    /* NOTE(review): unlike the insert/lookup paths above, no PFN lock is
       taken here -- presumably every caller already holds it; verify */
    RtlClearBit(&MiUserPfnBitMap, Page);
}
163
164 BOOLEAN
165 NTAPI
166 MiIsPfnInUse(IN PMMPFN Pfn1)
167 {
168 return ((Pfn1->u3.e1.PageLocation != FreePageList) &&
169 (Pfn1->u3.e1.PageLocation != ZeroedPageList));
170 }
171
/**
 * Searches the physical memory runs for SizeInPages contiguous free pages
 * inside [LowestPfn, HighestPfn], optionally aligned to BoundaryPfn, and
 * claims them (reference count 1, zeroed, ActiveAndValid, first/last pages
 * flagged Start/EndOfAllocation).
 *
 * Returns the first PFN of the claimed range, or 0 if no suitable run exists.
 *
 * Locking: candidate pages are first scanned lock-free; the PFN lock is then
 * taken to re-confirm each page is still free before claiming it. If any page
 * changed state in the meantime, the claim is abandoned and the scan resumes.
 *
 * NOTE(review): CacheType is accepted but never read in this body -- confirm
 * that ignoring the caching type here is intentional.
 */
PFN_NUMBER
NTAPI
MiFindContiguousPages(IN PFN_NUMBER LowestPfn,
                      IN PFN_NUMBER HighestPfn,
                      IN PFN_NUMBER BoundaryPfn,
                      IN PFN_NUMBER SizeInPages,
                      IN MEMORY_CACHING_TYPE CacheType)
{
    PFN_NUMBER Page, PageCount, LastPage, Length, BoundaryMask;
    ULONG i = 0;
    PMMPFN Pfn1, EndPfn;
    KIRQL OldIrql;
    PAGED_CODE ();
    ASSERT(SizeInPages != 0);

    //
    // Convert the boundary PFN into an alignment mask
    // (assumes BoundaryPfn is a power of two when nonzero -- TODO confirm)
    //
    BoundaryMask = ~(BoundaryPfn - 1);

    //
    // Loop all the physical memory blocks
    //
    do
    {
        //
        // Capture the base page and length of this memory block
        //
        Page = MmPhysicalMemoryBlock->Run[i].BasePage;
        PageCount = MmPhysicalMemoryBlock->Run[i].PageCount;

        //
        // Check how far this memory block will go
        //
        LastPage = Page + PageCount;

        //
        // Trim it down to only the PFNs we're actually interested in
        //
        if ((LastPage - 1) > HighestPfn) LastPage = HighestPfn + 1;
        if (Page < LowestPfn) Page = LowestPfn;

        //
        // Skip this run if it's empty or fails to contain all the pages we need
        //
        if (!(PageCount) || ((Page + SizeInPages) > LastPage)) continue;

        //
        // Now scan all the relevant PFNs in this run
        //
        Length = 0;
        for (Pfn1 = MiGetPfnEntry(Page); Page < LastPage; Page++, Pfn1++)
        {
            //
            // If this PFN is in use, ignore it
            // (note: a used page resets nothing here; Length only grows on
            // free pages, so a used page silently breaks no streak -- the
            // streak is instead re-validated under the lock below)
            //
            if (MiIsPfnInUse(Pfn1)) continue;

            //
            // If we haven't chosen a start PFN yet and the caller specified an
            // alignment, make sure the page matches the alignment restriction
            //
            if ((!(Length) && (BoundaryPfn)) &&
                (((Page ^ (Page + SizeInPages - 1)) & BoundaryMask)))
            {
                //
                // It does not, so bail out
                //
                continue;
            }

            //
            // Increase the number of valid pages, and check if we have enough
            //
            if (++Length == SizeInPages)
            {
                //
                // It appears we've amassed enough legitimate pages, rollback
                // to the first page of the candidate range
                //
                Pfn1 -= (Length - 1);
                Page -= (Length - 1);

                //
                // Acquire the PFN lock
                //
                OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
                do
                {
                    //
                    // Things might've changed for us. Is the page still free?
                    //
                    if (MiIsPfnInUse(Pfn1)) break;

                    //
                    // So far so good. Is this the last confirmed valid page?
                    //
                    if (!--Length)
                    {
                        //
                        // Sanity check that we didn't go out of bounds
                        //
                        ASSERT(i != MmPhysicalMemoryBlock->NumberOfRuns);

                        //
                        // Loop until all PFN entries have been processed
                        // (walks BACKWARD from the last confirmed page)
                        //
                        EndPfn = Pfn1 - SizeInPages + 1;
                        do
                        {
                            //
                            // This PFN is now a used page, set it up
                            //
                            MiUnlinkFreeOrZeroedPage(Pfn1);
                            Pfn1->u3.e2.ReferenceCount = 1;

                            //
                            // Check if it was already zeroed
                            //
                            if (Pfn1->u3.e1.PageLocation != ZeroedPageList)
                            {
                                //
                                // It wasn't, so zero it
                                //
                                MiZeroPage(MiGetPfnEntryIndex(Pfn1));
                            }

                            //
                            // Mark it in use
                            //
                            Pfn1->u3.e1.PageLocation = ActiveAndValid;

                            //
                            // Check if this is the last PFN, otherwise go on
                            //
                            if (Pfn1 == EndPfn) break;
                            Pfn1--;
                        } while (TRUE);

                        //
                        // Mark the first and last PFN so we can find them later
                        //
                        Pfn1->u3.e1.StartOfAllocation = 1;
                        (Pfn1 + SizeInPages - 1)->u3.e1.EndOfAllocation = 1;

                        //
                        // Now it's safe to let go of the PFN lock
                        //
                        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

                        //
                        // Quick sanity check that the last PFN is consistent
                        //
                        EndPfn = Pfn1 + SizeInPages;
                        ASSERT(EndPfn == MiGetPfnEntry(Page + 1));

                        //
                        // Compute the first page, and make sure it's consistent
                        //
                        Page -= SizeInPages - 1;
                        ASSERT(Pfn1 == MiGetPfnEntry(Page));
                        ASSERT(Page != 0);
                        return Page;
                    }

                    //
                    // Keep going. The purpose of this loop is to reconfirm that
                    // after acquiring the PFN lock these pages are still usable
                    //
                    Pfn1++;
                    Page++;
                } while (TRUE);

                //
                // If we got here, something changed while we hadn't acquired
                // the PFN lock yet, so we'll have to restart
                //
                KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
                Length = 0;
            }
        }
    } while (++i != MmPhysicalMemoryBlock->NumberOfRuns);

    //
    // And if we get here, it means no suitable physical memory runs were found
    //
    return 0;
}
359
/**
 * Allocates physical pages in the PFN range implied by [LowAddress,
 * HighAddress] and returns them wrapped in an MDL (MDL_PAGES_LOCKED set,
 * Process = NULL). May return FEWER pages than requested -- callers must
 * check Mdl->ByteCount. Returns NULL if no MDL or no pages at all could be
 * obtained. All returned pages end up zeroed and ActiveAndValid with a
 * reference count of 1.
 *
 * NOTE(review): SkipBytes is only validated for page alignment and converted
 * to SkipPages, which is never used afterwards; CacheAttribute and MdlFlags
 * are likewise unread in this body -- confirm these are intentionally
 * unimplemented.
 */
PMDL
NTAPI
MiAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress,
                      IN PHYSICAL_ADDRESS HighAddress,
                      IN PHYSICAL_ADDRESS SkipBytes,
                      IN SIZE_T TotalBytes,
                      IN MI_PFN_CACHE_ATTRIBUTE CacheAttribute,
                      IN ULONG MdlFlags)
{
    PMDL Mdl;
    PFN_NUMBER PageCount, LowPage, HighPage, SkipPages, PagesFound = 0, Page;
    PPFN_NUMBER MdlPage, LastMdlPage;
    KIRQL OldIrql;
    PMMPFN Pfn1;
    INT LookForZeroedPages;
    ASSERT (KeGetCurrentIrql() <= APC_LEVEL);

    //
    // Convert the low address into a PFN
    //
    LowPage = (PFN_NUMBER)(LowAddress.QuadPart >> PAGE_SHIFT);

    //
    // Convert, and normalize, the high address into a PFN
    //
    HighPage = (PFN_NUMBER)(HighAddress.QuadPart >> PAGE_SHIFT);
    if (HighPage > MmHighestPhysicalPage) HighPage = MmHighestPhysicalPage;

    //
    // Validate skipbytes and convert them into pages
    //
    if (BYTE_OFFSET(SkipBytes.LowPart)) return NULL;
    SkipPages = (PFN_NUMBER)(SkipBytes.QuadPart >> PAGE_SHIFT);

    //
    // Now compute the number of pages the MDL will cover
    //
    PageCount = (PFN_NUMBER)ADDRESS_AND_SIZE_TO_SPAN_PAGES(0, TotalBytes);
    do
    {
        //
        // Try creating an MDL for these many pages
        //
        Mdl = MmCreateMdl(NULL, NULL, PageCount << PAGE_SHIFT);
        if (Mdl) break;

        //
        // This function is not required to return the amount of pages requested
        // In fact, it can return as little as 1 page, and callers are supposed
        // to deal with this scenario. So re-attempt the allocation with less
        // pages than before, and see if it worked this time.
        //
        PageCount -= (PageCount >> 4);
    } while (PageCount);

    //
    // Wow, not even a single page was around!
    //
    if (!Mdl) return NULL;

    //
    // This is where the page array starts....
    //
    MdlPage = (PPFN_NUMBER)(Mdl + 1);

    //
    // Lock the PFN database
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    //
    // Are we looking for any pages, without discriminating?
    //
    if ((LowPage == 0) && (HighPage == MmHighestPhysicalPage))
    {
        //
        // Well then, let's go shopping
        //
        while (PagesFound < PageCount)
        {
            //
            // Do we have zeroed pages?
            //
            if (MmZeroedPageListHead.Total)
            {
                //
                // Grab a zero page
                //
                Pfn1 = MiRemoveHeadList(&MmZeroedPageListHead);
            }
            else if (MmFreePageListHead.Total)
            {
                //
                // Nope, grab an unzeroed page
                //
                Pfn1 = MiRemoveHeadList(&MmFreePageListHead);
            }
            else
            {
                //
                // This is not good... hopefully we have at least SOME pages
                //
                ASSERT(PagesFound);
                break;
            }

            //
            // Make sure it's really free
            //
            ASSERT(MiIsPfnInUse(Pfn1) == FALSE);
            ASSERT(Pfn1->u3.e2.ReferenceCount == 0);

            //
            // Allocate it and mark it
            //
            Pfn1->u3.e1.StartOfAllocation = 1;
            Pfn1->u3.e1.EndOfAllocation = 1;
            Pfn1->u3.e2.ReferenceCount = 1;

            //
            // Decrease available pages
            //
            MmAvailablePages--;

            //
            // Save it into the MDL
            //
            *MdlPage++ = MiGetPfnEntryIndex(Pfn1);
            PagesFound++;
        }
    }
    else
    {
        //
        // You want specific range of pages. We'll do this in two runs:
        // pass 1 takes only already-zeroed pages, pass 2 takes any free page
        //
        for (LookForZeroedPages = 1; LookForZeroedPages >= 0; LookForZeroedPages--)
        {
            //
            // Scan the range you specified
            //
            for (Page = LowPage; Page < HighPage; Page++)
            {
                //
                // Get the PFN entry for this page
                //
                Pfn1 = MiGetPfnEntry(Page);
                ASSERT(Pfn1);

                //
                // Make sure it's free and if this is our first pass, zeroed
                //
                if (MiIsPfnInUse(Pfn1)) continue;
                if ((Pfn1->u3.e1.PageLocation == ZeroedPageList) != LookForZeroedPages) continue;

                //
                // Sanity checks
                //
                ASSERT(Pfn1->u3.e2.ReferenceCount == 0);

                //
                // Now setup the page and mark it
                //
                Pfn1->u3.e2.ReferenceCount = 1;
                Pfn1->u3.e1.StartOfAllocation = 1;
                Pfn1->u3.e1.EndOfAllocation = 1;

                //
                // Decrease available pages
                //
                MmAvailablePages--;

                //
                // Save this page into the MDL
                //
                *MdlPage++ = Page;
                if (++PagesFound == PageCount) break;
            }

            //
            // If the first pass was enough, don't keep going, otherwise, go again
            //
            if (PagesFound == PageCount) break;
        }
    }

    //
    // Now release the PFN count
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    //
    // We might've found less pages, but not more ;-)
    //
    if (PagesFound != PageCount) ASSERT(PagesFound < PageCount);
    if (!PagesFound)
    {
        //
        // If we didn't find any pages at all, fail
        //
        DPRINT1("NO MDL PAGES!\n");
        ExFreePool(Mdl);
        return NULL;
    }

    //
    // Write out how many pages we found
    //
    Mdl->ByteCount = (ULONG)(PagesFound << PAGE_SHIFT);

    //
    // Terminate the MDL array if there's certain missing pages
    //
    if (PagesFound != PageCount) *MdlPage = -1;

    //
    // Now go back and loop over all the MDL pages
    //
    MdlPage = (PPFN_NUMBER)(Mdl + 1);
    LastMdlPage = MdlPage + PagesFound;
    while (MdlPage < LastMdlPage)
    {
        //
        // Check if we've reached the end
        //
        Page = *MdlPage++;
        if (Page == (PFN_NUMBER)-1) break;

        //
        // Get the PFN entry for the page and check if we should zero it out
        // (done outside the PFN lock; the pages are already referenced)
        //
        Pfn1 = MiGetPfnEntry(Page);
        ASSERT(Pfn1);
        if (Pfn1->u3.e1.PageLocation != ZeroedPageList) MiZeroPage(Page);
        Pfn1->u3.e1.PageLocation = ActiveAndValid;
    }

    //
    // We're done, mark the pages as locked (should we lock them, though???)
    //
    Mdl->Process = NULL;
    Mdl->MdlFlags |= MDL_PAGES_LOCKED;
    return Mdl;
}
604
605 VOID
606 NTAPI
607 MmDumpPfnDatabase(VOID)
608 {
609 ULONG i;
610 PMMPFN Pfn1;
611 PCHAR State = "????", Type = "Unknown";
612 KIRQL OldIrql;
613 ULONG Totals[5] = {0}, FreePages = 0;
614
615 KeRaiseIrql(HIGH_LEVEL, &OldIrql);
616
617 //
618 // Loop the PFN database
619 //
620 for (i = 0; i <= MmHighestPhysicalPage; i++)
621 {
622 Pfn1 = MiGetPfnEntry(i);
623 if (!Pfn1) continue;
624
625 //
626 // Get the type
627 //
628 if (MiIsPfnInUse(Pfn1))
629 {
630 State = "Used";
631 }
632 else
633 {
634 State = "Free";
635 Type = "Free";
636 FreePages++;
637 break;
638 }
639
640 //
641 // Pretty-print the page
642 //
643 DbgPrint("0x%08p:\t%04s\t%20s\t(%02d) [%08p])\n",
644 i << PAGE_SHIFT,
645 State,
646 Type,
647 Pfn1->u3.e2.ReferenceCount,
648 ((PPHYSICAL_PAGE)Pfn1)->RmapListHead);
649 }
650
651 DbgPrint("Nonpaged Pool: %d pages\t[%d KB]\n", Totals[MC_NPPOOL], (Totals[MC_NPPOOL] << PAGE_SHIFT) / 1024);
652 DbgPrint("Paged Pool: %d pages\t[%d KB]\n", Totals[MC_PPOOL], (Totals[MC_PPOOL] << PAGE_SHIFT) / 1024);
653 DbgPrint("File System Cache: %d pages\t[%d KB]\n", Totals[MC_CACHE], (Totals[MC_CACHE] << PAGE_SHIFT) / 1024);
654 DbgPrint("Process Working Set: %d pages\t[%d KB]\n", Totals[MC_USER], (Totals[MC_USER] << PAGE_SHIFT) / 1024);
655 DbgPrint("System: %d pages\t[%d KB]\n", Totals[MC_SYSTEM], (Totals[MC_SYSTEM] << PAGE_SHIFT) / 1024);
656 DbgPrint("Free: %d pages\t[%d KB]\n", FreePages, (FreePages << PAGE_SHIFT) / 1024);
657
658 KeLowerIrql(OldIrql);
659 }
660
/**
 * Boot-time initialization of the legacy PFN list state: walks the loader's
 * memory descriptor list, putting usable pages on MmFreePageListHead and
 * marking everything else (plus the pages backing the PFN database itself)
 * as used. Also primes the zero-page-thread event and the balancer.
 *
 * Called from INIT (see the alloc_text pragma at the top of this file).
 */
VOID
NTAPI
MmInitializePageList(VOID)
{
    ULONG i;
    PHYSICAL_PAGE UsedPage;
    PMEMORY_ALLOCATION_DESCRIPTOR Md;
    PLIST_ENTRY NextEntry;
    ULONG NrSystemPages = 0;

    /* This is what a used page looks like */
    RtlZeroMemory(&UsedPage, sizeof(UsedPage));
    UsedPage.u3.e1.PageLocation = ActiveAndValid;
    UsedPage.u3.e2.ReferenceCount = 1;

    /* Loop the memory descriptors */
    for (NextEntry = KeLoaderBlock->MemoryDescriptorListHead.Flink;
         NextEntry != &KeLoaderBlock->MemoryDescriptorListHead;
         NextEntry = NextEntry->Flink)
    {
        /* Get the descriptor */
        Md = CONTAINING_RECORD(NextEntry,
                               MEMORY_ALLOCATION_DESCRIPTOR,
                               ListEntry);

        /* Skip bad memory */
        if ((Md->MemoryType == LoaderFirmwarePermanent) ||
            (Md->MemoryType == LoaderBBTMemory) ||
            (Md->MemoryType == LoaderSpecialMemory) ||
            (Md->MemoryType == LoaderBad))
        {
            //
            // We do not build PFN entries for this
            //
            continue;
        }
        else if ((Md->MemoryType == LoaderFree) ||
                 (Md->MemoryType == LoaderLoadedProgram) ||
                 (Md->MemoryType == LoaderFirmwareTemporary) ||
                 (Md->MemoryType == LoaderOsloaderStack))
        {
            /* Loop every page part of the block */
            for (i = 0; i < Md->PageCount; i++)
            {
                /* Mark it as a free page */
                /* (index [0] is the ReactOS PFN array; [1] is ARM3's) */
                MmPfnDatabase[0][Md->BasePage + i].u3.e1.PageLocation = FreePageList;
                MiInsertInListTail(&MmFreePageListHead,
                                   &MmPfnDatabase[0][Md->BasePage + i]);
                MmAvailablePages++;
            }
        }
        else
        {
            /* Loop every page part of the block */
            for (i = 0; i < Md->PageCount; i++)
            {
                /* Everything else is used memory */
                MmPfnDatabase[0][Md->BasePage + i] = UsedPage;
                NrSystemPages++;
            }
        }
    }

    /* Finally handle the pages describing the PFN database themselves */
    /* (the range the PFN DB consumed out of the old free descriptor) */
    for (i = MxOldFreeDescriptor.BasePage; i < MxFreeDescriptor->BasePage; i++)
    {
        /* Mark it as used kernel memory */
        MmPfnDatabase[0][i] = UsedPage;
        NrSystemPages++;
    }

    /* Event starts signaled so the zero-page thread runs a first pass */
    KeInitializeEvent(&ZeroPageThreadEvent, NotificationEvent, TRUE);
    DPRINT("Pages: %x %x\n", MmAvailablePages, NrSystemPages);
    MmInitializeBalancer(MmAvailablePages, NrSystemPages);
}
736
737 VOID
738 NTAPI
739 MmSetRmapListHeadPage(PFN_NUMBER Pfn, struct _MM_RMAP_ENTRY* ListHead)
740 {
741 KIRQL oldIrql;
742
743 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
744 ((PPHYSICAL_PAGE)MiGetPfnEntry(Pfn))->RmapListHead = (LONG_PTR)ListHead;
745 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
746 }
747
748 struct _MM_RMAP_ENTRY*
749 NTAPI
750 MmGetRmapListHeadPage(PFN_NUMBER Pfn)
751 {
752 KIRQL oldIrql;
753 struct _MM_RMAP_ENTRY* ListHead;
754
755 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
756 ListHead = (struct _MM_RMAP_ENTRY*)((PPHYSICAL_PAGE)MiGetPfnEntry(Pfn))->RmapListHead;
757 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
758
759 return(ListHead);
760 }
761
762 VOID
763 NTAPI
764 MmSetSavedSwapEntryPage(PFN_NUMBER Pfn, SWAPENTRY SwapEntry)
765 {
766 KIRQL oldIrql;
767
768 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
769 MiGetPfnEntry(Pfn)->u1.WsIndex = SwapEntry;
770 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
771 }
772
773 SWAPENTRY
774 NTAPI
775 MmGetSavedSwapEntryPage(PFN_NUMBER Pfn)
776 {
777 SWAPENTRY SwapEntry;
778 KIRQL oldIrql;
779
780 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
781 SwapEntry = MiGetPfnEntry(Pfn)->u1.WsIndex;
782 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
783
784 return(SwapEntry);
785 }
786
787 VOID
788 NTAPI
789 MmReferencePage(PFN_NUMBER Pfn)
790 {
791 PPHYSICAL_PAGE Page;
792
793 DPRINT("MmReferencePage(PysicalAddress %x)\n", Pfn << PAGE_SHIFT);
794
795 if (Pfn == 0 || Pfn > MmHighestPhysicalPage)
796 {
797 return;
798 }
799
800 Page = MiGetPfnEntry(Pfn);
801 ASSERT(Page);
802
803 Page->u3.e2.ReferenceCount++;
804 }
805
806 ULONG
807 NTAPI
808 MmGetReferenceCountPage(PFN_NUMBER Pfn)
809 {
810 KIRQL oldIrql;
811 ULONG RCount;
812 PPHYSICAL_PAGE Page;
813
814 DPRINT("MmGetReferenceCountPage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
815
816 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
817 Page = MiGetPfnEntry(Pfn);
818 ASSERT(Page);
819
820 RCount = Page->u3.e2.ReferenceCount;
821
822 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
823 return(RCount);
824 }
825
826 BOOLEAN
827 NTAPI
828 MmIsPageInUse(PFN_NUMBER Pfn)
829 {
830 return MiIsPfnInUse(MiGetPfnEntry(Pfn));
831 }
832
/* Marks the page as active and valid.
   NOTE(review): the Type argument is accepted but never used in this body
   (the legacy consumer field is not written) -- confirm this is intended. */
VOID
NTAPI
MiSetConsumer(IN PFN_NUMBER Pfn,
              IN ULONG Type)
{
    MiGetPfnEntry(Pfn)->u3.e1.PageLocation = ActiveAndValid;
}
840
/**
 * Drops one reference from the given physical page; when the count reaches
 * zero, the page is returned to the free list and the zero-page thread is
 * woken once enough free pages have accumulated.
 *
 * NOTE(review): no PFN lock is taken around the decrement or the list
 * insertion -- presumably callers hold it already; verify.
 */
VOID
NTAPI
MmDereferencePage(PFN_NUMBER Pfn)
{
    PPHYSICAL_PAGE Page;

    DPRINT("MmDereferencePage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);

    Page = MiGetPfnEntry(Pfn);
    ASSERT(Page);

    Page->u3.e2.ReferenceCount--;
    if (Page->u3.e2.ReferenceCount == 0)
    {
        /* Last reference gone: page becomes free again */
        MmAvailablePages++;
        Page->u3.e1.PageLocation = FreePageList;
        MiInsertInListTail(&MmFreePageListHead, Page);
        /* Kick the zero-page thread once >8 free pages pile up and the
           event isn't already signaled */
        if (MmFreePageListHead.Total > 8 && 0 == KeReadStateEvent(&ZeroPageThreadEvent))
        {
            KeSetEvent(&ZeroPageThreadEvent, IO_NO_INCREMENT, FALSE);
        }
    }
}
864
/**
 * Allocates a single physical page of the given consumer Type, preferring
 * the zeroed list and falling back to the free list (zeroing the page on
 * the fly unless Type == MC_SYSTEM). Returns the PFN, or 0 when out of
 * memory. The returned page has ReferenceCount 1 and is ActiveAndValid.
 *
 * NOTE(review): no PFN lock is taken in this routine -- presumably callers
 * hold it; verify.
 */
PFN_NUMBER
NTAPI
MmAllocPage(ULONG Type)
{
    PFN_NUMBER PfnOffset;
    PPHYSICAL_PAGE PageDescriptor;
    BOOLEAN NeedClear = FALSE;

    DPRINT("MmAllocPage()\n");

    if (MmZeroedPageListHead.Total == 0)
    {
        if (MmFreePageListHead.Total == 0)
        {
            /* Check if this allocation is for the PFN DB itself */
            if (MmNumberOfPhysicalPages == 0)
            {
                ASSERT(FALSE);
            }

            DPRINT1("MmAllocPage(): Out of memory\n");
            return 0;
        }
        /* No pre-zeroed page available: take a dirty one and zero it later */
        PageDescriptor = MiRemoveHeadList(&MmFreePageListHead);

        NeedClear = TRUE;
    }
    else
    {
        PageDescriptor = MiRemoveHeadList(&MmZeroedPageListHead);
    }

    PageDescriptor->u3.e2.ReferenceCount = 1;

    MmAvailablePages--;

    /* PFN = index of the descriptor within the ReactOS PFN array */
    PfnOffset = PageDescriptor - MmPfnDatabase[0];
    if ((NeedClear) && (Type != MC_SYSTEM))
    {
        MiZeroPage(PfnOffset);
    }

    PageDescriptor->u3.e1.PageLocation = ActiveAndValid;
    return PfnOffset;
}
910
911 NTSTATUS
912 NTAPI
913 MiZeroPage(PFN_NUMBER Page)
914 {
915 KIRQL Irql;
916 PVOID TempAddress;
917
918 Irql = KeRaiseIrqlToDpcLevel();
919 TempAddress = MiMapPageToZeroInHyperSpace(Page);
920 if (TempAddress == NULL)
921 {
922 return(STATUS_NO_MEMORY);
923 }
924 memset(TempAddress, 0, PAGE_SIZE);
925 MiUnmapPagesInZeroSpace(TempAddress, 1);
926 KeLowerIrql(Irql);
927 return(STATUS_SUCCESS);
928 }
929
/**
 * Zero-page thread: waits on ZeroPageThreadEvent, then drains the free page
 * list, zeroing each page and moving it to the zeroed list. Pages that fail
 * to zero are put back on the free list. Runs at priority 0 and exits when
 * ZeroPageThreadShouldTerminate is set.
 *
 * Locking: the PFN lock is dropped around each MiZeroPage call (hyperspace
 * mapping raises IRQL itself) and re-taken before touching the lists again.
 */
NTSTATUS
NTAPI
MmZeroPageThreadMain(PVOID Ignored)
{
    NTSTATUS Status;
    KIRQL oldIrql;
    PPHYSICAL_PAGE PageDescriptor;
    PFN_NUMBER Pfn;
    ULONG Count;

    /* Free initial kernel memory */
    //MiFreeInitMemory();

    /* Set our priority to 0 */
    KeGetCurrentThread()->BasePriority = 0;
    KeSetPriorityThread(KeGetCurrentThread(), 0);

    while(1)
    {
        Status = KeWaitForSingleObject(&ZeroPageThreadEvent,
                                       0,
                                       KernelMode,
                                       FALSE,
                                       NULL);

        if (ZeroPageThreadShouldTerminate)
        {
            DPRINT1("ZeroPageThread: Terminating\n");
            return STATUS_SUCCESS;
        }
        Count = 0;
        oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
        while (MmFreePageListHead.Total)
        {
            PageDescriptor = MiRemoveHeadList(&MmFreePageListHead);
            /* We set the page to used, because MmCreateVirtualMapping failed with unused pages */
            /* Drop the lock while the (slow) zeroing happens */
            KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
            Pfn = PageDescriptor - MmPfnDatabase[0];
            Status = MiZeroPage(Pfn);

            oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
            if (NT_SUCCESS(Status))
            {
                MiInsertZeroListAtBack(Pfn);
                Count++;
            }
            else
            {
                /* Zeroing failed: return the page to the free list */
                MiInsertInListTail(&MmFreePageListHead, PageDescriptor);
                PageDescriptor->u3.e1.PageLocation = FreePageList;
            }

        }
        DPRINT("Zeroed %d pages.\n", Count);
        /* NOTE(review): the event is reset while still holding the PFN
           lock; confirm this ordering vs. the KeSetEvent in
           MmDereferencePage cannot lose a wakeup */
        KeResetEvent(&ZeroPageThreadEvent);
        KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
    }

    return STATUS_SUCCESS;
}
990
991 /* EOF */