2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/mm/freelist.c
5 * PURPOSE: Handle the list of free physical pages
7 * PROGRAMMERS: David Welch (welch@cwcom.net)
11 /* INCLUDES ****************************************************************/
17 #if defined (ALLOC_PRAGMA)
18 #pragma alloc_text(INIT, MmInitializePageList)
21 #define MODULE_INVOLVED_IN_ARM3
22 #include "ARM3/miarm.h"
24 /* GLOBALS ****************************************************************/
// Legacy ReactOS page descriptor, laid out field-by-field over the NT MMPFN
// structure (the C_ASSERT below enforces equal size so the two views alias).
// NOTE(review): fragmentary extraction -- the typedef header, several members
// and the enclosing braces are missing from this view; offsets in the trailing
// comments come from the original layout.
28 // ReactOS to NT Physical Page Descriptor Entry Legacy Mapping Definitions
36 LIST_ENTRY ListEntry
; // 0x000
37 ULONG_PTR RmapListHead
; // 0x008
38 USHORT ReferenceCount
; // 0x00C
// Bit flags bracketing a multi-page allocation (see MiFindContiguousPages,
// which sets StartOfAllocation on the first PFN and EndOfAllocation on the last).
42 USHORT StartOfAllocation
:1;
43 USHORT EndOfAllocation
:1;
52 LONG MapCount
; // 0x10
53 ULONG_PTR SavedSwapEntry
; // 0x018
55 } PHYSICAL_PAGE
, *PPHYSICAL_PAGE
;
// The overlay is only valid if both descriptors are exactly the same size.
57 C_ASSERT(sizeof(PHYSICAL_PAGE
) == sizeof(MMPFN
));
// Legacy aliasing macros so old ReactOS code can keep using its own names
// while the underlying storage is the ARM3/NT PFN database.
59 //#define MiGetPfnEntry(Pfn) ((PPHYSICAL_PAGE)MiGetPfnEntry(Pfn))
60 #define MiGetPfnEntryIndex(x) MiGetPfnEntryIndex((struct _MMPFN*)x)
61 #define LockCount Flags.LockCount
63 /* The first array contains ReactOS PFNs, the second contains ARM3 PFNs */
64 PMMPFN MmPfnDatabase
[2];
// Redirect all uses of MmPfnDatabase in this file through the legacy
// PHYSICAL_PAGE view of the same storage.
65 #define MmPfnDatabase ((PPHYSICAL_PAGE*)MmPfnDatabase)
67 //#define MMPFN PHYSICAL_PAGE
68 //#define PMMPFN PPHYSICAL_PAGE
70 /* The first array contains ReactOS PFNs, the second contains ARM3 PFNs */
71 //PPHYSICAL_PAGE MmPfnDatabase[2];
// Global page-availability and commit accounting counters.
73 PFN_NUMBER MmAvailablePages
;
74 PFN_NUMBER MmResidentAvailablePages
;
76 SIZE_T MmTotalCommitLimit
;
77 SIZE_T MmTotalCommittedPages
;
78 SIZE_T MmSharedCommit
;
79 SIZE_T MmDriverCommit
;
80 SIZE_T MmProcessCommit
;
81 SIZE_T MmPagedPoolCommit
;
82 SIZE_T MmPeakCommitment
;
// NOTE(review): lowercase 't' in "Mmtotal" looks like a naming slip, but the
// name may be referenced elsewhere -- confirm before renaming.
83 SIZE_T MmtotalCommitLimitMaximum
;
// Synchronization/state for the background zero-page thread, and the bitmap
// of PFNs handed out as user pages (one bit per physical page).
85 static KEVENT ZeroPageThreadEvent
;
86 static BOOLEAN ZeroPageThreadShouldTerminate
= FALSE
;
87 static RTL_BITMAP MiUserPfnBitMap
;
89 /* FUNCTIONS *************************************************************/
// Allocate and initialize the user-page PFN bitmap: one bit per physical page
// up to MmHighestPhysicalPage, rounded up to a whole number of 32-bit words.
// NOTE(review): the pool tag argument, any allocation-failure handling, and
// the function braces are not visible in this extraction -- verify against
// the full source.
93 MiInitializeUserPfnBitmap(VOID
)
97 /* Allocate enough buffer for the PFN bitmap and align it on 32-bits */
98 Bitmap
= ExAllocatePoolWithTag(NonPagedPool
,
99 (((MmHighestPhysicalPage
+ 1) + 31) / 32) * 4,
103 /* Initialize it and clear all the bits to begin with */
104 RtlInitializeBitMap(&MiUserPfnBitMap
,
106 MmHighestPhysicalPage
+ 1);
107 RtlClearAllBits(&MiUserPfnBitMap
);
// Return the PFN of the first page marked as a user page, scanning the bitmap
// under the queued PFN spinlock. Returns 0 if no user page bit is set
// (RtlFindSetBits yields 0xFFFFFFFF on failure).
// NOTE(review): the successful-return statement is on lines dropped by the
// extraction; presumably it returns Position -- confirm.
112 MmGetLRUFirstUserPage(VOID
)
117 /* Find the first user page */
118 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
119 Position
= RtlFindSetBits(&MiUserPfnBitMap
, 1, 0);
120 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
121 if (Position
== 0xFFFFFFFF) return 0;
// Mark the given PFN as a user page by setting its bit in MiUserPfnBitMap,
// holding the queued PFN spinlock for the update.
129 MmInsertLRULastUserPage(PFN_NUMBER Pfn
)
133 /* Set the page as a user page */
134 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
135 RtlSetBit(&MiUserPfnBitMap
, Pfn
);
136 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
// Return the PFN of the next user page strictly after PreviousPfn, scanning
// the bitmap under the queued PFN spinlock; returns 0 when no further user
// page exists (0xFFFFFFFF from RtlFindSetBits).
// NOTE(review): the successful-return statement is on lines dropped by the
// extraction -- confirm against the full source.
141 MmGetLRUNextUserPage(PFN_NUMBER PreviousPfn
)
146 /* Find the next user page */
147 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
148 Position
= RtlFindSetBits(&MiUserPfnBitMap
, 1, PreviousPfn
+ 1);
149 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
150 if (Position
== 0xFFFFFFFF) return 0;
// Clear the user-page bit for the given PFN.
// NOTE(review): unlike the set/find helpers above, no PFN-lock acquisition is
// visible here -- either the caller holds the lock or lines were dropped by
// the extraction; confirm.
158 MmRemoveLRUUserPage(PFN_NUMBER Page
)
160 /* Unset the page as a user page */
161 RtlClearBit(&MiUserPfnBitMap
, Page
);
// A PFN counts as "in use" unless it sits on the free or zeroed page list,
// i.e. any PageLocation other than FreePageList/ZeroedPageList.
166 MiIsPfnInUse(IN PMMPFN Pfn1
)
168 return ((Pfn1
->u3
.e1
.PageLocation
!= FreePageList
) &&
169 (Pfn1
->u3
.e1
.PageLocation
!= ZeroedPageList
));
// Search the physical memory runs (MmPhysicalMemoryBlock) for SizeInPages
// consecutive free PFNs within [LowestPfn, HighestPfn], optionally aligned so
// the allocation does not cross a BoundaryPfn boundary. Candidate runs are
// first scanned lock-free, then re-verified and claimed under the queued PFN
// spinlock: each page is unlinked from its free/zeroed list, referenced,
// zeroed if needed, marked ActiveAndValid, and the first/last PFNs are tagged
// with StartOfAllocation/EndOfAllocation.
// NOTE(review): many interior lines (braces, the loop header over runs, the
// return statements, resets of Length) are missing from this extraction;
// comments below describe only what the visible fragments show.
174 MiFindContiguousPages(IN PFN_NUMBER LowestPfn
,
175 IN PFN_NUMBER HighestPfn
,
176 IN PFN_NUMBER BoundaryPfn
,
177 IN PFN_NUMBER SizeInPages
,
178 IN MEMORY_CACHING_TYPE CacheType
)
180 PFN_NUMBER Page
, PageCount
, LastPage
, Length
, BoundaryMask
;
185 ASSERT(SizeInPages
!= 0);
188 // Convert the boundary PFN into an alignment mask
// NOTE(review): assumes BoundaryPfn is a power of two (or zero, in which case
// the mask is unused because the alignment test below also checks BoundaryPfn).
190 BoundaryMask
= ~(BoundaryPfn
- 1);
193 // Loop all the physical memory blocks
198 // Capture the base page and length of this memory block
200 Page
= MmPhysicalMemoryBlock
->Run
[i
].BasePage
;
201 PageCount
= MmPhysicalMemoryBlock
->Run
[i
].PageCount
;
204 // Check how far this memory block will go
206 LastPage
= Page
+ PageCount
;
209 // Trim it down to only the PFNs we're actually interested in
211 if ((LastPage
- 1) > HighestPfn
) LastPage
= HighestPfn
+ 1;
212 if (Page
< LowestPfn
) Page
= LowestPfn
;
215 // Skip this run if it's empty or fails to contain all the pages we need
217 if (!(PageCount
) || ((Page
+ SizeInPages
) > LastPage
)) continue;
220 // Now scan all the relevant PFNs in this run
223 for (Pfn1
= MiGetPfnEntry(Page
); Page
< LastPage
; Page
++, Pfn1
++)
226 // If this PFN is in use, ignore it
228 if (MiIsPfnInUse(Pfn1
)) continue;
231 // If we haven't chosen a start PFN yet and the caller specified an
232 // alignment, make sure the page matches the alignment restriction
234 if ((!(Length
) && (BoundaryPfn
)) &&
235 (((Page
^ (Page
+ SizeInPages
- 1)) & BoundaryMask
)))
238 // It does not, so bail out
244 // Increase the number of valid pages, and check if we have enough
246 if (++Length
== SizeInPages
)
249 // It appears we've amassed enough legitimate pages, rollback
251 Pfn1
-= (Length
- 1);
252 Page
-= (Length
- 1);
255 // Acquire the PFN lock
257 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
261 // Things might've changed for us. Is the page still free?
263 if (MiIsPfnInUse(Pfn1
)) break;
266 // So far so good. Is this the last confirmed valid page?
271 // Sanity check that we didn't go out of bounds
273 ASSERT(i
!= MmPhysicalMemoryBlock
->NumberOfRuns
);
276 // Loop until all PFN entries have been processed
278 EndPfn
= Pfn1
- SizeInPages
+ 1;
282 // This PFN is now a used page, set it up
284 MiUnlinkFreeOrZeroedPage(Pfn1
);
285 Pfn1
->u3
.e2
.ReferenceCount
= 1;
288 // Check if it was already zeroed
290 if (Pfn1
->u3
.e1
.PageLocation
!= ZeroedPageList
)
293 // It wasn't, so zero it
295 MiZeroPage(MiGetPfnEntryIndex(Pfn1
));
301 Pfn1
->u3
.e1
.PageLocation
= ActiveAndValid
;
304 // Check if this is the last PFN, otherwise go on
306 if (Pfn1
== EndPfn
) break;
311 // Mark the first and last PFN so we can find them later
313 Pfn1
->u3
.e1
.StartOfAllocation
= 1;
314 (Pfn1
+ SizeInPages
- 1)->u3
.e1
.EndOfAllocation
= 1;
317 // Now it's safe to let go of the PFN lock
319 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
322 // Quick sanity check that the last PFN is consistent
324 EndPfn
= Pfn1
+ SizeInPages
;
325 ASSERT(EndPfn
== MiGetPfnEntry(Page
+ 1));
328 // Compute the first page, and make sure it's consistent
330 Page
-= SizeInPages
- 1;
331 ASSERT(Pfn1
== MiGetPfnEntry(Page
));
337 // Keep going. The purpose of this loop is to reconfirm that
338 // after acquiring the PFN lock these pages are still usable
345 // If we got here, something changed while we hadn't acquired
346 // the PFN lock yet, so we'll have to restart
348 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
352 } while (++i
!= MmPhysicalMemoryBlock
->NumberOfRuns
);
355 // And if we get here, it means no suitable physical memory runs were found
// Build an MDL describing up to TotalBytes worth of physical pages within
// [LowAddress, HighAddress]. May legitimately return fewer pages than asked
// for (callers must cope); returns NULL if no MDL or no pages at all could be
// obtained. Two allocation strategies: an unrestricted fast path pulling from
// the zeroed/free list heads, and a ranged path that scans PFNs in two passes
// (zeroed pages first, then free). Pages not already zeroed are zeroed after
// the PFN lock is released; the MDL is finally marked MDL_PAGES_LOCKED.
// NOTE(review): braces, the retry loop around MmCreateMdl, the SkipPages
// consumption, and several counters are on lines dropped by this extraction.
362 MiAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress
,
363 IN PHYSICAL_ADDRESS HighAddress
,
364 IN PHYSICAL_ADDRESS SkipBytes
,
365 IN SIZE_T TotalBytes
,
366 IN MI_PFN_CACHE_ATTRIBUTE CacheAttribute
,
370 PFN_NUMBER PageCount
, LowPage
, HighPage
, SkipPages
, PagesFound
= 0, Page
;
371 PPFN_NUMBER MdlPage
, LastMdlPage
;
374 INT LookForZeroedPages
;
375 ASSERT (KeGetCurrentIrql() <= APC_LEVEL
);
378 // Convert the low address into a PFN
380 LowPage
= (PFN_NUMBER
)(LowAddress
.QuadPart
>> PAGE_SHIFT
);
383 // Convert, and normalize, the high address into a PFN
385 HighPage
= (PFN_NUMBER
)(HighAddress
.QuadPart
>> PAGE_SHIFT
);
386 if (HighPage
> MmHighestPhysicalPage
) HighPage
= MmHighestPhysicalPage
;
389 // Validate skipbytes and convert them into pages
// Skip amounts must be page-aligned; reject otherwise.
391 if (BYTE_OFFSET(SkipBytes
.LowPart
)) return NULL
;
392 SkipPages
= (PFN_NUMBER
)(SkipBytes
.QuadPart
>> PAGE_SHIFT
);
395 // Now compute the number of pages the MDL will cover
397 PageCount
= (PFN_NUMBER
)ADDRESS_AND_SIZE_TO_SPAN_PAGES(0, TotalBytes
);
401 // Try creating an MDL for these many pages
403 Mdl
= MmCreateMdl(NULL
, NULL
, PageCount
<< PAGE_SHIFT
);
407 // This function is not required to return the amount of pages requested
408 // In fact, it can return as little as 1 page, and callers are supposed
409 // to deal with this scenario. So re-attempt the allocation with less
410 // pages than before, and see if it worked this time.
// Shrink the request by 1/16th per retry.
412 PageCount
-= (PageCount
>> 4);
416 // Wow, not even a single page was around!
418 if (!Mdl
) return NULL
;
421 // This is where the page array starts....
423 MdlPage
= (PPFN_NUMBER
)(Mdl
+ 1);
426 // Lock the PFN database
428 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
431 // Are we looking for any pages, without discriminating?
433 if ((LowPage
== 0) && (HighPage
== MmHighestPhysicalPage
))
436 // Well then, let's go shopping
438 while (PagesFound
< PageCount
)
441 // Do we have zeroed pages?
443 if (MmZeroedPageListHead
.Total
)
448 Pfn1
= MiRemoveHeadList(&MmZeroedPageListHead
);
450 else if (MmFreePageListHead
.Total
)
453 // Nope, grab an unzeroed page
455 Pfn1
= MiRemoveHeadList(&MmFreePageListHead
);
460 // This is not good... hopefully we have at least SOME pages
467 // Make sure it's really free
469 ASSERT(MiIsPfnInUse(Pfn1
) == FALSE
);
470 ASSERT(Pfn1
->u3
.e2
.ReferenceCount
== 0);
473 // Allocate it and mark it
475 Pfn1
->u3
.e1
.StartOfAllocation
= 1;
476 Pfn1
->u3
.e1
.EndOfAllocation
= 1;
477 Pfn1
->u3
.e2
.ReferenceCount
= 1;
480 // Decrease available pages
485 // Save it into the MDL
487 *MdlPage
++ = MiGetPfnEntryIndex(Pfn1
);
494 // You want specific range of pages. We'll do this in two runs
496 for (LookForZeroedPages
= 1; LookForZeroedPages
>= 0; LookForZeroedPages
--)
499 // Scan the range you specified
501 for (Page
= LowPage
; Page
< HighPage
; Page
++)
504 // Get the PFN entry for this page
506 Pfn1
= MiGetPfnEntry(Page
);
510 // Make sure it's free and if this is our first pass, zeroed
512 if (MiIsPfnInUse(Pfn1
)) continue;
513 if ((Pfn1
->u3
.e1
.PageLocation
== ZeroedPageList
) != LookForZeroedPages
) continue;
518 ASSERT(Pfn1
->u3
.e2
.ReferenceCount
== 0);
521 // Now setup the page and mark it
523 Pfn1
->u3
.e2
.ReferenceCount
= 1;
524 Pfn1
->u3
.e1
.StartOfAllocation
= 1;
525 Pfn1
->u3
.e1
.EndOfAllocation
= 1;
528 // Decrease available pages
533 // Save this page into the MDL
536 if (++PagesFound
== PageCount
) break;
540 // If the first pass was enough, don't keep going, otherwise, go again
542 if (PagesFound
== PageCount
) break;
547 // Now release the PFN count
549 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
552 // We might've found less pages, but not more ;-)
554 if (PagesFound
!= PageCount
) ASSERT(PagesFound
< PageCount
);
558 // If we didn't find any pages at all, fail
560 DPRINT1("NO MDL PAGES!\n");
566 // Write out how many pages we found
568 Mdl
->ByteCount
= (ULONG
)(PagesFound
<< PAGE_SHIFT
);
571 // Terminate the MDL array if there's certain missing pages
573 if (PagesFound
!= PageCount
) *MdlPage
= -1;
576 // Now go back and loop over all the MDL pages
578 MdlPage
= (PPFN_NUMBER
)(Mdl
+ 1);
579 LastMdlPage
= MdlPage
+ PagesFound
;
580 while (MdlPage
< LastMdlPage
)
583 // Check if we've reached the end
586 if (Page
== (PFN_NUMBER
)-1) break;
589 // Get the PFN entry for the page and check if we should zero it out
591 Pfn1
= MiGetPfnEntry(Page
);
593 if (Pfn1
->u3
.e1
.PageLocation
!= ZeroedPageList
) MiZeroPage(Page
);
594 Pfn1
->u3
.e1
.PageLocation
= ActiveAndValid
;
598 // We're done, mark the pages as locked (should we lock them, though???)
601 Mdl
->MdlFlags
|= MDL_PAGES_LOCKED
;
// Debug dump of the entire PFN database: raises to HIGH_LEVEL, walks every
// PFN up to MmHighestPhysicalPage, prints each in-use page with its reference
// count and rmap head, then prints per-consumer totals and the free count.
// NOTE(review): the per-page State/Type classification and the totals
// accumulation are on lines dropped by this extraction.
607 MmDumpPfnDatabase(VOID
)
611 PCHAR State
= "????", Type
= "Unknown";
613 ULONG Totals
[5] = {0}, FreePages
= 0;
// Block out everything (including other processors' interrupts) while dumping.
615 KeRaiseIrql(HIGH_LEVEL
, &OldIrql
);
618 // Loop the PFN database
620 for (i
= 0; i
<= MmHighestPhysicalPage
; i
++)
622 Pfn1
= MiGetPfnEntry(i
);
628 if (MiIsPfnInUse(Pfn1
))
641 // Pretty-print the page
643 DbgPrint("0x%08p:\t%04s\t%20s\t(%02d) [%08p])\n",
647 Pfn1
->u3
.e2
.ReferenceCount
,
648 ((PPHYSICAL_PAGE
)Pfn1
)->RmapListHead
);
651 DbgPrint("Nonpaged Pool: %d pages\t[%d KB]\n", Totals
[MC_NPPOOL
], (Totals
[MC_NPPOOL
] << PAGE_SHIFT
) / 1024);
652 DbgPrint("Paged Pool: %d pages\t[%d KB]\n", Totals
[MC_PPOOL
], (Totals
[MC_PPOOL
] << PAGE_SHIFT
) / 1024);
653 DbgPrint("File System Cache: %d pages\t[%d KB]\n", Totals
[MC_CACHE
], (Totals
[MC_CACHE
] << PAGE_SHIFT
) / 1024);
654 DbgPrint("Process Working Set: %d pages\t[%d KB]\n", Totals
[MC_USER
], (Totals
[MC_USER
] << PAGE_SHIFT
) / 1024);
655 DbgPrint("System: %d pages\t[%d KB]\n", Totals
[MC_SYSTEM
], (Totals
[MC_SYSTEM
] << PAGE_SHIFT
) / 1024);
656 DbgPrint("Free: %d pages\t[%d KB]\n", FreePages
, (FreePages
<< PAGE_SHIFT
) / 1024);
658 KeLowerIrql(OldIrql
);
// One-time initialization of the ReactOS PFN view of the page database.
// Walks the loader's memory descriptor list: bad/firmware/special memory gets
// no PFN entries; free-ish descriptor types become FreePageList entries linked
// onto MmFreePageListHead; everything else is stamped as a used page. Pages
// backing the PFN database itself (between the old and current free
// descriptor base) are also marked used. Finally initializes the zero-page
// thread event and hands the counts to the balancer.
// NOTE(review): braces, the MmAvailablePages/NrSystemPages accounting, and
// the CONTAINING_RECORD third argument are on lines dropped by the extraction.
663 MmInitializePageList(VOID
)
666 PHYSICAL_PAGE UsedPage
;
667 PMEMORY_ALLOCATION_DESCRIPTOR Md
;
668 PLIST_ENTRY NextEntry
;
669 ULONG NrSystemPages
= 0;
671 /* This is what a used page looks like */
672 RtlZeroMemory(&UsedPage
, sizeof(UsedPage
));
673 UsedPage
.u3
.e1
.PageLocation
= ActiveAndValid
;
674 UsedPage
.u3
.e2
.ReferenceCount
= 1;
676 /* Loop the memory descriptors */
677 for (NextEntry
= KeLoaderBlock
->MemoryDescriptorListHead
.Flink
;
678 NextEntry
!= &KeLoaderBlock
->MemoryDescriptorListHead
;
679 NextEntry
= NextEntry
->Flink
)
681 /* Get the descriptor */
682 Md
= CONTAINING_RECORD(NextEntry
,
683 MEMORY_ALLOCATION_DESCRIPTOR
,
686 /* Skip bad memory */
687 if ((Md
->MemoryType
== LoaderFirmwarePermanent
) ||
688 (Md
->MemoryType
== LoaderBBTMemory
) ||
689 (Md
->MemoryType
== LoaderSpecialMemory
) ||
690 (Md
->MemoryType
== LoaderBad
))
693 // We do not build PFN entries for this
697 else if ((Md
->MemoryType
== LoaderFree
) ||
698 (Md
->MemoryType
== LoaderLoadedProgram
) ||
699 (Md
->MemoryType
== LoaderFirmwareTemporary
) ||
700 (Md
->MemoryType
== LoaderOsloaderStack
))
702 /* Loop every page part of the block */
703 for (i
= 0; i
< Md
->PageCount
; i
++)
705 /* Mark it as a free page */
706 MmPfnDatabase
[0][Md
->BasePage
+ i
].u3
.e1
.PageLocation
= FreePageList
;
707 MiInsertInListTail(&MmFreePageListHead
,
708 &MmPfnDatabase
[0][Md
->BasePage
+ i
]);
714 /* Loop every page part of the block */
715 for (i
= 0; i
< Md
->PageCount
; i
++)
717 /* Everything else is used memory */
718 MmPfnDatabase
[0][Md
->BasePage
+ i
] = UsedPage
;
724 /* Finally handle the pages describing the PFN database themselves */
725 for (i
= MxOldFreeDescriptor
.BasePage
; i
< MxFreeDescriptor
->BasePage
; i
++)
727 /* Mark it as used kernel memory */
728 MmPfnDatabase
[0][i
] = UsedPage
;
// Event starts signaled (TRUE) so the zero-page thread runs once at boot.
732 KeInitializeEvent(&ZeroPageThreadEvent
, NotificationEvent
, TRUE
);
733 DPRINT("Pages: %x %x\n", MmAvailablePages
, NrSystemPages
);
734 MmInitializeBalancer(MmAvailablePages
, NrSystemPages
);
// Store the reverse-map (rmap) list head pointer into the legacy RmapListHead
// field of the page's PFN entry, under the queued PFN spinlock.
739 MmSetRmapListHeadPage(PFN_NUMBER Pfn
, struct _MM_RMAP_ENTRY
* ListHead
)
743 oldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
744 ((PPHYSICAL_PAGE
)MiGetPfnEntry(Pfn
))->RmapListHead
= (LONG_PTR
)ListHead
;
745 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
// Read back the rmap list head stored for the given PFN, under the queued
// PFN spinlock.
// NOTE(review): the return statement is on lines dropped by the extraction;
// presumably it returns ListHead -- confirm.
748 struct _MM_RMAP_ENTRY
*
750 MmGetRmapListHeadPage(PFN_NUMBER Pfn
)
753 struct _MM_RMAP_ENTRY
* ListHead
;
755 oldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
756 ListHead
= (struct _MM_RMAP_ENTRY
*)((PPHYSICAL_PAGE
)MiGetPfnEntry(Pfn
))->RmapListHead
;
757 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
// Save a swap entry for the page, stored in the PFN entry's u1.WsIndex slot,
// under the queued PFN spinlock.
764 MmSetSavedSwapEntryPage(PFN_NUMBER Pfn
, SWAPENTRY SwapEntry
)
768 oldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
769 MiGetPfnEntry(Pfn
)->u1
.WsIndex
= SwapEntry
;
770 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
// Read back the swap entry saved in the PFN entry's u1.WsIndex slot, under
// the queued PFN spinlock.
// NOTE(review): the return statement is on lines dropped by the extraction.
775 MmGetSavedSwapEntryPage(PFN_NUMBER Pfn
)
780 oldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
781 SwapEntry
= MiGetPfnEntry(Pfn
)->u1
.WsIndex
;
782 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
// Increment the reference count of the page's PFN entry after a range check
// on the PFN (0 and out-of-range PFNs are rejected).
// NOTE(review): no PFN-lock acquisition is visible around the increment --
// either lines were dropped by the extraction or the caller holds the lock;
// confirm. The rejection path's body is also not visible.
789 MmReferencePage(PFN_NUMBER Pfn
)
793 DPRINT("MmReferencePage(PysicalAddress %x)\n", Pfn
<< PAGE_SHIFT
);
795 if (Pfn
== 0 || Pfn
> MmHighestPhysicalPage
)
800 Page
= MiGetPfnEntry(Pfn
);
803 Page
->u3
.e2
.ReferenceCount
++;
// Read the page's reference count under the queued PFN spinlock.
// NOTE(review): the return statement is on lines dropped by the extraction;
// presumably it returns RCount -- confirm.
808 MmGetReferenceCountPage(PFN_NUMBER Pfn
)
814 DPRINT("MmGetReferenceCountPage(PhysicalAddress %x)\n", Pfn
<< PAGE_SHIFT
);
816 oldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
817 Page
= MiGetPfnEntry(Pfn
);
820 RCount
= Page
->u3
.e2
.ReferenceCount
;
822 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
// Public wrapper: report whether the page at Pfn is in use (not on the free
// or zeroed lists), delegating to MiIsPfnInUse.
828 MmIsPageInUse(PFN_NUMBER Pfn
)
830 return MiIsPfnInUse(MiGetPfnEntry(Pfn
));
// Mark the page as ActiveAndValid. The consumer parameter itself is on lines
// dropped by this extraction.
// NOTE(review): no locking is visible here -- confirm the caller's context.
835 MiSetConsumer(IN PFN_NUMBER Pfn
,
838 MiGetPfnEntry(Pfn
)->u3
.e1
.PageLocation
= ActiveAndValid
;
// Drop one reference from the page. When the count reaches zero the page is
// returned to the free list, and if more than 8 free pages have accumulated
// while the zero-page thread event is unsignaled, the thread is woken to zero
// them in the background.
// NOTE(review): no PFN-lock acquisition is visible around the decrement and
// list insertion -- either lines were dropped or the caller holds the lock;
// confirm.
843 MmDereferencePage(PFN_NUMBER Pfn
)
847 DPRINT("MmDereferencePage(PhysicalAddress %x)\n", Pfn
<< PAGE_SHIFT
);
849 Page
= MiGetPfnEntry(Pfn
);
852 Page
->u3
.e2
.ReferenceCount
--;
853 if (Page
->u3
.e2
.ReferenceCount
== 0)
856 Page
->u3
.e1
.PageLocation
= FreePageList
;
857 MiInsertInListTail(&MmFreePageListHead
, Page
);
858 if (MmFreePageListHead
.Total
> 8 && 0 == KeReadStateEvent(&ZeroPageThreadEvent
))
860 KeSetEvent(&ZeroPageThreadEvent
, IO_NO_INCREMENT
, FALSE
);
// Allocate a single page for the given consumer Type. Prefers the zeroed
// list; falls back to the free list (setting NeedClear on lines not visible
// here); pages taken unzeroed are zeroed via MiZeroPage unless the consumer
// is MC_SYSTEM. The chosen descriptor gets ReferenceCount = 1 and
// PageLocation = ActiveAndValid, and its index in MmPfnDatabase[0] is the
// returned PFN.
// NOTE(review): locking, the out-of-memory return path, and the final return
// statement are on lines dropped by this extraction.
867 MmAllocPage(ULONG Type
)
869 PFN_NUMBER PfnOffset
;
870 PPHYSICAL_PAGE PageDescriptor
;
871 BOOLEAN NeedClear
= FALSE
;
873 DPRINT("MmAllocPage()\n");
875 if (MmZeroedPageListHead
.Total
== 0)
877 if (MmFreePageListHead
.Total
== 0)
879 /* Check if this allocation is for the PFN DB itself */
880 if (MmNumberOfPhysicalPages
== 0)
885 DPRINT1("MmAllocPage(): Out of memory\n");
888 PageDescriptor
= MiRemoveHeadList(&MmFreePageListHead
);
894 PageDescriptor
= MiRemoveHeadList(&MmZeroedPageListHead
);
897 PageDescriptor
->u3
.e2
.ReferenceCount
= 1;
// Descriptor pointer minus database base yields the PFN.
901 PfnOffset
= PageDescriptor
- MmPfnDatabase
[0];
902 if ((NeedClear
) && (Type
!= MC_SYSTEM
))
904 MiZeroPage(PfnOffset
);
907 PageDescriptor
->u3
.e1
.PageLocation
= ActiveAndValid
;
// Zero a physical page by mapping it into hyperspace at DISPATCH_LEVEL,
// memset'ing PAGE_SIZE bytes, and unmapping. Returns STATUS_NO_MEMORY if the
// hyperspace mapping fails, STATUS_SUCCESS otherwise.
// NOTE(review): the IRQL restore (lowering from Irql) is on lines dropped by
// this extraction -- confirm the full source lowers IRQL on both paths.
913 MiZeroPage(PFN_NUMBER Page
)
918 Irql
= KeRaiseIrqlToDpcLevel();
919 TempAddress
= MiMapPageToZeroInHyperSpace(Page
);
920 if (TempAddress
== NULL
)
922 return(STATUS_NO_MEMORY
);
924 memset(TempAddress
, 0, PAGE_SIZE
);
925 MiUnmapPagesInZeroSpace(TempAddress
, 1);
927 return(STATUS_SUCCESS
);
// Body of the background zero-page thread. Runs at base priority 0, waits on
// ZeroPageThreadEvent, and when woken drains the free list: each page is
// zeroed with the PFN lock dropped (MiZeroPage maps into hyperspace), then
// re-acquires the lock and either moves the page to the zeroed list or, on
// failure, puts it back on the free list. Terminates with STATUS_SUCCESS when
// ZeroPageThreadShouldTerminate is set.
// NOTE(review): the outer for(;;) loop, the KeWaitForSingleObject argument
// list, and the Count accounting are on lines dropped by this extraction.
932 MmZeroPageThreadMain(PVOID Ignored
)
936 PPHYSICAL_PAGE PageDescriptor
;
940 /* Free initial kernel memory */
941 //MiFreeInitMemory();
943 /* Set our priority to 0 */
944 KeGetCurrentThread()->BasePriority
= 0;
945 KeSetPriorityThread(KeGetCurrentThread(), 0);
949 Status
= KeWaitForSingleObject(&ZeroPageThreadEvent
,
955 if (ZeroPageThreadShouldTerminate
)
957 DPRINT1("ZeroPageThread: Terminating\n");
958 return STATUS_SUCCESS
;
961 oldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
962 while (MmFreePageListHead
.Total
)
964 PageDescriptor
= MiRemoveHeadList(&MmFreePageListHead
);
965 /* We set the page to used, because MmCreateVirtualMapping failed with unused pages */
// Drop the lock before zeroing: MiZeroPage raises IRQL and maps hyperspace.
966 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
967 Pfn
= PageDescriptor
- MmPfnDatabase
[0];
968 Status
= MiZeroPage(Pfn
);
970 oldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
971 if (NT_SUCCESS(Status
))
973 MiInsertZeroListAtBack(Pfn
);
978 MiInsertInListTail(&MmFreePageListHead
, PageDescriptor
);
979 PageDescriptor
->u3
.e1
.PageLocation
= FreePageList
;
983 DPRINT("Zeroed %d pages.\n", Count
);
984 KeResetEvent(&ZeroPageThreadEvent
);
985 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
988 return STATUS_SUCCESS
;