/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/mm/freelist.c
 * PURPOSE:         Handle the list of free physical pages
 * PROGRAMMERS:     David Welch (welch@cwcom.net)
 */
11 /* INCLUDES ****************************************************************/
17 #if defined (ALLOC_PRAGMA)
18 #pragma alloc_text(INIT, MmInitializePageList)
21 #define MODULE_INVOLVED_IN_ARM3
22 #include "ARM3/miarm.h"
24 /* GLOBALS ****************************************************************/
28 // ReactOS to NT Physical Page Descriptor Entry Legacy Mapping Definitions
32 #define RmapListHead AweReferenceCount
33 #define PHYSICAL_PAGE MMPFN
34 #define PPHYSICAL_PAGE PMMPFN
36 /* The first array contains ReactOS PFNs, the second contains ARM3 PFNs */
37 PPHYSICAL_PAGE MmPfnDatabase
[2];
39 PFN_NUMBER MmAvailablePages
;
40 PFN_NUMBER MmResidentAvailablePages
;
41 PFN_NUMBER MmResidentAvailableAtInit
;
43 SIZE_T MmTotalCommitLimit
;
44 SIZE_T MmTotalCommittedPages
;
45 SIZE_T MmSharedCommit
;
46 SIZE_T MmDriverCommit
;
47 SIZE_T MmProcessCommit
;
48 SIZE_T MmPagedPoolCommit
;
49 SIZE_T MmPeakCommitment
;
50 SIZE_T MmtotalCommitLimitMaximum
;
52 static KEVENT ZeroPageThreadEvent
;
53 static BOOLEAN ZeroPageThreadShouldTerminate
= FALSE
;
54 static RTL_BITMAP MiUserPfnBitMap
;
56 /* FUNCTIONS *************************************************************/
60 MiInitializeUserPfnBitmap(VOID
)
64 /* Allocate enough buffer for the PFN bitmap and align it on 32-bits */
65 Bitmap
= ExAllocatePoolWithTag(NonPagedPool
,
66 (((MmHighestPhysicalPage
+ 1) + 31) / 32) * 4,
70 /* Initialize it and clear all the bits to begin with */
71 RtlInitializeBitMap(&MiUserPfnBitMap
,
73 MmHighestPhysicalPage
+ 1);
74 RtlClearAllBits(&MiUserPfnBitMap
);
79 MmGetLRUFirstUserPage(VOID
)
84 /* Find the first user page */
85 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
86 Position
= RtlFindSetBits(&MiUserPfnBitMap
, 1, 0);
87 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
88 if (Position
== 0xFFFFFFFF) return 0;
96 MmInsertLRULastUserPage(PFN_TYPE Pfn
)
100 /* Set the page as a user page */
101 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
102 RtlSetBit(&MiUserPfnBitMap
, Pfn
);
103 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
108 MmGetLRUNextUserPage(PFN_TYPE PreviousPfn
)
113 /* Find the next user page */
114 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
115 Position
= RtlFindSetBits(&MiUserPfnBitMap
, 1, PreviousPfn
+ 1);
116 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
117 if (Position
== 0xFFFFFFFF) return 0;
125 MmRemoveLRUUserPage(PFN_TYPE Page
)
127 /* Unset the page as a user page */
128 RtlClearBit(&MiUserPfnBitMap
, Page
);
133 MiIsPfnInUse(IN PMMPFN Pfn1
)
135 return ((Pfn1
->u3
.e1
.PageLocation
!= FreePageList
) &&
136 (Pfn1
->u3
.e1
.PageLocation
!= ZeroedPageList
));
141 MiFindContiguousPages(IN PFN_NUMBER LowestPfn
,
142 IN PFN_NUMBER HighestPfn
,
143 IN PFN_NUMBER BoundaryPfn
,
144 IN PFN_NUMBER SizeInPages
,
145 IN MEMORY_CACHING_TYPE CacheType
)
147 PFN_NUMBER Page
, PageCount
, LastPage
, Length
, BoundaryMask
;
152 ASSERT(SizeInPages
!= 0);
155 // Convert the boundary PFN into an alignment mask
157 BoundaryMask
= ~(BoundaryPfn
- 1);
160 // Loop all the physical memory blocks
165 // Capture the base page and length of this memory block
167 Page
= MmPhysicalMemoryBlock
->Run
[i
].BasePage
;
168 PageCount
= MmPhysicalMemoryBlock
->Run
[i
].PageCount
;
171 // Check how far this memory block will go
173 LastPage
= Page
+ PageCount
;
176 // Trim it down to only the PFNs we're actually interested in
178 if ((LastPage
- 1) > HighestPfn
) LastPage
= HighestPfn
+ 1;
179 if (Page
< LowestPfn
) Page
= LowestPfn
;
182 // Skip this run if it's empty or fails to contain all the pages we need
184 if (!(PageCount
) || ((Page
+ SizeInPages
) > LastPage
)) continue;
187 // Now scan all the relevant PFNs in this run
190 for (Pfn1
= MiGetPfnEntry(Page
); Page
< LastPage
; Page
++, Pfn1
++)
193 // If this PFN is in use, ignore it
195 if (MiIsPfnInUse(Pfn1
)) continue;
198 // If we haven't chosen a start PFN yet and the caller specified an
199 // alignment, make sure the page matches the alignment restriction
201 if ((!(Length
) && (BoundaryPfn
)) &&
202 (((Page
^ (Page
+ SizeInPages
- 1)) & BoundaryMask
)))
205 // It does not, so bail out
211 // Increase the number of valid pages, and check if we have enough
213 if (++Length
== SizeInPages
)
216 // It appears we've amassed enough legitimate pages, rollback
218 Pfn1
-= (Length
- 1);
219 Page
-= (Length
- 1);
222 // Acquire the PFN lock
224 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
228 // Things might've changed for us. Is the page still free?
230 if (MiIsPfnInUse(Pfn1
)) break;
233 // So far so good. Is this the last confirmed valid page?
238 // Sanity check that we didn't go out of bounds
240 ASSERT(i
!= MmPhysicalMemoryBlock
->NumberOfRuns
);
243 // Loop until all PFN entries have been processed
245 EndPfn
= Pfn1
- SizeInPages
+ 1;
249 // This PFN is now a used page, set it up
251 MiUnlinkFreeOrZeroedPage(Pfn1
);
252 Pfn1
->u3
.e2
.ReferenceCount
= 1;
255 // Check if it was already zeroed
257 if (Pfn1
->u3
.e1
.PageLocation
!= ZeroedPageList
)
260 // It wasn't, so zero it
262 MiZeroPage(MiGetPfnEntryIndex(Pfn1
));
268 Pfn1
->u3
.e1
.PageLocation
= ActiveAndValid
;
271 // Check if this is the last PFN, otherwise go on
273 if (Pfn1
== EndPfn
) break;
278 // Mark the first and last PFN so we can find them later
280 Pfn1
->u3
.e1
.StartOfAllocation
= 1;
281 (Pfn1
+ SizeInPages
- 1)->u3
.e1
.EndOfAllocation
= 1;
284 // Now it's safe to let go of the PFN lock
286 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
289 // Quick sanity check that the last PFN is consistent
291 EndPfn
= Pfn1
+ SizeInPages
;
292 ASSERT(EndPfn
== MiGetPfnEntry(Page
+ 1));
295 // Compute the first page, and make sure it's consistent
297 Page
-= SizeInPages
- 1;
298 ASSERT(Pfn1
== MiGetPfnEntry(Page
));
304 // Keep going. The purpose of this loop is to reconfirm that
305 // after acquiring the PFN lock these pages are still usable
312 // If we got here, something changed while we hadn't acquired
313 // the PFN lock yet, so we'll have to restart
315 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
319 } while (++i
!= MmPhysicalMemoryBlock
->NumberOfRuns
);
322 // And if we get here, it means no suitable physical memory runs were found
329 MiAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress
,
330 IN PHYSICAL_ADDRESS HighAddress
,
331 IN PHYSICAL_ADDRESS SkipBytes
,
332 IN SIZE_T TotalBytes
,
333 IN MI_PFN_CACHE_ATTRIBUTE CacheAttribute
,
337 PFN_NUMBER PageCount
, LowPage
, HighPage
, SkipPages
, PagesFound
= 0, Page
;
338 PPFN_NUMBER MdlPage
, LastMdlPage
;
341 INT LookForZeroedPages
;
342 ASSERT (KeGetCurrentIrql() <= APC_LEVEL
);
345 // Convert the low address into a PFN
347 LowPage
= (PFN_NUMBER
)(LowAddress
.QuadPart
>> PAGE_SHIFT
);
350 // Convert, and normalize, the high address into a PFN
352 HighPage
= (PFN_NUMBER
)(HighAddress
.QuadPart
>> PAGE_SHIFT
);
353 if (HighPage
> MmHighestPhysicalPage
) HighPage
= MmHighestPhysicalPage
;
356 // Validate skipbytes and convert them into pages
358 if (BYTE_OFFSET(SkipBytes
.LowPart
)) return NULL
;
359 SkipPages
= (PFN_NUMBER
)(SkipBytes
.QuadPart
>> PAGE_SHIFT
);
362 // Now compute the number of pages the MDL will cover
364 PageCount
= (PFN_NUMBER
)ADDRESS_AND_SIZE_TO_SPAN_PAGES(0, TotalBytes
);
368 // Try creating an MDL for these many pages
370 Mdl
= MmCreateMdl(NULL
, NULL
, PageCount
<< PAGE_SHIFT
);
374 // This function is not required to return the amount of pages requested
375 // In fact, it can return as little as 1 page, and callers are supposed
376 // to deal with this scenario. So re-attempt the allocation with less
377 // pages than before, and see if it worked this time.
379 PageCount
-= (PageCount
>> 4);
383 // Wow, not even a single page was around!
385 if (!Mdl
) return NULL
;
388 // This is where the page array starts....
390 MdlPage
= (PPFN_NUMBER
)(Mdl
+ 1);
393 // Lock the PFN database
395 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
398 // Are we looking for any pages, without discriminating?
400 if ((LowPage
== 0) && (HighPage
== MmHighestPhysicalPage
))
403 // Well then, let's go shopping
405 while (PagesFound
< PageCount
)
408 // Do we have zeroed pages?
410 if (MmZeroedPageListHead
.Total
)
415 Pfn1
= MiRemoveHeadList(&MmZeroedPageListHead
);
417 else if (MmFreePageListHead
.Total
)
420 // Nope, grab an unzeroed page
422 Pfn1
= MiRemoveHeadList(&MmFreePageListHead
);
427 // This is not good... hopefully we have at least SOME pages
434 // Make sure it's really free
436 ASSERT(MiIsPfnInUse(Pfn1
) == FALSE
);
437 ASSERT(Pfn1
->u3
.e2
.ReferenceCount
== 0);
440 // Allocate it and mark it
442 Pfn1
->u3
.e1
.StartOfAllocation
= 1;
443 Pfn1
->u3
.e1
.EndOfAllocation
= 1;
444 Pfn1
->u3
.e2
.ReferenceCount
= 1;
447 // Decrease available pages
452 // Save it into the MDL
454 *MdlPage
++ = MiGetPfnEntryIndex(Pfn1
);
461 // You want specific range of pages. We'll do this in two runs
463 for (LookForZeroedPages
= 1; LookForZeroedPages
>= 0; LookForZeroedPages
--)
466 // Scan the range you specified
468 for (Page
= LowPage
; Page
< HighPage
; Page
++)
471 // Get the PFN entry for this page
473 Pfn1
= MiGetPfnEntry(Page
);
477 // Make sure it's free and if this is our first pass, zeroed
479 if (MiIsPfnInUse(Pfn1
)) continue;
480 if ((Pfn1
->u3
.e1
.PageLocation
== ZeroedPageList
) != LookForZeroedPages
) continue;
485 ASSERT(Pfn1
->u3
.e2
.ReferenceCount
== 0);
488 // Now setup the page and mark it
490 Pfn1
->u3
.e2
.ReferenceCount
= 1;
491 Pfn1
->u3
.e1
.StartOfAllocation
= 1;
492 Pfn1
->u3
.e1
.EndOfAllocation
= 1;
495 // Decrease available pages
500 // Save this page into the MDL
503 if (++PagesFound
== PageCount
) break;
507 // If the first pass was enough, don't keep going, otherwise, go again
509 if (PagesFound
== PageCount
) break;
514 // Now release the PFN count
516 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
519 // We might've found less pages, but not more ;-)
521 if (PagesFound
!= PageCount
) ASSERT(PagesFound
< PageCount
);
525 // If we didn't find any pages at all, fail
527 DPRINT1("NO MDL PAGES!\n");
533 // Write out how many pages we found
535 Mdl
->ByteCount
= (ULONG
)(PagesFound
<< PAGE_SHIFT
);
538 // Terminate the MDL array if there's certain missing pages
540 if (PagesFound
!= PageCount
) *MdlPage
= -1;
543 // Now go back and loop over all the MDL pages
545 MdlPage
= (PPFN_NUMBER
)(Mdl
+ 1);
546 LastMdlPage
= MdlPage
+ PagesFound
;
547 while (MdlPage
< LastMdlPage
)
550 // Check if we've reached the end
553 if (Page
== (PFN_NUMBER
)-1) break;
556 // Get the PFN entry for the page and check if we should zero it out
558 Pfn1
= MiGetPfnEntry(Page
);
560 if (Pfn1
->u3
.e1
.PageLocation
!= ZeroedPageList
) MiZeroPage(Page
);
561 Pfn1
->u3
.e1
.PageLocation
= ActiveAndValid
;
565 // We're done, mark the pages as locked (should we lock them, though???)
568 Mdl
->MdlFlags
|= MDL_PAGES_LOCKED
;
574 MmDumpPfnDatabase(VOID
)
578 PCHAR State
= "????", Type
= "Unknown";
580 ULONG Totals
[5] = {0}, FreePages
= 0;
582 KeRaiseIrql(HIGH_LEVEL
, &OldIrql
);
585 // Loop the PFN database
587 for (i
= 0; i
<= MmHighestPhysicalPage
; i
++)
589 Pfn1
= MiGetPfnEntry(i
);
595 if (MiIsPfnInUse(Pfn1
))
608 // Pretty-print the page
610 DbgPrint("0x%08p:\t%04s\t%20s\t(%02d) [%08p])\n",
614 Pfn1
->u3
.e2
.ReferenceCount
,
618 DbgPrint("Nonpaged Pool: %d pages\t[%d KB]\n", Totals
[MC_NPPOOL
], (Totals
[MC_NPPOOL
] << PAGE_SHIFT
) / 1024);
619 DbgPrint("Paged Pool: %d pages\t[%d KB]\n", Totals
[MC_PPOOL
], (Totals
[MC_PPOOL
] << PAGE_SHIFT
) / 1024);
620 DbgPrint("File System Cache: %d pages\t[%d KB]\n", Totals
[MC_CACHE
], (Totals
[MC_CACHE
] << PAGE_SHIFT
) / 1024);
621 DbgPrint("Process Working Set: %d pages\t[%d KB]\n", Totals
[MC_USER
], (Totals
[MC_USER
] << PAGE_SHIFT
) / 1024);
622 DbgPrint("System: %d pages\t[%d KB]\n", Totals
[MC_SYSTEM
], (Totals
[MC_SYSTEM
] << PAGE_SHIFT
) / 1024);
623 DbgPrint("Free: %d pages\t[%d KB]\n", FreePages
, (FreePages
<< PAGE_SHIFT
) / 1024);
625 KeLowerIrql(OldIrql
);
630 MmInitializePageList(VOID
)
633 PHYSICAL_PAGE UsedPage
;
634 PMEMORY_ALLOCATION_DESCRIPTOR Md
;
635 PLIST_ENTRY NextEntry
;
636 ULONG NrSystemPages
= 0;
638 /* This is what a used page looks like */
639 RtlZeroMemory(&UsedPage
, sizeof(UsedPage
));
640 UsedPage
.u3
.e1
.PageLocation
= ActiveAndValid
;
641 UsedPage
.u3
.e2
.ReferenceCount
= 1;
643 /* Loop the memory descriptors */
644 for (NextEntry
= KeLoaderBlock
->MemoryDescriptorListHead
.Flink
;
645 NextEntry
!= &KeLoaderBlock
->MemoryDescriptorListHead
;
646 NextEntry
= NextEntry
->Flink
)
648 /* Get the descriptor */
649 Md
= CONTAINING_RECORD(NextEntry
,
650 MEMORY_ALLOCATION_DESCRIPTOR
,
653 /* Skip bad memory */
654 if ((Md
->MemoryType
== LoaderFirmwarePermanent
) ||
655 (Md
->MemoryType
== LoaderBBTMemory
) ||
656 (Md
->MemoryType
== LoaderSpecialMemory
) ||
657 (Md
->MemoryType
== LoaderBad
))
660 // We do not build PFN entries for this
664 else if ((Md
->MemoryType
== LoaderFree
) ||
665 (Md
->MemoryType
== LoaderLoadedProgram
) ||
666 (Md
->MemoryType
== LoaderFirmwareTemporary
) ||
667 (Md
->MemoryType
== LoaderOsloaderStack
))
669 /* Loop every page part of the block */
670 for (i
= 0; i
< Md
->PageCount
; i
++)
672 /* Mark it as a free page */
673 MmPfnDatabase
[0][Md
->BasePage
+ i
].u3
.e1
.PageLocation
= FreePageList
;
674 MiInsertInListTail(&MmFreePageListHead
,
675 &MmPfnDatabase
[0][Md
->BasePage
+ i
]);
681 /* Loop every page part of the block */
682 for (i
= 0; i
< Md
->PageCount
; i
++)
684 /* Everything else is used memory */
685 MmPfnDatabase
[0][Md
->BasePage
+ i
] = UsedPage
;
691 /* Finally handle the pages describing the PFN database themselves */
692 for (i
= MxOldFreeDescriptor
.BasePage
; i
< MxFreeDescriptor
->BasePage
; i
++)
694 /* Mark it as used kernel memory */
695 MmPfnDatabase
[0][i
] = UsedPage
;
699 KeInitializeEvent(&ZeroPageThreadEvent
, NotificationEvent
, TRUE
);
700 DPRINT("Pages: %x %x\n", MmAvailablePages
, NrSystemPages
);
701 MmInitializeBalancer(MmAvailablePages
, NrSystemPages
);
706 MmSetRmapListHeadPage(PFN_TYPE Pfn
, struct _MM_RMAP_ENTRY
* ListHead
)
710 oldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
711 MiGetPfnEntry(Pfn
)->RmapListHead
= (LONG
)ListHead
;
712 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
715 struct _MM_RMAP_ENTRY
*
717 MmGetRmapListHeadPage(PFN_TYPE Pfn
)
720 struct _MM_RMAP_ENTRY
* ListHead
;
722 oldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
723 ListHead
= (struct _MM_RMAP_ENTRY
*)MiGetPfnEntry(Pfn
)->RmapListHead
;
724 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
731 MmSetSavedSwapEntryPage(PFN_TYPE Pfn
, SWAPENTRY SwapEntry
)
735 oldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
736 MiGetPfnEntry(Pfn
)->u1
.WsIndex
= SwapEntry
;
737 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
742 MmGetSavedSwapEntryPage(PFN_TYPE Pfn
)
747 oldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
748 SwapEntry
= MiGetPfnEntry(Pfn
)->u1
.WsIndex
;
749 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
756 MmReferencePage(PFN_TYPE Pfn
)
760 DPRINT("MmReferencePage(PysicalAddress %x)\n", Pfn
<< PAGE_SHIFT
);
762 if (Pfn
== 0 || Pfn
> MmHighestPhysicalPage
)
767 Page
= MiGetPfnEntry(Pfn
);
770 Page
->u3
.e2
.ReferenceCount
++;
775 MmGetReferenceCountPage(PFN_TYPE Pfn
)
781 DPRINT("MmGetReferenceCountPage(PhysicalAddress %x)\n", Pfn
<< PAGE_SHIFT
);
783 oldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
784 Page
= MiGetPfnEntry(Pfn
);
787 RCount
= Page
->u3
.e2
.ReferenceCount
;
789 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
795 MmIsPageInUse(PFN_TYPE Pfn
)
797 return MiIsPfnInUse(MiGetPfnEntry(Pfn
));
802 MiSetConsumer(IN PFN_TYPE Pfn
,
805 MiGetPfnEntry(Pfn
)->u3
.e1
.PageLocation
= ActiveAndValid
;
810 MmDereferencePage(PFN_TYPE Pfn
)
814 DPRINT("MmDereferencePage(PhysicalAddress %x)\n", Pfn
<< PAGE_SHIFT
);
816 Page
= MiGetPfnEntry(Pfn
);
819 Page
->u3
.e2
.ReferenceCount
--;
820 if (Page
->u3
.e2
.ReferenceCount
== 0)
823 Page
->u3
.e1
.PageLocation
= FreePageList
;
824 MiInsertInListTail(&MmFreePageListHead
, Page
);
825 if (MmFreePageListHead
.Total
> 8 && 0 == KeReadStateEvent(&ZeroPageThreadEvent
))
827 KeSetEvent(&ZeroPageThreadEvent
, IO_NO_INCREMENT
, FALSE
);
834 MmAllocPage(ULONG Type
)
837 PPHYSICAL_PAGE PageDescriptor
;
838 BOOLEAN NeedClear
= FALSE
;
840 DPRINT("MmAllocPage()\n");
842 if (MmZeroedPageListHead
.Total
== 0)
844 if (MmFreePageListHead
.Total
== 0)
846 /* Check if this allocation is for the PFN DB itself */
847 if (MmNumberOfPhysicalPages
== 0)
852 DPRINT1("MmAllocPage(): Out of memory\n");
855 PageDescriptor
= MiRemoveHeadList(&MmFreePageListHead
);
861 PageDescriptor
= MiRemoveHeadList(&MmZeroedPageListHead
);
864 PageDescriptor
->u3
.e2
.ReferenceCount
= 1;
868 PfnOffset
= PageDescriptor
- MmPfnDatabase
[0];
869 if ((NeedClear
) && (Type
!= MC_SYSTEM
))
871 MiZeroPage(PfnOffset
);
874 PageDescriptor
->u3
.e1
.PageLocation
= ActiveAndValid
;
880 MiZeroPage(PFN_TYPE Page
)
885 Irql
= KeRaiseIrqlToDpcLevel();
886 TempAddress
= MiMapPageToZeroInHyperSpace(Page
);
887 if (TempAddress
== NULL
)
889 return(STATUS_NO_MEMORY
);
891 memset(TempAddress
, 0, PAGE_SIZE
);
892 MiUnmapPagesInZeroSpace(TempAddress
, 1);
894 return(STATUS_SUCCESS
);
899 MmZeroPageThreadMain(PVOID Ignored
)
903 PPHYSICAL_PAGE PageDescriptor
;
907 /* Free initial kernel memory */
908 //MiFreeInitMemory();
910 /* Set our priority to 0 */
911 KeGetCurrentThread()->BasePriority
= 0;
912 KeSetPriorityThread(KeGetCurrentThread(), 0);
916 Status
= KeWaitForSingleObject(&ZeroPageThreadEvent
,
922 if (ZeroPageThreadShouldTerminate
)
924 DPRINT1("ZeroPageThread: Terminating\n");
925 return STATUS_SUCCESS
;
928 oldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
929 while (MmFreePageListHead
.Total
)
931 PageDescriptor
= MiRemoveHeadList(&MmFreePageListHead
);
932 /* We set the page to used, because MmCreateVirtualMapping failed with unused pages */
933 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
934 Pfn
= PageDescriptor
- MmPfnDatabase
[0];
935 Status
= MiZeroPage(Pfn
);
937 oldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
938 if (NT_SUCCESS(Status
))
940 MiInsertZeroListAtBack(Pfn
);
945 MiInsertInListTail(&MmFreePageListHead
, PageDescriptor
);
946 PageDescriptor
->u3
.e1
.PageLocation
= FreePageList
;
950 DPRINT("Zeroed %d pages.\n", Count
);
951 KeResetEvent(&ZeroPageThreadEvent
);
952 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
955 return STATUS_SUCCESS
;