2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/mm/freelist.c
5 * PURPOSE: Handle the list of free physical pages
7 * PROGRAMMERS: David Welch (welch@cwcom.net)
11 /* INCLUDES ****************************************************************/
17 #if defined (ALLOC_PRAGMA)
18 #pragma alloc_text(INIT, MmInitializePageList)
21 #define MODULE_INVOLVED_IN_ARM3
22 #include "ARM3/miarm.h"
24 /* GLOBALS ****************************************************************/
28 // ReactOS to NT Physical Page Descriptor Entry Legacy Mapping Definitions
32 #define RmapListHead AweReferenceCount
33 #define PHYSICAL_PAGE MMPFN
34 #define PPHYSICAL_PAGE PMMPFN
36 /* The first array contains ReactOS PFNs, the second contains ARM3 PFNs */
37 PPHYSICAL_PAGE MmPfnDatabase
[2];
39 ULONG MmAvailablePages
;
40 ULONG MmResidentAvailablePages
;
42 SIZE_T MmTotalCommitLimit
;
43 SIZE_T MmTotalCommittedPages
;
44 SIZE_T MmSharedCommit
;
45 SIZE_T MmDriverCommit
;
46 SIZE_T MmProcessCommit
;
47 SIZE_T MmPagedPoolCommit
;
48 SIZE_T MmPeakCommitment
;
49 SIZE_T MmtotalCommitLimitMaximum
;
51 static KEVENT ZeroPageThreadEvent
;
52 static BOOLEAN ZeroPageThreadShouldTerminate
= FALSE
;
53 static RTL_BITMAP MiUserPfnBitMap
;
55 /* FUNCTIONS *************************************************************/
59 MiInitializeUserPfnBitmap(VOID
)
63 /* Allocate enough buffer for the PFN bitmap and align it on 32-bits */
64 Bitmap
= ExAllocatePoolWithTag(NonPagedPool
,
65 (((MmHighestPhysicalPage
+ 1) + 31) / 32) * 4,
69 /* Initialize it and clear all the bits to begin with */
70 RtlInitializeBitMap(&MiUserPfnBitMap
,
72 MmHighestPhysicalPage
+ 1);
73 RtlClearAllBits(&MiUserPfnBitMap
);
78 MmGetLRUFirstUserPage(VOID
)
83 /* Find the first user page */
84 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
85 Position
= RtlFindSetBits(&MiUserPfnBitMap
, 1, 0);
86 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
87 if (Position
== 0xFFFFFFFF) return 0;
95 MmInsertLRULastUserPage(PFN_TYPE Pfn
)
99 /* Set the page as a user page */
100 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
101 RtlSetBit(&MiUserPfnBitMap
, Pfn
);
102 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
107 MmGetLRUNextUserPage(PFN_TYPE PreviousPfn
)
112 /* Find the next user page */
113 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
114 Position
= RtlFindSetBits(&MiUserPfnBitMap
, 1, PreviousPfn
+ 1);
115 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
116 if (Position
== 0xFFFFFFFF) return 0;
124 MmRemoveLRUUserPage(PFN_TYPE Page
)
126 /* Unset the page as a user page */
127 RtlClearBit(&MiUserPfnBitMap
, Page
);
132 MiIsPfnInUse(IN PMMPFN Pfn1
)
134 return ((Pfn1
->u3
.e1
.PageLocation
!= FreePageList
) &&
135 (Pfn1
->u3
.e1
.PageLocation
!= ZeroedPageList
));
140 MiFindContiguousPages(IN PFN_NUMBER LowestPfn
,
141 IN PFN_NUMBER HighestPfn
,
142 IN PFN_NUMBER BoundaryPfn
,
143 IN PFN_NUMBER SizeInPages
,
144 IN MEMORY_CACHING_TYPE CacheType
)
146 PFN_NUMBER Page
, PageCount
, LastPage
, Length
, BoundaryMask
;
151 ASSERT(SizeInPages
!= 0);
154 // Convert the boundary PFN into an alignment mask
156 BoundaryMask
= ~(BoundaryPfn
- 1);
159 // Loop all the physical memory blocks
164 // Capture the base page and length of this memory block
166 Page
= MmPhysicalMemoryBlock
->Run
[i
].BasePage
;
167 PageCount
= MmPhysicalMemoryBlock
->Run
[i
].PageCount
;
170 // Check how far this memory block will go
172 LastPage
= Page
+ PageCount
;
175 // Trim it down to only the PFNs we're actually interested in
177 if ((LastPage
- 1) > HighestPfn
) LastPage
= HighestPfn
+ 1;
178 if (Page
< LowestPfn
) Page
= LowestPfn
;
181 // Skip this run if it's empty or fails to contain all the pages we need
183 if (!(PageCount
) || ((Page
+ SizeInPages
) > LastPage
)) continue;
186 // Now scan all the relevant PFNs in this run
189 for (Pfn1
= MiGetPfnEntry(Page
); Page
< LastPage
; Page
++, Pfn1
++)
192 // If this PFN is in use, ignore it
194 if (MiIsPfnInUse(Pfn1
)) continue;
197 // If we haven't chosen a start PFN yet and the caller specified an
198 // alignment, make sure the page matches the alignment restriction
200 if ((!(Length
) && (BoundaryPfn
)) &&
201 (((Page
^ (Page
+ SizeInPages
- 1)) & BoundaryMask
)))
204 // It does not, so bail out
210 // Increase the number of valid pages, and check if we have enough
212 if (++Length
== SizeInPages
)
215 // It appears we've amassed enough legitimate pages, rollback
217 Pfn1
-= (Length
- 1);
218 Page
-= (Length
- 1);
221 // Acquire the PFN lock
223 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
227 // Things might've changed for us. Is the page still free?
229 if (MiIsPfnInUse(Pfn1
)) break;
232 // So far so good. Is this the last confirmed valid page?
237 // Sanity check that we didn't go out of bounds
239 ASSERT(i
!= MmPhysicalMemoryBlock
->NumberOfRuns
);
242 // Loop until all PFN entries have been processed
244 EndPfn
= Pfn1
- SizeInPages
+ 1;
248 // This PFN is now a used page, set it up
250 MiUnlinkFreeOrZeroedPage(Pfn1
);
251 Pfn1
->u3
.e2
.ReferenceCount
= 1;
254 // Check if it was already zeroed
256 if (Pfn1
->u3
.e1
.PageLocation
!= ZeroedPageList
)
259 // It wasn't, so zero it
261 MiZeroPage(MiGetPfnEntryIndex(Pfn1
));
267 Pfn1
->u3
.e1
.PageLocation
= ActiveAndValid
;
270 // Check if this is the last PFN, otherwise go on
272 if (Pfn1
== EndPfn
) break;
277 // Mark the first and last PFN so we can find them later
279 Pfn1
->u3
.e1
.StartOfAllocation
= 1;
280 (Pfn1
+ SizeInPages
- 1)->u3
.e1
.EndOfAllocation
= 1;
283 // Now it's safe to let go of the PFN lock
285 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
288 // Quick sanity check that the last PFN is consistent
290 EndPfn
= Pfn1
+ SizeInPages
;
291 ASSERT(EndPfn
== MiGetPfnEntry(Page
+ 1));
294 // Compute the first page, and make sure it's consistent
296 Page
-= SizeInPages
- 1;
297 ASSERT(Pfn1
== MiGetPfnEntry(Page
));
303 // Keep going. The purpose of this loop is to reconfirm that
304 // after acquiring the PFN lock these pages are still usable
311 // If we got here, something changed while we hadn't acquired
312 // the PFN lock yet, so we'll have to restart
314 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
318 } while (++i
!= MmPhysicalMemoryBlock
->NumberOfRuns
);
321 // And if we get here, it means no suitable physical memory runs were found
328 MiAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress
,
329 IN PHYSICAL_ADDRESS HighAddress
,
330 IN PHYSICAL_ADDRESS SkipBytes
,
331 IN SIZE_T TotalBytes
,
332 IN MI_PFN_CACHE_ATTRIBUTE CacheAttribute
,
336 PFN_NUMBER PageCount
, LowPage
, HighPage
, SkipPages
, PagesFound
= 0, Page
;
337 PPFN_NUMBER MdlPage
, LastMdlPage
;
340 INT LookForZeroedPages
;
341 ASSERT (KeGetCurrentIrql() <= APC_LEVEL
);
344 // Convert the low address into a PFN
346 LowPage
= (PFN_NUMBER
)(LowAddress
.QuadPart
>> PAGE_SHIFT
);
349 // Convert, and normalize, the high address into a PFN
351 HighPage
= (PFN_NUMBER
)(HighAddress
.QuadPart
>> PAGE_SHIFT
);
352 if (HighPage
> MmHighestPhysicalPage
) HighPage
= MmHighestPhysicalPage
;
355 // Validate skipbytes and convert them into pages
357 if (BYTE_OFFSET(SkipBytes
.LowPart
)) return NULL
;
358 SkipPages
= (PFN_NUMBER
)(SkipBytes
.QuadPart
>> PAGE_SHIFT
);
361 // Now compute the number of pages the MDL will cover
363 PageCount
= (PFN_NUMBER
)ADDRESS_AND_SIZE_TO_SPAN_PAGES(0, TotalBytes
);
367 // Try creating an MDL for these many pages
369 Mdl
= MmCreateMdl(NULL
, NULL
, PageCount
<< PAGE_SHIFT
);
373 // This function is not required to return the amount of pages requested
374 // In fact, it can return as little as 1 page, and callers are supposed
375 // to deal with this scenario. So re-attempt the allocation with less
376 // pages than before, and see if it worked this time.
378 PageCount
-= (PageCount
>> 4);
382 // Wow, not even a single page was around!
384 if (!Mdl
) return NULL
;
387 // This is where the page array starts....
389 MdlPage
= (PPFN_NUMBER
)(Mdl
+ 1);
392 // Lock the PFN database
394 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
397 // Are we looking for any pages, without discriminating?
399 if ((LowPage
== 0) && (HighPage
== MmHighestPhysicalPage
))
402 // Well then, let's go shopping
404 while (PagesFound
< PageCount
)
407 // Do we have zeroed pages?
409 if (MmZeroedPageListHead
.Total
)
414 Pfn1
= MiRemoveHeadList(&MmZeroedPageListHead
);
416 else if (MmFreePageListHead
.Total
)
419 // Nope, grab an unzeroed page
421 Pfn1
= MiRemoveHeadList(&MmFreePageListHead
);
426 // This is not good... hopefully we have at least SOME pages
433 // Make sure it's really free
435 ASSERT(MiIsPfnInUse(Pfn1
) == FALSE
);
436 ASSERT(Pfn1
->u3
.e2
.ReferenceCount
== 0);
439 // Allocate it and mark it
441 Pfn1
->u3
.e1
.StartOfAllocation
= 1;
442 Pfn1
->u3
.e1
.EndOfAllocation
= 1;
443 Pfn1
->u3
.e2
.ReferenceCount
= 1;
446 // Decrease available pages
451 // Save it into the MDL
453 *MdlPage
++ = MiGetPfnEntryIndex(Pfn1
);
460 // You want specific range of pages. We'll do this in two runs
462 for (LookForZeroedPages
= 1; LookForZeroedPages
>= 0; LookForZeroedPages
--)
465 // Scan the range you specified
467 for (Page
= LowPage
; Page
< HighPage
; Page
++)
470 // Get the PFN entry for this page
472 Pfn1
= MiGetPfnEntry(Page
);
476 // Make sure it's free and if this is our first pass, zeroed
478 if (MiIsPfnInUse(Pfn1
)) continue;
479 if ((Pfn1
->u3
.e1
.PageLocation
== ZeroedPageList
) != LookForZeroedPages
) continue;
484 ASSERT(Pfn1
->u3
.e2
.ReferenceCount
== 0);
487 // Now setup the page and mark it
489 Pfn1
->u3
.e2
.ReferenceCount
= 1;
490 Pfn1
->u3
.e1
.StartOfAllocation
= 1;
491 Pfn1
->u3
.e1
.EndOfAllocation
= 1;
494 // Decrease available pages
499 // Save this page into the MDL
502 if (++PagesFound
== PageCount
) break;
506 // If the first pass was enough, don't keep going, otherwise, go again
508 if (PagesFound
== PageCount
) break;
513 // Now release the PFN count
515 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
518 // We might've found less pages, but not more ;-)
520 if (PagesFound
!= PageCount
) ASSERT(PagesFound
< PageCount
);
524 // If we didn' tfind any pages at all, fail
526 DPRINT1("NO MDL PAGES!\n");
532 // Write out how many pages we found
534 Mdl
->ByteCount
= (ULONG
)(PagesFound
<< PAGE_SHIFT
);
537 // Terminate the MDL array if there's certain missing pages
539 if (PagesFound
!= PageCount
) *MdlPage
= -1;
542 // Now go back and loop over all the MDL pages
544 MdlPage
= (PPFN_NUMBER
)(Mdl
+ 1);
545 LastMdlPage
= MdlPage
+ PagesFound
;
546 while (MdlPage
< LastMdlPage
)
549 // Check if we've reached the end
552 if (Page
== (PFN_NUMBER
)-1) break;
555 // Get the PFN entry for the page and check if we should zero it out
557 Pfn1
= MiGetPfnEntry(Page
);
559 if (Pfn1
->u3
.e1
.PageLocation
!= ZeroedPageList
) MiZeroPage(Page
);
560 Pfn1
->u3
.e1
.PageLocation
= ActiveAndValid
;
564 // We're done, mark the pages as locked (should we lock them, though???)
567 Mdl
->MdlFlags
|= MDL_PAGES_LOCKED
;
/*
 * MmDumpPfnDatabase: debug-only dump of the PFN database at HIGH_LEVEL.
 * Walks every PFN up to MmHighestPhysicalPage, pretty-prints each entry,
 * and then prints per-consumer totals (nonpaged pool, paged pool, cache,
 * user, system) plus the free-page count.
 *
 * NOTE(review): this listing is a fragment — the declarations of i, Pfn1
 * and OldIrql, the braces, and the state/consumer classification code that
 * fills State/Type/Totals/FreePages between the MiIsPfnInUse() check and
 * the DbgPrint are not visible here; confirm against the full source.
 */
573 MmDumpPfnDatabase(VOID
)
577 PCHAR State
= "????", Type
= "Unknown";
579 ULONG Totals
[5] = {0}, FreePages
= 0;
/* Raise to HIGH_LEVEL so the dump is not interrupted */
581 KeRaiseIrql(HIGH_LEVEL
, &OldIrql
);
584 // Loop the PFN database
586 for (i
= 0; i
<= MmHighestPhysicalPage
; i
++)
588 Pfn1
= MiGetPfnEntry(i
);
/* Used pages are classified by consumer; free pages are tallied
 * separately (classification lines missing from this listing). */
594 if (MiIsPfnInUse(Pfn1
))
607 // Pretty-print the page
609 DbgPrint("0x%08p:\t%04s\t%20s\t(%02d) [%08p])\n",
613 Pfn1
->u3
.e2
.ReferenceCount
,
/* Per-consumer summary, in KB (pages << PAGE_SHIFT gives bytes) */
617 DbgPrint("Nonpaged Pool: %d pages\t[%d KB]\n", Totals
[MC_NPPOOL
], (Totals
[MC_NPPOOL
] << PAGE_SHIFT
) / 1024);
618 DbgPrint("Paged Pool: %d pages\t[%d KB]\n", Totals
[MC_PPOOL
], (Totals
[MC_PPOOL
] << PAGE_SHIFT
) / 1024);
619 DbgPrint("File System Cache: %d pages\t[%d KB]\n", Totals
[MC_CACHE
], (Totals
[MC_CACHE
] << PAGE_SHIFT
) / 1024);
620 DbgPrint("Process Working Set: %d pages\t[%d KB]\n", Totals
[MC_USER
], (Totals
[MC_USER
] << PAGE_SHIFT
) / 1024);
621 DbgPrint("System: %d pages\t[%d KB]\n", Totals
[MC_SYSTEM
], (Totals
[MC_SYSTEM
] << PAGE_SHIFT
) / 1024);
622 DbgPrint("Free: %d pages\t[%d KB]\n", FreePages
, (FreePages
<< PAGE_SHIFT
) / 1024);
/* Restore the caller's IRQL */
624 KeLowerIrql(OldIrql
);
629 MmInitializePageList(VOID
)
632 PHYSICAL_PAGE UsedPage
;
633 PMEMORY_ALLOCATION_DESCRIPTOR Md
;
634 PLIST_ENTRY NextEntry
;
635 ULONG NrSystemPages
= 0;
637 /* This is what a used page looks like */
638 RtlZeroMemory(&UsedPage
, sizeof(UsedPage
));
639 UsedPage
.u3
.e1
.PageLocation
= ActiveAndValid
;
640 UsedPage
.u3
.e2
.ReferenceCount
= 1;
642 /* Loop the memory descriptors */
643 for (NextEntry
= KeLoaderBlock
->MemoryDescriptorListHead
.Flink
;
644 NextEntry
!= &KeLoaderBlock
->MemoryDescriptorListHead
;
645 NextEntry
= NextEntry
->Flink
)
647 /* Get the descriptor */
648 Md
= CONTAINING_RECORD(NextEntry
,
649 MEMORY_ALLOCATION_DESCRIPTOR
,
652 /* Skip bad memory */
653 if ((Md
->MemoryType
== LoaderFirmwarePermanent
) ||
654 (Md
->MemoryType
== LoaderBBTMemory
) ||
655 (Md
->MemoryType
== LoaderSpecialMemory
) ||
656 (Md
->MemoryType
== LoaderBad
))
659 // We do not build PFN entries for this
663 else if ((Md
->MemoryType
== LoaderFree
) ||
664 (Md
->MemoryType
== LoaderLoadedProgram
) ||
665 (Md
->MemoryType
== LoaderFirmwareTemporary
) ||
666 (Md
->MemoryType
== LoaderOsloaderStack
))
668 /* Loop every page part of the block */
669 for (i
= 0; i
< Md
->PageCount
; i
++)
671 /* Mark it as a free page */
672 MmPfnDatabase
[0][Md
->BasePage
+ i
].u3
.e1
.PageLocation
= FreePageList
;
673 MiInsertInListTail(&MmFreePageListHead
,
674 &MmPfnDatabase
[0][Md
->BasePage
+ i
]);
680 /* Loop every page part of the block */
681 for (i
= 0; i
< Md
->PageCount
; i
++)
683 /* Everything else is used memory */
684 MmPfnDatabase
[0][Md
->BasePage
+ i
] = UsedPage
;
690 /* Finally handle the pages describing the PFN database themselves */
691 for (i
= MxOldFreeDescriptor
.BasePage
; i
< MxFreeDescriptor
->BasePage
; i
++)
693 /* Mark it as used kernel memory */
694 MmPfnDatabase
[0][i
] = UsedPage
;
698 KeInitializeEvent(&ZeroPageThreadEvent
, NotificationEvent
, TRUE
);
699 DPRINT("Pages: %x %x\n", MmAvailablePages
, NrSystemPages
);
700 MmInitializeBalancer(MmAvailablePages
, NrSystemPages
);
705 MmSetRmapListHeadPage(PFN_TYPE Pfn
, struct _MM_RMAP_ENTRY
* ListHead
)
709 oldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
710 MiGetPfnEntry(Pfn
)->RmapListHead
= (LONG
)ListHead
;
711 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
714 struct _MM_RMAP_ENTRY
*
716 MmGetRmapListHeadPage(PFN_TYPE Pfn
)
719 struct _MM_RMAP_ENTRY
* ListHead
;
721 oldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
722 ListHead
= (struct _MM_RMAP_ENTRY
*)MiGetPfnEntry(Pfn
)->RmapListHead
;
723 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
730 MmSetSavedSwapEntryPage(PFN_TYPE Pfn
, SWAPENTRY SwapEntry
)
734 oldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
735 MiGetPfnEntry(Pfn
)->u1
.WsIndex
= SwapEntry
;
736 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
741 MmGetSavedSwapEntryPage(PFN_TYPE Pfn
)
746 oldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
747 SwapEntry
= MiGetPfnEntry(Pfn
)->u1
.WsIndex
;
748 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
755 MmReferencePage(PFN_TYPE Pfn
)
759 DPRINT("MmReferencePage(PysicalAddress %x)\n", Pfn
<< PAGE_SHIFT
);
761 if (Pfn
== 0 || Pfn
> MmHighestPhysicalPage
)
766 Page
= MiGetPfnEntry(Pfn
);
769 Page
->u3
.e2
.ReferenceCount
++;
774 MmGetReferenceCountPage(PFN_TYPE Pfn
)
780 DPRINT("MmGetReferenceCountPage(PhysicalAddress %x)\n", Pfn
<< PAGE_SHIFT
);
782 oldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
783 Page
= MiGetPfnEntry(Pfn
);
786 RCount
= Page
->u3
.e2
.ReferenceCount
;
788 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
794 MmIsPageInUse(PFN_TYPE Pfn
)
796 return MiIsPfnInUse(MiGetPfnEntry(Pfn
));
801 MiSetConsumer(IN PFN_TYPE Pfn
,
804 MiGetPfnEntry(Pfn
)->u3
.e1
.PageLocation
= ActiveAndValid
;
809 MmDereferencePage(PFN_TYPE Pfn
)
813 DPRINT("MmDereferencePage(PhysicalAddress %x)\n", Pfn
<< PAGE_SHIFT
);
815 Page
= MiGetPfnEntry(Pfn
);
818 Page
->u3
.e2
.ReferenceCount
--;
819 if (Page
->u3
.e2
.ReferenceCount
== 0)
822 Page
->u3
.e1
.PageLocation
= FreePageList
;
823 MiInsertInListTail(&MmFreePageListHead
, Page
);
824 if (MmFreePageListHead
.Total
> 8 && 0 == KeReadStateEvent(&ZeroPageThreadEvent
))
826 KeSetEvent(&ZeroPageThreadEvent
, IO_NO_INCREMENT
, FALSE
);
833 MmAllocPage(ULONG Type
)
836 PPHYSICAL_PAGE PageDescriptor
;
837 BOOLEAN NeedClear
= FALSE
;
839 DPRINT("MmAllocPage()\n");
841 if (MmZeroedPageListHead
.Total
== 0)
843 if (MmFreePageListHead
.Total
== 0)
845 /* Check if this allocation is for the PFN DB itself */
846 if (MmNumberOfPhysicalPages
== 0)
851 DPRINT1("MmAllocPage(): Out of memory\n");
854 PageDescriptor
= MiRemoveHeadList(&MmFreePageListHead
);
860 PageDescriptor
= MiRemoveHeadList(&MmZeroedPageListHead
);
863 PageDescriptor
->u3
.e2
.ReferenceCount
= 1;
867 PfnOffset
= PageDescriptor
- MmPfnDatabase
[0];
868 if ((NeedClear
) && (Type
!= MC_SYSTEM
))
870 MiZeroPage(PfnOffset
);
873 PageDescriptor
->u3
.e1
.PageLocation
= ActiveAndValid
;
879 MiZeroPage(PFN_TYPE Page
)
884 Irql
= KeRaiseIrqlToDpcLevel();
885 TempAddress
= MiMapPageToZeroInHyperSpace(Page
);
886 if (TempAddress
== NULL
)
888 return(STATUS_NO_MEMORY
);
890 memset(TempAddress
, 0, PAGE_SIZE
);
891 MiUnmapPagesInZeroSpace(TempAddress
, 1);
893 return(STATUS_SUCCESS
);
898 MmZeroPageThreadMain(PVOID Ignored
)
902 PPHYSICAL_PAGE PageDescriptor
;
906 /* Free initial kernel memory */
907 //MiFreeInitMemory();
909 /* Set our priority to 0 */
910 KeGetCurrentThread()->BasePriority
= 0;
911 KeSetPriorityThread(KeGetCurrentThread(), 0);
915 Status
= KeWaitForSingleObject(&ZeroPageThreadEvent
,
921 if (ZeroPageThreadShouldTerminate
)
923 DPRINT1("ZeroPageThread: Terminating\n");
924 return STATUS_SUCCESS
;
927 oldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
928 while (MmFreePageListHead
.Total
)
930 PageDescriptor
= MiRemoveHeadList(&MmFreePageListHead
);
931 /* We set the page to used, because MmCreateVirtualMapping failed with unused pages */
932 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
933 Pfn
= PageDescriptor
- MmPfnDatabase
[0];
934 Status
= MiZeroPage(Pfn
);
936 oldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
937 if (NT_SUCCESS(Status
))
939 MiInsertZeroListAtBack(Pfn
);
944 MiInsertInListTail(&MmFreePageListHead
, PageDescriptor
);
945 PageDescriptor
->u3
.e1
.PageLocation
= FreePageList
;
949 DPRINT("Zeroed %d pages.\n", Count
);
950 KeResetEvent(&ZeroPageThreadEvent
);
951 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
954 return STATUS_SUCCESS
;