2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/mm/freelist.c
5 * PURPOSE: Handle the list of free physical pages
7 * PROGRAMMERS: David Welch (welch@cwcom.net)
11 /* INCLUDES ****************************************************************/
17 #if defined (ALLOC_PRAGMA)
18 #pragma alloc_text(INIT, MmInitializePageList)
21 #define MODULE_INVOLVED_IN_ARM3
22 #include "ARM3/miarm.h"
24 /* GLOBALS ****************************************************************/
// NOTE(review): This chunk is a fragmentary extraction -- original file line
// numbers are fused into the text and several source lines are missing
// between the numbered fragments. All code tokens below are preserved
// byte-for-byte; only comments have been added.
28 // ReactOS to NT Physical Page Descriptor Entry Legacy Mapping Definitions
// Legacy ReactOS field name mapped onto the NT MMPFN layout: the old
// RmapListHead slot reuses the AweReferenceCount member.
32 #define RmapListHead AweReferenceCount
33 #define PHYSICAL_PAGE MMPFN
34 #define PPHYSICAL_PAGE PMMPFN
36 /* The first array contains ReactOS PFNs, the second contains ARM3 PFNs */
37 PPHYSICAL_PAGE MmPfnDatabase
[2];
// Page-availability counters (read at init and by the balancer below).
39 PFN_NUMBER MmAvailablePages
;
40 PFN_NUMBER MmResidentAvailablePages
;
41 PFN_NUMBER MmResidentAvailableAtInit
;
// Commit-charge accounting counters.
43 SIZE_T MmTotalCommitLimit
;
44 SIZE_T MmTotalCommittedPages
;
45 SIZE_T MmSharedCommit
;
46 SIZE_T MmDriverCommit
;
47 SIZE_T MmProcessCommit
;
48 SIZE_T MmPagedPoolCommit
;
49 SIZE_T MmPeakCommitment
;
// NOTE(review): lowercase 't' ("Mmtotal...") looks like a misspelling of
// MmTotalCommitLimitMaximum; renaming would change external linkage, so it
// is only flagged here -- fix together with all callers.
50 SIZE_T MmtotalCommitLimitMaximum
;
// Zero-page-thread synchronization state and the user-PFN tracking bitmap
// (one bit per physical page frame; see MiInitializeUserPfnBitmap).
52 static KEVENT ZeroPageThreadEvent
;
53 static BOOLEAN ZeroPageThreadShouldTerminate
= FALSE
;
54 static RTL_BITMAP MiUserPfnBitMap
;
56 /* FUNCTIONS *************************************************************/
// Builds MiUserPfnBitMap: one bit per physical page frame up to
// MmHighestPhysicalPage, used to track which PFNs hold user-mode pages.
// NOTE(review): fragmentary extraction -- the pool-tag argument, the bitmap
// buffer declaration/argument, any allocation-failure check, and the
// function braces are among the missing lines; confirm against full source.
60 MiInitializeUserPfnBitmap(VOID
)
64 /* Allocate enough buffer for the PFN bitmap and align it on 32-bits */
// Size: (PageCount + 31) / 32 ULONGs, i.e. rounded up to whole 32-bit words.
65 Bitmap
= ExAllocatePoolWithTag(NonPagedPool
,
66 (((MmHighestPhysicalPage
+ 1) + 31) / 32) * 4,
70 /* Initialize it and clear all the bits to begin with */
71 RtlInitializeBitMap(&MiUserPfnBitMap
,
73 MmHighestPhysicalPage
+ 1);
74 RtlClearAllBits(&MiUserPfnBitMap
);
// Returns the first PFN marked as a user page in MiUserPfnBitMap, scanning
// from bit 0 while holding the PFN queued spinlock; 0 means "none found".
// NOTE(review): the success-path return statement is among the lines
// missing from this extraction -- presumably "return Position;", confirm.
79 MmGetLRUFirstUserPage(VOID
)
84 /* Find the first user page */
85 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
86 Position
= RtlFindSetBits(&MiUserPfnBitMap
, 1, 0);
87 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
// RtlFindSetBits returns 0xFFFFFFFF when no run of set bits exists.
88 if (Position
== 0xFFFFFFFF) return 0;
// Marks the given PFN as a user page by setting its bit in MiUserPfnBitMap,
// under the PFN database queued spinlock.
96 MmInsertLRULastUserPage(PFN_TYPE Pfn
)
100 /* Set the page as a user page */
101 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
102 RtlSetBit(&MiUserPfnBitMap
, Pfn
);
103 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
// Returns the next user-page PFN strictly after PreviousPfn by scanning
// MiUserPfnBitMap under the PFN lock; 0 means "no further user page".
// NOTE(review): the success-path return statement is among the lines
// missing from this extraction -- presumably "return Position;", confirm.
108 MmGetLRUNextUserPage(PFN_TYPE PreviousPfn
)
113 /* Find the next user page */
114 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
115 Position
= RtlFindSetBits(&MiUserPfnBitMap
, 1, PreviousPfn
+ 1);
116 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
// 0xFFFFFFFF = no set bit found past the starting index.
117 if (Position
== 0xFFFFFFFF) return 0;
// Clears the PFN's bit in MiUserPfnBitMap, un-marking it as a user page.
// NOTE(review): unlike the set/find routines above, no PFN-lock acquisition
// is visible in this fragment -- confirm whether the caller must hold the
// PFN lock, or whether lock lines were lost in extraction.
125 MmRemoveLRUUserPage(PFN_TYPE Page
)
127 /* Unset the page as a user page */
128 RtlClearBit(&MiUserPfnBitMap
, Page
);
// Predicate: the PFN entry looks free -- PageLocation at or below
// StandbyPageList AND a zero reference count.
// NOTE(review): the original comment mentions "linked", but any list-linkage
// checks are among the lines missing from this extraction; also note that
// "<= StandbyPageList" admits standby pages, not only free/zeroed -- confirm
// the intended set of page locations against the full source.
133 MiIsPfnFree(IN PMMPFN Pfn1
)
135 /* Must be a free or zero page, with no references, linked */
136 return ((Pfn1
->u3
.e1
.PageLocation
<= StandbyPageList
) &&
139 !(Pfn1
->u3
.e2
.ReferenceCount
));
// Predicate: a PFN is "in use" exactly when it is not free per MiIsPfnFree().
144 MiIsPfnInUse(IN PMMPFN Pfn1
)
146 /* Standby list or higher, unlinked, and with references */
147 return !MiIsPfnFree(Pfn1
);
// Searches the physical memory runs (MmPhysicalMemoryBlock) for a
// contiguous range of SizeInPages free pages within [LowestPfn, HighestPfn],
// optionally aligned so the range does not cross a BoundaryPfn boundary.
// Candidate pages are found without the PFN lock, then re-confirmed under
// the lock before being claimed (unlinked, referenced, zeroed if needed,
// and marked Start/EndOfAllocation).
// NOTE(review): fragmentary extraction -- braces, the do/for loop headers,
// several declarations (Pfn1, EndPfn, OldIrql, i), the CacheType usage, and
// the function's return statements are among the missing lines. Comments
// describe only the visible code.
152 MiFindContiguousPages(IN PFN_NUMBER LowestPfn
,
153 IN PFN_NUMBER HighestPfn
,
154 IN PFN_NUMBER BoundaryPfn
,
155 IN PFN_NUMBER SizeInPages
,
156 IN MEMORY_CACHING_TYPE CacheType
)
158 PFN_NUMBER Page
, PageCount
, LastPage
, Length
, BoundaryMask
;
163 ASSERT(SizeInPages
!= 0);
166 // Convert the boundary PFN into an alignment mask
168 BoundaryMask
= ~(BoundaryPfn
- 1);
171 // Loop all the physical memory blocks
176 // Capture the base page and length of this memory block
178 Page
= MmPhysicalMemoryBlock
->Run
[i
].BasePage
;
179 PageCount
= MmPhysicalMemoryBlock
->Run
[i
].PageCount
;
182 // Check how far this memory block will go
184 LastPage
= Page
+ PageCount
;
187 // Trim it down to only the PFNs we're actually interested in
189 if ((LastPage
- 1) > HighestPfn
) LastPage
= HighestPfn
+ 1;
190 if (Page
< LowestPfn
) Page
= LowestPfn
;
193 // Skip this run if it's empty or fails to contain all the pages we need
195 if (!(PageCount
) || ((Page
+ SizeInPages
) > LastPage
)) continue;
198 // Now scan all the relevant PFNs in this run
201 for (Pfn1
= MiGetPfnEntry(Page
); Page
< LastPage
; Page
++, Pfn1
++)
204 // If this PFN is in use, ignore it
206 if (MiIsPfnInUse(Pfn1
)) continue;
209 // If we haven't chosen a start PFN yet and the caller specified an
210 // alignment, make sure the page matches the alignment restriction
// XOR of first and last candidate PFN masked by the boundary mask is
// nonzero when the run would straddle a boundary.
212 if ((!(Length
) && (BoundaryPfn
)) &&
213 (((Page
^ (Page
+ SizeInPages
- 1)) & BoundaryMask
)))
216 // It does not, so bail out
222 // Increase the number of valid pages, and check if we have enough
224 if (++Length
== SizeInPages
)
227 // It appears we've amassed enough legitimate pages, rollback
// Rewind Pfn1/Page to the first page of the candidate range.
229 Pfn1
-= (Length
- 1);
230 Page
-= (Length
- 1);
233 // Acquire the PFN lock
235 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
239 // Things might've changed for us. Is the page still free?
241 if (MiIsPfnInUse(Pfn1
)) break;
244 // So far so good. Is this the last confirmed valid page?
249 // Sanity check that we didn't go out of bounds
251 ASSERT(i
!= MmPhysicalMemoryBlock
->NumberOfRuns
);
254 // Loop until all PFN entries have been processed
256 EndPfn
= Pfn1
- SizeInPages
+ 1;
260 // This PFN is now a used page, set it up
262 MiUnlinkFreeOrZeroedPage(Pfn1
);
263 Pfn1
->u3
.e2
.ReferenceCount
= 1;
266 // Check if it was already zeroed
268 if (Pfn1
->u3
.e1
.PageLocation
!= ZeroedPageList
)
271 // It wasn't, so zero it
273 MiZeroPage(MiGetPfnEntryIndex(Pfn1
));
279 Pfn1
->u3
.e1
.PageLocation
= ActiveAndValid
;
282 // Check if this is the last PFN, otherwise go on
284 if (Pfn1
== EndPfn
) break;
289 // Mark the first and last PFN so we can find them later
291 Pfn1
->u3
.e1
.StartOfAllocation
= 1;
292 (Pfn1
+ SizeInPages
- 1)->u3
.e1
.EndOfAllocation
= 1;
295 // Now it's safe to let go of the PFN lock
297 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
300 // Quick sanity check that the last PFN is consistent
302 EndPfn
= Pfn1
+ SizeInPages
;
303 ASSERT(EndPfn
== MiGetPfnEntry(Page
+ 1));
306 // Compute the first page, and make sure it's consistent
308 Page
-= SizeInPages
- 1;
309 ASSERT(Pfn1
== MiGetPfnEntry(Page
));
315 // Keep going. The purpose of this loop is to reconfirm that
316 // after acquiring the PFN lock these pages are still usable
323 // If we got here, something changed while we hadn't acquired
324 // the PFN lock yet, so we'll have to restart
326 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
330 } while (++i
!= MmPhysicalMemoryBlock
->NumberOfRuns
);
333 // And if we get here, it means no suitable physical memory runs were found
// Allocates up to PageCount physical pages in [LowAddress, HighAddress]
// (PageCount derived from TotalBytes) and returns them described by an MDL.
// May return fewer pages than requested; callers must handle a short MDL.
// Two strategies: an unconstrained fast path pulling from the zeroed/free
// list heads, and a range-constrained two-pass scan (zeroed pages first,
// then free pages). Pages handed out are zeroed before return.
// NOTE(review): fragmentary extraction -- braces, several declarations
// (Mdl, Pfn1, OldIrql, Page updates in the final loop), the retry loop
// around MmCreateMdl, the SkipPages usage, the CacheAttribute usage, the
// available-page decrements (see "Decrease available pages" comments), and
// the return statements are among the missing lines.
340 MiAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress
,
341 IN PHYSICAL_ADDRESS HighAddress
,
342 IN PHYSICAL_ADDRESS SkipBytes
,
343 IN SIZE_T TotalBytes
,
344 IN MI_PFN_CACHE_ATTRIBUTE CacheAttribute
,
348 PFN_NUMBER PageCount
, LowPage
, HighPage
, SkipPages
, PagesFound
= 0, Page
;
349 PPFN_NUMBER MdlPage
, LastMdlPage
;
352 INT LookForZeroedPages
;
353 ASSERT (KeGetCurrentIrql() <= APC_LEVEL
);
356 // Convert the low address into a PFN
358 LowPage
= (PFN_NUMBER
)(LowAddress
.QuadPart
>> PAGE_SHIFT
);
361 // Convert, and normalize, the high address into a PFN
363 HighPage
= (PFN_NUMBER
)(HighAddress
.QuadPart
>> PAGE_SHIFT
);
364 if (HighPage
> MmHighestPhysicalPage
) HighPage
= MmHighestPhysicalPage
;
367 // Validate skipbytes and convert them into pages
// SkipBytes must be page-aligned; otherwise the request is rejected.
369 if (BYTE_OFFSET(SkipBytes
.LowPart
)) return NULL
;
370 SkipPages
= (PFN_NUMBER
)(SkipBytes
.QuadPart
>> PAGE_SHIFT
);
373 // Now compute the number of pages the MDL will cover
375 PageCount
= (PFN_NUMBER
)ADDRESS_AND_SIZE_TO_SPAN_PAGES(0, TotalBytes
);
379 // Try creating an MDL for these many pages
381 Mdl
= MmCreateMdl(NULL
, NULL
, PageCount
<< PAGE_SHIFT
);
385 // This function is not required to return the amount of pages requested
386 // In fact, it can return as little as 1 page, and callers are supposed
387 // to deal with this scenario. So re-attempt the allocation with less
388 // pages than before, and see if it worked this time.
// Shrink the request by 1/16 per retry (loop structure elided here).
390 PageCount
-= (PageCount
>> 4);
394 // Wow, not even a single page was around!
396 if (!Mdl
) return NULL
;
399 // This is where the page array starts....
// The PFN array lives immediately after the MDL header.
401 MdlPage
= (PPFN_NUMBER
)(Mdl
+ 1);
404 // Lock the PFN database
406 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
409 // Are we looking for any pages, without discriminating?
411 if ((LowPage
== 0) && (HighPage
== MmHighestPhysicalPage
))
414 // Well then, let's go shopping
416 while (PagesFound
< PageCount
)
419 // Do we have zeroed pages?
421 if (MmZeroedPageListHead
.Total
)
426 Pfn1
= MiRemoveHeadList(&MmZeroedPageListHead
);
428 else if (MmFreePageListHead
.Total
)
431 // Nope, grab an unzeroed page
433 Pfn1
= MiRemoveHeadList(&MmFreePageListHead
);
438 // This is not good... hopefully we have at least SOME pages
445 // Make sure it's really free
447 ASSERT(MiIsPfnInUse(Pfn1
) == FALSE
);
448 ASSERT(Pfn1
->u3
.e2
.ReferenceCount
== 0);
451 // Allocate it and mark it
453 Pfn1
->u3
.e1
.StartOfAllocation
= 1;
454 Pfn1
->u3
.e1
.EndOfAllocation
= 1;
455 Pfn1
->u3
.e2
.ReferenceCount
= 1;
458 // Decrease available pages
463 // Save it into the MDL
465 *MdlPage
++ = MiGetPfnEntryIndex(Pfn1
);
472 // You want specific range of pages. We'll do this in two runs
// Pass 1 (LookForZeroedPages == 1) takes zeroed pages only;
// pass 2 (== 0) takes the remaining free (unzeroed) pages.
474 for (LookForZeroedPages
= 1; LookForZeroedPages
>= 0; LookForZeroedPages
--)
477 // Scan the range you specified
479 for (Page
= LowPage
; Page
< HighPage
; Page
++)
482 // Get the PFN entry for this page
484 Pfn1
= MiGetPfnEntry(Page
);
488 // Make sure it's free and if this is our first pass, zeroed
490 if (MiIsPfnInUse(Pfn1
)) continue;
491 if ((Pfn1
->u3
.e1
.PageLocation
== ZeroedPageList
) != LookForZeroedPages
) continue;
496 ASSERT(Pfn1
->u3
.e2
.ReferenceCount
== 0);
499 // Now setup the page and mark it
501 Pfn1
->u3
.e2
.ReferenceCount
= 1;
502 Pfn1
->u3
.e1
.StartOfAllocation
= 1;
503 Pfn1
->u3
.e1
.EndOfAllocation
= 1;
506 // Decrease available pages
511 // Save this page into the MDL
514 if (++PagesFound
== PageCount
) break;
518 // If the first pass was enough, don't keep going, otherwise, go again
520 if (PagesFound
== PageCount
) break;
525 // Now release the PFN count
527 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
530 // We might've found less pages, but not more ;-)
532 if (PagesFound
!= PageCount
) ASSERT(PagesFound
< PageCount
);
536 // If we didn't find any pages at all, fail
538 DPRINT1("NO MDL PAGES!\n");
544 // Write out how many pages we found
546 Mdl
->ByteCount
= (ULONG
)(PagesFound
<< PAGE_SHIFT
);
549 // Terminate the MDL array if there's certain missing pages
551 if (PagesFound
!= PageCount
) *MdlPage
= -1;
554 // Now go back and loop over all the MDL pages
556 MdlPage
= (PPFN_NUMBER
)(Mdl
+ 1);
557 LastMdlPage
= MdlPage
+ PagesFound
;
558 while (MdlPage
< LastMdlPage
)
561 // Check if we've reached the end
// -1 is the terminator written above for a short allocation.
564 if (Page
== (PFN_NUMBER
)-1) break;
567 // Get the PFN entry for the page and check if we should zero it out
569 Pfn1
= MiGetPfnEntry(Page
);
571 if (Pfn1
->u3
.e1
.PageLocation
!= ZeroedPageList
) MiZeroPage(Page
);
572 Pfn1
->u3
.e1
.PageLocation
= ActiveAndValid
;
576 // We're done, mark the pages as locked (should we lock them, though???)
579 Mdl
->MdlFlags
|= MDL_PAGES_LOCKED
;
// Debug dump of the whole PFN database: raises to HIGH_LEVEL, walks every
// PFN from 0 to MmHighestPhysicalPage, prints each in-use page, then prints
// per-consumer totals and the free-page count.
// NOTE(review): fragmentary extraction -- the State/Type classification
// switch, the Totals/FreePages accumulation, most DbgPrint arguments, and
// the function braces are among the missing lines.
585 MmDumpPfnDatabase(VOID
)
589 PCHAR State
= "????", Type
= "Unknown";
591 ULONG Totals
[5] = {0}, FreePages
= 0;
// HIGH_LEVEL blocks all interrupts while the (slow) dump runs.
593 KeRaiseIrql(HIGH_LEVEL
, &OldIrql
);
596 // Loop the PFN database
598 for (i
= 0; i
<= MmHighestPhysicalPage
; i
++)
600 Pfn1
= MiGetPfnEntry(i
);
606 if (MiIsPfnInUse(Pfn1
))
619 // Pretty-print the page
621 DbgPrint("0x%08p:\t%04s\t%20s\t(%02d) [%08p])\n",
625 Pfn1
->u3
.e2
.ReferenceCount
,
// Summary: page totals per consumer (in KB via << PAGE_SHIFT / 1024).
629 DbgPrint("Nonpaged Pool: %d pages\t[%d KB]\n", Totals
[MC_NPPOOL
], (Totals
[MC_NPPOOL
] << PAGE_SHIFT
) / 1024);
630 DbgPrint("Paged Pool: %d pages\t[%d KB]\n", Totals
[MC_PPOOL
], (Totals
[MC_PPOOL
] << PAGE_SHIFT
) / 1024);
631 DbgPrint("File System Cache: %d pages\t[%d KB]\n", Totals
[MC_CACHE
], (Totals
[MC_CACHE
] << PAGE_SHIFT
) / 1024);
632 DbgPrint("Process Working Set: %d pages\t[%d KB]\n", Totals
[MC_USER
], (Totals
[MC_USER
] << PAGE_SHIFT
) / 1024);
633 DbgPrint("System: %d pages\t[%d KB]\n", Totals
[MC_SYSTEM
], (Totals
[MC_SYSTEM
] << PAGE_SHIFT
) / 1024);
634 DbgPrint("Free: %d pages\t[%d KB]\n", FreePages
, (FreePages
<< PAGE_SHIFT
) / 1024);
636 KeLowerIrql(OldIrql
);
// Boot-time initialization of the ReactOS PFN database (MmPfnDatabase[0]):
// walks the loader's memory descriptor list, marks free-type ranges as
// FreePageList entries on MmFreePageListHead, stamps every other usable
// range as "used" (ActiveAndValid, refcount 1), marks the pages backing the
// PFN database itself as used, then initializes the zero-page-thread event
// and hands the counters to the balancer.
// NOTE(review): fragmentary extraction -- braces, the descriptor's third
// CONTAINING_RECORD argument, the NrSystemPages/MmAvailablePages updates,
// and the loop bodies' scaffolding are among the missing lines.
641 MmInitializePageList(VOID
)
644 PHYSICAL_PAGE UsedPage
;
645 PMEMORY_ALLOCATION_DESCRIPTOR Md
;
646 PLIST_ENTRY NextEntry
;
647 ULONG NrSystemPages
= 0;
649 /* This is what a used page looks like */
// Template PFN entry copied into every "used" slot below.
650 RtlZeroMemory(&UsedPage
, sizeof(UsedPage
));
651 UsedPage
.u3
.e1
.PageLocation
= ActiveAndValid
;
652 UsedPage
.u3
.e2
.ReferenceCount
= 1;
654 /* Loop the memory descriptors */
655 for (NextEntry
= KeLoaderBlock
->MemoryDescriptorListHead
.Flink
;
656 NextEntry
!= &KeLoaderBlock
->MemoryDescriptorListHead
;
657 NextEntry
= NextEntry
->Flink
)
659 /* Get the descriptor */
660 Md
= CONTAINING_RECORD(NextEntry
,
661 MEMORY_ALLOCATION_DESCRIPTOR
,
664 /* Skip bad memory */
665 if ((Md
->MemoryType
== LoaderFirmwarePermanent
) ||
666 (Md
->MemoryType
== LoaderBBTMemory
) ||
667 (Md
->MemoryType
== LoaderSpecialMemory
) ||
668 (Md
->MemoryType
== LoaderBad
))
671 // We do not build PFN entries for this
675 else if ((Md
->MemoryType
== LoaderFree
) ||
676 (Md
->MemoryType
== LoaderLoadedProgram
) ||
677 (Md
->MemoryType
== LoaderFirmwareTemporary
) ||
678 (Md
->MemoryType
== LoaderOsloaderStack
))
680 /* Loop every page part of the block */
681 for (i
= 0; i
< Md
->PageCount
; i
++)
683 /* Mark it as a free page */
684 MmPfnDatabase
[0][Md
->BasePage
+ i
].u3
.e1
.PageLocation
= FreePageList
;
685 MiInsertInListTail(&MmFreePageListHead
,
686 &MmPfnDatabase
[0][Md
->BasePage
+ i
]);
692 /* Loop every page part of the block */
693 for (i
= 0; i
< Md
->PageCount
; i
++)
695 /* Everything else is used memory */
696 MmPfnDatabase
[0][Md
->BasePage
+ i
] = UsedPage
;
702 /* Finally handle the pages describing the PFN database themselves */
703 for (i
= MxOldFreeDescriptor
.BasePage
; i
< MxFreeDescriptor
->BasePage
; i
++)
705 /* Mark it as used kernel memory */
706 MmPfnDatabase
[0][i
] = UsedPage
;
// Zero-page thread event starts signaled (NotificationEvent, TRUE).
710 KeInitializeEvent(&ZeroPageThreadEvent
, NotificationEvent
, TRUE
);
711 DPRINT("Pages: %x %x\n", MmAvailablePages
, NrSystemPages
);
712 MmInitializeBalancer(MmAvailablePages
, NrSystemPages
);
// Stores the rmap list head pointer for a PFN, under the PFN lock.
// RmapListHead is #defined above to the MMPFN AweReferenceCount (LONG) slot.
// NOTE(review): casting a pointer to LONG truncates on 64-bit targets --
// should be (LONG_PTR)/ULONG_PTR-sized storage; flagged only, since the
// field layout is dictated by the legacy mapping above.
717 MmSetRmapListHeadPage(PFN_TYPE Pfn
, struct _MM_RMAP_ENTRY
* ListHead
)
721 oldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
722 MiGetPfnEntry(Pfn
)->RmapListHead
= (LONG
)ListHead
;
723 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
// Reads back the rmap list head stored by MmSetRmapListHeadPage, under the
// PFN lock, casting the LONG-typed legacy slot back to the pointer type.
// NOTE(review): the return statement is among the lines missing from this
// extraction -- presumably "return ListHead;", confirm against full source.
726 struct _MM_RMAP_ENTRY
*
728 MmGetRmapListHeadPage(PFN_TYPE Pfn
)
731 struct _MM_RMAP_ENTRY
* ListHead
;
733 oldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
734 ListHead
= (struct _MM_RMAP_ENTRY
*)MiGetPfnEntry(Pfn
)->RmapListHead
;
735 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
// Saves a swap entry for the given PFN, under the PFN lock. The value is
// stashed in the MMPFN u1.WsIndex slot (legacy reuse of that field).
742 MmSetSavedSwapEntryPage(PFN_TYPE Pfn
, SWAPENTRY SwapEntry
)
746 oldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
747 MiGetPfnEntry(Pfn
)->u1
.WsIndex
= SwapEntry
;
748 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
// Retrieves the swap entry previously stored in the PFN's u1.WsIndex slot,
// under the PFN lock.
// NOTE(review): the SwapEntry declaration and the return statement are
// among the lines missing from this extraction -- presumably
// "return SwapEntry;", confirm against full source.
753 MmGetSavedSwapEntryPage(PFN_TYPE Pfn
)
758 oldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
759 SwapEntry
= MiGetPfnEntry(Pfn
)->u1
.WsIndex
;
760 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
// Increments the reference count of the given PFN after a bounds check
// (rejects PFN 0 and anything above MmHighestPhysicalPage).
// NOTE(review): no PFN-lock acquisition is visible in this fragment for the
// increment -- confirm locking discipline against the full source. Also,
// "PysicalAddress" in the DPRINT format string is a typo, but it is a
// runtime string and is left untouched in this documentation-only pass.
767 MmReferencePage(PFN_TYPE Pfn
)
771 DPRINT("MmReferencePage(PysicalAddress %x)\n", Pfn
<< PAGE_SHIFT
);
// Out-of-range PFNs are rejected (failure handling elided in extraction).
773 if (Pfn
== 0 || Pfn
> MmHighestPhysicalPage
)
778 Page
= MiGetPfnEntry(Pfn
);
781 Page
->u3
.e2
.ReferenceCount
++;
// Returns the current reference count of the given PFN, read under the
// PFN queued spinlock.
// NOTE(review): the RCount declaration and the return statement are among
// the lines missing from this extraction -- presumably "return RCount;".
786 MmGetReferenceCountPage(PFN_TYPE Pfn
)
792 DPRINT("MmGetReferenceCountPage(PhysicalAddress %x)\n", Pfn
<< PAGE_SHIFT
);
794 oldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
795 Page
= MiGetPfnEntry(Pfn
);
798 RCount
= Page
->u3
.e2
.ReferenceCount
;
800 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
// Public wrapper: true when the PFN's database entry is in use
// (see MiIsPfnInUse / MiIsPfnFree above).
806 MmIsPageInUse(PFN_TYPE Pfn
)
808 return MiIsPfnInUse(MiGetPfnEntry(Pfn
));
// Marks the PFN's database entry ActiveAndValid.
// NOTE(review): the second parameter (presumably the consumer type, given
// the function name) and any use of it are among the lines missing from
// this extraction -- confirm against full source.
813 MiSetConsumer(IN PFN_TYPE Pfn
,
816 MiGetPfnEntry(Pfn
)->u3
.e1
.PageLocation
= ActiveAndValid
;
// Drops one reference from the given PFN. When the count reaches zero the
// page is moved to the free list, and the zero-page thread is woken once
// more than 8 free pages have accumulated and its event is not yet set.
// NOTE(review): no PFN-lock acquisition is visible in this fragment --
// confirm locking discipline against the full source.
821 MmDereferencePage(PFN_TYPE Pfn
)
825 DPRINT("MmDereferencePage(PhysicalAddress %x)\n", Pfn
<< PAGE_SHIFT
);
827 Page
= MiGetPfnEntry(Pfn
);
830 Page
->u3
.e2
.ReferenceCount
--;
831 if (Page
->u3
.e2
.ReferenceCount
== 0)
// Last reference gone: return the page to the free list.
834 Page
->u3
.e1
.PageLocation
= FreePageList
;
835 MiInsertInListTail(&MmFreePageListHead
, Page
);
// Batch wakeups: only signal once enough free pages have piled up and the
// notification event is currently non-signaled.
836 if (MmFreePageListHead
.Total
> 8 && 0 == KeReadStateEvent(&ZeroPageThreadEvent
))
838 KeSetEvent(&ZeroPageThreadEvent
, IO_NO_INCREMENT
, FALSE
);
// Allocates a single physical page for consumer Type: prefers the zeroed
// list; falls back to the free list (page will need clearing); the result
// is marked ActiveAndValid with refcount 1. Pages from the free list are
// zeroed via MiZeroPage unless the consumer is MC_SYSTEM.
// NOTE(review): fragmentary extraction -- braces, PFN-lock handling, the
// PfnOffset declaration, NeedClear being set on the free-list path, the
// out-of-memory/PFN-DB-bootstrap handling, and the return statement
// (presumably "return PfnOffset;") are among the missing lines.
845 MmAllocPage(ULONG Type
)
848 PPHYSICAL_PAGE PageDescriptor
;
849 BOOLEAN NeedClear
= FALSE
;
851 DPRINT("MmAllocPage()\n");
853 if (MmZeroedPageListHead
.Total
== 0)
855 if (MmFreePageListHead
.Total
== 0)
857 /* Check if this allocation is for the PFN DB itself */
858 if (MmNumberOfPhysicalPages
== 0)
863 DPRINT1("MmAllocPage(): Out of memory\n");
// No zeroed page available: take a free (dirty) page instead.
866 PageDescriptor
= MiRemoveHeadList(&MmFreePageListHead
);
872 PageDescriptor
= MiRemoveHeadList(&MmZeroedPageListHead
);
875 PageDescriptor
->u3
.e2
.ReferenceCount
= 1;
// PFN index = offset of the descriptor within the ReactOS PFN array.
879 PfnOffset
= PageDescriptor
- MmPfnDatabase
[0];
880 if ((NeedClear
) && (Type
!= MC_SYSTEM
))
882 MiZeroPage(PfnOffset
);
885 PageDescriptor
->u3
.e1
.PageLocation
= ActiveAndValid
;
// Zeroes one physical page by mapping it into hyperspace at DPC level,
// memset'ing it, and unmapping. Returns STATUS_NO_MEMORY when the
// hyperspace mapping fails, STATUS_SUCCESS otherwise.
// NOTE(review): IRQL is raised to DPC level but no KeLowerIrql call is
// visible in this extraction -- on the early-return path in particular --
// confirm the IRQL is restored in the full source.
891 MiZeroPage(PFN_TYPE Page
)
896 Irql
= KeRaiseIrqlToDpcLevel();
897 TempAddress
= MiMapPageToZeroInHyperSpace(Page
);
898 if (TempAddress
== NULL
)
900 return(STATUS_NO_MEMORY
);
902 memset(TempAddress
, 0, PAGE_SIZE
);
903 MiUnmapPagesInZeroSpace(TempAddress
, 1);
905 return(STATUS_SUCCESS
);
// Entry point of the zero-page thread. Runs at base priority 0, waits on
// ZeroPageThreadEvent, and on each wakeup drains MmFreePageListHead: each
// free page is zeroed (PFN lock dropped around MiZeroPage) and moved to the
// zeroed list on success, or returned to the free list on failure. Exits
// with STATUS_SUCCESS when ZeroPageThreadShouldTerminate is set.
// NOTE(review): fragmentary extraction -- the outer wait loop's braces, the
// KeWaitForSingleObject argument list, and the Pfn/Count/Status/oldIrql
// declarations are among the missing lines.
910 MmZeroPageThreadMain(PVOID Ignored
)
914 PPHYSICAL_PAGE PageDescriptor
;
918 /* Free initial kernel memory */
919 //MiFreeInitMemory();
921 /* Set our priority to 0 */
922 KeGetCurrentThread()->BasePriority
= 0;
923 KeSetPriorityThread(KeGetCurrentThread(), 0);
// Block until MmDereferencePage signals that free pages have accumulated.
927 Status
= KeWaitForSingleObject(&ZeroPageThreadEvent
,
933 if (ZeroPageThreadShouldTerminate
)
935 DPRINT1("ZeroPageThread: Terminating\n");
936 return STATUS_SUCCESS
;
939 oldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
940 while (MmFreePageListHead
.Total
)
942 PageDescriptor
= MiRemoveHeadList(&MmFreePageListHead
);
943 /* We set the page to used, because MmCreateVirtualMapping failed with unused pages */
// Drop the PFN lock while zeroing -- MiZeroPage maps hyperspace at DPC.
944 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
945 Pfn
= PageDescriptor
- MmPfnDatabase
[0];
946 Status
= MiZeroPage(Pfn
);
948 oldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
949 if (NT_SUCCESS(Status
))
951 MiInsertZeroListAtBack(Pfn
);
// Zeroing failed: put the page back on the free list unmodified.
956 MiInsertInListTail(&MmFreePageListHead
, PageDescriptor
);
957 PageDescriptor
->u3
.e1
.PageLocation
= FreePageList
;
961 DPRINT("Zeroed %d pages.\n", Count
);
962 KeResetEvent(&ZeroPageThreadEvent
);
963 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
966 return STATUS_SUCCESS
;