/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/mm/freelist.c
 * PURPOSE:         Handle the list of free physical pages
 *
 * PROGRAMMERS:     David Welch (welch@cwcom.net)
 */
11 /* INCLUDES ****************************************************************/
17 #if defined (ALLOC_PRAGMA)
18 #pragma alloc_text(INIT, MmInitializePageList)
22 /* TYPES *******************************************************************/
24 #define MM_PHYSICAL_PAGE_FREE (0x1)
25 #define MM_PHYSICAL_PAGE_USED (0x2)
26 #define MM_PHYSICAL_PAGE_BIOS (0x3)
28 /* GLOBALS ****************************************************************/
30 PPHYSICAL_PAGE MmPageArray
;
31 ULONG MmPageArraySize
;
33 /* List of pages allocated to the MC_USER Consumer */
34 static LIST_ENTRY UserPageListHead
;
35 /* List of pages zeroed by the ZPW (MmZeroPageThreadMain) */
36 static LIST_ENTRY FreeZeroedPageListHead
;
37 /* List of free pages, filled by MmGetReferenceCountPage and
38 * and MmInitializePageList */
39 static LIST_ENTRY FreeUnzeroedPageListHead
;
41 static KEVENT ZeroPageThreadEvent
;
42 static BOOLEAN ZeroPageThreadShouldTerminate
= FALSE
;
44 static ULONG UnzeroedPageCount
= 0;
46 /* FUNCTIONS *************************************************************/
50 MmGetLRUFirstUserPage(VOID
)
52 PLIST_ENTRY NextListEntry
;
53 PHYSICAL_PAGE
* PageDescriptor
;
56 oldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
57 NextListEntry
= UserPageListHead
.Flink
;
58 if (NextListEntry
== &UserPageListHead
)
60 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
63 PageDescriptor
= CONTAINING_RECORD(NextListEntry
, PHYSICAL_PAGE
, ListEntry
);
64 ASSERT_PFN(PageDescriptor
);
65 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
66 return PageDescriptor
- MmPageArray
;
71 MmInsertLRULastUserPage(PFN_TYPE Pfn
)
76 oldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
77 Page
= MiGetPfnEntry(Pfn
);
78 ASSERT(Page
->Flags
.Type
== MM_PHYSICAL_PAGE_USED
);
79 ASSERT(Page
->Flags
.Consumer
== MC_USER
);
80 InsertTailList(&UserPageListHead
, &Page
->ListEntry
);
81 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
86 MmGetLRUNextUserPage(PFN_TYPE PreviousPfn
)
88 PLIST_ENTRY NextListEntry
;
89 PHYSICAL_PAGE
* PageDescriptor
;
93 oldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
94 Page
= MiGetPfnEntry(PreviousPfn
);
95 ASSERT(Page
->Flags
.Type
== MM_PHYSICAL_PAGE_USED
);
96 ASSERT(Page
->Flags
.Consumer
== MC_USER
);
97 NextListEntry
= Page
->ListEntry
.Flink
;
98 if (NextListEntry
== &UserPageListHead
)
100 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
103 PageDescriptor
= CONTAINING_RECORD(NextListEntry
, PHYSICAL_PAGE
, ListEntry
);
104 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
105 return PageDescriptor
- MmPageArray
;
110 MmRemoveLRUUserPage(PFN_TYPE Page
)
112 RemoveEntryList(&MiGetPfnEntry(Page
)->ListEntry
);
117 MmGetContinuousPages(ULONG NumberOfBytes
,
118 PHYSICAL_ADDRESS LowestAcceptableAddress
,
119 PHYSICAL_ADDRESS HighestAcceptableAddress
,
120 PHYSICAL_ADDRESS BoundaryAddressMultiple
)
130 NrPages
= PAGE_ROUND_UP(NumberOfBytes
) / PAGE_SIZE
;
132 oldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
134 last
= min(HighestAcceptableAddress
.LowPart
/ PAGE_SIZE
, MmPageArraySize
- 1);
135 boundary
= BoundaryAddressMultiple
.LowPart
/ PAGE_SIZE
;
137 for (j
= 0; j
< 2; j
++)
141 /* First try to allocate the pages above the 16MB area. This may fail
142 * because there are not enough continuous pages or we cannot allocate
143 * pages above the 16MB area because the caller has specify an upper limit.
144 * The second try uses the specified lower limit.
146 for (i
= j
== 0 ? 0x100000 / PAGE_SIZE
: LowestAcceptableAddress
.LowPart
/ PAGE_SIZE
; i
<= last
; )
148 if (MiGetPfnEntry(i
)->Flags
.Type
== MM_PHYSICAL_PAGE_FREE
)
150 if (start
== (ULONG
)-1)
160 if (start
/ boundary
!= i
/ boundary
)
167 if (length
== NrPages
)
179 if (start
!= (ULONG
)-1 && length
== NrPages
)
181 for (i
= start
; i
< (start
+ length
); i
++)
184 Page
= MiGetPfnEntry(i
);
185 RemoveEntryList(&Page
->ListEntry
);
186 if (MmPageArray
[i
].Flags
.Zero
== 0)
190 MmStats
.NrFreePages
--;
191 MmStats
.NrSystemPages
++;
192 Page
->Flags
.Type
= MM_PHYSICAL_PAGE_USED
;
193 Page
->Flags
.Consumer
= MC_NPPOOL
;
194 Page
->ReferenceCount
= 1;
197 Page
->SavedSwapEntry
= 0;
199 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
200 for (i
= start
; i
< (start
+ length
); i
++)
202 if (MiGetPfnEntry(i
)->Flags
.Zero
== 0)
208 MiGetPfnEntry(i
)->Flags
.Zero
= 0;
214 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
220 MmAllocEarlyPage(VOID
)
224 /* Use one of our highest usable pages */
225 Pfn
= MiFreeDescriptor
->BasePage
+ MiFreeDescriptor
->PageCount
- 1;
226 MiFreeDescriptor
->PageCount
--;
234 MmInitializePageList(VOID
)
240 PHYSICAL_PAGE UsedPage
;
241 PLIST_ENTRY NextEntry
;
242 PMEMORY_ALLOCATION_DESCRIPTOR Md
;
244 /* Initialize the page lists */
245 InitializeListHead(&UserPageListHead
);
246 InitializeListHead(&FreeUnzeroedPageListHead
);
247 InitializeListHead(&FreeZeroedPageListHead
);
249 /* Set the size and start of the PFN Database */
250 MmPageArray
= (PHYSICAL_PAGE
*)MmPfnDatabase
;
251 MmPageArraySize
= MmHighestPhysicalPage
;
252 Reserved
= PAGE_ROUND_UP((MmPageArraySize
* sizeof(PHYSICAL_PAGE
))) / PAGE_SIZE
;
254 /* Loop every page required to hold the PFN database */
255 for (i
= 0; i
< Reserved
; i
++)
257 PVOID Address
= (char*)MmPageArray
+ (i
* PAGE_SIZE
);
259 /* Check if FreeLDR has already allocated it for us */
260 if (!MmIsPagePresent(NULL
, Address
))
262 /* Use one of our highest usable pages */
263 Pfn
= MmAllocEarlyPage();
266 Status
= MmCreateVirtualMappingForKernel(Address
,
270 if (!NT_SUCCESS(Status
))
272 DPRINT1("Unable to create virtual mapping\n");
273 KeBugCheck(MEMORY_MANAGEMENT
);
278 /* Setting the page protection is necessary to set the global bit */
279 MmSetPageProtect(NULL
, Address
, PAGE_READWRITE
);
283 /* Clear the PFN database */
284 RtlZeroMemory(MmPageArray
, (MmPageArraySize
+ 1) * sizeof(PHYSICAL_PAGE
));
286 /* This is what a used page looks like */
287 RtlZeroMemory(&UsedPage
, sizeof(UsedPage
));
288 UsedPage
.Flags
.Type
= MM_PHYSICAL_PAGE_USED
;
289 UsedPage
.Flags
.Consumer
= MC_NPPOOL
;
290 UsedPage
.ReferenceCount
= 2;
291 UsedPage
.MapCount
= 1;
293 /* Loop the memory descriptors */
294 for (NextEntry
= KeLoaderBlock
->MemoryDescriptorListHead
.Flink
;
295 NextEntry
!= &KeLoaderBlock
->MemoryDescriptorListHead
;
296 NextEntry
= NextEntry
->Flink
)
298 /* Get the descriptor */
299 Md
= CONTAINING_RECORD(NextEntry
,
300 MEMORY_ALLOCATION_DESCRIPTOR
,
303 /* Skip bad memory */
304 if ((Md
->MemoryType
== LoaderFirmwarePermanent
) ||
305 (Md
->MemoryType
== LoaderBBTMemory
) ||
306 (Md
->MemoryType
== LoaderSpecialMemory
) ||
307 (Md
->MemoryType
== LoaderBad
))
309 /* Loop every page part of the block but valid in the database */
310 for (i
= 0; i
< Md
->PageCount
; i
++)
312 /* Skip memory we ignore completely */
313 if ((Md
->BasePage
+ i
) > MmPageArraySize
) break;
315 /* These are pages reserved by the BIOS/ROMs */
316 MmPageArray
[Md
->BasePage
+ i
].Flags
.Type
= MM_PHYSICAL_PAGE_BIOS
;
317 MmPageArray
[Md
->BasePage
+ i
].Flags
.Consumer
= MC_NPPOOL
;
318 MmStats
.NrSystemPages
++;
321 else if ((Md
->MemoryType
== LoaderFree
) ||
322 (Md
->MemoryType
== LoaderLoadedProgram
) ||
323 (Md
->MemoryType
== LoaderFirmwareTemporary
) ||
324 (Md
->MemoryType
== LoaderOsloaderStack
))
326 /* Loop every page part of the block */
327 for (i
= 0; i
< Md
->PageCount
; i
++)
329 /* Mark it as a free page */
330 MmPageArray
[Md
->BasePage
+ i
].Flags
.Type
= MM_PHYSICAL_PAGE_FREE
;
331 InsertTailList(&FreeUnzeroedPageListHead
,
332 &MmPageArray
[Md
->BasePage
+ i
].ListEntry
);
334 MmStats
.NrFreePages
++;
339 /* Loop every page part of the block */
340 for (i
= 0; i
< Md
->PageCount
; i
++)
342 /* Everything else is used memory */
343 MmPageArray
[Md
->BasePage
+ i
] = UsedPage
;
344 MmStats
.NrSystemPages
++;
349 /* Finally handle the pages describing the PFN database themselves */
350 for (i
= (MiFreeDescriptor
->BasePage
+ MiFreeDescriptor
->PageCount
);
351 i
< (MiFreeDescriptorOrg
.BasePage
+ MiFreeDescriptorOrg
.PageCount
);
354 /* Ensure this page was not added previously */
355 ASSERT(MmPageArray
[i
].Flags
.Type
== 0);
357 /* Mark it as used kernel memory */
358 MmPageArray
[i
] = UsedPage
;
359 MmStats
.NrSystemPages
++;
362 KeInitializeEvent(&ZeroPageThreadEvent
, NotificationEvent
, TRUE
);
364 DPRINT("Pages: %x %x\n", MmStats
.NrFreePages
, MmStats
.NrSystemPages
);
365 MmStats
.NrTotalPages
= MmStats
.NrFreePages
+ MmStats
.NrSystemPages
+ MmStats
.NrUserPages
;
366 MmInitializeBalancer(MmStats
.NrFreePages
, MmStats
.NrSystemPages
);
371 MmSetFlagsPage(PFN_TYPE Pfn
, ULONG Flags
)
375 oldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
376 MiGetPfnEntry(Pfn
)->AllFlags
= Flags
;
377 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
382 MmSetRmapListHeadPage(PFN_TYPE Pfn
, struct _MM_RMAP_ENTRY
* ListHead
)
386 oldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
387 MiGetPfnEntry(Pfn
)->RmapListHead
= ListHead
;
388 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
391 struct _MM_RMAP_ENTRY
*
393 MmGetRmapListHeadPage(PFN_TYPE Pfn
)
396 struct _MM_RMAP_ENTRY
* ListHead
;
398 oldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
399 ListHead
= MiGetPfnEntry(Pfn
)->RmapListHead
;
400 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
407 MmMarkPageMapped(PFN_TYPE Pfn
)
412 if (Pfn
<= MmPageArraySize
)
414 oldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
415 Page
= MiGetPfnEntry(Pfn
);
416 if (Page
->Flags
.Type
== MM_PHYSICAL_PAGE_FREE
)
418 DPRINT1("Mapping non-used page\n");
419 KeBugCheck(MEMORY_MANAGEMENT
);
422 Page
->ReferenceCount
++;
423 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
429 MmMarkPageUnmapped(PFN_TYPE Pfn
)
434 if (Pfn
<= MmPageArraySize
)
436 oldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
437 Page
= MiGetPfnEntry(Pfn
);
438 if (Page
->Flags
.Type
== MM_PHYSICAL_PAGE_FREE
)
440 DPRINT1("Unmapping non-used page\n");
441 KeBugCheck(MEMORY_MANAGEMENT
);
443 if (Page
->MapCount
== 0)
445 DPRINT1("Unmapping not mapped page\n");
446 KeBugCheck(MEMORY_MANAGEMENT
);
449 Page
->ReferenceCount
--;
450 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
456 MmGetFlagsPage(PFN_TYPE Pfn
)
461 oldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
462 Flags
= MiGetPfnEntry(Pfn
)->AllFlags
;
463 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
471 MmSetSavedSwapEntryPage(PFN_TYPE Pfn
, SWAPENTRY SavedSwapEntry
)
475 oldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
476 MiGetPfnEntry(Pfn
)->SavedSwapEntry
= SavedSwapEntry
;
477 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
482 MmGetSavedSwapEntryPage(PFN_TYPE Pfn
)
484 SWAPENTRY SavedSwapEntry
;
487 oldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
488 SavedSwapEntry
= MiGetPfnEntry(Pfn
)->SavedSwapEntry
;
489 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
491 return(SavedSwapEntry
);
496 MmReferencePageUnsafe(PFN_TYPE Pfn
)
501 DPRINT("MmReferencePageUnsafe(PysicalAddress %x)\n", Pfn
<< PAGE_SHIFT
);
503 if (Pfn
== 0 || Pfn
> MmPageArraySize
)
508 oldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
510 Page
= MiGetPfnEntry(Pfn
);
511 if (Page
->Flags
.Type
!= MM_PHYSICAL_PAGE_USED
)
513 DPRINT1("Referencing non-used page\n");
514 KeBugCheck(MEMORY_MANAGEMENT
);
517 Page
->ReferenceCount
++;
518 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
523 MmReferencePage(PFN_TYPE Pfn
)
525 DPRINT("MmReferencePage(PysicalAddress %x)\n", Pfn
<< PAGE_SHIFT
);
527 MmReferencePageUnsafe(Pfn
);
532 MmGetReferenceCountPage(PFN_TYPE Pfn
)
538 DPRINT("MmGetReferenceCountPage(PhysicalAddress %x)\n", Pfn
<< PAGE_SHIFT
);
540 oldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
541 Page
= MiGetPfnEntry(Pfn
);
542 if (Page
->Flags
.Type
!= MM_PHYSICAL_PAGE_USED
)
544 DPRINT1("Getting reference count for free page\n");
545 KeBugCheck(MEMORY_MANAGEMENT
);
548 RCount
= Page
->ReferenceCount
;
550 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
556 MmIsPageInUse(PFN_TYPE Pfn
)
559 DPRINT("MmIsPageInUse(PhysicalAddress %x)\n", Pfn
<< PAGE_SHIFT
);
561 return (MiGetPfnEntry(Pfn
)->Flags
.Type
== MM_PHYSICAL_PAGE_USED
);
566 MmDereferencePage(PFN_TYPE Pfn
)
571 DPRINT("MmDereferencePage(PhysicalAddress %x)\n", Pfn
<< PAGE_SHIFT
);
573 oldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
575 Page
= MiGetPfnEntry(Pfn
);
577 if (Page
->Flags
.Type
!= MM_PHYSICAL_PAGE_USED
)
579 DPRINT1("Dereferencing free page\n");
580 KeBugCheck(MEMORY_MANAGEMENT
);
582 if (Page
->ReferenceCount
== 0)
584 DPRINT1("Derefrencing page with reference count 0\n");
585 KeBugCheck(MEMORY_MANAGEMENT
);
588 Page
->ReferenceCount
--;
589 if (Page
->ReferenceCount
== 0)
591 MmStats
.NrFreePages
++;
592 MmStats
.NrSystemPages
--;
593 if (Page
->Flags
.Consumer
== MC_USER
) RemoveEntryList(&Page
->ListEntry
);
594 if (Page
->RmapListHead
!= NULL
)
596 DPRINT1("Freeing page with rmap entries.\n");
597 KeBugCheck(MEMORY_MANAGEMENT
);
599 if (Page
->MapCount
!= 0)
601 DPRINT1("Freeing mapped page (0x%x count %d)\n",
602 Pfn
<< PAGE_SHIFT
, Page
->MapCount
);
603 KeBugCheck(MEMORY_MANAGEMENT
);
605 if (Page
->LockCount
> 0)
607 DPRINT1("Freeing locked page\n");
608 KeBugCheck(MEMORY_MANAGEMENT
);
610 if (Page
->SavedSwapEntry
!= 0)
612 DPRINT1("Freeing page with swap entry.\n");
613 KeBugCheck(MEMORY_MANAGEMENT
);
615 if (Page
->Flags
.Type
!= MM_PHYSICAL_PAGE_USED
)
617 DPRINT1("Freeing page with flags %x\n",
619 KeBugCheck(MEMORY_MANAGEMENT
);
621 Page
->Flags
.Type
= MM_PHYSICAL_PAGE_FREE
;
622 Page
->Flags
.Consumer
= MC_MAXIMUM
;
623 InsertTailList(&FreeUnzeroedPageListHead
,
626 if (UnzeroedPageCount
> 8 && 0 == KeReadStateEvent(&ZeroPageThreadEvent
))
628 KeSetEvent(&ZeroPageThreadEvent
, IO_NO_INCREMENT
, FALSE
);
631 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
636 MmGetLockCountPage(PFN_TYPE Pfn
)
642 DPRINT("MmGetLockCountPage(PhysicalAddress %x)\n", Pfn
<< PAGE_SHIFT
);
644 oldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
646 Page
= MiGetPfnEntry(Pfn
);
647 if (Page
->Flags
.Type
!= MM_PHYSICAL_PAGE_USED
)
649 DPRINT1("Getting lock count for free page\n");
650 KeBugCheck(MEMORY_MANAGEMENT
);
653 LockCount
= Page
->LockCount
;
654 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
661 MmLockPageUnsafe(PFN_TYPE Pfn
)
666 DPRINT("MmLockPageUnsafe(PhysicalAddress %x)\n", Pfn
<< PAGE_SHIFT
);
668 oldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
670 Page
= MiGetPfnEntry(Pfn
);
671 if (Page
->Flags
.Type
!= MM_PHYSICAL_PAGE_USED
)
673 DPRINT1("Locking free page\n");
674 KeBugCheck(MEMORY_MANAGEMENT
);
678 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
683 MmLockPage(PFN_TYPE Pfn
)
685 DPRINT("MmLockPage(PhysicalAddress %x)\n", Pfn
<< PAGE_SHIFT
);
687 MmLockPageUnsafe(Pfn
);
692 MmUnlockPage(PFN_TYPE Pfn
)
697 DPRINT("MmUnlockPage(PhysicalAddress %x)\n", Pfn
<< PAGE_SHIFT
);
699 oldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
701 Page
= MiGetPfnEntry(Pfn
);
702 if (Page
->Flags
.Type
!= MM_PHYSICAL_PAGE_USED
)
704 DPRINT1("Unlocking free page\n");
705 KeBugCheck(MEMORY_MANAGEMENT
);
709 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
714 MmAllocPage(ULONG Consumer
, SWAPENTRY SavedSwapEntry
)
717 PLIST_ENTRY ListEntry
;
718 PPHYSICAL_PAGE PageDescriptor
;
720 BOOLEAN NeedClear
= FALSE
;
722 DPRINT("MmAllocPage()\n");
724 oldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
725 if (IsListEmpty(&FreeZeroedPageListHead
))
727 if (IsListEmpty(&FreeUnzeroedPageListHead
))
729 /* Check if this allocation is for the PFN DB itself */
730 if (MmStats
.NrTotalPages
== 0)
732 /* Allocate an early page -- we'll account for it later */
733 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
734 PfnOffset
= MmAllocEarlyPage();
735 MiZeroPage(PfnOffset
);
739 DPRINT1("MmAllocPage(): Out of memory\n");
740 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
743 ListEntry
= RemoveTailList(&FreeUnzeroedPageListHead
);
746 PageDescriptor
= CONTAINING_RECORD(ListEntry
, PHYSICAL_PAGE
, ListEntry
);
752 ListEntry
= RemoveTailList(&FreeZeroedPageListHead
);
754 PageDescriptor
= CONTAINING_RECORD(ListEntry
, PHYSICAL_PAGE
, ListEntry
);
757 if (PageDescriptor
->Flags
.Type
!= MM_PHYSICAL_PAGE_FREE
)
759 DPRINT1("Got non-free page from freelist\n");
760 KeBugCheck(MEMORY_MANAGEMENT
);
762 if (PageDescriptor
->MapCount
!= 0)
764 DPRINT1("Got mapped page from freelist\n");
765 KeBugCheck(MEMORY_MANAGEMENT
);
767 if (PageDescriptor
->ReferenceCount
!= 0)
769 DPRINT1("%d\n", PageDescriptor
->ReferenceCount
);
770 KeBugCheck(MEMORY_MANAGEMENT
);
772 PageDescriptor
->Flags
.Type
= MM_PHYSICAL_PAGE_USED
;
773 PageDescriptor
->Flags
.Consumer
= Consumer
;
774 PageDescriptor
->ReferenceCount
= 1;
775 PageDescriptor
->LockCount
= 0;
776 PageDescriptor
->MapCount
= 0;
777 PageDescriptor
->SavedSwapEntry
= SavedSwapEntry
;
779 MmStats
.NrSystemPages
++;
780 MmStats
.NrFreePages
--;
782 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
784 PfnOffset
= PageDescriptor
- MmPageArray
;
787 MiZeroPage(PfnOffset
);
789 if (PageDescriptor
->MapCount
!= 0)
791 DPRINT1("Returning mapped page.\n");
792 KeBugCheck(MEMORY_MANAGEMENT
);
799 MmAllocPagesSpecifyRange(ULONG Consumer
,
800 PHYSICAL_ADDRESS LowestAddress
,
801 PHYSICAL_ADDRESS HighestAddress
,
805 PPHYSICAL_PAGE PageDescriptor
;
807 PFN_TYPE LowestPage
, HighestPage
;
809 ULONG NumberOfPagesFound
= 0;
812 DPRINT("MmAllocPagesSpecifyRange()\n"
813 " LowestAddress = 0x%08x%08x\n"
814 " HighestAddress = 0x%08x%08x\n"
815 " NumberOfPages = %d\n",
816 LowestAddress
.u
.HighPart
, LowestAddress
.u
.LowPart
,
817 HighestAddress
.u
.HighPart
, HighestAddress
.u
.LowPart
,
820 if (NumberOfPages
== 0)
823 LowestPage
= LowestAddress
.LowPart
/ PAGE_SIZE
;
824 HighestPage
= HighestAddress
.LowPart
/ PAGE_SIZE
;
825 if ((HighestAddress
.u
.LowPart
% PAGE_SIZE
) != 0)
828 if (LowestPage
>= MmPageArraySize
)
830 DPRINT1("MmAllocPagesSpecifyRange(): Out of memory\n");
833 if (HighestPage
> MmPageArraySize
)
834 HighestPage
= MmPageArraySize
;
836 oldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
837 if (LowestPage
== 0 && HighestPage
== MmPageArraySize
)
839 PLIST_ENTRY ListEntry
;
840 while (NumberOfPagesFound
< NumberOfPages
)
842 if (!IsListEmpty(&FreeZeroedPageListHead
))
844 ListEntry
= RemoveTailList(&FreeZeroedPageListHead
);
846 else if (!IsListEmpty(&FreeUnzeroedPageListHead
))
848 ListEntry
= RemoveTailList(&FreeUnzeroedPageListHead
);
853 if (NumberOfPagesFound
== 0)
855 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
856 DPRINT1("MmAllocPagesSpecifyRange(): Out of memory\n");
864 PageDescriptor
= CONTAINING_RECORD(ListEntry
, PHYSICAL_PAGE
, ListEntry
);
866 ASSERT(PageDescriptor
->Flags
.Type
== MM_PHYSICAL_PAGE_FREE
);
867 ASSERT(PageDescriptor
->MapCount
== 0);
868 ASSERT(PageDescriptor
->ReferenceCount
== 0);
870 /* Allocate the page */
871 PageDescriptor
->Flags
.Type
= MM_PHYSICAL_PAGE_USED
;
872 PageDescriptor
->Flags
.Consumer
= Consumer
;
873 PageDescriptor
->ReferenceCount
= 1;
874 PageDescriptor
->LockCount
= 0;
875 PageDescriptor
->MapCount
= 0;
876 PageDescriptor
->SavedSwapEntry
= 0; /* FIXME: Do we need swap entries? */
878 MmStats
.NrSystemPages
++;
879 MmStats
.NrFreePages
--;
881 /* Remember the page */
882 pfn
= PageDescriptor
- MmPageArray
;
883 Pages
[NumberOfPagesFound
++] = pfn
;
884 if(Consumer
== MC_USER
) MmInsertLRULastUserPage(pfn
);
889 INT LookForZeroedPages
;
890 for (LookForZeroedPages
= 1; LookForZeroedPages
>= 0; LookForZeroedPages
--)
892 for (pfn
= LowestPage
; pfn
< HighestPage
; pfn
++)
894 PageDescriptor
= MmPageArray
+ pfn
;
896 if (PageDescriptor
->Flags
.Type
!= MM_PHYSICAL_PAGE_FREE
)
898 if (PageDescriptor
->Flags
.Zero
!= LookForZeroedPages
)
901 ASSERT(PageDescriptor
->MapCount
== 0);
902 ASSERT(PageDescriptor
->ReferenceCount
== 0);
904 /* Allocate the page */
905 PageDescriptor
->Flags
.Type
= MM_PHYSICAL_PAGE_USED
;
906 PageDescriptor
->Flags
.Consumer
= Consumer
;
907 PageDescriptor
->ReferenceCount
= 1;
908 PageDescriptor
->LockCount
= 0;
909 PageDescriptor
->MapCount
= 0;
910 PageDescriptor
->SavedSwapEntry
= 0; /* FIXME: Do we need swap entries? */
912 if (!PageDescriptor
->Flags
.Zero
)
914 MmStats
.NrSystemPages
++;
915 MmStats
.NrFreePages
--;
917 /* Remember the page */
918 Pages
[NumberOfPagesFound
++] = pfn
;
919 if (NumberOfPagesFound
== NumberOfPages
)
922 if (NumberOfPagesFound
== NumberOfPages
)
926 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
928 /* Zero unzero-ed pages */
929 for (i
= 0; i
< NumberOfPagesFound
; i
++)
932 if (MiGetPfnEntry(pfn
)->Flags
.Zero
== 0)
938 MiGetPfnEntry(pfn
)->Flags
.Zero
= 0;
942 return NumberOfPagesFound
;
947 MiZeroPageInternal(PFN_TYPE Page
)
951 TempAddress
= MiMapPageToZeroInHyperSpace(Page
);
952 if (TempAddress
== NULL
)
954 return(STATUS_NO_MEMORY
);
956 memset(TempAddress
, 0, PAGE_SIZE
);
957 return(STATUS_SUCCESS
);
962 MmZeroPageThreadMain(PVOID Ignored
)
966 PLIST_ENTRY ListEntry
;
967 PPHYSICAL_PAGE PageDescriptor
;
971 /* Free initial kernel memory */
972 //MiFreeInitMemory();
974 /* Set our priority to 0 */
975 KeGetCurrentThread()->BasePriority
= 0;
976 KeSetPriorityThread(KeGetCurrentThread(), 0);
980 Status
= KeWaitForSingleObject(&ZeroPageThreadEvent
,
985 if (!NT_SUCCESS(Status
))
987 DPRINT1("ZeroPageThread: Wait failed\n");
988 KeBugCheck(MEMORY_MANAGEMENT
);
991 if (ZeroPageThreadShouldTerminate
)
993 DPRINT1("ZeroPageThread: Terminating\n");
994 return STATUS_SUCCESS
;
997 oldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
998 while (!IsListEmpty(&FreeUnzeroedPageListHead
))
1000 ListEntry
= RemoveTailList(&FreeUnzeroedPageListHead
);
1001 UnzeroedPageCount
--;
1002 PageDescriptor
= CONTAINING_RECORD(ListEntry
, PHYSICAL_PAGE
, ListEntry
);
1003 /* We set the page to used, because MmCreateVirtualMapping failed with unused pages */
1004 PageDescriptor
->Flags
.Type
= MM_PHYSICAL_PAGE_USED
;
1005 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
1006 Pfn
= PageDescriptor
- MmPageArray
;
1007 Status
= MiZeroPageInternal(Pfn
);
1009 oldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
1010 if (PageDescriptor
->MapCount
!= 0)
1012 DPRINT1("Mapped page on freelist.\n");
1013 KeBugCheck(MEMORY_MANAGEMENT
);
1015 PageDescriptor
->Flags
.Zero
= 1;
1016 PageDescriptor
->Flags
.Type
= MM_PHYSICAL_PAGE_FREE
;
1017 if (NT_SUCCESS(Status
))
1019 InsertHeadList(&FreeZeroedPageListHead
, ListEntry
);
1024 InsertHeadList(&FreeUnzeroedPageListHead
, ListEntry
);
1025 UnzeroedPageCount
++;
1029 DPRINT("Zeroed %d pages.\n", Count
);
1030 KeResetEvent(&ZeroPageThreadEvent
);
1031 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
1034 return STATUS_SUCCESS
;