 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/mm/freelist.c
 * PURPOSE:         Handle the list of free physical pages
 * PROGRAMMERS:     David Welch (welch@cwcom.net)
/* INCLUDES ****************************************************************/

#include <internal/debug.h>

/* TYPES *******************************************************************/
20 #define MM_PHYSICAL_PAGE_FREE (0x1)
21 #define MM_PHYSICAL_PAGE_USED (0x2)
22 #define MM_PHYSICAL_PAGE_BIOS (0x3)
24 typedef struct _PHYSICAL_PAGE
40 SWAPENTRY SavedSwapEntry
;
43 struct _MM_RMAP_ENTRY
* RmapListHead
;
45 PHYSICAL_PAGE
, *PPHYSICAL_PAGE
;
48 /* GLOBALS ****************************************************************/
50 static PPHYSICAL_PAGE MmPageArray
;
51 ULONG MmPageArraySize
;
53 static KSPIN_LOCK PageListLock
;
54 static LIST_ENTRY UsedPageListHeads
[MC_MAXIMUM
];
55 static LIST_ENTRY FreeZeroedPageListHead
;
56 static LIST_ENTRY FreeUnzeroedPageListHead
;
57 static LIST_ENTRY BiosPageListHead
;
59 static PETHREAD ZeroPageThread
;
60 static CLIENT_ID ZeroPageThreadId
;
61 static KEVENT ZeroPageThreadEvent
;
62 static BOOLEAN ZeroPageThreadShouldTerminate
= FALSE
;
64 static ULONG UnzeroedPageCount
= 0;
66 /* FUNCTIONS *************************************************************/
70 MmTransferOwnershipPage(PFN_TYPE Pfn
, ULONG NewConsumer
)
74 KeAcquireSpinLock(&PageListLock
, &oldIrql
);
75 if (MmPageArray
[Pfn
].MapCount
!= 0)
77 DbgPrint("Transfering mapped page.\n");
80 if (MmPageArray
[Pfn
].Flags
.Type
!= MM_PHYSICAL_PAGE_USED
)
82 DPRINT1("Type: %d\n", MmPageArray
[Pfn
].Flags
.Type
);
85 if (MmPageArray
[Pfn
].ReferenceCount
!= 1)
87 DPRINT1("ReferenceCount: %d\n", MmPageArray
[Pfn
].ReferenceCount
);
90 RemoveEntryList(&MmPageArray
[Pfn
].ListEntry
);
91 InsertTailList(&UsedPageListHeads
[NewConsumer
],
92 &MmPageArray
[Pfn
].ListEntry
);
93 MmPageArray
[Pfn
].Flags
.Consumer
= NewConsumer
;
94 KeReleaseSpinLock(&PageListLock
, oldIrql
);
100 MmGetLRUFirstUserPage(VOID
)
102 PLIST_ENTRY NextListEntry
;
103 PHYSICAL_PAGE
* PageDescriptor
;
106 KeAcquireSpinLock(&PageListLock
, &oldIrql
);
107 NextListEntry
= UsedPageListHeads
[MC_USER
].Flink
;
108 if (NextListEntry
== &UsedPageListHeads
[MC_USER
])
110 KeReleaseSpinLock(&PageListLock
, oldIrql
);
113 PageDescriptor
= CONTAINING_RECORD(NextListEntry
, PHYSICAL_PAGE
, ListEntry
);
114 KeReleaseSpinLock(&PageListLock
, oldIrql
);
115 return PageDescriptor
- MmPageArray
;
120 MmSetLRULastPage(PFN_TYPE Pfn
)
124 ASSERT(Pfn
< MmPageArraySize
);
125 KeAcquireSpinLock(&PageListLock
, &oldIrql
);
126 if (MmPageArray
[Pfn
].Flags
.Type
== MM_PHYSICAL_PAGE_USED
&&
127 MmPageArray
[Pfn
].Flags
.Consumer
== MC_USER
)
129 RemoveEntryList(&MmPageArray
[Pfn
].ListEntry
);
130 InsertTailList(&UsedPageListHeads
[MC_USER
],
131 &MmPageArray
[Pfn
].ListEntry
);
133 KeReleaseSpinLock(&PageListLock
, oldIrql
);
138 MmGetLRUNextUserPage(PFN_TYPE PreviousPfn
)
140 PLIST_ENTRY NextListEntry
;
141 PHYSICAL_PAGE
* PageDescriptor
;
144 KeAcquireSpinLock(&PageListLock
, &oldIrql
);
145 if (MmPageArray
[PreviousPfn
].Flags
.Type
!= MM_PHYSICAL_PAGE_USED
||
146 MmPageArray
[PreviousPfn
].Flags
.Consumer
!= MC_USER
)
148 NextListEntry
= UsedPageListHeads
[MC_USER
].Flink
;
152 NextListEntry
= MmPageArray
[PreviousPfn
].ListEntry
.Flink
;
154 if (NextListEntry
== &UsedPageListHeads
[MC_USER
])
156 KeReleaseSpinLock(&PageListLock
, oldIrql
);
159 PageDescriptor
= CONTAINING_RECORD(NextListEntry
, PHYSICAL_PAGE
, ListEntry
);
160 KeReleaseSpinLock(&PageListLock
, oldIrql
);
161 return PageDescriptor
- MmPageArray
;
166 MmGetContinuousPages(ULONG NumberOfBytes
,
167 PHYSICAL_ADDRESS LowestAcceptableAddress
,
168 PHYSICAL_ADDRESS HighestAcceptableAddress
,
169 PHYSICAL_ADDRESS BoundaryAddressMultiple
)
179 NrPages
= PAGE_ROUND_UP(NumberOfBytes
) / PAGE_SIZE
;
181 KeAcquireSpinLock(&PageListLock
, &oldIrql
);
183 last
= min(HighestAcceptableAddress
.QuadPart
/ PAGE_SIZE
, MmPageArraySize
- 1);
184 boundary
= BoundaryAddressMultiple
.QuadPart
/ PAGE_SIZE
;
186 for (j
= 0; j
< 2; j
++)
190 /* First try to allocate the pages above the 16MB area. This may fail
191 * because there are not enough continuous pages or we cannot allocate
192 * pages above the 16MB area because the caller has specify an upper limit.
193 * The second try uses the specified lower limit.
195 for (i
= j
== 0 ? 0x100000 / PAGE_SIZE
: LowestAcceptableAddress
.QuadPart
/ PAGE_SIZE
; i
<= last
; )
197 if (MmPageArray
[i
].Flags
.Type
== MM_PHYSICAL_PAGE_FREE
)
199 if (start
== (ULONG
)-1)
209 if (start
/ boundary
!= i
/ boundary
)
216 if (length
== NrPages
)
228 if (start
!= (ULONG
)-1 && length
== NrPages
)
230 for (i
= start
; i
< (start
+ length
); i
++)
232 RemoveEntryList(&MmPageArray
[i
].ListEntry
);
233 if (MmPageArray
[i
].Flags
.Zero
== 0)
237 MmStats
.NrFreePages
--;
238 MmStats
.NrSystemPages
++;
239 MmPageArray
[i
].Flags
.Type
= MM_PHYSICAL_PAGE_USED
;
240 MmPageArray
[i
].Flags
.Consumer
= MC_NPPOOL
;
241 MmPageArray
[i
].ReferenceCount
= 1;
242 MmPageArray
[i
].LockCount
= 0;
243 MmPageArray
[i
].MapCount
= 0;
244 MmPageArray
[i
].SavedSwapEntry
= 0;
245 InsertTailList(&UsedPageListHeads
[MC_NPPOOL
],
246 &MmPageArray
[i
].ListEntry
);
248 KeReleaseSpinLock(&PageListLock
, oldIrql
);
249 for (i
= start
; i
< (start
+ length
); i
++)
251 if (MmPageArray
[i
].Flags
.Zero
== 0)
257 MmPageArray
[i
].Flags
.Zero
= 0;
263 KeReleaseSpinLock(&PageListLock
, oldIrql
);
270 MiIsPfnRam(PADDRESS_RANGE BIOSMemoryMap
,
271 ULONG AddressRangeCount
,
275 LARGE_INTEGER BaseAddress
;
276 LARGE_INTEGER EndAddress
;
278 if (BIOSMemoryMap
!= NULL
&& AddressRangeCount
> 0)
281 for (i
= 0; i
< AddressRangeCount
; i
++)
283 BaseAddress
.u
.LowPart
= BIOSMemoryMap
[i
].BaseAddrLow
;
284 BaseAddress
.u
.HighPart
= BIOSMemoryMap
[i
].BaseAddrHigh
;
285 EndAddress
.u
.LowPart
= BIOSMemoryMap
[i
].LengthLow
;
286 EndAddress
.u
.HighPart
= BIOSMemoryMap
[i
].LengthHigh
;
287 EndAddress
.QuadPart
+= BaseAddress
.QuadPart
;
288 BaseAddress
.QuadPart
= PAGE_ROUND_DOWN(BaseAddress
.QuadPart
);
289 EndAddress
.QuadPart
= PAGE_ROUND_UP(EndAddress
.QuadPart
);
291 if ((BaseAddress
.QuadPart
>> PAGE_SHIFT
) <= Pfn
&&
292 Pfn
< (EndAddress
.QuadPart
>> PAGE_SHIFT
))
294 if (BIOSMemoryMap
[i
].Type
== 1)
313 MmInitializePageList(ULONG_PTR FirstPhysKernelAddress
,
314 ULONG_PTR LastPhysKernelAddress
,
315 ULONG MemorySizeInPages
,
316 ULONG_PTR LastKernelAddress
,
317 PADDRESS_RANGE BIOSMemoryMap
,
318 ULONG AddressRangeCount
)
320 * FUNCTION: Initializes the page list with all pages free
321 * except those known to be reserved and those used by the kernel
323 * FirstKernelAddress = First physical address used by the kernel
324 * LastKernelAddress = Last physical address used by the kernel
331 PFN_TYPE FirstUninitializedPage
;
333 DPRINT("MmInitializePageList(FirstPhysKernelAddress %x, "
334 "LastPhysKernelAddress %x, "
335 "MemorySizeInPages %x, LastKernelAddress %x)\n",
336 FirstPhysKernelAddress
,
337 LastPhysKernelAddress
,
341 for (i
= 0; i
< MC_MAXIMUM
; i
++)
343 InitializeListHead(&UsedPageListHeads
[i
]);
345 KeInitializeSpinLock(&PageListLock
);
346 InitializeListHead(&FreeUnzeroedPageListHead
);
347 InitializeListHead(&FreeZeroedPageListHead
);
348 InitializeListHead(&BiosPageListHead
);
350 LastKernelAddress
= PAGE_ROUND_UP(LastKernelAddress
);
352 MmPageArraySize
= MemorySizeInPages
;
354 PAGE_ROUND_UP((MmPageArraySize
* sizeof(PHYSICAL_PAGE
))) / PAGE_SIZE
;
355 MmPageArray
= (PHYSICAL_PAGE
*)LastKernelAddress
;
357 DPRINT("Reserved %d\n", Reserved
);
359 LastKernelAddress
= PAGE_ROUND_UP(LastKernelAddress
);
360 LastKernelAddress
= ((ULONG_PTR
)LastKernelAddress
+ (Reserved
* PAGE_SIZE
));
361 LastPhysKernelAddress
= (ULONG_PTR
)PAGE_ROUND_UP(LastPhysKernelAddress
);
362 LastPhysKernelAddress
= (ULONG_PTR
)LastPhysKernelAddress
+ (Reserved
* PAGE_SIZE
);
364 MmStats
.NrTotalPages
= 0;
365 MmStats
.NrSystemPages
= 0;
366 MmStats
.NrUserPages
= 0;
367 MmStats
.NrReservedPages
= 0;
368 MmStats
.NrFreePages
= 0;
369 MmStats
.NrLockedPages
= 0;
371 /* Preinitialize the Balancer because we need some pages for pte's */
372 MmInitializeBalancer(MemorySizeInPages
, 0);
374 FirstUninitializedPage
= (ULONG_PTR
)LastPhysKernelAddress
/ PAGE_SIZE
;
375 LastPage
= MmPageArraySize
;
376 for (i
= 0; i
< Reserved
; i
++)
378 PVOID Address
= (char*)MmPageArray
+ (i
* PAGE_SIZE
);
380 if (!MmIsPagePresent(NULL
, Address
))
384 while (Pfn
== 0 && LastPage
> FirstUninitializedPage
)
386 /* Allocate the page from the upper end of the RAM */
387 if (MiIsPfnRam(BIOSMemoryMap
, AddressRangeCount
, --LastPage
))
394 Pfn
= MmAllocPage(MC_NPPOOL
, 0);
400 Status
= MmCreateVirtualMappingForKernel(Address
,
404 if (!NT_SUCCESS(Status
))
406 DbgPrint("Unable to create virtual mapping\n");
412 /* Setting the page protection is necessary to set the global bit on IA32 */
413 MmSetPageProtect(NULL
, Address
, PAGE_READWRITE
);
415 memset(Address
, 0, PAGE_SIZE
);
417 start
= ((ULONG_PTR
)Address
- (ULONG_PTR
)MmPageArray
) / sizeof(PHYSICAL_PAGE
);
418 end
= ((ULONG_PTR
)Address
- (ULONG_PTR
)MmPageArray
+ PAGE_SIZE
) / sizeof(PHYSICAL_PAGE
);
420 for (j
= start
; j
< end
&& j
< LastPage
; j
++)
422 if (MiIsPfnRam(BIOSMemoryMap
, AddressRangeCount
, j
))
427 * Page zero is reserved
429 MmPageArray
[0].Flags
.Type
= MM_PHYSICAL_PAGE_BIOS
;
430 MmPageArray
[0].Flags
.Consumer
= MC_NPPOOL
;
431 MmPageArray
[0].Flags
.Zero
= 0;
432 MmPageArray
[0].ReferenceCount
= 0;
433 InsertTailList(&BiosPageListHead
,
434 &MmPageArray
[0].ListEntry
);
435 MmStats
.NrReservedPages
++;
441 * Page one is reserved for the initial KPCR
443 MmPageArray
[1].Flags
.Type
= MM_PHYSICAL_PAGE_BIOS
;
444 MmPageArray
[1].Flags
.Consumer
= MC_NPPOOL
;
445 MmPageArray
[1].Flags
.Zero
= 0;
446 MmPageArray
[1].ReferenceCount
= 0;
447 InsertTailList(&BiosPageListHead
,
448 &MmPageArray
[1].ListEntry
);
449 MmStats
.NrReservedPages
++;
451 /* Protect the Page Directory. This will be changed in r3 */
452 else if (j
>= (KeLoaderBlock
.PageDirectoryStart
/ PAGE_SIZE
) && j
< (KeLoaderBlock
.PageDirectoryEnd
/ PAGE_SIZE
))
454 MmPageArray
[j
].Flags
.Type
= MM_PHYSICAL_PAGE_BIOS
;
455 MmPageArray
[j
].Flags
.Zero
= 0;
456 MmPageArray
[j
].Flags
.Consumer
= MC_NPPOOL
;
457 MmPageArray
[j
].ReferenceCount
= 1;
458 InsertTailList(&BiosPageListHead
,
459 &MmPageArray
[j
].ListEntry
);
460 MmStats
.NrReservedPages
++;
462 else if (j
>= 0xa0000 / PAGE_SIZE
&& j
< 0x100000 / PAGE_SIZE
)
464 MmPageArray
[j
].Flags
.Type
= MM_PHYSICAL_PAGE_BIOS
;
465 MmPageArray
[j
].Flags
.Zero
= 0;
466 MmPageArray
[j
].Flags
.Consumer
= MC_NPPOOL
;
467 MmPageArray
[j
].ReferenceCount
= 1;
468 InsertTailList(&BiosPageListHead
,
469 &MmPageArray
[j
].ListEntry
);
470 MmStats
.NrReservedPages
++;
472 else if (j
>= (ULONG
)FirstPhysKernelAddress
/PAGE_SIZE
&&
473 j
< (ULONG
)LastPhysKernelAddress
/PAGE_SIZE
)
475 MmPageArray
[j
].Flags
.Type
= MM_PHYSICAL_PAGE_USED
;
476 MmPageArray
[j
].Flags
.Zero
= 0;
477 MmPageArray
[j
].Flags
.Consumer
= MC_NPPOOL
;
478 MmPageArray
[j
].ReferenceCount
= 1;
479 MmPageArray
[j
].MapCount
= 1;
480 InsertTailList(&UsedPageListHeads
[MC_NPPOOL
],
481 &MmPageArray
[j
].ListEntry
);
482 MmStats
.NrSystemPages
++;
486 MmPageArray
[j
].Flags
.Type
= MM_PHYSICAL_PAGE_FREE
;
487 MmPageArray
[j
].Flags
.Zero
= 0;
488 MmPageArray
[j
].ReferenceCount
= 0;
489 InsertTailList(&FreeUnzeroedPageListHead
,
490 &MmPageArray
[j
].ListEntry
);
492 MmStats
.NrFreePages
++;
497 MmPageArray
[j
].Flags
.Type
= MM_PHYSICAL_PAGE_BIOS
;
498 MmPageArray
[j
].Flags
.Consumer
= MC_NPPOOL
;
499 MmPageArray
[j
].Flags
.Zero
= 0;
500 MmPageArray
[j
].ReferenceCount
= 0;
501 InsertTailList(&BiosPageListHead
,
502 &MmPageArray
[j
].ListEntry
);
503 MmStats
.NrReservedPages
++;
506 FirstUninitializedPage
= j
;
510 /* Add the pages from the upper end to the list */
511 for (i
= LastPage
; i
< MmPageArraySize
; i
++)
513 if (MiIsPfnRam(BIOSMemoryMap
, AddressRangeCount
, i
))
515 MmPageArray
[i
].Flags
.Type
= MM_PHYSICAL_PAGE_USED
;
516 MmPageArray
[i
].Flags
.Zero
= 0;
517 MmPageArray
[i
].Flags
.Consumer
= MC_NPPOOL
;
518 MmPageArray
[i
].ReferenceCount
= 1;
519 MmPageArray
[i
].MapCount
= 1;
520 InsertTailList(&UsedPageListHeads
[MC_NPPOOL
],
521 &MmPageArray
[i
].ListEntry
);
522 MmStats
.NrSystemPages
++;
526 MmPageArray
[i
].Flags
.Type
= MM_PHYSICAL_PAGE_BIOS
;
527 MmPageArray
[i
].Flags
.Consumer
= MC_NPPOOL
;
528 MmPageArray
[i
].Flags
.Zero
= 0;
529 MmPageArray
[i
].ReferenceCount
= 0;
530 InsertTailList(&BiosPageListHead
,
531 &MmPageArray
[i
].ListEntry
);
532 MmStats
.NrReservedPages
++;
538 KeInitializeEvent(&ZeroPageThreadEvent
, NotificationEvent
, TRUE
);
540 MmStats
.NrTotalPages
= MmStats
.NrFreePages
+ MmStats
.NrSystemPages
+
541 MmStats
.NrReservedPages
+ MmStats
.NrUserPages
;
542 MmInitializeBalancer(MmStats
.NrFreePages
, MmStats
.NrSystemPages
+ MmStats
.NrReservedPages
);
543 return((PVOID
)LastKernelAddress
);
548 MmSetFlagsPage(PFN_TYPE Pfn
, ULONG Flags
)
552 ASSERT(Pfn
< MmPageArraySize
);
553 KeAcquireSpinLock(&PageListLock
, &oldIrql
);
554 MmPageArray
[Pfn
].AllFlags
= Flags
;
555 KeReleaseSpinLock(&PageListLock
, oldIrql
);
560 MmSetRmapListHeadPage(PFN_TYPE Pfn
, struct _MM_RMAP_ENTRY
* ListHead
)
562 MmPageArray
[Pfn
].RmapListHead
= ListHead
;
565 struct _MM_RMAP_ENTRY
*
567 MmGetRmapListHeadPage(PFN_TYPE Pfn
)
569 return(MmPageArray
[Pfn
].RmapListHead
);
574 MmMarkPageMapped(PFN_TYPE Pfn
)
578 if (Pfn
< MmPageArraySize
)
580 KeAcquireSpinLock(&PageListLock
, &oldIrql
);
581 if (MmPageArray
[Pfn
].Flags
.Type
== MM_PHYSICAL_PAGE_FREE
)
583 DbgPrint("Mapping non-used page\n");
586 MmPageArray
[Pfn
].MapCount
++;
587 KeReleaseSpinLock(&PageListLock
, oldIrql
);
593 MmMarkPageUnmapped(PFN_TYPE Pfn
)
597 if (Pfn
< MmPageArraySize
)
599 KeAcquireSpinLock(&PageListLock
, &oldIrql
);
600 if (MmPageArray
[Pfn
].Flags
.Type
== MM_PHYSICAL_PAGE_FREE
)
602 DbgPrint("Unmapping non-used page\n");
605 if (MmPageArray
[Pfn
].MapCount
== 0)
607 DbgPrint("Unmapping not mapped page\n");
610 MmPageArray
[Pfn
].MapCount
--;
611 KeReleaseSpinLock(&PageListLock
, oldIrql
);
617 MmGetFlagsPage(PFN_TYPE Pfn
)
622 ASSERT(Pfn
< MmPageArraySize
);
623 KeAcquireSpinLock(&PageListLock
, &oldIrql
);
624 Flags
= MmPageArray
[Pfn
].AllFlags
;
625 KeReleaseSpinLock(&PageListLock
, oldIrql
);
633 MmSetSavedSwapEntryPage(PFN_TYPE Pfn
, SWAPENTRY SavedSwapEntry
)
637 ASSERT(Pfn
< MmPageArraySize
);
638 KeAcquireSpinLock(&PageListLock
, &oldIrql
);
639 MmPageArray
[Pfn
].SavedSwapEntry
= SavedSwapEntry
;
640 KeReleaseSpinLock(&PageListLock
, oldIrql
);
645 MmGetSavedSwapEntryPage(PFN_TYPE Pfn
)
647 SWAPENTRY SavedSwapEntry
;
650 ASSERT(Pfn
< MmPageArraySize
);
651 KeAcquireSpinLock(&PageListLock
, &oldIrql
);
652 SavedSwapEntry
= MmPageArray
[Pfn
].SavedSwapEntry
;
653 KeReleaseSpinLock(&PageListLock
, oldIrql
);
655 return(SavedSwapEntry
);
660 MmReferencePageUnsafe(PFN_TYPE Pfn
)
664 DPRINT("MmReferencePageUnsafe(PysicalAddress %x)\n", Pfn
<< PAGE_SHIFT
);
666 if (Pfn
== 0 || Pfn
>= MmPageArraySize
)
671 KeAcquireSpinLock(&PageListLock
, &oldIrql
);
673 if (MmPageArray
[Pfn
].Flags
.Type
!= MM_PHYSICAL_PAGE_USED
)
675 DbgPrint("Referencing non-used page\n");
679 MmPageArray
[Pfn
].ReferenceCount
++;
680 KeReleaseSpinLock(&PageListLock
, oldIrql
);
685 MmReferencePage(PFN_TYPE Pfn
)
687 DPRINT("MmReferencePage(PysicalAddress %x)\n", Pfn
<< PAGE_SHIFT
);
689 if (Pfn
== 0 || Pfn
>= MmPageArraySize
)
694 MmReferencePageUnsafe(Pfn
);
699 MmGetReferenceCountPage(PFN_TYPE Pfn
)
704 DPRINT("MmGetReferenceCountPage(PhysicalAddress %x)\n", Pfn
<< PAGE_SHIFT
);
706 if (Pfn
== 0 || Pfn
>= MmPageArraySize
)
711 KeAcquireSpinLock(&PageListLock
, &oldIrql
);
713 if (MmPageArray
[Pfn
].Flags
.Type
!= MM_PHYSICAL_PAGE_USED
)
715 DbgPrint("Getting reference count for free page\n");
719 RCount
= MmPageArray
[Pfn
].ReferenceCount
;
721 KeReleaseSpinLock(&PageListLock
, oldIrql
);
727 MmIsUsablePage(PFN_TYPE Pfn
)
730 DPRINT("MmIsUsablePage(PhysicalAddress %x)\n", Pfn
<< PAGE_SHIFT
);
732 if (Pfn
== 0 || Pfn
>= MmPageArraySize
)
737 if (MmPageArray
[Pfn
].Flags
.Type
!= MM_PHYSICAL_PAGE_USED
&&
738 MmPageArray
[Pfn
].Flags
.Type
!= MM_PHYSICAL_PAGE_BIOS
)
748 MmDereferencePage(PFN_TYPE Pfn
)
752 DPRINT("MmDereferencePage(PhysicalAddress %x)\n", Pfn
<< PAGE_SHIFT
);
754 if (Pfn
== 0 || Pfn
>= MmPageArraySize
)
759 KeAcquireSpinLock(&PageListLock
, &oldIrql
);
761 if (MmPageArray
[Pfn
].Flags
.Type
!= MM_PHYSICAL_PAGE_USED
)
763 DbgPrint("Dereferencing free page\n");
766 if (MmPageArray
[Pfn
].ReferenceCount
== 0)
768 DbgPrint("Derefrencing page with reference count 0\n");
772 MmPageArray
[Pfn
].ReferenceCount
--;
773 if (MmPageArray
[Pfn
].ReferenceCount
== 0)
775 MmStats
.NrFreePages
++;
776 MmStats
.NrSystemPages
--;
777 RemoveEntryList(&MmPageArray
[Pfn
].ListEntry
);
778 if (MmPageArray
[Pfn
].RmapListHead
!= NULL
)
780 DbgPrint("Freeing page with rmap entries.\n");
783 if (MmPageArray
[Pfn
].MapCount
!= 0)
785 DbgPrint("Freeing mapped page (0x%x count %d)\n",
786 Pfn
<< PAGE_SHIFT
, MmPageArray
[Pfn
].MapCount
);
789 if (MmPageArray
[Pfn
].LockCount
> 0)
791 DbgPrint("Freeing locked page\n");
794 if (MmPageArray
[Pfn
].SavedSwapEntry
!= 0)
796 DbgPrint("Freeing page with swap entry.\n");
799 if (MmPageArray
[Pfn
].Flags
.Type
!= MM_PHYSICAL_PAGE_USED
)
801 DbgPrint("Freeing page with flags %x\n",
802 MmPageArray
[Pfn
].Flags
.Type
);
805 MmPageArray
[Pfn
].Flags
.Type
= MM_PHYSICAL_PAGE_FREE
;
806 MmPageArray
[Pfn
].Flags
.Consumer
= MC_MAXIMUM
;
807 InsertTailList(&FreeUnzeroedPageListHead
,
808 &MmPageArray
[Pfn
].ListEntry
);
810 if (UnzeroedPageCount
> 8 && 0 == KeReadStateEvent(&ZeroPageThreadEvent
))
812 KeSetEvent(&ZeroPageThreadEvent
, IO_NO_INCREMENT
, FALSE
);
815 KeReleaseSpinLock(&PageListLock
, oldIrql
);
820 MmGetLockCountPage(PFN_TYPE Pfn
)
825 DPRINT("MmGetLockCountPage(PhysicalAddress %x)\n", Pfn
<< PAGE_SHIFT
);
827 if (Pfn
== 0 || Pfn
>= MmPageArraySize
)
832 KeAcquireSpinLock(&PageListLock
, &oldIrql
);
834 if (MmPageArray
[Pfn
].Flags
.Type
!= MM_PHYSICAL_PAGE_USED
)
836 DbgPrint("Getting lock count for free page\n");
840 LockCount
= MmPageArray
[Pfn
].LockCount
;
841 KeReleaseSpinLock(&PageListLock
, oldIrql
);
848 MmLockPageUnsafe(PFN_TYPE Pfn
)
852 DPRINT("MmLockPageUnsafe(PhysicalAddress %x)\n", Pfn
<< PAGE_SHIFT
);
854 if (Pfn
== 0 || Pfn
>= MmPageArraySize
)
859 KeAcquireSpinLock(&PageListLock
, &oldIrql
);
861 if (MmPageArray
[Pfn
].Flags
.Type
!= MM_PHYSICAL_PAGE_USED
)
863 DbgPrint("Locking free page\n");
867 MmPageArray
[Pfn
].LockCount
++;
868 KeReleaseSpinLock(&PageListLock
, oldIrql
);
873 MmLockPage(PFN_TYPE Pfn
)
875 DPRINT("MmLockPage(PhysicalAddress %x)\n", Pfn
<< PAGE_SHIFT
);
877 if (Pfn
== 0 || Pfn
>= MmPageArraySize
)
882 MmLockPageUnsafe(Pfn
);
887 MmUnlockPage(PFN_TYPE Pfn
)
891 DPRINT("MmUnlockPage(PhysicalAddress %x)\n", Pfn
<< PAGE_SHIFT
);
893 if (Pfn
== 0 || Pfn
>= MmPageArraySize
)
898 KeAcquireSpinLock(&PageListLock
, &oldIrql
);
900 if (MmPageArray
[Pfn
].Flags
.Type
!= MM_PHYSICAL_PAGE_USED
)
902 DbgPrint("Unlocking free page\n");
906 MmPageArray
[Pfn
].LockCount
--;
907 KeReleaseSpinLock(&PageListLock
, oldIrql
);
912 MmAllocPage(ULONG Consumer
, SWAPENTRY SavedSwapEntry
)
915 PLIST_ENTRY ListEntry
;
916 PPHYSICAL_PAGE PageDescriptor
;
918 BOOLEAN NeedClear
= FALSE
;
920 DPRINT("MmAllocPage()\n");
922 KeAcquireSpinLock(&PageListLock
, &oldIrql
);
923 if (IsListEmpty(&FreeZeroedPageListHead
))
925 if (IsListEmpty(&FreeUnzeroedPageListHead
))
927 DPRINT1("MmAllocPage(): Out of memory\n");
928 KeReleaseSpinLock(&PageListLock
, oldIrql
);
931 ListEntry
= RemoveTailList(&FreeUnzeroedPageListHead
);
934 PageDescriptor
= CONTAINING_RECORD(ListEntry
, PHYSICAL_PAGE
, ListEntry
);
940 ListEntry
= RemoveTailList(&FreeZeroedPageListHead
);
942 PageDescriptor
= CONTAINING_RECORD(ListEntry
, PHYSICAL_PAGE
, ListEntry
);
945 if (PageDescriptor
->Flags
.Type
!= MM_PHYSICAL_PAGE_FREE
)
947 DbgPrint("Got non-free page from freelist\n");
950 if (PageDescriptor
->MapCount
!= 0)
952 DbgPrint("Got mapped page from freelist\n");
955 if (PageDescriptor
->ReferenceCount
!= 0)
957 DPRINT1("%d\n", PageDescriptor
->ReferenceCount
);
960 PageDescriptor
->Flags
.Type
= MM_PHYSICAL_PAGE_USED
;
961 PageDescriptor
->Flags
.Consumer
= Consumer
;
962 PageDescriptor
->ReferenceCount
= 1;
963 PageDescriptor
->LockCount
= 0;
964 PageDescriptor
->MapCount
= 0;
965 PageDescriptor
->SavedSwapEntry
= SavedSwapEntry
;
966 InsertTailList(&UsedPageListHeads
[Consumer
], ListEntry
);
968 MmStats
.NrSystemPages
++;
969 MmStats
.NrFreePages
--;
971 KeReleaseSpinLock(&PageListLock
, oldIrql
);
973 PfnOffset
= PageDescriptor
- MmPageArray
;
976 MiZeroPage(PfnOffset
);
978 if (PageDescriptor
->MapCount
!= 0)
980 DbgPrint("Returning mapped page.\n");
988 MmAllocPagesSpecifyRange(ULONG Consumer
,
989 PHYSICAL_ADDRESS LowestAddress
,
990 PHYSICAL_ADDRESS HighestAddress
,
994 PPHYSICAL_PAGE PageDescriptor
;
996 PFN_TYPE LowestPage
, HighestPage
;
998 ULONG NumberOfPagesFound
= 0;
1001 DPRINT("MmAllocPagesSpecifyRange()\n"
1002 " LowestAddress = 0x%08x%08x\n"
1003 " HighestAddress = 0x%08x%08x\n"
1004 " NumberOfPages = %d\n",
1005 LowestAddress
.u
.HighPart
, LowestAddress
.u
.LowPart
,
1006 HighestAddress
.u
.HighPart
, HighestAddress
.u
.LowPart
,
1009 if (NumberOfPages
== 0)
1012 LowestPage
= LowestAddress
.QuadPart
/ PAGE_SIZE
;
1013 HighestPage
= HighestAddress
.QuadPart
/ PAGE_SIZE
;
1014 if ((HighestAddress
.u
.LowPart
% PAGE_SIZE
) != 0)
1017 if (LowestPage
>= MmPageArraySize
)
1019 DPRINT1("MmAllocPagesSpecifyRange(): Out of memory\n");
1022 if (HighestPage
> MmPageArraySize
)
1023 HighestPage
= MmPageArraySize
;
1025 KeAcquireSpinLock(&PageListLock
, &oldIrql
);
1026 if (LowestPage
== 0 && HighestPage
== MmPageArraySize
)
1028 PLIST_ENTRY ListEntry
;
1029 while (NumberOfPagesFound
< NumberOfPages
)
1031 if (!IsListEmpty(&FreeZeroedPageListHead
))
1033 ListEntry
= RemoveTailList(&FreeZeroedPageListHead
);
1035 else if (!IsListEmpty(&FreeUnzeroedPageListHead
))
1037 ListEntry
= RemoveTailList(&FreeUnzeroedPageListHead
);
1038 UnzeroedPageCount
--;
1042 if (NumberOfPagesFound
== 0)
1044 KeReleaseSpinLock(&PageListLock
, oldIrql
);
1045 DPRINT1("MmAllocPagesSpecifyRange(): Out of memory\n");
1053 PageDescriptor
= CONTAINING_RECORD(ListEntry
, PHYSICAL_PAGE
, ListEntry
);
1055 ASSERT(PageDescriptor
->Flags
.Type
== MM_PHYSICAL_PAGE_FREE
);
1056 ASSERT(PageDescriptor
->MapCount
== 0);
1057 ASSERT(PageDescriptor
->ReferenceCount
== 0);
1059 /* Allocate the page */
1060 PageDescriptor
->Flags
.Type
= MM_PHYSICAL_PAGE_USED
;
1061 PageDescriptor
->Flags
.Consumer
= Consumer
;
1062 PageDescriptor
->ReferenceCount
= 1;
1063 PageDescriptor
->LockCount
= 0;
1064 PageDescriptor
->MapCount
= 0;
1065 PageDescriptor
->SavedSwapEntry
= 0; /* FIXME: Do we need swap entries? */
1066 InsertTailList(&UsedPageListHeads
[Consumer
], &PageDescriptor
->ListEntry
);
1068 MmStats
.NrSystemPages
++;
1069 MmStats
.NrFreePages
--;
1071 /* Remember the page */
1072 pfn
= PageDescriptor
- MmPageArray
;
1073 Pages
[NumberOfPagesFound
++] = pfn
;
1078 INT LookForZeroedPages
;
1079 for (LookForZeroedPages
= 1; LookForZeroedPages
>= 0; LookForZeroedPages
--)
1081 for (pfn
= LowestPage
; pfn
< HighestPage
; pfn
++)
1083 PageDescriptor
= MmPageArray
+ pfn
;
1085 if (PageDescriptor
->Flags
.Type
!= MM_PHYSICAL_PAGE_FREE
)
1087 if (PageDescriptor
->Flags
.Zero
!= LookForZeroedPages
)
1090 ASSERT(PageDescriptor
->MapCount
== 0);
1091 ASSERT(PageDescriptor
->ReferenceCount
== 0);
1093 /* Allocate the page */
1094 PageDescriptor
->Flags
.Type
= MM_PHYSICAL_PAGE_USED
;
1095 PageDescriptor
->Flags
.Consumer
= Consumer
;
1096 PageDescriptor
->ReferenceCount
= 1;
1097 PageDescriptor
->LockCount
= 0;
1098 PageDescriptor
->MapCount
= 0;
1099 PageDescriptor
->SavedSwapEntry
= 0; /* FIXME: Do we need swap entries? */
1100 RemoveEntryList(&PageDescriptor
->ListEntry
);
1101 InsertTailList(&UsedPageListHeads
[Consumer
], &PageDescriptor
->ListEntry
);
1103 if (!PageDescriptor
->Flags
.Zero
)
1104 UnzeroedPageCount
--;
1105 MmStats
.NrSystemPages
++;
1106 MmStats
.NrFreePages
--;
1108 /* Remember the page */
1109 Pages
[NumberOfPagesFound
++] = pfn
;
1110 if (NumberOfPagesFound
== NumberOfPages
)
1113 if (NumberOfPagesFound
== NumberOfPages
)
1117 KeReleaseSpinLock(&PageListLock
, oldIrql
);
1119 /* Zero unzero-ed pages */
1120 for (i
= 0; i
< NumberOfPagesFound
; i
++)
1123 if (MmPageArray
[pfn
].Flags
.Zero
== 0)
1129 MmPageArray
[pfn
].Flags
.Zero
= 0;
1133 return NumberOfPagesFound
;
1137 MmZeroPageThreadMain(PVOID Ignored
)
1141 PLIST_ENTRY ListEntry
;
1142 PPHYSICAL_PAGE PageDescriptor
;
1148 Status
= KeWaitForSingleObject(&ZeroPageThreadEvent
,
1153 if (!NT_SUCCESS(Status
))
1155 DbgPrint("ZeroPageThread: Wait failed\n");
1160 if (ZeroPageThreadShouldTerminate
)
1162 DbgPrint("ZeroPageThread: Terminating\n");
1166 KeAcquireSpinLock(&PageListLock
, &oldIrql
);
1167 while (!IsListEmpty(&FreeUnzeroedPageListHead
))
1169 ListEntry
= RemoveTailList(&FreeUnzeroedPageListHead
);
1170 UnzeroedPageCount
--;
1171 PageDescriptor
= CONTAINING_RECORD(ListEntry
, PHYSICAL_PAGE
, ListEntry
);
1172 /* We set the page to used, because MmCreateVirtualMapping failed with unused pages */
1173 PageDescriptor
->Flags
.Type
= MM_PHYSICAL_PAGE_USED
;
1174 KeReleaseSpinLock(&PageListLock
, oldIrql
);
1175 Pfn
= PageDescriptor
- MmPageArray
;
1176 Status
= MiZeroPage(Pfn
);
1178 KeAcquireSpinLock(&PageListLock
, &oldIrql
);
1179 if (PageDescriptor
->MapCount
!= 0)
1181 DbgPrint("Mapped page on freelist.\n");
1184 PageDescriptor
->Flags
.Zero
= 1;
1185 PageDescriptor
->Flags
.Type
= MM_PHYSICAL_PAGE_FREE
;
1186 if (NT_SUCCESS(Status
))
1188 InsertHeadList(&FreeZeroedPageListHead
, ListEntry
);
1193 InsertHeadList(&FreeUnzeroedPageListHead
, ListEntry
);
1194 UnzeroedPageCount
++;
1198 DPRINT("Zeroed %d pages.\n", Count
);
1199 KeResetEvent(&ZeroPageThreadEvent
);
1200 KeReleaseSpinLock(&PageListLock
, oldIrql
);
1207 MmInitZeroPageThread(VOID
)
1210 HANDLE ThreadHandle
;
1212 ZeroPageThreadShouldTerminate
= FALSE
;
1213 Status
= PsCreateSystemThread(&ThreadHandle
,
1218 MmZeroPageThreadMain
,
1220 if (!NT_SUCCESS(Status
))
1225 Status
= ObReferenceObjectByHandle(ThreadHandle
,
1229 (PVOID
*)&ZeroPageThread
,
1231 if (!NT_SUCCESS(Status
))
1236 KeSetPriorityThread(&ZeroPageThread
->Tcb
, LOW_PRIORITY
);
1237 NtClose(ThreadHandle
);
1238 return STATUS_SUCCESS
;