/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/mm/freelist.c
 * PURPOSE:         Handle the list of free physical pages
 * PROGRAMMERS:     David Welch (welch@cwcom.net)
 */
12 /* INCLUDES ****************************************************************/
16 #include <internal/debug.h>
18 /* TYPES *******************************************************************/
/* Values for PHYSICAL_PAGE.Flags.Type: the state of a physical page. */
#define MM_PHYSICAL_PAGE_FREE    (0x1)  /* on one of the free lists */
#define MM_PHYSICAL_PAGE_USED    (0x2)  /* allocated to a consumer */
#define MM_PHYSICAL_PAGE_BIOS    (0x3)  /* reserved (BIOS map / low memory) */
24 typedef struct _PHYSICAL_PAGE
40 SWAPENTRY SavedSwapEntry
;
43 struct _MM_RMAP_ENTRY
* RmapListHead
;
45 PHYSICAL_PAGE
, *PPHYSICAL_PAGE
;
48 /* GLOBALS ****************************************************************/
50 static PPHYSICAL_PAGE MmPageArray
;
51 ULONG MmPageArraySize
;
53 static KSPIN_LOCK PageListLock
;
54 static LIST_ENTRY UsedPageListHeads
[MC_MAXIMUM
];
55 static LIST_ENTRY FreeZeroedPageListHead
;
56 static LIST_ENTRY FreeUnzeroedPageListHead
;
57 static LIST_ENTRY BiosPageListHead
;
59 static PETHREAD ZeroPageThread
;
60 static CLIENT_ID ZeroPageThreadId
;
61 static KEVENT ZeroPageThreadEvent
;
62 static BOOLEAN ZeroPageThreadShouldTerminate
= FALSE
;
64 static ULONG UnzeroedPageCount
= 0;
66 /* FUNCTIONS *************************************************************/
69 MmTransferOwnershipPage(PFN_TYPE Pfn
, ULONG NewConsumer
)
73 KeAcquireSpinLock(&PageListLock
, &oldIrql
);
74 if (MmPageArray
[Pfn
].MapCount
!= 0)
76 DbgPrint("Transfering mapped page.\n");
79 if (MmPageArray
[Pfn
].Flags
.Type
!= MM_PHYSICAL_PAGE_USED
)
81 DPRINT1("Type: %d\n", MmPageArray
[Pfn
].Flags
.Type
);
84 if (MmPageArray
[Pfn
].ReferenceCount
!= 1)
86 DPRINT1("ReferenceCount: %d\n", MmPageArray
[Pfn
].ReferenceCount
);
89 RemoveEntryList(&MmPageArray
[Pfn
].ListEntry
);
90 InsertTailList(&UsedPageListHeads
[NewConsumer
],
91 &MmPageArray
[Pfn
].ListEntry
);
92 MmPageArray
[Pfn
].Flags
.Consumer
= NewConsumer
;
93 KeReleaseSpinLock(&PageListLock
, oldIrql
);
98 MmGetLRUFirstUserPage(VOID
)
100 PLIST_ENTRY NextListEntry
;
101 PHYSICAL_PAGE
* PageDescriptor
;
104 KeAcquireSpinLock(&PageListLock
, &oldIrql
);
105 NextListEntry
= UsedPageListHeads
[MC_USER
].Flink
;
106 if (NextListEntry
== &UsedPageListHeads
[MC_USER
])
108 KeReleaseSpinLock(&PageListLock
, oldIrql
);
111 PageDescriptor
= CONTAINING_RECORD(NextListEntry
, PHYSICAL_PAGE
, ListEntry
);
112 KeReleaseSpinLock(&PageListLock
, oldIrql
);
113 return PageDescriptor
- MmPageArray
;
117 MmSetLRULastPage(PFN_TYPE Pfn
)
121 ASSERT(Pfn
< MmPageArraySize
);
122 KeAcquireSpinLock(&PageListLock
, &oldIrql
);
123 if (MmPageArray
[Pfn
].Flags
.Type
== MM_PHYSICAL_PAGE_USED
&&
124 MmPageArray
[Pfn
].Flags
.Consumer
== MC_USER
)
126 RemoveEntryList(&MmPageArray
[Pfn
].ListEntry
);
127 InsertTailList(&UsedPageListHeads
[MC_USER
],
128 &MmPageArray
[Pfn
].ListEntry
);
130 KeReleaseSpinLock(&PageListLock
, oldIrql
);
134 MmGetLRUNextUserPage(PFN_TYPE PreviousPfn
)
136 PLIST_ENTRY NextListEntry
;
137 PHYSICAL_PAGE
* PageDescriptor
;
140 KeAcquireSpinLock(&PageListLock
, &oldIrql
);
141 if (MmPageArray
[PreviousPfn
].Flags
.Type
!= MM_PHYSICAL_PAGE_USED
||
142 MmPageArray
[PreviousPfn
].Flags
.Consumer
!= MC_USER
)
144 NextListEntry
= UsedPageListHeads
[MC_USER
].Flink
;
148 NextListEntry
= MmPageArray
[PreviousPfn
].ListEntry
.Flink
;
150 if (NextListEntry
== &UsedPageListHeads
[MC_USER
])
152 KeReleaseSpinLock(&PageListLock
, oldIrql
);
155 PageDescriptor
= CONTAINING_RECORD(NextListEntry
, PHYSICAL_PAGE
, ListEntry
);
156 KeReleaseSpinLock(&PageListLock
, oldIrql
);
157 return PageDescriptor
- MmPageArray
;
161 MmGetContinuousPages(ULONG NumberOfBytes
,
162 PHYSICAL_ADDRESS LowestAcceptableAddress
,
163 PHYSICAL_ADDRESS HighestAcceptableAddress
,
172 NrPages
= PAGE_ROUND_UP(NumberOfBytes
) / PAGE_SIZE
;
174 KeAcquireSpinLock(&PageListLock
, &oldIrql
);
178 for (i
= (LowestAcceptableAddress
.QuadPart
/ PAGE_SIZE
); i
< (HighestAcceptableAddress
.QuadPart
/ PAGE_SIZE
); )
180 if (MmPageArray
[i
].Flags
.Type
== MM_PHYSICAL_PAGE_FREE
)
192 if (length
== NrPages
)
201 * Fast forward to the base of the next aligned region
203 i
= ROUND_UP((i
+ 1), (Alignment
/ PAGE_SIZE
));
206 if (start
== -1 || length
!= NrPages
)
208 KeReleaseSpinLock(&PageListLock
, oldIrql
);
211 for (i
= start
; i
< (start
+ length
); i
++)
213 RemoveEntryList(&MmPageArray
[i
].ListEntry
);
214 if (MmPageArray
[i
].Flags
.Zero
== 0)
218 MmStats
.NrFreePages
--;
219 MmStats
.NrSystemPages
++;
220 MmPageArray
[i
].Flags
.Type
= MM_PHYSICAL_PAGE_USED
;
221 MmPageArray
[i
].Flags
.Consumer
= MC_NPPOOL
;
222 MmPageArray
[i
].ReferenceCount
= 1;
223 MmPageArray
[i
].LockCount
= 0;
224 MmPageArray
[i
].MapCount
= 0;
225 MmPageArray
[i
].SavedSwapEntry
= 0;
226 InsertTailList(&UsedPageListHeads
[MC_NPPOOL
],
227 &MmPageArray
[i
].ListEntry
);
229 KeReleaseSpinLock(&PageListLock
, oldIrql
);
230 for (i
= start
; i
< (start
+ length
); i
++)
232 if (MmPageArray
[i
].Flags
.Zero
== 0)
238 MmPageArray
[i
].Flags
.Zero
= 0;
247 MiIsPfnRam(PADDRESS_RANGE BIOSMemoryMap
,
248 ULONG AddressRangeCount
,
252 LARGE_INTEGER BaseAddress
;
253 LARGE_INTEGER EndAddress
;
255 if (BIOSMemoryMap
!= NULL
&& AddressRangeCount
> 0)
258 for (i
= 0; i
< AddressRangeCount
; i
++)
260 BaseAddress
.u
.LowPart
= BIOSMemoryMap
[i
].BaseAddrLow
;
261 BaseAddress
.u
.HighPart
= BIOSMemoryMap
[i
].BaseAddrHigh
;
262 EndAddress
.u
.LowPart
= BIOSMemoryMap
[i
].LengthLow
;
263 EndAddress
.u
.HighPart
= BIOSMemoryMap
[i
].LengthHigh
;
264 EndAddress
.QuadPart
+= BaseAddress
.QuadPart
;
265 BaseAddress
.QuadPart
= PAGE_ROUND_DOWN(BaseAddress
.QuadPart
);
266 EndAddress
.QuadPart
= PAGE_ROUND_UP(EndAddress
.QuadPart
);
268 if ((BaseAddress
.QuadPart
>> PAGE_SHIFT
) <= Pfn
&&
269 Pfn
< (EndAddress
.QuadPart
>> PAGE_SHIFT
))
271 if (BIOSMemoryMap
[i
].Type
== 1)
288 MmInitializePageList(ULONG_PTR FirstPhysKernelAddress
,
289 ULONG_PTR LastPhysKernelAddress
,
290 ULONG MemorySizeInPages
,
291 ULONG_PTR LastKernelAddress
,
292 PADDRESS_RANGE BIOSMemoryMap
,
293 ULONG AddressRangeCount
)
295 * FUNCTION: Initializes the page list with all pages free
296 * except those known to be reserved and those used by the kernel
298 * FirstKernelAddress = First physical address used by the kernel
299 * LastKernelAddress = Last physical address used by the kernel
306 PFN_TYPE FirstUninitializedPage
;
308 DPRINT("MmInitializePageList(FirstPhysKernelAddress %x, "
309 "LastPhysKernelAddress %x, "
310 "MemorySizeInPages %x, LastKernelAddress %x)\n",
311 FirstPhysKernelAddress
,
312 LastPhysKernelAddress
,
316 for (i
= 0; i
< MC_MAXIMUM
; i
++)
318 InitializeListHead(&UsedPageListHeads
[i
]);
320 KeInitializeSpinLock(&PageListLock
);
321 InitializeListHead(&FreeUnzeroedPageListHead
);
322 InitializeListHead(&FreeZeroedPageListHead
);
323 InitializeListHead(&BiosPageListHead
);
325 LastKernelAddress
= PAGE_ROUND_UP(LastKernelAddress
);
327 MmPageArraySize
= MemorySizeInPages
;
329 PAGE_ROUND_UP((MmPageArraySize
* sizeof(PHYSICAL_PAGE
))) / PAGE_SIZE
;
330 MmPageArray
= (PHYSICAL_PAGE
*)LastKernelAddress
;
332 DPRINT("Reserved %d\n", Reserved
);
334 LastKernelAddress
= PAGE_ROUND_UP(LastKernelAddress
);
335 LastKernelAddress
= ((ULONG_PTR
)LastKernelAddress
+ (Reserved
* PAGE_SIZE
));
336 LastPhysKernelAddress
= (ULONG_PTR
)PAGE_ROUND_UP(LastPhysKernelAddress
);
337 LastPhysKernelAddress
= (ULONG_PTR
)LastPhysKernelAddress
+ (Reserved
* PAGE_SIZE
);
339 MmStats
.NrTotalPages
= 0;
340 MmStats
.NrSystemPages
= 0;
341 MmStats
.NrUserPages
= 0;
342 MmStats
.NrReservedPages
= 0;
343 MmStats
.NrFreePages
= 0;
344 MmStats
.NrLockedPages
= 0;
346 /* Preinitialize the Balancer because we need some pages for pte's */
347 MmInitializeBalancer(MemorySizeInPages
, 0);
349 FirstUninitializedPage
= (ULONG_PTR
)LastPhysKernelAddress
/ PAGE_SIZE
;
350 LastPage
= MmPageArraySize
;
351 for (i
= 0; i
< Reserved
; i
++)
353 PVOID Address
= (char*)MmPageArray
+ (i
* PAGE_SIZE
);
355 if (!MmIsPagePresent(NULL
, Address
))
359 while (Pfn
== 0 && LastPage
> FirstUninitializedPage
)
361 /* Allocate the page from the upper end of the RAM */
362 if (MiIsPfnRam(BIOSMemoryMap
, AddressRangeCount
, --LastPage
))
369 Pfn
= MmAllocPage(MC_NPPOOL
, 0);
375 Status
= MmCreateVirtualMappingForKernel(Address
,
379 if (!NT_SUCCESS(Status
))
381 DbgPrint("Unable to create virtual mapping\n");
387 /* Setting the page protection is necessary to set the global bit on IA32 */
388 MmSetPageProtect(NULL
, Address
, PAGE_READWRITE
);
390 memset(Address
, 0, PAGE_SIZE
);
392 start
= ((ULONG_PTR
)Address
- (ULONG_PTR
)MmPageArray
) / sizeof(PHYSICAL_PAGE
);
393 end
= ((ULONG_PTR
)Address
- (ULONG_PTR
)MmPageArray
+ PAGE_SIZE
) / sizeof(PHYSICAL_PAGE
);
395 for (j
= start
; j
< end
&& j
< LastPage
; j
++)
397 if (MiIsPfnRam(BIOSMemoryMap
, AddressRangeCount
, j
))
402 * Page zero is reserved
404 MmPageArray
[0].Flags
.Type
= MM_PHYSICAL_PAGE_BIOS
;
405 MmPageArray
[0].Flags
.Consumer
= MC_NPPOOL
;
406 MmPageArray
[0].Flags
.Zero
= 0;
407 MmPageArray
[0].ReferenceCount
= 0;
408 InsertTailList(&BiosPageListHead
,
409 &MmPageArray
[0].ListEntry
);
410 MmStats
.NrReservedPages
++;
416 * Page one is reserved for the initial KPCR
418 MmPageArray
[1].Flags
.Type
= MM_PHYSICAL_PAGE_BIOS
;
419 MmPageArray
[1].Flags
.Consumer
= MC_NPPOOL
;
420 MmPageArray
[1].Flags
.Zero
= 0;
421 MmPageArray
[1].ReferenceCount
= 0;
422 InsertTailList(&BiosPageListHead
,
423 &MmPageArray
[1].ListEntry
);
424 MmStats
.NrReservedPages
++;
426 /* Protect the Page Directory. This will be changed in r3 */
427 else if (j
>= (KeLoaderBlock
.PageDirectoryStart
/ PAGE_SIZE
) && j
< (KeLoaderBlock
.PageDirectoryEnd
/ PAGE_SIZE
))
429 MmPageArray
[j
].Flags
.Type
= MM_PHYSICAL_PAGE_BIOS
;
430 MmPageArray
[j
].Flags
.Zero
= 0;
431 MmPageArray
[j
].Flags
.Consumer
= MC_NPPOOL
;
432 MmPageArray
[j
].ReferenceCount
= 1;
433 InsertTailList(&BiosPageListHead
,
434 &MmPageArray
[j
].ListEntry
);
435 MmStats
.NrReservedPages
++;
437 else if (j
>= 0xa0000 / PAGE_SIZE
&& j
< 0x100000 / PAGE_SIZE
)
439 MmPageArray
[j
].Flags
.Type
= MM_PHYSICAL_PAGE_BIOS
;
440 MmPageArray
[j
].Flags
.Zero
= 0;
441 MmPageArray
[j
].Flags
.Consumer
= MC_NPPOOL
;
442 MmPageArray
[j
].ReferenceCount
= 1;
443 InsertTailList(&BiosPageListHead
,
444 &MmPageArray
[j
].ListEntry
);
445 MmStats
.NrReservedPages
++;
447 else if (j
>= (ULONG
)FirstPhysKernelAddress
/PAGE_SIZE
&&
448 j
< (ULONG
)LastPhysKernelAddress
/PAGE_SIZE
)
450 MmPageArray
[j
].Flags
.Type
= MM_PHYSICAL_PAGE_USED
;
451 MmPageArray
[j
].Flags
.Zero
= 0;
452 MmPageArray
[j
].Flags
.Consumer
= MC_NPPOOL
;
453 MmPageArray
[j
].ReferenceCount
= 1;
454 MmPageArray
[j
].MapCount
= 1;
455 InsertTailList(&UsedPageListHeads
[MC_NPPOOL
],
456 &MmPageArray
[j
].ListEntry
);
457 MmStats
.NrSystemPages
++;
461 MmPageArray
[j
].Flags
.Type
= MM_PHYSICAL_PAGE_FREE
;
462 MmPageArray
[j
].Flags
.Zero
= 0;
463 MmPageArray
[j
].ReferenceCount
= 0;
464 InsertTailList(&FreeUnzeroedPageListHead
,
465 &MmPageArray
[j
].ListEntry
);
467 MmStats
.NrFreePages
++;
472 MmPageArray
[j
].Flags
.Type
= MM_PHYSICAL_PAGE_BIOS
;
473 MmPageArray
[j
].Flags
.Consumer
= MC_NPPOOL
;
474 MmPageArray
[j
].Flags
.Zero
= 0;
475 MmPageArray
[j
].ReferenceCount
= 0;
476 InsertTailList(&BiosPageListHead
,
477 &MmPageArray
[j
].ListEntry
);
478 MmStats
.NrReservedPages
++;
481 FirstUninitializedPage
= j
;
485 /* Add the pages from the upper end to the list */
486 for (i
= LastPage
; i
< MmPageArraySize
; i
++)
488 if (MiIsPfnRam(BIOSMemoryMap
, AddressRangeCount
, i
))
490 MmPageArray
[i
].Flags
.Type
= MM_PHYSICAL_PAGE_USED
;
491 MmPageArray
[i
].Flags
.Zero
= 0;
492 MmPageArray
[i
].Flags
.Consumer
= MC_NPPOOL
;
493 MmPageArray
[i
].ReferenceCount
= 1;
494 MmPageArray
[i
].MapCount
= 1;
495 InsertTailList(&UsedPageListHeads
[MC_NPPOOL
],
496 &MmPageArray
[i
].ListEntry
);
497 MmStats
.NrSystemPages
++;
501 MmPageArray
[i
].Flags
.Type
= MM_PHYSICAL_PAGE_BIOS
;
502 MmPageArray
[i
].Flags
.Consumer
= MC_NPPOOL
;
503 MmPageArray
[i
].Flags
.Zero
= 0;
504 MmPageArray
[i
].ReferenceCount
= 0;
505 InsertTailList(&BiosPageListHead
,
506 &MmPageArray
[i
].ListEntry
);
507 MmStats
.NrReservedPages
++;
513 KeInitializeEvent(&ZeroPageThreadEvent
, NotificationEvent
, TRUE
);
515 MmStats
.NrTotalPages
= MmStats
.NrFreePages
+ MmStats
.NrSystemPages
+
516 MmStats
.NrReservedPages
+ MmStats
.NrUserPages
;
517 MmInitializeBalancer(MmStats
.NrFreePages
, MmStats
.NrSystemPages
+ MmStats
.NrReservedPages
);
518 return((PVOID
)LastKernelAddress
);
522 MmSetFlagsPage(PFN_TYPE Pfn
, ULONG Flags
)
526 ASSERT(Pfn
< MmPageArraySize
);
527 KeAcquireSpinLock(&PageListLock
, &oldIrql
);
528 MmPageArray
[Pfn
].AllFlags
= Flags
;
529 KeReleaseSpinLock(&PageListLock
, oldIrql
);
533 MmSetRmapListHeadPage(PFN_TYPE Pfn
, struct _MM_RMAP_ENTRY
* ListHead
)
535 MmPageArray
[Pfn
].RmapListHead
= ListHead
;
538 struct _MM_RMAP_ENTRY
*
539 MmGetRmapListHeadPage(PFN_TYPE Pfn
)
541 return(MmPageArray
[Pfn
].RmapListHead
);
545 MmMarkPageMapped(PFN_TYPE Pfn
)
549 if (Pfn
< MmPageArraySize
)
551 KeAcquireSpinLock(&PageListLock
, &oldIrql
);
552 if (MmPageArray
[Pfn
].Flags
.Type
== MM_PHYSICAL_PAGE_FREE
)
554 DbgPrint("Mapping non-used page\n");
557 MmPageArray
[Pfn
].MapCount
++;
558 KeReleaseSpinLock(&PageListLock
, oldIrql
);
563 MmMarkPageUnmapped(PFN_TYPE Pfn
)
567 if (Pfn
< MmPageArraySize
)
569 KeAcquireSpinLock(&PageListLock
, &oldIrql
);
570 if (MmPageArray
[Pfn
].Flags
.Type
== MM_PHYSICAL_PAGE_FREE
)
572 DbgPrint("Unmapping non-used page\n");
575 if (MmPageArray
[Pfn
].MapCount
== 0)
577 DbgPrint("Unmapping not mapped page\n");
580 MmPageArray
[Pfn
].MapCount
--;
581 KeReleaseSpinLock(&PageListLock
, oldIrql
);
586 MmGetFlagsPage(PFN_TYPE Pfn
)
591 ASSERT(Pfn
< MmPageArraySize
);
592 KeAcquireSpinLock(&PageListLock
, &oldIrql
);
593 Flags
= MmPageArray
[Pfn
].AllFlags
;
594 KeReleaseSpinLock(&PageListLock
, oldIrql
);
601 MmSetSavedSwapEntryPage(PFN_TYPE Pfn
, SWAPENTRY SavedSwapEntry
)
605 ASSERT(Pfn
< MmPageArraySize
);
606 KeAcquireSpinLock(&PageListLock
, &oldIrql
);
607 MmPageArray
[Pfn
].SavedSwapEntry
= SavedSwapEntry
;
608 KeReleaseSpinLock(&PageListLock
, oldIrql
);
612 MmGetSavedSwapEntryPage(PFN_TYPE Pfn
)
614 SWAPENTRY SavedSwapEntry
;
617 ASSERT(Pfn
< MmPageArraySize
);
618 KeAcquireSpinLock(&PageListLock
, &oldIrql
);
619 SavedSwapEntry
= MmPageArray
[Pfn
].SavedSwapEntry
;
620 KeReleaseSpinLock(&PageListLock
, oldIrql
);
622 return(SavedSwapEntry
);
626 MmReferencePageUnsafe(PFN_TYPE Pfn
)
630 DPRINT("MmReferencePageUnsafe(PysicalAddress %x)\n", Pfn
<< PAGE_SHIFT
);
632 if (Pfn
== 0 || Pfn
>= MmPageArraySize
)
637 KeAcquireSpinLock(&PageListLock
, &oldIrql
);
639 if (MmPageArray
[Pfn
].Flags
.Type
!= MM_PHYSICAL_PAGE_USED
)
641 DbgPrint("Referencing non-used page\n");
645 MmPageArray
[Pfn
].ReferenceCount
++;
646 KeReleaseSpinLock(&PageListLock
, oldIrql
);
650 MmReferencePage(PFN_TYPE Pfn
)
652 DPRINT("MmReferencePage(PysicalAddress %x)\n", Pfn
<< PAGE_SHIFT
);
654 if (Pfn
== 0 || Pfn
>= MmPageArraySize
)
659 MmReferencePageUnsafe(Pfn
);
663 MmGetReferenceCountPage(PFN_TYPE Pfn
)
668 DPRINT("MmGetReferenceCountPage(PhysicalAddress %x)\n", Pfn
<< PAGE_SHIFT
);
670 if (Pfn
== 0 || Pfn
>= MmPageArraySize
)
675 KeAcquireSpinLock(&PageListLock
, &oldIrql
);
677 if (MmPageArray
[Pfn
].Flags
.Type
!= MM_PHYSICAL_PAGE_USED
)
679 DbgPrint("Getting reference count for free page\n");
683 RCount
= MmPageArray
[Pfn
].ReferenceCount
;
685 KeReleaseSpinLock(&PageListLock
, oldIrql
);
690 MmIsUsablePage(PFN_TYPE Pfn
)
693 DPRINT("MmIsUsablePage(PhysicalAddress %x)\n", Pfn
<< PAGE_SHIFT
);
695 if (Pfn
== 0 || Pfn
>= MmPageArraySize
)
700 if (MmPageArray
[Pfn
].Flags
.Type
!= MM_PHYSICAL_PAGE_USED
&&
701 MmPageArray
[Pfn
].Flags
.Type
!= MM_PHYSICAL_PAGE_BIOS
)
710 MmDereferencePage(PFN_TYPE Pfn
)
714 DPRINT("MmDereferencePage(PhysicalAddress %x)\n", Pfn
<< PAGE_SHIFT
);
716 if (Pfn
== 0 || Pfn
>= MmPageArraySize
)
721 KeAcquireSpinLock(&PageListLock
, &oldIrql
);
723 if (MmPageArray
[Pfn
].Flags
.Type
!= MM_PHYSICAL_PAGE_USED
)
725 DbgPrint("Dereferencing free page\n");
728 if (MmPageArray
[Pfn
].ReferenceCount
== 0)
730 DbgPrint("Derefrencing page with reference count 0\n");
734 MmPageArray
[Pfn
].ReferenceCount
--;
735 if (MmPageArray
[Pfn
].ReferenceCount
== 0)
737 MmStats
.NrFreePages
++;
738 MmStats
.NrSystemPages
--;
739 RemoveEntryList(&MmPageArray
[Pfn
].ListEntry
);
740 if (MmPageArray
[Pfn
].RmapListHead
!= NULL
)
742 DbgPrint("Freeing page with rmap entries.\n");
745 if (MmPageArray
[Pfn
].MapCount
!= 0)
747 DbgPrint("Freeing mapped page (0x%x count %d)\n",
748 Pfn
<< PAGE_SHIFT
, MmPageArray
[Pfn
].MapCount
);
751 if (MmPageArray
[Pfn
].LockCount
> 0)
753 DbgPrint("Freeing locked page\n");
756 if (MmPageArray
[Pfn
].SavedSwapEntry
!= 0)
758 DbgPrint("Freeing page with swap entry.\n");
761 if (MmPageArray
[Pfn
].Flags
.Type
!= MM_PHYSICAL_PAGE_USED
)
763 DbgPrint("Freeing page with flags %x\n",
764 MmPageArray
[Pfn
].Flags
.Type
);
767 MmPageArray
[Pfn
].Flags
.Type
= MM_PHYSICAL_PAGE_FREE
;
768 MmPageArray
[Pfn
].Flags
.Consumer
= MC_MAXIMUM
;
769 InsertTailList(&FreeUnzeroedPageListHead
,
770 &MmPageArray
[Pfn
].ListEntry
);
772 if (UnzeroedPageCount
> 8 && 0 == KeReadStateEvent(&ZeroPageThreadEvent
))
774 KeSetEvent(&ZeroPageThreadEvent
, IO_NO_INCREMENT
, FALSE
);
777 KeReleaseSpinLock(&PageListLock
, oldIrql
);
781 MmGetLockCountPage(PFN_TYPE Pfn
)
786 DPRINT("MmGetLockCountPage(PhysicalAddress %x)\n", Pfn
<< PAGE_SHIFT
);
788 if (Pfn
== 0 || Pfn
>= MmPageArraySize
)
793 KeAcquireSpinLock(&PageListLock
, &oldIrql
);
795 if (MmPageArray
[Pfn
].Flags
.Type
!= MM_PHYSICAL_PAGE_USED
)
797 DbgPrint("Getting lock count for free page\n");
801 LockCount
= MmPageArray
[Pfn
].LockCount
;
802 KeReleaseSpinLock(&PageListLock
, oldIrql
);
808 MmLockPageUnsafe(PFN_TYPE Pfn
)
812 DPRINT("MmLockPageUnsafe(PhysicalAddress %x)\n", Pfn
<< PAGE_SHIFT
);
814 if (Pfn
== 0 || Pfn
>= MmPageArraySize
)
819 KeAcquireSpinLock(&PageListLock
, &oldIrql
);
821 if (MmPageArray
[Pfn
].Flags
.Type
!= MM_PHYSICAL_PAGE_USED
)
823 DbgPrint("Locking free page\n");
827 MmPageArray
[Pfn
].LockCount
++;
828 KeReleaseSpinLock(&PageListLock
, oldIrql
);
832 MmLockPage(PFN_TYPE Pfn
)
834 DPRINT("MmLockPage(PhysicalAddress %x)\n", Pfn
<< PAGE_SHIFT
);
836 if (Pfn
== 0 || Pfn
>= MmPageArraySize
)
841 MmLockPageUnsafe(Pfn
);
845 MmUnlockPage(PFN_TYPE Pfn
)
849 DPRINT("MmUnlockPage(PhysicalAddress %x)\n", Pfn
<< PAGE_SHIFT
);
851 if (Pfn
== 0 || Pfn
>= MmPageArraySize
)
856 KeAcquireSpinLock(&PageListLock
, &oldIrql
);
858 if (MmPageArray
[Pfn
].Flags
.Type
!= MM_PHYSICAL_PAGE_USED
)
860 DbgPrint("Unlocking free page\n");
864 MmPageArray
[Pfn
].LockCount
--;
865 KeReleaseSpinLock(&PageListLock
, oldIrql
);
869 MmAllocPage(ULONG Consumer
, SWAPENTRY SavedSwapEntry
)
872 PLIST_ENTRY ListEntry
;
873 PPHYSICAL_PAGE PageDescriptor
;
875 BOOLEAN NeedClear
= FALSE
;
877 DPRINT("MmAllocPage()\n");
879 KeAcquireSpinLock(&PageListLock
, &oldIrql
);
880 if (IsListEmpty(&FreeZeroedPageListHead
))
882 if (IsListEmpty(&FreeUnzeroedPageListHead
))
884 DPRINT1("MmAllocPage(): Out of memory\n");
885 KeReleaseSpinLock(&PageListLock
, oldIrql
);
888 ListEntry
= RemoveTailList(&FreeUnzeroedPageListHead
);
891 PageDescriptor
= CONTAINING_RECORD(ListEntry
, PHYSICAL_PAGE
, ListEntry
);
897 ListEntry
= RemoveTailList(&FreeZeroedPageListHead
);
899 PageDescriptor
= CONTAINING_RECORD(ListEntry
, PHYSICAL_PAGE
, ListEntry
);
902 if (PageDescriptor
->Flags
.Type
!= MM_PHYSICAL_PAGE_FREE
)
904 DbgPrint("Got non-free page from freelist\n");
907 if (PageDescriptor
->MapCount
!= 0)
909 DbgPrint("Got mapped page from freelist\n");
912 if (PageDescriptor
->ReferenceCount
!= 0)
914 DPRINT1("%d\n", PageDescriptor
->ReferenceCount
);
917 PageDescriptor
->Flags
.Type
= MM_PHYSICAL_PAGE_USED
;
918 PageDescriptor
->Flags
.Consumer
= Consumer
;
919 PageDescriptor
->ReferenceCount
= 1;
920 PageDescriptor
->LockCount
= 0;
921 PageDescriptor
->MapCount
= 0;
922 PageDescriptor
->SavedSwapEntry
= SavedSwapEntry
;
923 InsertTailList(&UsedPageListHeads
[Consumer
], ListEntry
);
925 MmStats
.NrSystemPages
++;
926 MmStats
.NrFreePages
--;
928 KeReleaseSpinLock(&PageListLock
, oldIrql
);
930 PfnOffset
= PageDescriptor
- MmPageArray
;
933 MiZeroPage(PfnOffset
);
935 if (PageDescriptor
->MapCount
!= 0)
937 DbgPrint("Returning mapped page.\n");
944 MmAllocPagesSpecifyRange(ULONG Consumer
,
945 PHYSICAL_ADDRESS LowestAddress
,
946 PHYSICAL_ADDRESS HighestAddress
,
950 PPHYSICAL_PAGE PageDescriptor
;
952 PFN_TYPE LowestPage
, HighestPage
;
954 ULONG NumberOfPagesFound
= 0;
957 DPRINT("MmAllocPagesSpecifyRange()\n"
958 " LowestAddress = 0x%08x%08x\n"
959 " HighestAddress = 0x%08x%08x\n"
960 " NumberOfPages = %d\n",
961 LowestAddress
.u
.HighPart
, LowestAddress
.u
.LowPart
,
962 HighestAddress
.u
.HighPart
, HighestAddress
.u
.LowPart
,
965 if (NumberOfPages
== 0)
968 LowestPage
= LowestAddress
.QuadPart
/ PAGE_SIZE
;
969 HighestPage
= HighestAddress
.QuadPart
/ PAGE_SIZE
;
970 if ((HighestAddress
.u
.LowPart
% PAGE_SIZE
) != 0)
973 if (LowestPage
>= MmPageArraySize
)
975 DPRINT1("MmAllocPagesSpecifyRange(): Out of memory\n");
978 if (HighestPage
> MmPageArraySize
)
979 HighestPage
= MmPageArraySize
;
981 KeAcquireSpinLock(&PageListLock
, &oldIrql
);
982 if (LowestPage
== 0 && HighestPage
== MmPageArraySize
)
984 PLIST_ENTRY ListEntry
;
985 while (NumberOfPagesFound
< NumberOfPages
)
987 if (!IsListEmpty(&FreeZeroedPageListHead
))
989 ListEntry
= RemoveTailList(&FreeZeroedPageListHead
);
991 else if (!IsListEmpty(&FreeUnzeroedPageListHead
))
993 ListEntry
= RemoveTailList(&FreeUnzeroedPageListHead
);
998 if (NumberOfPagesFound
== 0)
1000 KeReleaseSpinLock(&PageListLock
, oldIrql
);
1001 DPRINT1("MmAllocPagesSpecifyRange(): Out of memory\n");
1009 PageDescriptor
= CONTAINING_RECORD(ListEntry
, PHYSICAL_PAGE
, ListEntry
);
1011 ASSERT(PageDescriptor
->Flags
.Type
== MM_PHYSICAL_PAGE_FREE
);
1012 ASSERT(PageDescriptor
->MapCount
== 0);
1013 ASSERT(PageDescriptor
->ReferenceCount
== 0);
1015 /* Allocate the page */
1016 PageDescriptor
->Flags
.Type
= MM_PHYSICAL_PAGE_USED
;
1017 PageDescriptor
->Flags
.Consumer
= Consumer
;
1018 PageDescriptor
->ReferenceCount
= 1;
1019 PageDescriptor
->LockCount
= 0;
1020 PageDescriptor
->MapCount
= 0;
1021 PageDescriptor
->SavedSwapEntry
= 0; /* FIXME: Do we need swap entries? */
1022 InsertTailList(&UsedPageListHeads
[Consumer
], &PageDescriptor
->ListEntry
);
1024 MmStats
.NrSystemPages
++;
1025 MmStats
.NrFreePages
--;
1027 /* Remember the page */
1028 pfn
= PageDescriptor
- MmPageArray
;
1029 Pages
[NumberOfPagesFound
++] = pfn
;
1034 INT LookForZeroedPages
;
1035 for (LookForZeroedPages
= 1; LookForZeroedPages
>= 0; LookForZeroedPages
--)
1037 for (pfn
= LowestPage
; pfn
< HighestPage
; pfn
++)
1039 PageDescriptor
= MmPageArray
+ pfn
;
1041 if (PageDescriptor
->Flags
.Type
!= MM_PHYSICAL_PAGE_FREE
)
1043 if (PageDescriptor
->Flags
.Zero
!= LookForZeroedPages
)
1046 ASSERT(PageDescriptor
->MapCount
== 0);
1047 ASSERT(PageDescriptor
->ReferenceCount
== 0);
1049 /* Allocate the page */
1050 PageDescriptor
->Flags
.Type
= MM_PHYSICAL_PAGE_USED
;
1051 PageDescriptor
->Flags
.Consumer
= Consumer
;
1052 PageDescriptor
->ReferenceCount
= 1;
1053 PageDescriptor
->LockCount
= 0;
1054 PageDescriptor
->MapCount
= 0;
1055 PageDescriptor
->SavedSwapEntry
= 0; /* FIXME: Do we need swap entries? */
1056 RemoveEntryList(&PageDescriptor
->ListEntry
);
1057 InsertTailList(&UsedPageListHeads
[Consumer
], &PageDescriptor
->ListEntry
);
1059 if (!PageDescriptor
->Flags
.Zero
)
1060 UnzeroedPageCount
--;
1061 MmStats
.NrSystemPages
++;
1062 MmStats
.NrFreePages
--;
1064 /* Remember the page */
1065 Pages
[NumberOfPagesFound
++] = pfn
;
1066 if (NumberOfPagesFound
== NumberOfPages
)
1069 if (NumberOfPagesFound
== NumberOfPages
)
1073 KeReleaseSpinLock(&PageListLock
, oldIrql
);
1075 /* Zero unzero-ed pages */
1076 for (i
= 0; i
< NumberOfPagesFound
; i
++)
1079 if (MmPageArray
[pfn
].Flags
.Zero
== 0)
1085 MmPageArray
[pfn
].Flags
.Zero
= 0;
1089 return NumberOfPagesFound
;
1093 MmZeroPageThreadMain(PVOID Ignored
)
1097 PLIST_ENTRY ListEntry
;
1098 PPHYSICAL_PAGE PageDescriptor
;
1104 Status
= KeWaitForSingleObject(&ZeroPageThreadEvent
,
1109 if (!NT_SUCCESS(Status
))
1111 DbgPrint("ZeroPageThread: Wait failed\n");
1116 if (ZeroPageThreadShouldTerminate
)
1118 DbgPrint("ZeroPageThread: Terminating\n");
1122 KeAcquireSpinLock(&PageListLock
, &oldIrql
);
1123 while (!IsListEmpty(&FreeUnzeroedPageListHead
))
1125 ListEntry
= RemoveTailList(&FreeUnzeroedPageListHead
);
1126 UnzeroedPageCount
--;
1127 PageDescriptor
= CONTAINING_RECORD(ListEntry
, PHYSICAL_PAGE
, ListEntry
);
1128 /* We set the page to used, because MmCreateVirtualMapping failed with unused pages */
1129 PageDescriptor
->Flags
.Type
= MM_PHYSICAL_PAGE_USED
;
1130 KeReleaseSpinLock(&PageListLock
, oldIrql
);
1131 Pfn
= PageDescriptor
- MmPageArray
;
1132 Status
= MiZeroPage(Pfn
);
1134 KeAcquireSpinLock(&PageListLock
, &oldIrql
);
1135 if (PageDescriptor
->MapCount
!= 0)
1137 DbgPrint("Mapped page on freelist.\n");
1140 PageDescriptor
->Flags
.Zero
= 1;
1141 PageDescriptor
->Flags
.Type
= MM_PHYSICAL_PAGE_FREE
;
1142 if (NT_SUCCESS(Status
))
1144 InsertHeadList(&FreeZeroedPageListHead
, ListEntry
);
1149 InsertHeadList(&FreeUnzeroedPageListHead
, ListEntry
);
1150 UnzeroedPageCount
++;
1154 DPRINT("Zeroed %d pages.\n", Count
);
1155 KeResetEvent(&ZeroPageThreadEvent
);
1156 KeReleaseSpinLock(&PageListLock
, oldIrql
);
1160 NTSTATUS INIT_FUNCTION
1161 MmInitZeroPageThread(VOID
)
1164 HANDLE ThreadHandle
;
1166 ZeroPageThreadShouldTerminate
= FALSE
;
1167 Status
= PsCreateSystemThread(&ThreadHandle
,
1172 MmZeroPageThreadMain
,
1174 if (!NT_SUCCESS(Status
))
1179 Status
= ObReferenceObjectByHandle(ThreadHandle
,
1183 (PVOID
*)&ZeroPageThread
,
1185 if (!NT_SUCCESS(Status
))
1190 KeSetPriorityThread(&ZeroPageThread
->Tcb
, LOW_PRIORITY
);
1191 NtClose(ThreadHandle
);
1192 return STATUS_SUCCESS
;