/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/mm/freelist.c
 * PURPOSE:         Handle the list of free physical pages
 *
 * PROGRAMMERS:     David Welch (welch@cwcom.net)
 */
11 /* INCLUDES ****************************************************************/
17 #if defined (ALLOC_PRAGMA)
18 #pragma alloc_text(INIT, MmInitializePageList)
21 #define MODULE_INVOLVED_IN_ARM3
22 #include "ARM3/miarm.h"
24 /* GLOBALS ****************************************************************/
26 // ReactOS to NT Physical Page Descriptor Entry Legacy Mapping Definitions
27 #define PHYSICAL_PAGE MMPFN
28 #define PPHYSICAL_PAGE PMMPFN
30 PPHYSICAL_PAGE MmPfnDatabase
;
32 PFN_NUMBER MmAvailablePages
;
33 PFN_NUMBER MmResidentAvailablePages
;
34 PFN_NUMBER MmResidentAvailableAtInit
;
36 SIZE_T MmTotalCommittedPages
;
37 SIZE_T MmSharedCommit
;
38 SIZE_T MmDriverCommit
;
39 SIZE_T MmProcessCommit
;
40 SIZE_T MmPagedPoolCommit
;
41 SIZE_T MmPeakCommitment
;
42 SIZE_T MmtotalCommitLimitMaximum
;
44 static RTL_BITMAP MiUserPfnBitMap
;
46 /* FUNCTIONS *************************************************************/
50 MiInitializeUserPfnBitmap(VOID
)
54 /* Allocate enough buffer for the PFN bitmap and align it on 32-bits */
55 Bitmap
= ExAllocatePoolWithTag(NonPagedPool
,
56 (((MmHighestPhysicalPage
+ 1) + 31) / 32) * 4,
60 /* Initialize it and clear all the bits to begin with */
61 RtlInitializeBitMap(&MiUserPfnBitMap
,
63 (ULONG
)MmHighestPhysicalPage
+ 1);
64 RtlClearAllBits(&MiUserPfnBitMap
);
69 MmGetLRUFirstUserPage(VOID
)
74 /* Find the first user page */
75 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
76 Position
= RtlFindSetBits(&MiUserPfnBitMap
, 1, 0);
77 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
78 if (Position
== 0xFFFFFFFF) return 0;
81 ASSERT(Position
!= 0);
82 ASSERT_IS_ROS_PFN(MiGetPfnEntry(Position
));
88 MmInsertLRULastUserPage(PFN_NUMBER Pfn
)
92 /* Set the page as a user page */
94 ASSERT_IS_ROS_PFN(MiGetPfnEntry(Pfn
));
95 ASSERT(!RtlCheckBit(&MiUserPfnBitMap
, (ULONG
)Pfn
));
96 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
97 RtlSetBit(&MiUserPfnBitMap
, (ULONG
)Pfn
);
98 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
103 MmGetLRUNextUserPage(PFN_NUMBER PreviousPfn
)
108 /* Find the next user page */
109 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
110 Position
= RtlFindSetBits(&MiUserPfnBitMap
, 1, (ULONG
)PreviousPfn
+ 1);
111 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
112 if (Position
== 0xFFFFFFFF) return 0;
115 ASSERT(Position
!= 0);
116 ASSERT_IS_ROS_PFN(MiGetPfnEntry(Position
));
122 MmRemoveLRUUserPage(PFN_NUMBER Page
)
126 /* Unset the page as a user page */
128 ASSERT_IS_ROS_PFN(MiGetPfnEntry(Page
));
129 ASSERT(RtlCheckBit(&MiUserPfnBitMap
, (ULONG
)Page
));
130 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
131 RtlClearBit(&MiUserPfnBitMap
, (ULONG
)Page
);
132 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
137 MiIsPfnFree(IN PMMPFN Pfn1
)
139 /* Must be a free or zero page, with no references, linked */
140 return ((Pfn1
->u3
.e1
.PageLocation
<= StandbyPageList
) &&
143 !(Pfn1
->u3
.e2
.ReferenceCount
));
148 MiIsPfnInUse(IN PMMPFN Pfn1
)
150 /* Standby list or higher, unlinked, and with references */
151 return !MiIsPfnFree(Pfn1
);
156 MiAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress
,
157 IN PHYSICAL_ADDRESS HighAddress
,
158 IN PHYSICAL_ADDRESS SkipBytes
,
159 IN SIZE_T TotalBytes
,
160 IN MI_PFN_CACHE_ATTRIBUTE CacheAttribute
,
164 PFN_NUMBER PageCount
, LowPage
, HighPage
, SkipPages
, PagesFound
= 0, Page
;
165 PPFN_NUMBER MdlPage
, LastMdlPage
;
168 INT LookForZeroedPages
;
169 ASSERT(KeGetCurrentIrql() <= APC_LEVEL
);
172 // Convert the low address into a PFN
174 LowPage
= (PFN_NUMBER
)(LowAddress
.QuadPart
>> PAGE_SHIFT
);
177 // Convert, and normalize, the high address into a PFN
179 HighPage
= (PFN_NUMBER
)(HighAddress
.QuadPart
>> PAGE_SHIFT
);
180 if (HighPage
> MmHighestPhysicalPage
) HighPage
= MmHighestPhysicalPage
;
183 // Validate skipbytes and convert them into pages
185 if (BYTE_OFFSET(SkipBytes
.LowPart
)) return NULL
;
186 SkipPages
= (PFN_NUMBER
)(SkipBytes
.QuadPart
>> PAGE_SHIFT
);
188 /* This isn't supported at all */
189 if (SkipPages
) DPRINT1("WARNING: Caller requesting SkipBytes, MDL might be mismatched\n");
192 // Now compute the number of pages the MDL will cover
194 PageCount
= (PFN_NUMBER
)ADDRESS_AND_SIZE_TO_SPAN_PAGES(0, TotalBytes
);
198 // Try creating an MDL for these many pages
200 Mdl
= MmCreateMdl(NULL
, NULL
, PageCount
<< PAGE_SHIFT
);
204 // This function is not required to return the amount of pages requested
205 // In fact, it can return as little as 1 page, and callers are supposed
206 // to deal with this scenario. So re-attempt the allocation with less
207 // pages than before, and see if it worked this time.
209 PageCount
-= (PageCount
>> 4);
213 // Wow, not even a single page was around!
215 if (!Mdl
) return NULL
;
218 // This is where the page array starts....
220 MdlPage
= (PPFN_NUMBER
)(Mdl
+ 1);
223 // Lock the PFN database
225 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
228 // Are we looking for any pages, without discriminating?
230 if ((LowPage
== 0) && (HighPage
== MmHighestPhysicalPage
))
233 // Well then, let's go shopping
235 while (PagesFound
< PageCount
)
238 MI_SET_USAGE(MI_USAGE_MDL
);
239 MI_SET_PROCESS2("Kernel");
240 Page
= MiRemoveAnyPage(0);
243 /* This is not good... hopefully we have at least SOME pages */
248 /* Grab the page entry for it */
249 Pfn1
= MiGetPfnEntry(Page
);
252 // Make sure it's really free
254 ASSERT(Pfn1
->u3
.e2
.ReferenceCount
== 0);
256 /* Now setup the page and mark it */
257 Pfn1
->u3
.e2
.ReferenceCount
= 1;
258 Pfn1
->u2
.ShareCount
= 1;
259 MI_SET_PFN_DELETED(Pfn1
);
260 Pfn1
->u4
.PteFrame
= 0x1FFEDCB;
261 Pfn1
->u3
.e1
.StartOfAllocation
= 1;
262 Pfn1
->u3
.e1
.EndOfAllocation
= 1;
263 Pfn1
->u4
.VerifierAllocation
= 0;
266 // Save it into the MDL
268 *MdlPage
++ = MiGetPfnEntryIndex(Pfn1
);
275 // You want specific range of pages. We'll do this in two runs
277 for (LookForZeroedPages
= 1; LookForZeroedPages
>= 0; LookForZeroedPages
--)
280 // Scan the range you specified
282 for (Page
= LowPage
; Page
< HighPage
; Page
++)
285 // Get the PFN entry for this page
287 Pfn1
= MiGetPfnEntry(Page
);
291 // Make sure it's free and if this is our first pass, zeroed
293 if (MiIsPfnInUse(Pfn1
)) continue;
294 if ((Pfn1
->u3
.e1
.PageLocation
== ZeroedPageList
) != LookForZeroedPages
) continue;
296 /* Remove the page from the free or zero list */
297 ASSERT(Pfn1
->u3
.e1
.ReadInProgress
== 0);
298 MI_SET_USAGE(MI_USAGE_MDL
);
299 MI_SET_PROCESS2("Kernel");
300 MiUnlinkFreeOrZeroedPage(Pfn1
);
305 ASSERT(Pfn1
->u3
.e2
.ReferenceCount
== 0);
308 // Now setup the page and mark it
310 Pfn1
->u3
.e2
.ReferenceCount
= 1;
311 Pfn1
->u2
.ShareCount
= 1;
312 MI_SET_PFN_DELETED(Pfn1
);
313 Pfn1
->u4
.PteFrame
= 0x1FFEDCB;
314 Pfn1
->u3
.e1
.StartOfAllocation
= 1;
315 Pfn1
->u3
.e1
.EndOfAllocation
= 1;
316 Pfn1
->u4
.VerifierAllocation
= 0;
319 // Save this page into the MDL
322 if (++PagesFound
== PageCount
) break;
326 // If the first pass was enough, don't keep going, otherwise, go again
328 if (PagesFound
== PageCount
) break;
333 // Now release the PFN count
335 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
338 // We might've found less pages, but not more ;-)
340 if (PagesFound
!= PageCount
) ASSERT(PagesFound
< PageCount
);
344 // If we didn' tfind any pages at all, fail
346 DPRINT1("NO MDL PAGES!\n");
352 // Write out how many pages we found
354 Mdl
->ByteCount
= (ULONG
)(PagesFound
<< PAGE_SHIFT
);
357 // Terminate the MDL array if there's certain missing pages
359 if (PagesFound
!= PageCount
) *MdlPage
= LIST_HEAD
;
362 // Now go back and loop over all the MDL pages
364 MdlPage
= (PPFN_NUMBER
)(Mdl
+ 1);
365 LastMdlPage
= MdlPage
+ PagesFound
;
366 while (MdlPage
< LastMdlPage
)
369 // Check if we've reached the end
372 if (Page
== LIST_HEAD
) break;
375 // Get the PFN entry for the page and check if we should zero it out
377 Pfn1
= MiGetPfnEntry(Page
);
379 if (Pfn1
->u3
.e1
.PageLocation
!= ZeroedPageList
) MiZeroPhysicalPage(Page
);
380 Pfn1
->u3
.e1
.PageLocation
= ActiveAndValid
;
384 // We're done, mark the pages as locked
387 Mdl
->MdlFlags
|= MDL_PAGES_LOCKED
;
393 MmSetRmapListHeadPage(PFN_NUMBER Pfn
, PMM_RMAP_ENTRY ListHead
)
398 oldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
399 Pfn1
= MiGetPfnEntry(Pfn
);
401 ASSERT_IS_ROS_PFN(Pfn1
);
405 /* Should not be trying to insert an RMAP for a non-active page */
406 ASSERT(MiIsPfnInUse(Pfn1
) == TRUE
);
408 /* Set the list head address */
409 MI_GET_ROS_DATA(Pfn1
)->RmapListHead
= ListHead
;
413 /* ReactOS semantics dictate the page is STILL active right now */
414 ASSERT(MiIsPfnInUse(Pfn1
) == TRUE
);
416 /* In this case, the RMAP is actually being removed, so clear field */
417 MI_GET_ROS_DATA(Pfn1
)->RmapListHead
= NULL
;
419 /* ReactOS semantics will now release the page, which will make it free and enter a colored list */
422 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
427 MmGetRmapListHeadPage(PFN_NUMBER Pfn
)
430 PMM_RMAP_ENTRY ListHead
;
433 /* Lock PFN database */
434 oldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
437 Pfn1
= MiGetPfnEntry(Pfn
);
439 ASSERT_IS_ROS_PFN(Pfn1
);
441 /* Get the list head */
442 ListHead
= MI_GET_ROS_DATA(Pfn1
)->RmapListHead
;
444 /* Should not have an RMAP for a non-active page */
445 ASSERT(MiIsPfnInUse(Pfn1
) == TRUE
);
447 /* Release PFN database and return rmap list head */
448 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
454 MmSetSavedSwapEntryPage(PFN_NUMBER Pfn
, SWAPENTRY SwapEntry
)
459 Page
= MiGetPfnEntry(Pfn
);
461 ASSERT_IS_ROS_PFN(Page
);
463 oldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
464 MI_GET_ROS_DATA(Page
)->SwapEntry
= SwapEntry
;
465 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
470 MmGetSavedSwapEntryPage(PFN_NUMBER Pfn
)
476 Page
= MiGetPfnEntry(Pfn
);
478 ASSERT_IS_ROS_PFN(Page
);
480 oldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
481 SwapEntry
= MI_GET_ROS_DATA(Page
)->SwapEntry
;
482 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
489 MmReferencePage(PFN_NUMBER Pfn
)
493 DPRINT("MmReferencePage(PysicalAddress %x)\n", Pfn
<< PAGE_SHIFT
);
495 if (Pfn
== 0 || Pfn
> MmHighestPhysicalPage
)
500 Page
= MiGetPfnEntry(Pfn
);
502 ASSERT_IS_ROS_PFN(Page
);
504 Page
->u3
.e2
.ReferenceCount
++;
509 MmGetReferenceCountPage(PFN_NUMBER Pfn
)
515 DPRINT("MmGetReferenceCountPage(PhysicalAddress %x)\n", Pfn
<< PAGE_SHIFT
);
517 oldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
518 Page
= MiGetPfnEntry(Pfn
);
520 ASSERT_IS_ROS_PFN(Page
);
522 RCount
= Page
->u3
.e2
.ReferenceCount
;
524 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
530 MmIsPageInUse(PFN_NUMBER Pfn
)
532 return MiIsPfnInUse(MiGetPfnEntry(Pfn
));
537 MmDereferencePage(PFN_NUMBER Pfn
)
540 DPRINT("MmDereferencePage(PhysicalAddress %x)\n", Pfn
<< PAGE_SHIFT
);
542 Page
= MiGetPfnEntry(Pfn
);
544 ASSERT_IS_ROS_PFN(Page
);
546 Page
->u3
.e2
.ReferenceCount
--;
547 if (Page
->u3
.e2
.ReferenceCount
== 0)
549 /* Mark the page temporarily as valid, we're going to make it free soon */
550 Page
->u3
.e1
.PageLocation
= ActiveAndValid
;
552 /* It's not a ROS PFN anymore */
553 Page
->u4
.AweAllocation
= FALSE
;
554 ExFreePool(MI_GET_ROS_DATA(Page
));
557 /* Bring it back into the free list */
558 DPRINT("Legacy free: %lx\n", Pfn
);
559 MiInsertPageInFreeList(Pfn
);
565 MmAllocPage(ULONG Type
)
567 PFN_NUMBER PfnOffset
;
570 PfnOffset
= MiRemoveZeroPage(MI_GET_NEXT_COLOR());
574 DPRINT1("MmAllocPage(): Out of memory\n");
578 DPRINT("Legacy allocate: %lx\n", PfnOffset
);
579 Pfn1
= MiGetPfnEntry(PfnOffset
);
580 Pfn1
->u3
.e2
.ReferenceCount
= 1;
581 Pfn1
->u3
.e1
.PageLocation
= ActiveAndValid
;
583 /* This marks the PFN as a ReactOS PFN */
584 Pfn1
->u4
.AweAllocation
= TRUE
;
586 /* Allocate the extra ReactOS Data and zero it out */
587 Pfn1
->RosMmData
= (LONG
)ExAllocatePoolWithTag(NonPagedPool
, sizeof(MMROSPFN
), 'RsPf');
588 ASSERT(MI_GET_ROS_DATA(Pfn1
) != NULL
);
589 ASSERT_IS_ROS_PFN(Pfn1
);
590 MI_GET_ROS_DATA(Pfn1
)->SwapEntry
= 0;
591 MI_GET_ROS_DATA(Pfn1
)->RmapListHead
= NULL
;