/*
 * COPYRIGHT:   See COPYING in the top level directory
 * PROJECT:     ReactOS kernel
 * FILE:        ntoskrnl/mm/freelist.c
 * PURPOSE:     Handle the list of free physical pages
 * PROGRAMMERS: David Welch (welch@cwcom.net)
 */
11 /* INCLUDES ****************************************************************/
17 #if defined (ALLOC_PRAGMA)
18 #pragma alloc_text(INIT, MmInitializePageList)
21 #define MODULE_INVOLVED_IN_ARM3
22 #include "ARM3/miarm.h"
24 /* GLOBALS ****************************************************************/
26 // ReactOS to NT Physical Page Descriptor Entry Legacy Mapping Definitions
27 #define PHYSICAL_PAGE MMPFN
28 #define PPHYSICAL_PAGE PMMPFN
30 PPHYSICAL_PAGE MmPfnDatabase
;
32 PFN_NUMBER MmAvailablePages
;
33 PFN_NUMBER MmResidentAvailablePages
;
34 PFN_NUMBER MmResidentAvailableAtInit
;
36 SIZE_T MmTotalCommittedPages
;
37 SIZE_T MmSharedCommit
;
38 SIZE_T MmDriverCommit
;
39 SIZE_T MmProcessCommit
;
40 SIZE_T MmPagedPoolCommit
;
41 SIZE_T MmPeakCommitment
;
42 SIZE_T MmtotalCommitLimitMaximum
;
44 static RTL_BITMAP MiUserPfnBitMap
;
46 /* FUNCTIONS *************************************************************/
50 MiInitializeUserPfnBitmap(VOID
)
54 /* Allocate enough buffer for the PFN bitmap and align it on 32-bits */
55 Bitmap
= ExAllocatePoolWithTag(NonPagedPool
,
56 (((MmHighestPhysicalPage
+ 1) + 31) / 32) * 4,
60 /* Initialize it and clear all the bits to begin with */
61 RtlInitializeBitMap(&MiUserPfnBitMap
,
63 (ULONG
)MmHighestPhysicalPage
+ 1);
64 RtlClearAllBits(&MiUserPfnBitMap
);
69 MmGetLRUFirstUserPage(VOID
)
74 /* Find the first user page */
75 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
76 Position
= RtlFindSetBits(&MiUserPfnBitMap
, 1, 0);
77 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
78 if (Position
== 0xFFFFFFFF) return 0;
81 ASSERT(Position
!= 0);
82 ASSERT_IS_ROS_PFN(MiGetPfnEntry(Position
));
88 MmInsertLRULastUserPage(PFN_NUMBER Pfn
)
92 /* Set the page as a user page */
94 ASSERT_IS_ROS_PFN(MiGetPfnEntry(Pfn
));
95 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
96 RtlSetBit(&MiUserPfnBitMap
, (ULONG
)Pfn
);
97 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
102 MmGetLRUNextUserPage(PFN_NUMBER PreviousPfn
)
107 /* Find the next user page */
108 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
109 Position
= RtlFindSetBits(&MiUserPfnBitMap
, 1, (ULONG
)PreviousPfn
+ 1);
110 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
111 if (Position
== 0xFFFFFFFF) return 0;
114 ASSERT(Position
!= 0);
115 ASSERT_IS_ROS_PFN(MiGetPfnEntry(Position
));
121 MmRemoveLRUUserPage(PFN_NUMBER Page
)
123 /* Unset the page as a user page */
125 ASSERT_IS_ROS_PFN(MiGetPfnEntry(Page
));
126 RtlClearBit(&MiUserPfnBitMap
, (ULONG
)Page
);
131 MiIsPfnFree(IN PMMPFN Pfn1
)
133 /* Must be a free or zero page, with no references, linked */
134 return ((Pfn1
->u3
.e1
.PageLocation
<= StandbyPageList
) &&
137 !(Pfn1
->u3
.e2
.ReferenceCount
));
142 MiIsPfnInUse(IN PMMPFN Pfn1
)
144 /* Standby list or higher, unlinked, and with references */
145 return !MiIsPfnFree(Pfn1
);
150 MiAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress
,
151 IN PHYSICAL_ADDRESS HighAddress
,
152 IN PHYSICAL_ADDRESS SkipBytes
,
153 IN SIZE_T TotalBytes
,
154 IN MI_PFN_CACHE_ATTRIBUTE CacheAttribute
,
158 PFN_NUMBER PageCount
, LowPage
, HighPage
, SkipPages
, PagesFound
= 0, Page
;
159 PPFN_NUMBER MdlPage
, LastMdlPage
;
162 INT LookForZeroedPages
;
163 ASSERT(KeGetCurrentIrql() <= APC_LEVEL
);
166 // Convert the low address into a PFN
168 LowPage
= (PFN_NUMBER
)(LowAddress
.QuadPart
>> PAGE_SHIFT
);
171 // Convert, and normalize, the high address into a PFN
173 HighPage
= (PFN_NUMBER
)(HighAddress
.QuadPart
>> PAGE_SHIFT
);
174 if (HighPage
> MmHighestPhysicalPage
) HighPage
= MmHighestPhysicalPage
;
177 // Validate skipbytes and convert them into pages
179 if (BYTE_OFFSET(SkipBytes
.LowPart
)) return NULL
;
180 SkipPages
= (PFN_NUMBER
)(SkipBytes
.QuadPart
>> PAGE_SHIFT
);
182 /* This isn't supported at all */
183 if (SkipPages
) DPRINT1("WARNING: Caller requesting SkipBytes, MDL might be mismatched\n");
186 // Now compute the number of pages the MDL will cover
188 PageCount
= (PFN_NUMBER
)ADDRESS_AND_SIZE_TO_SPAN_PAGES(0, TotalBytes
);
192 // Try creating an MDL for these many pages
194 Mdl
= MmCreateMdl(NULL
, NULL
, PageCount
<< PAGE_SHIFT
);
198 // This function is not required to return the amount of pages requested
199 // In fact, it can return as little as 1 page, and callers are supposed
200 // to deal with this scenario. So re-attempt the allocation with less
201 // pages than before, and see if it worked this time.
203 PageCount
-= (PageCount
>> 4);
207 // Wow, not even a single page was around!
209 if (!Mdl
) return NULL
;
212 // This is where the page array starts....
214 MdlPage
= (PPFN_NUMBER
)(Mdl
+ 1);
217 // Lock the PFN database
219 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
222 // Are we looking for any pages, without discriminating?
224 if ((LowPage
== 0) && (HighPage
== MmHighestPhysicalPage
))
227 // Well then, let's go shopping
229 while (PagesFound
< PageCount
)
232 MI_SET_USAGE(MI_USAGE_MDL
);
233 MI_SET_PROCESS2("Kernel");
234 Page
= MiRemoveAnyPage(0);
237 /* This is not good... hopefully we have at least SOME pages */
242 /* Grab the page entry for it */
243 Pfn1
= MiGetPfnEntry(Page
);
246 // Make sure it's really free
248 ASSERT(Pfn1
->u3
.e2
.ReferenceCount
== 0);
250 /* Now setup the page and mark it */
251 Pfn1
->u3
.e2
.ReferenceCount
= 1;
252 Pfn1
->u2
.ShareCount
= 1;
253 MI_SET_PFN_DELETED(Pfn1
);
254 Pfn1
->u4
.PteFrame
= 0x1FFEDCB;
255 Pfn1
->u3
.e1
.StartOfAllocation
= 1;
256 Pfn1
->u3
.e1
.EndOfAllocation
= 1;
257 Pfn1
->u4
.VerifierAllocation
= 0;
260 // Save it into the MDL
262 *MdlPage
++ = MiGetPfnEntryIndex(Pfn1
);
269 // You want specific range of pages. We'll do this in two runs
271 for (LookForZeroedPages
= 1; LookForZeroedPages
>= 0; LookForZeroedPages
--)
274 // Scan the range you specified
276 for (Page
= LowPage
; Page
< HighPage
; Page
++)
279 // Get the PFN entry for this page
281 Pfn1
= MiGetPfnEntry(Page
);
285 // Make sure it's free and if this is our first pass, zeroed
287 if (MiIsPfnInUse(Pfn1
)) continue;
288 if ((Pfn1
->u3
.e1
.PageLocation
== ZeroedPageList
) != LookForZeroedPages
) continue;
290 /* Remove the page from the free or zero list */
291 ASSERT(Pfn1
->u3
.e1
.ReadInProgress
== 0);
292 MI_SET_USAGE(MI_USAGE_MDL
);
293 MI_SET_PROCESS2("Kernel");
294 MiUnlinkFreeOrZeroedPage(Pfn1
);
299 ASSERT(Pfn1
->u3
.e2
.ReferenceCount
== 0);
302 // Now setup the page and mark it
304 Pfn1
->u3
.e2
.ReferenceCount
= 1;
305 Pfn1
->u2
.ShareCount
= 1;
306 MI_SET_PFN_DELETED(Pfn1
);
307 Pfn1
->u4
.PteFrame
= 0x1FFEDCB;
308 Pfn1
->u3
.e1
.StartOfAllocation
= 1;
309 Pfn1
->u3
.e1
.EndOfAllocation
= 1;
310 Pfn1
->u4
.VerifierAllocation
= 0;
313 // Save this page into the MDL
316 if (++PagesFound
== PageCount
) break;
320 // If the first pass was enough, don't keep going, otherwise, go again
322 if (PagesFound
== PageCount
) break;
327 // Now release the PFN count
329 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
332 // We might've found less pages, but not more ;-)
334 if (PagesFound
!= PageCount
) ASSERT(PagesFound
< PageCount
);
338 // If we didn' tfind any pages at all, fail
340 DPRINT1("NO MDL PAGES!\n");
346 // Write out how many pages we found
348 Mdl
->ByteCount
= (ULONG
)(PagesFound
<< PAGE_SHIFT
);
351 // Terminate the MDL array if there's certain missing pages
353 if (PagesFound
!= PageCount
) *MdlPage
= LIST_HEAD
;
356 // Now go back and loop over all the MDL pages
358 MdlPage
= (PPFN_NUMBER
)(Mdl
+ 1);
359 LastMdlPage
= MdlPage
+ PagesFound
;
360 while (MdlPage
< LastMdlPage
)
363 // Check if we've reached the end
366 if (Page
== LIST_HEAD
) break;
369 // Get the PFN entry for the page and check if we should zero it out
371 Pfn1
= MiGetPfnEntry(Page
);
373 if (Pfn1
->u3
.e1
.PageLocation
!= ZeroedPageList
) MiZeroPhysicalPage(Page
);
374 Pfn1
->u3
.e1
.PageLocation
= ActiveAndValid
;
378 // We're done, mark the pages as locked
381 Mdl
->MdlFlags
|= MDL_PAGES_LOCKED
;
387 MmSetRmapListHeadPage(PFN_NUMBER Pfn
, PMM_RMAP_ENTRY ListHead
)
392 oldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
393 Pfn1
= MiGetPfnEntry(Pfn
);
395 ASSERT_IS_ROS_PFN(Pfn1
);
399 /* Should not be trying to insert an RMAP for a non-active page */
400 ASSERT(MiIsPfnInUse(Pfn1
) == TRUE
);
402 /* Set the list head address */
403 MI_GET_ROS_DATA(Pfn1
)->RmapListHead
= ListHead
;
407 /* ReactOS semantics dictate the page is STILL active right now */
408 ASSERT(MiIsPfnInUse(Pfn1
) == TRUE
);
410 /* In this case, the RMAP is actually being removed, so clear field */
411 MI_GET_ROS_DATA(Pfn1
)->RmapListHead
= NULL
;
413 /* ReactOS semantics will now release the page, which will make it free and enter a colored list */
416 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
421 MmGetRmapListHeadPage(PFN_NUMBER Pfn
)
424 PMM_RMAP_ENTRY ListHead
;
427 /* Lock PFN database */
428 oldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
431 Pfn1
= MiGetPfnEntry(Pfn
);
433 ASSERT_IS_ROS_PFN(Pfn1
);
435 /* Get the list head */
436 ListHead
= MI_GET_ROS_DATA(Pfn1
)->RmapListHead
;
438 /* Should not have an RMAP for a non-active page */
439 ASSERT(MiIsPfnInUse(Pfn1
) == TRUE
);
441 /* Release PFN database and return rmap list head */
442 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
448 MmSetSavedSwapEntryPage(PFN_NUMBER Pfn
, SWAPENTRY SwapEntry
)
453 Page
= MiGetPfnEntry(Pfn
);
455 ASSERT_IS_ROS_PFN(Page
);
457 oldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
458 MI_GET_ROS_DATA(Page
)->SwapEntry
= SwapEntry
;
459 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
464 MmGetSavedSwapEntryPage(PFN_NUMBER Pfn
)
470 Page
= MiGetPfnEntry(Pfn
);
472 ASSERT_IS_ROS_PFN(Page
);
474 oldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
475 SwapEntry
= MI_GET_ROS_DATA(Page
)->SwapEntry
;
476 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
483 MmReferencePage(PFN_NUMBER Pfn
)
487 DPRINT("MmReferencePage(PysicalAddress %x)\n", Pfn
<< PAGE_SHIFT
);
489 if (Pfn
== 0 || Pfn
> MmHighestPhysicalPage
)
494 Page
= MiGetPfnEntry(Pfn
);
496 ASSERT_IS_ROS_PFN(Page
);
498 Page
->u3
.e2
.ReferenceCount
++;
503 MmGetReferenceCountPage(PFN_NUMBER Pfn
)
509 DPRINT("MmGetReferenceCountPage(PhysicalAddress %x)\n", Pfn
<< PAGE_SHIFT
);
511 oldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
512 Page
= MiGetPfnEntry(Pfn
);
514 ASSERT_IS_ROS_PFN(Page
);
516 RCount
= Page
->u3
.e2
.ReferenceCount
;
518 KeReleaseQueuedSpinLock(LockQueuePfnLock
, oldIrql
);
524 MmIsPageInUse(PFN_NUMBER Pfn
)
526 return MiIsPfnInUse(MiGetPfnEntry(Pfn
));
531 MmDereferencePage(PFN_NUMBER Pfn
)
534 DPRINT("MmDereferencePage(PhysicalAddress %x)\n", Pfn
<< PAGE_SHIFT
);
536 Page
= MiGetPfnEntry(Pfn
);
538 ASSERT_IS_ROS_PFN(Page
);
540 Page
->u3
.e2
.ReferenceCount
--;
541 if (Page
->u3
.e2
.ReferenceCount
== 0)
543 /* Mark the page temporarily as valid, we're going to make it free soon */
544 Page
->u3
.e1
.PageLocation
= ActiveAndValid
;
546 /* It's not a ROS PFN anymore */
547 Page
->u4
.AweAllocation
= FALSE
;
548 ExFreePool(MI_GET_ROS_DATA(Page
));
551 /* Bring it back into the free list */
552 DPRINT("Legacy free: %lx\n", Pfn
);
553 MiInsertPageInFreeList(Pfn
);
559 MmAllocPage(ULONG Type
)
561 PFN_NUMBER PfnOffset
;
564 PfnOffset
= MiRemoveZeroPage(MI_GET_NEXT_COLOR());
568 DPRINT1("MmAllocPage(): Out of memory\n");
572 DPRINT("Legacy allocate: %lx\n", PfnOffset
);
573 Pfn1
= MiGetPfnEntry(PfnOffset
);
574 Pfn1
->u3
.e2
.ReferenceCount
= 1;
575 Pfn1
->u3
.e1
.PageLocation
= ActiveAndValid
;
577 /* This marks the PFN as a ReactOS PFN */
578 Pfn1
->u4
.AweAllocation
= TRUE
;
580 /* Allocate the extra ReactOS Data and zero it out */
581 Pfn1
->RosMmData
= (LONG
)ExAllocatePoolWithTag(NonPagedPool
, sizeof(MMROSPFN
), 'RsPf');
582 ASSERT(MI_GET_ROS_DATA(Pfn1
) != NULL
);
583 ASSERT_IS_ROS_PFN(Pfn1
);
584 MI_GET_ROS_DATA(Pfn1
)->SwapEntry
= 0;
585 MI_GET_ROS_DATA(Pfn1
)->RmapListHead
= NULL
;