/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/mm/freelist.c
 * PURPOSE:         Handle the list of free physical pages
 * PROGRAMMERS:     David Welch (welch@cwcom.net)
 */
/* INCLUDES ****************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#if defined (ALLOC_PRAGMA)
#pragma alloc_text(INIT, MmInitializePageList)
#endif

#define MODULE_INVOLVED_IN_ARM3
#include "ARM3/miarm.h"
/* GLOBALS ****************************************************************/

// ReactOS to NT Physical Page Descriptor Entry Legacy Mapping Definitions
#define PHYSICAL_PAGE MMPFN
#define PPHYSICAL_PAGE PMMPFN
PPHYSICAL_PAGE MmPfnDatabase;

PFN_NUMBER MmAvailablePages;
PFN_NUMBER MmResidentAvailablePages;
PFN_NUMBER MmResidentAvailableAtInit;

SIZE_T MmTotalCommittedPages;
SIZE_T MmSharedCommit;
SIZE_T MmDriverCommit;
SIZE_T MmProcessCommit;
SIZE_T MmPagedPoolCommit;
SIZE_T MmPeakCommitment;
SIZE_T MmtotalCommitLimitMaximum;

static RTL_BITMAP MiUserPfnBitMap;
/* FUNCTIONS *************************************************************/

VOID
NTAPI
MiInitializeUserPfnBitmap(VOID)
{
    PVOID Bitmap;

    /* Allocate enough buffer for the PFN bitmap and align it on 32-bits */
    Bitmap = ExAllocatePoolWithTag(NonPagedPool,
                                   (((MmHighestPhysicalPage + 1) + 31) / 32) * 4,
                                   TAG_MM); /* pool tag assumed */
    ASSERT(Bitmap);

    /* Initialize it and clear all the bits to begin with */
    RtlInitializeBitMap(&MiUserPfnBitMap,
                        Bitmap,
                        (ULONG)MmHighestPhysicalPage + 1);
    RtlClearAllBits(&MiUserPfnBitMap);
}
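
/*
 * Worked example (added, illustrative): the pool size computed above rounds the
 * bit count up to whole 32-bit ULONGs, which is what RtlInitializeBitMap works
 * with. With a hypothetical MmHighestPhysicalPage of 0x3FFFE, the bitmap needs
 * 0x3FFFF bits, and (((0x3FFFE + 1) + 31) / 32) * 4 yields 0x8000 bytes.
 */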

PFN_NUMBER
NTAPI
MmGetLRUFirstUserPage(VOID)
{
    ULONG Position;
    KIRQL OldIrql;

    /* Find the first user page */
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    Position = RtlFindSetBits(&MiUserPfnBitMap, 1, 0);
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
    if (Position == 0xFFFFFFFF) return 0;

    /* Return it */
    ASSERT(Position != 0);
    ASSERT_IS_ROS_PFN(MiGetPfnEntry(Position));
    return Position;
}

VOID
NTAPI
MmInsertLRULastUserPage(PFN_NUMBER Pfn)
{
    KIRQL OldIrql;

    /* Set the page as a user page */
    ASSERT_IS_ROS_PFN(MiGetPfnEntry(Pfn));
    ASSERT(!RtlCheckBit(&MiUserPfnBitMap, (ULONG)Pfn));
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    RtlSetBit(&MiUserPfnBitMap, (ULONG)Pfn);
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
}

PFN_NUMBER
NTAPI
MmGetLRUNextUserPage(PFN_NUMBER PreviousPfn)
{
    ULONG Position;
    KIRQL OldIrql;

    /* Find the next user page */
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    Position = RtlFindSetBits(&MiUserPfnBitMap, 1, (ULONG)PreviousPfn + 1);
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
    if (Position == 0xFFFFFFFF) return 0;

    /* Return it */
    ASSERT(Position != 0);
    ASSERT_IS_ROS_PFN(MiGetPfnEntry(Position));
    return Position;
}
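
/*
 * Illustrative sketch (added, not part of the original file): a caller such as
 * the legacy page-out path can walk every user page with the pair of routines
 * above; the body of the loop is a placeholder.
 *
 *     PFN_NUMBER Page;
 *     for (Page = MmGetLRUFirstUserPage();
 *          Page != 0;
 *          Page = MmGetLRUNextUserPage(Page))
 *     {
 *         // ... examine or trim this user-owned page ...
 *     }
 */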

VOID
NTAPI
MmRemoveLRUUserPage(PFN_NUMBER Page)
{
    /* Unset the page as a user page */
    ASSERT_IS_ROS_PFN(MiGetPfnEntry(Page));
    ASSERT(RtlCheckBit(&MiUserPfnBitMap, (ULONG)Page));
    RtlClearBit(&MiUserPfnBitMap, (ULONG)Page);
}

BOOLEAN
NTAPI
MiIsPfnFree(IN PMMPFN Pfn1)
{
    /* Must be a free or zero page, with no references, linked */
    return ((Pfn1->u3.e1.PageLocation <= StandbyPageList) &&
            (Pfn1->u1.Flink) &&
            (Pfn1->u2.Blink) &&
            !(Pfn1->u3.e2.ReferenceCount));
}

BOOLEAN
NTAPI
MiIsPfnInUse(IN PMMPFN Pfn1)
{
    /* Standby list or higher, unlinked, and with references */
    return !MiIsPfnFree(Pfn1);
}
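
/*
 * Clarifying note (added): the "<= StandbyPageList" test in MiIsPfnFree relies
 * on the MMLISTS ordering, where ZeroedPageList, FreePageList and
 * StandbyPageList are the lowest-numbered list values, so the comparison covers
 * exactly the zeroed, free and standby lists; MiIsPfnInUse is simply its
 * negation.
 */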

PMDL
NTAPI
MiAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress,
                      IN PHYSICAL_ADDRESS HighAddress,
                      IN PHYSICAL_ADDRESS SkipBytes,
                      IN SIZE_T TotalBytes,
                      IN MI_PFN_CACHE_ATTRIBUTE CacheAttribute,
                      IN ULONG MdlFlags)
{
    PMDL Mdl;
    PFN_NUMBER PageCount, LowPage, HighPage, SkipPages, PagesFound = 0, Page;
    PPFN_NUMBER MdlPage, LastMdlPage;
    PMMPFN Pfn1;
    INT LookForZeroedPages;
    KIRQL OldIrql;

    ASSERT(KeGetCurrentIrql() <= APC_LEVEL);

    // Convert the low address into a PFN
    LowPage = (PFN_NUMBER)(LowAddress.QuadPart >> PAGE_SHIFT);

    // Convert, and normalize, the high address into a PFN
    HighPage = (PFN_NUMBER)(HighAddress.QuadPart >> PAGE_SHIFT);
    if (HighPage > MmHighestPhysicalPage) HighPage = MmHighestPhysicalPage;

    // Validate SkipBytes and convert them into pages
    if (BYTE_OFFSET(SkipBytes.LowPart)) return NULL;
    SkipPages = (PFN_NUMBER)(SkipBytes.QuadPart >> PAGE_SHIFT);

    /* This isn't supported at all */
    if (SkipPages) DPRINT1("WARNING: Caller requesting SkipBytes, MDL might be mismatched\n");

    // Now compute the number of pages the MDL will cover
    PageCount = (PFN_NUMBER)ADDRESS_AND_SIZE_TO_SPAN_PAGES(0, TotalBytes);

    // Try creating an MDL for this many pages
    do
    {
        Mdl = MmCreateMdl(NULL, NULL, PageCount << PAGE_SHIFT);
        if (Mdl) break;

        // This function is not required to return the amount of pages requested
        // In fact, it can return as little as 1 page, and callers are supposed
        // to deal with this scenario. So re-attempt the allocation with fewer
        // pages than before, and see if it worked this time.
        PageCount -= (PageCount >> 4);
    } while (PageCount);

    // Wow, not even a single page was around!
    if (!Mdl) return NULL;
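
    /*
     * Worked example (added, illustrative): each failed MmCreateMdl attempt
     * above shrinks the request by PageCount >> 4, roughly 6%. A 1024-page
     * request, for instance, would be retried with 960 pages, then 900, and so
     * on, until an MDL can finally be allocated.
     */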

    // This is where the page array starts....
    MdlPage = (PPFN_NUMBER)(Mdl + 1);

    // Lock the PFN database
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    // Are we looking for any pages, without discriminating?
    if ((LowPage == 0) && (HighPage == MmHighestPhysicalPage))
    {
        // Well then, let's go shopping
        while (PagesFound < PageCount)
        {
            MI_SET_USAGE(MI_USAGE_MDL);
            MI_SET_PROCESS2("Kernel");
            Page = MiRemoveAnyPage(0);
            if (Page == 0)
            {
                /* This is not good... hopefully we have at least SOME pages */
                ASSERT(PagesFound);
                break;
            }

            /* Grab the page entry for it */
            Pfn1 = MiGetPfnEntry(Page);

            // Make sure it's really free
            ASSERT(Pfn1->u3.e2.ReferenceCount == 0);

            /* Now setup the page and mark it */
            Pfn1->u3.e2.ReferenceCount = 1;
            Pfn1->u2.ShareCount = 1;
            MI_SET_PFN_DELETED(Pfn1);
            Pfn1->u4.PteFrame = 0x1FFEDCB;
            Pfn1->u3.e1.StartOfAllocation = 1;
            Pfn1->u3.e1.EndOfAllocation = 1;
            Pfn1->u4.VerifierAllocation = 0;

            // Save it into the MDL
            *MdlPage++ = MiGetPfnEntryIndex(Pfn1);
            PagesFound++;
        }
    }
    else
    {
        // You want a specific range of pages. We'll do this in two runs
        for (LookForZeroedPages = 1; LookForZeroedPages >= 0; LookForZeroedPages--)
        {
            // Scan the range you specified
            for (Page = LowPage; Page < HighPage; Page++)
            {
                // Get the PFN entry for this page
                Pfn1 = MiGetPfnEntry(Page);

                // Make sure it's free and if this is our first pass, zeroed
                if (MiIsPfnInUse(Pfn1)) continue;
                if ((Pfn1->u3.e1.PageLocation == ZeroedPageList) != LookForZeroedPages) continue;

                /* Remove the page from the free or zero list */
                ASSERT(Pfn1->u3.e1.ReadInProgress == 0);
                MI_SET_USAGE(MI_USAGE_MDL);
                MI_SET_PROCESS2("Kernel");
                MiUnlinkFreeOrZeroedPage(Pfn1);

                ASSERT(Pfn1->u3.e2.ReferenceCount == 0);

                // Now setup the page and mark it
                Pfn1->u3.e2.ReferenceCount = 1;
                Pfn1->u2.ShareCount = 1;
                MI_SET_PFN_DELETED(Pfn1);
                Pfn1->u4.PteFrame = 0x1FFEDCB;
                Pfn1->u3.e1.StartOfAllocation = 1;
                Pfn1->u3.e1.EndOfAllocation = 1;
                Pfn1->u4.VerifierAllocation = 0;

                // Save this page into the MDL
                *MdlPage++ = Page;
                if (++PagesFound == PageCount) break;
            }

            // If the first pass was enough, don't keep going, otherwise, go again
            if (PagesFound == PageCount) break;
        }
    }

    // Now release the PFN lock
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    // We might've found fewer pages, but not more ;-)
    if (PagesFound != PageCount) ASSERT(PagesFound < PageCount);

    // If we didn't find any pages at all, fail
    if (!PagesFound)
    {
        DPRINT1("NO MDL PAGES!\n");
        ExFreePool(Mdl);
        return NULL;
    }

    // Write out how many pages we found
    Mdl->ByteCount = (ULONG)(PagesFound << PAGE_SHIFT);

    // Terminate the MDL array if certain pages are missing
    if (PagesFound != PageCount) *MdlPage = LIST_HEAD;

    // Now go back and loop over all the MDL pages
    MdlPage = (PPFN_NUMBER)(Mdl + 1);
    LastMdlPage = MdlPage + PagesFound;
    while (MdlPage < LastMdlPage)
    {
        // Get the page
        Page = *MdlPage++;

        // Check if we've reached the end
        if (Page == LIST_HEAD) break;

        // Get the PFN entry for the page and check if we should zero it out
        Pfn1 = MiGetPfnEntry(Page);
        if (Pfn1->u3.e1.PageLocation != ZeroedPageList) MiZeroPhysicalPage(Page);
        Pfn1->u3.e1.PageLocation = ActiveAndValid;
    }

    // We're done, mark the pages as locked
    Mdl->MdlFlags |= MDL_PAGES_LOCKED;
    return Mdl;
}
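
/*
 * Illustrative usage sketch (added, not part of the original file): this routine
 * backs the documented MmAllocatePagesForMdl API, so a typical caller looks
 * roughly like the following; the byte count and error handling are placeholders.
 *
 *     PHYSICAL_ADDRESS Low, High, Skip;
 *     PMDL Mdl;
 *
 *     Low.QuadPart = 0;
 *     High.QuadPart = -1;     // no upper limit on the physical address
 *     Skip.QuadPart = 0;
 *     Mdl = MmAllocatePagesForMdl(Low, High, Skip, 16 * PAGE_SIZE);
 *     if (Mdl != NULL)
 *     {
 *         // ByteCount may describe fewer pages than were requested
 *         SIZE_T Found = MmGetMdlByteCount(Mdl);
 *         // ... use the pages ...
 *         MmFreePagesFromMdl(Mdl);
 *         ExFreePool(Mdl);
 *     }
 */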

VOID
NTAPI
MmSetRmapListHeadPage(PFN_NUMBER Pfn, PMM_RMAP_ENTRY ListHead)
{
    KIRQL oldIrql;
    PMMPFN Pfn1;

    /* Lock the PFN database */
    oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    Pfn1 = MiGetPfnEntry(Pfn);
    ASSERT_IS_ROS_PFN(Pfn1);

    if (ListHead)
    {
        /* Should not be trying to insert an RMAP for a non-active page */
        ASSERT(MiIsPfnInUse(Pfn1) == TRUE);

        /* Set the list head address */
        MI_GET_ROS_DATA(Pfn1)->RmapListHead = ListHead;
    }
    else
    {
        /* ReactOS semantics dictate the page is STILL active right now */
        ASSERT(MiIsPfnInUse(Pfn1) == TRUE);

        /* In this case, the RMAP is actually being removed, so clear field */
        MI_GET_ROS_DATA(Pfn1)->RmapListHead = NULL;

        /* ReactOS semantics will now release the page, which will make it free and enter a colored list */
    }

    /* Release the PFN database */
    KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
}

PMM_RMAP_ENTRY
NTAPI
MmGetRmapListHeadPage(PFN_NUMBER Pfn)
{
    KIRQL oldIrql;
    PMM_RMAP_ENTRY ListHead;
    PMMPFN Pfn1;

    /* Lock PFN database */
    oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    Pfn1 = MiGetPfnEntry(Pfn);
    ASSERT_IS_ROS_PFN(Pfn1);

    /* Get the list head */
    ListHead = MI_GET_ROS_DATA(Pfn1)->RmapListHead;

    /* Should not have an RMAP for a non-active page */
    ASSERT(MiIsPfnInUse(Pfn1) == TRUE);

    /* Release PFN database and return rmap list head */
    KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
    return ListHead;
}

VOID
NTAPI
MmSetSavedSwapEntryPage(PFN_NUMBER Pfn, SWAPENTRY SwapEntry)
{
    KIRQL oldIrql;
    PMMPFN Page;

    Page = MiGetPfnEntry(Pfn);
    ASSERT_IS_ROS_PFN(Page);

    oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    MI_GET_ROS_DATA(Page)->SwapEntry = SwapEntry;
    KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
}

SWAPENTRY
NTAPI
MmGetSavedSwapEntryPage(PFN_NUMBER Pfn)
{
    SWAPENTRY SwapEntry;
    KIRQL oldIrql;
    PMMPFN Page;

    Page = MiGetPfnEntry(Pfn);
    ASSERT_IS_ROS_PFN(Page);

    oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    SwapEntry = MI_GET_ROS_DATA(Page)->SwapEntry;
    KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);

    return SwapEntry;
}

VOID
NTAPI
MmReferencePage(PFN_NUMBER Pfn)
{
    PMMPFN Page;

    DPRINT("MmReferencePage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);

    if (Pfn == 0 || Pfn > MmHighestPhysicalPage)
    {
        /* Ignore out-of-range PFNs (assumed handling) */
        return;
    }

    Page = MiGetPfnEntry(Pfn);
    ASSERT_IS_ROS_PFN(Page);

    Page->u3.e2.ReferenceCount++;
}

ULONG
NTAPI
MmGetReferenceCountPage(PFN_NUMBER Pfn)
{
    KIRQL oldIrql;
    ULONG RCount;
    PMMPFN Page;

    DPRINT("MmGetReferenceCountPage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);

    oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    Page = MiGetPfnEntry(Pfn);
    ASSERT_IS_ROS_PFN(Page);

    RCount = Page->u3.e2.ReferenceCount;

    KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
    return RCount;
}

BOOLEAN
NTAPI
MmIsPageInUse(PFN_NUMBER Pfn)
{
    return MiIsPfnInUse(MiGetPfnEntry(Pfn));
}

VOID
NTAPI
MmDereferencePage(PFN_NUMBER Pfn)
{
    PMMPFN Page;

    DPRINT("MmDereferencePage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);

    Page = MiGetPfnEntry(Pfn);
    ASSERT_IS_ROS_PFN(Page);

    Page->u3.e2.ReferenceCount--;
    if (Page->u3.e2.ReferenceCount == 0)
    {
        /* Mark the page temporarily as valid, we're going to make it free soon */
        Page->u3.e1.PageLocation = ActiveAndValid;

        /* It's not a ROS PFN anymore */
        Page->u4.AweAllocation = FALSE;
        ExFreePool(MI_GET_ROS_DATA(Page));

        /* Bring it back into the free list */
        DPRINT("Legacy free: %lx\n", Pfn);
        MiInsertPageInFreeList(Pfn);
    }
}
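
/*
 * Illustrative note (added): MmReferencePage and MmDereferencePage pair up on
 * these legacy ROS-owned pages; the final dereference above releases the extra
 * MMROSPFN data and puts the page back on the free list. A minimal sketch of
 * the expected pairing:
 *
 *     MmReferencePage(Pfn);       // take an extra reference on the page
 *     // ... use the page ...
 *     MmDereferencePage(Pfn);     // drop it; the page is freed on the last release
 */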

PFN_NUMBER
NTAPI
MmAllocPage(ULONG Type)
{
    PFN_NUMBER PfnOffset;
    PMMPFN Pfn1;

    PfnOffset = MiRemoveZeroPage(MI_GET_NEXT_COLOR());
    if (!PfnOffset)
    {
        DPRINT1("MmAllocPage(): Out of memory\n");
        return 0;
    }

    DPRINT("Legacy allocate: %lx\n", PfnOffset);
    Pfn1 = MiGetPfnEntry(PfnOffset);
    Pfn1->u3.e2.ReferenceCount = 1;
    Pfn1->u3.e1.PageLocation = ActiveAndValid;

    /* This marks the PFN as a ReactOS PFN */
    Pfn1->u4.AweAllocation = TRUE;

    /* Allocate the extra ReactOS Data and zero it out */
    Pfn1->RosMmData = (LONG)ExAllocatePoolWithTag(NonPagedPool, sizeof(MMROSPFN), 'RsPf');
    ASSERT(MI_GET_ROS_DATA(Pfn1) != NULL);
    ASSERT_IS_ROS_PFN(Pfn1);
    MI_GET_ROS_DATA(Pfn1)->SwapEntry = 0;
    MI_GET_ROS_DATA(Pfn1)->RmapListHead = NULL;

    return PfnOffset;
}
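
/*
 * Illustrative usage sketch (added, not part of the original file): a legacy
 * caller allocates a zeroed page and later hands it back through the
 * reference-count path above. MC_USER is one of the legacy consumer types; the
 * error handling is a placeholder.
 *
 *     PFN_NUMBER Page = MmAllocPage(MC_USER);
 *     if (Page == 0) return STATUS_NO_MEMORY;
 *     // ... map or use the page ...
 *     MmDereferencePage(Page);    // drops the initial reference and frees the page
 */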