[NTOSKRNL]
[reactos.git] / reactos / ntoskrnl / mm / freelist.c
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/mm/freelist.c
5 * PURPOSE: Handle the list of free physical pages
6 *
7 * PROGRAMMERS: David Welch (welch@cwcom.net)
8 * Robert Bergkvist
9 */
10
11 /* INCLUDES ****************************************************************/
12
13 #include <ntoskrnl.h>
14 #define NDEBUG
15 #include <debug.h>
16
17 #if defined (ALLOC_PRAGMA)
18 #pragma alloc_text(INIT, MmInitializePageList)
19 #endif
20
21 #define MODULE_INVOLVED_IN_ARM3
22 #include "ARM3/miarm.h"
23
/* GLOBALS ****************************************************************/

// ReactOS to NT Physical Page Descriptor Entry Legacy Mapping Definitions
#define PHYSICAL_PAGE MMPFN
#define PPHYSICAL_PAGE PMMPFN

/* Base of the PFN database: one MMPFN entry per physical page frame */
PPHYSICAL_PAGE MmPfnDatabase;

/* Physical page availability counters (in pages) */
PFN_NUMBER MmAvailablePages;
PFN_NUMBER MmResidentAvailablePages;
PFN_NUMBER MmResidentAvailableAtInit;

/* Commit-charge accounting counters -- the names suggest commit charge
   tracking; exact semantics are not visible in this file */
SIZE_T MmTotalCommittedPages;
SIZE_T MmSharedCommit;
SIZE_T MmDriverCommit;
SIZE_T MmProcessCommit;
SIZE_T MmPagedPoolCommit;
SIZE_T MmPeakCommitment;
/* NOTE(review): lower-case 't' breaks the Mm naming convention (expected
   MmTotalCommitLimitMaximum); renaming would break external references,
   so it is only flagged here */
SIZE_T MmtotalCommitLimitMaximum;

/* One bit per PFN; a set bit marks the page as a "user" page for the
   legacy LRU functions below. Protected by the PFN queued spin lock. */
static RTL_BITMAP MiUserPfnBitMap;
46 /* FUNCTIONS *************************************************************/
47
48 VOID
49 NTAPI
50 MiInitializeUserPfnBitmap(VOID)
51 {
52 PVOID Bitmap;
53
54 /* Allocate enough buffer for the PFN bitmap and align it on 32-bits */
55 Bitmap = ExAllocatePoolWithTag(NonPagedPool,
56 (((MmHighestPhysicalPage + 1) + 31) / 32) * 4,
57 ' mM');
58 ASSERT(Bitmap);
59
60 /* Initialize it and clear all the bits to begin with */
61 RtlInitializeBitMap(&MiUserPfnBitMap,
62 Bitmap,
63 (ULONG)MmHighestPhysicalPage + 1);
64 RtlClearAllBits(&MiUserPfnBitMap);
65 }
66
67 PFN_NUMBER
68 NTAPI
69 MmGetLRUFirstUserPage(VOID)
70 {
71 ULONG Position;
72 KIRQL OldIrql;
73
74 /* Find the first user page */
75 OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
76 Position = RtlFindSetBits(&MiUserPfnBitMap, 1, 0);
77 KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
78 if (Position == 0xFFFFFFFF) return 0;
79
80 /* Return it */
81 ASSERT(Position != 0);
82 ASSERT_IS_ROS_PFN(MiGetPfnEntry(Position));
83 return Position;
84 }
85
86 VOID
87 NTAPI
88 MmInsertLRULastUserPage(PFN_NUMBER Pfn)
89 {
90 KIRQL OldIrql;
91
92 /* Set the page as a user page */
93 ASSERT(Pfn != 0);
94 ASSERT_IS_ROS_PFN(MiGetPfnEntry(Pfn));
95 ASSERT(!RtlCheckBit(&MiUserPfnBitMap, (ULONG)Pfn));
96 OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
97 RtlSetBit(&MiUserPfnBitMap, (ULONG)Pfn);
98 KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
99 }
100
101 PFN_NUMBER
102 NTAPI
103 MmGetLRUNextUserPage(PFN_NUMBER PreviousPfn)
104 {
105 ULONG Position;
106 KIRQL OldIrql;
107
108 /* Find the next user page */
109 OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
110 Position = RtlFindSetBits(&MiUserPfnBitMap, 1, (ULONG)PreviousPfn + 1);
111 KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
112 if (Position == 0xFFFFFFFF) return 0;
113
114 /* Return it */
115 ASSERT(Position != 0);
116 ASSERT_IS_ROS_PFN(MiGetPfnEntry(Position));
117 return Position;
118 }
119
120 VOID
121 NTAPI
122 MmRemoveLRUUserPage(PFN_NUMBER Page)
123 {
124 KIRQL OldIrql;
125
126 /* Unset the page as a user page */
127 ASSERT(Page != 0);
128 ASSERT_IS_ROS_PFN(MiGetPfnEntry(Page));
129 ASSERT(RtlCheckBit(&MiUserPfnBitMap, (ULONG)Page));
130 OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
131 RtlClearBit(&MiUserPfnBitMap, (ULONG)Page);
132 KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
133 }
134
135 BOOLEAN
136 NTAPI
137 MiIsPfnFree(IN PMMPFN Pfn1)
138 {
139 /* Must be a free or zero page, with no references, linked */
140 return ((Pfn1->u3.e1.PageLocation <= StandbyPageList) &&
141 (Pfn1->u1.Flink) &&
142 (Pfn1->u2.Blink) &&
143 !(Pfn1->u3.e2.ReferenceCount));
144 }
145
146 BOOLEAN
147 NTAPI
148 MiIsPfnInUse(IN PMMPFN Pfn1)
149 {
150 /* Standby list or higher, unlinked, and with references */
151 return !MiIsPfnFree(Pfn1);
152 }
153
/*
 * Allocates physical pages and wraps them in an MDL.
 *
 * LowAddress/HighAddress bound the physical range to draw from, SkipBytes
 * must be page-aligned (and is otherwise unsupported -- see below),
 * TotalBytes is the requested size. May legitimately return an MDL
 * describing FEWER pages than requested (down to 1); callers must check
 * Mdl->ByteCount. Returns NULL if no MDL or no pages could be obtained.
 *
 * NOTE(review): CacheAttribute and MdlFlags are accepted but never used
 * by this implementation.
 */
PMDL
NTAPI
MiAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress,
                      IN PHYSICAL_ADDRESS HighAddress,
                      IN PHYSICAL_ADDRESS SkipBytes,
                      IN SIZE_T TotalBytes,
                      IN MI_PFN_CACHE_ATTRIBUTE CacheAttribute,
                      IN ULONG MdlFlags)
{
    PMDL Mdl;
    PFN_NUMBER PageCount, LowPage, HighPage, SkipPages, PagesFound = 0, Page;
    PPFN_NUMBER MdlPage, LastMdlPage;
    KIRQL OldIrql;
    PPHYSICAL_PAGE Pfn1;
    INT LookForZeroedPages;
    ASSERT(KeGetCurrentIrql() <= APC_LEVEL);

    //
    // Convert the low address into a PFN
    //
    LowPage = (PFN_NUMBER)(LowAddress.QuadPart >> PAGE_SHIFT);

    //
    // Convert, and normalize, the high address into a PFN
    //
    HighPage = (PFN_NUMBER)(HighAddress.QuadPart >> PAGE_SHIFT);
    if (HighPage > MmHighestPhysicalPage) HighPage = MmHighestPhysicalPage;

    //
    // Validate skipbytes and convert them into pages
    //
    if (BYTE_OFFSET(SkipBytes.LowPart)) return NULL;
    SkipPages = (PFN_NUMBER)(SkipBytes.QuadPart >> PAGE_SHIFT);

    /* This isn't supported at all */
    if (SkipPages) DPRINT1("WARNING: Caller requesting SkipBytes, MDL might be mismatched\n");

    //
    // Now compute the number of pages the MDL will cover
    //
    PageCount = (PFN_NUMBER)ADDRESS_AND_SIZE_TO_SPAN_PAGES(0, TotalBytes);
    do
    {
        //
        // Try creating an MDL for these many pages
        //
        Mdl = MmCreateMdl(NULL, NULL, PageCount << PAGE_SHIFT);
        if (Mdl) break;

        //
        // This function is not required to return the amount of pages requested
        // In fact, it can return as little as 1 page, and callers are supposed
        // to deal with this scenario. So re-attempt the allocation with less
        // pages than before, and see if it worked this time.
        //
        // NOTE(review): for PageCount < 16, (PageCount >> 4) is 0 and the
        // count never shrinks -- if MmCreateMdl keeps failing, this loop
        // never terminates. Confirm whether that is acceptable here.
        //
        PageCount -= (PageCount >> 4);
    } while (PageCount);

    //
    // Wow, not even a single page was around!
    //
    if (!Mdl) return NULL;

    //
    // This is where the page array starts....
    //
    MdlPage = (PPFN_NUMBER)(Mdl + 1);

    //
    // Lock the PFN database
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    //
    // Are we looking for any pages, without discriminating?
    //
    if ((LowPage == 0) && (HighPage == MmHighestPhysicalPage))
    {
        //
        // Well then, let's go shopping
        //
        while (PagesFound < PageCount)
        {
            /* Grab a page */
            MI_SET_USAGE(MI_USAGE_MDL);
            MI_SET_PROCESS2("Kernel");
            Page = MiRemoveAnyPage(0);
            if (Page == 0)
            {
                /* This is not good... hopefully we have at least SOME pages */
                ASSERT(PagesFound);
                break;
            }

            /* Grab the page entry for it */
            Pfn1 = MiGetPfnEntry(Page);

            //
            // Make sure it's really free
            //
            ASSERT(Pfn1->u3.e2.ReferenceCount == 0);

            /* Now setup the page and mark it */
            Pfn1->u3.e2.ReferenceCount = 1;
            Pfn1->u2.ShareCount = 1;
            MI_SET_PFN_DELETED(Pfn1);
            /* Magic marker used as the PTE frame of MDL-owned pages */
            Pfn1->u4.PteFrame = 0x1FFEDCB;
            Pfn1->u3.e1.StartOfAllocation = 1;
            Pfn1->u3.e1.EndOfAllocation = 1;
            Pfn1->u4.VerifierAllocation = 0;

            //
            // Save it into the MDL
            //
            *MdlPage++ = MiGetPfnEntryIndex(Pfn1);
            PagesFound++;
        }
    }
    else
    {
        //
        // You want specific range of pages. We'll do this in two runs:
        // pass 1 takes only zeroed pages, pass 2 takes any free page.
        //
        for (LookForZeroedPages = 1; LookForZeroedPages >= 0; LookForZeroedPages--)
        {
            //
            // Scan the range you specified
            //
            // NOTE(review): the upper bound is exclusive, so the page at
            // HighPage itself is never considered -- confirm intentional.
            //
            for (Page = LowPage; Page < HighPage; Page++)
            {
                //
                // Get the PFN entry for this page
                //
                Pfn1 = MiGetPfnEntry(Page);
                ASSERT(Pfn1);

                //
                // Make sure it's free and if this is our first pass, zeroed
                //
                if (MiIsPfnInUse(Pfn1)) continue;
                if ((Pfn1->u3.e1.PageLocation == ZeroedPageList) != LookForZeroedPages) continue;

                /* Remove the page from the free or zero list */
                ASSERT(Pfn1->u3.e1.ReadInProgress == 0);
                MI_SET_USAGE(MI_USAGE_MDL);
                MI_SET_PROCESS2("Kernel");
                MiUnlinkFreeOrZeroedPage(Pfn1);

                //
                // Sanity checks
                //
                ASSERT(Pfn1->u3.e2.ReferenceCount == 0);

                //
                // Now setup the page and mark it
                //
                Pfn1->u3.e2.ReferenceCount = 1;
                Pfn1->u2.ShareCount = 1;
                MI_SET_PFN_DELETED(Pfn1);
                /* Same MDL-ownership marker as in the unrestricted path */
                Pfn1->u4.PteFrame = 0x1FFEDCB;
                Pfn1->u3.e1.StartOfAllocation = 1;
                Pfn1->u3.e1.EndOfAllocation = 1;
                Pfn1->u4.VerifierAllocation = 0;

                //
                // Save this page into the MDL
                //
                *MdlPage++ = Page;
                if (++PagesFound == PageCount) break;
            }

            //
            // If the first pass was enough, don't keep going, otherwise, go again
            //
            if (PagesFound == PageCount) break;
        }
    }

    //
    // Now release the PFN count
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    //
    // We might've found less pages, but not more ;-)
    //
    if (PagesFound != PageCount) ASSERT(PagesFound < PageCount);
    if (!PagesFound)
    {
        //
        // If we didn't find any pages at all, fail
        //
        DPRINT1("NO MDL PAGES!\n");
        ExFreePool(Mdl);
        return NULL;
    }

    //
    // Write out how many pages we found
    //
    Mdl->ByteCount = (ULONG)(PagesFound << PAGE_SHIFT);

    //
    // Terminate the MDL array if there's certain missing pages
    //
    if (PagesFound != PageCount) *MdlPage = LIST_HEAD;

    //
    // Now go back and loop over all the MDL pages, zeroing any page that
    // did not come off the zeroed list before handing it out
    //
    MdlPage = (PPFN_NUMBER)(Mdl + 1);
    LastMdlPage = MdlPage + PagesFound;
    while (MdlPage < LastMdlPage)
    {
        //
        // Check if we've reached the end
        //
        Page = *MdlPage++;
        if (Page == LIST_HEAD) break;

        //
        // Get the PFN entry for the page and check if we should zero it out
        //
        Pfn1 = MiGetPfnEntry(Page);
        ASSERT(Pfn1);
        if (Pfn1->u3.e1.PageLocation != ZeroedPageList) MiZeroPhysicalPage(Page);
        Pfn1->u3.e1.PageLocation = ActiveAndValid;
    }

    //
    // We're done, mark the pages as locked
    //
    Mdl->Process = NULL;
    Mdl->MdlFlags |= MDL_PAGES_LOCKED;
    return Mdl;
}
390
391 VOID
392 NTAPI
393 MmSetRmapListHeadPage(PFN_NUMBER Pfn, PMM_RMAP_ENTRY ListHead)
394 {
395 KIRQL oldIrql;
396 PMMPFN Pfn1;
397
398 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
399 Pfn1 = MiGetPfnEntry(Pfn);
400 ASSERT(Pfn1);
401 ASSERT_IS_ROS_PFN(Pfn1);
402
403 if (ListHead)
404 {
405 /* Should not be trying to insert an RMAP for a non-active page */
406 ASSERT(MiIsPfnInUse(Pfn1) == TRUE);
407
408 /* Set the list head address */
409 MI_GET_ROS_DATA(Pfn1)->RmapListHead = ListHead;
410 }
411 else
412 {
413 /* ReactOS semantics dictate the page is STILL active right now */
414 ASSERT(MiIsPfnInUse(Pfn1) == TRUE);
415
416 /* In this case, the RMAP is actually being removed, so clear field */
417 MI_GET_ROS_DATA(Pfn1)->RmapListHead = NULL;
418
419 /* ReactOS semantics will now release the page, which will make it free and enter a colored list */
420 }
421
422 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
423 }
424
425 PMM_RMAP_ENTRY
426 NTAPI
427 MmGetRmapListHeadPage(PFN_NUMBER Pfn)
428 {
429 KIRQL oldIrql;
430 PMM_RMAP_ENTRY ListHead;
431 PMMPFN Pfn1;
432
433 /* Lock PFN database */
434 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
435
436 /* Get the entry */
437 Pfn1 = MiGetPfnEntry(Pfn);
438 ASSERT(Pfn1);
439 ASSERT_IS_ROS_PFN(Pfn1);
440
441 /* Get the list head */
442 ListHead = MI_GET_ROS_DATA(Pfn1)->RmapListHead;
443
444 /* Should not have an RMAP for a non-active page */
445 ASSERT(MiIsPfnInUse(Pfn1) == TRUE);
446
447 /* Release PFN database and return rmap list head */
448 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
449 return ListHead;
450 }
451
452 VOID
453 NTAPI
454 MmSetSavedSwapEntryPage(PFN_NUMBER Pfn, SWAPENTRY SwapEntry)
455 {
456 KIRQL oldIrql;
457 PPHYSICAL_PAGE Page;
458
459 Page = MiGetPfnEntry(Pfn);
460 ASSERT(Page);
461 ASSERT_IS_ROS_PFN(Page);
462
463 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
464 MI_GET_ROS_DATA(Page)->SwapEntry = SwapEntry;
465 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
466 }
467
468 SWAPENTRY
469 NTAPI
470 MmGetSavedSwapEntryPage(PFN_NUMBER Pfn)
471 {
472 SWAPENTRY SwapEntry;
473 KIRQL oldIrql;
474 PPHYSICAL_PAGE Page;
475
476 Page = MiGetPfnEntry(Pfn);
477 ASSERT(Page);
478 ASSERT_IS_ROS_PFN(Page);
479
480 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
481 SwapEntry = MI_GET_ROS_DATA(Page)->SwapEntry;
482 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
483
484 return(SwapEntry);
485 }
486
487 VOID
488 NTAPI
489 MmReferencePage(PFN_NUMBER Pfn)
490 {
491 PPHYSICAL_PAGE Page;
492
493 DPRINT("MmReferencePage(PysicalAddress %x)\n", Pfn << PAGE_SHIFT);
494
495 if (Pfn == 0 || Pfn > MmHighestPhysicalPage)
496 {
497 return;
498 }
499
500 Page = MiGetPfnEntry(Pfn);
501 ASSERT(Page);
502 ASSERT_IS_ROS_PFN(Page);
503
504 Page->u3.e2.ReferenceCount++;
505 }
506
507 ULONG
508 NTAPI
509 MmGetReferenceCountPage(PFN_NUMBER Pfn)
510 {
511 KIRQL oldIrql;
512 ULONG RCount;
513 PPHYSICAL_PAGE Page;
514
515 DPRINT("MmGetReferenceCountPage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
516
517 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
518 Page = MiGetPfnEntry(Pfn);
519 ASSERT(Page);
520 ASSERT_IS_ROS_PFN(Page);
521
522 RCount = Page->u3.e2.ReferenceCount;
523
524 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
525 return(RCount);
526 }
527
528 BOOLEAN
529 NTAPI
530 MmIsPageInUse(PFN_NUMBER Pfn)
531 {
532 return MiIsPfnInUse(MiGetPfnEntry(Pfn));
533 }
534
/* Drops one reference from the given PFN; when the count reaches zero the
   ReactOS-specific extension data is freed and the page is returned to
   the free list. */
VOID
NTAPI
MmDereferencePage(PFN_NUMBER Pfn)
{
    PPHYSICAL_PAGE Page;
    DPRINT("MmDereferencePage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);

    Page = MiGetPfnEntry(Pfn);
    ASSERT(Page);
    ASSERT_IS_ROS_PFN(Page);

    /* NOTE(review): no PFN lock is taken here; presumably the caller holds
       it, as the sibling accessors do -- confirm at the call sites */
    Page->u3.e2.ReferenceCount--;
    if (Page->u3.e2.ReferenceCount == 0)
    {
        /* Mark the page temporarily as valid, we're going to make it free soon */
        Page->u3.e1.PageLocation = ActiveAndValid;

        /* It's not a ROS PFN anymore: drop the AWE marker and free the
           extension block allocated in MmAllocPage */
        Page->u4.AweAllocation = FALSE;
        ExFreePool(MI_GET_ROS_DATA(Page));
        Page->RosMmData = 0;

        /* Bring it back into the free list */
        DPRINT("Legacy free: %lx\n", Pfn);
        MiInsertPageInFreeList(Pfn);
    }
}
562
/* Legacy ReactOS page allocator: removes one zeroed page from the next
   color's list, marks it active with one reference, and attaches the
   ReactOS-specific PFN extension (swap entry + rmap head).
   The Type parameter is unused. Returns 0 on out-of-memory. */
PFN_NUMBER
NTAPI
MmAllocPage(ULONG Type)
{
    PFN_NUMBER PfnOffset;
    PMMPFN Pfn1;

    /* Take a zeroed page from the next color's zeroed-page list */
    PfnOffset = MiRemoveZeroPage(MI_GET_NEXT_COLOR());

    if (!PfnOffset)
    {
        DPRINT1("MmAllocPage(): Out of memory\n");
        return 0;
    }

    DPRINT("Legacy allocate: %lx\n", PfnOffset);
    Pfn1 = MiGetPfnEntry(PfnOffset);
    Pfn1->u3.e2.ReferenceCount = 1;
    Pfn1->u3.e1.PageLocation = ActiveAndValid;

    /* This marks the PFN as a ReactOS PFN */
    Pfn1->u4.AweAllocation = TRUE;

    /* Allocate the extra ReactOS Data and zero it out.
       NOTE(review): the (LONG) cast truncates the pool pointer on 64-bit
       targets -- confirm RosMmData is wide enough there.
       NOTE(review): an allocation failure is only caught by the ASSERT
       below, which is compiled out under NDEBUG; a NULL RosMmData would
       then be dereferenced immediately afterwards. */
    Pfn1->RosMmData = (LONG)ExAllocatePoolWithTag(NonPagedPool, sizeof(MMROSPFN), 'RsPf');
    ASSERT(MI_GET_ROS_DATA(Pfn1) != NULL);
    ASSERT_IS_ROS_PFN(Pfn1);
    MI_GET_ROS_DATA(Pfn1)->SwapEntry = 0;
    MI_GET_ROS_DATA(Pfn1)->RmapListHead = NULL;

    return PfnOffset;
}
595
596 /* EOF */