/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/mm/freelist.c
 * PURPOSE:         Handle the list of free physical pages
 *
 * PROGRAMMERS:     David Welch (welch@cwcom.net)
 *                  Robert Bergkvist
 */

/* INCLUDES ****************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#if defined (ALLOC_PRAGMA)
#pragma alloc_text(INIT, MmInitializePageList)
#endif

#define MODULE_INVOLVED_IN_ARM3
#include "ARM3/miarm.h"

/* GLOBALS ****************************************************************/

// ReactOS to NT Physical Page Descriptor Entry Legacy Mapping Definitions
#define PHYSICAL_PAGE MMPFN
#define PPHYSICAL_PAGE PMMPFN

PPHYSICAL_PAGE MmPfnDatabase;

PFN_NUMBER MmAvailablePages;
PFN_NUMBER MmResidentAvailablePages;
PFN_NUMBER MmResidentAvailableAtInit;

SIZE_T MmTotalCommittedPages;
SIZE_T MmSharedCommit;
SIZE_T MmDriverCommit;
SIZE_T MmProcessCommit;
SIZE_T MmPagedPoolCommit;
SIZE_T MmPeakCommitment;
SIZE_T MmTotalCommitLimitMaximum;

static RTL_BITMAP MiUserPfnBitMap;

/* FUNCTIONS *************************************************************/

VOID
NTAPI
MiInitializeUserPfnBitmap(VOID)
{
    PVOID Bitmap;

    /* Allocate enough buffer for the PFN bitmap and align it on 32-bits */
    Bitmap = ExAllocatePoolWithTag(NonPagedPool,
                                   (((MmHighestPhysicalPage + 1) + 31) / 32) * 4,
                                   '  mM');
    ASSERT(Bitmap);

    /* Initialize it and clear all the bits to begin with */
    RtlInitializeBitMap(&MiUserPfnBitMap,
                        Bitmap,
                        (ULONG)MmHighestPhysicalPage + 1);
    RtlClearAllBits(&MiUserPfnBitMap);
}

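/*
 * The allocation above reserves one bit per PFN from 0 through
 * MmHighestPhysicalPage and rounds the buffer up to whole 32-bit units.
 * A minimal sketch of the same arithmetic; the helper name is
 * hypothetical and nothing in this file calls it:
 */
static ULONG
MiExampleUserPfnBitmapBytes(PFN_NUMBER HighestPfn)
{
    /* Bits needed: HighestPfn + 1, rounded up to ULONG granularity and
       converted to bytes. For HighestPfn == 0x3FFFF (1 GiB of 4 KiB
       pages) this yields 0x8000 bytes. */
    return ((((ULONG)HighestPfn + 1) + 31) / 32) * 4;
}
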
PFN_NUMBER
NTAPI
MmGetLRUFirstUserPage(VOID)
{
    ULONG Position;
    KIRQL OldIrql;

    /* Find the first user page */
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    Position = RtlFindSetBits(&MiUserPfnBitMap, 1, 0);
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
    if (Position == 0xFFFFFFFF) return 0;

    /* Return it */
    ASSERT(Position != 0);
    ASSERT_IS_ROS_PFN(MiGetPfnEntry(Position));
    return Position;
}

VOID
NTAPI
MmInsertLRULastUserPage(PFN_NUMBER Pfn)
{
    KIRQL OldIrql;

    /* Set the page as a user page */
    ASSERT(Pfn != 0);
    ASSERT_IS_ROS_PFN(MiGetPfnEntry(Pfn));
    ASSERT(!RtlCheckBit(&MiUserPfnBitMap, (ULONG)Pfn));
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    RtlSetBit(&MiUserPfnBitMap, (ULONG)Pfn);
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
}

PFN_NUMBER
NTAPI
MmGetLRUNextUserPage(PFN_NUMBER PreviousPfn)
{
    ULONG Position;
    KIRQL OldIrql;

    /* Find the next user page */
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    Position = RtlFindSetBits(&MiUserPfnBitMap, 1, (ULONG)PreviousPfn + 1);
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
    if (Position == 0xFFFFFFFF) return 0;

    /* Return it */
    ASSERT(Position != 0);
    ASSERT_IS_ROS_PFN(MiGetPfnEntry(Position));
    return Position;
}

VOID
NTAPI
MmRemoveLRUUserPage(PFN_NUMBER Page)
{
    /* Unset the page as a user page */
    ASSERT(Page != 0);
    ASSERT_IS_ROS_PFN(MiGetPfnEntry(Page));
    ASSERT(RtlCheckBit(&MiUserPfnBitMap, (ULONG)Page));
    RtlClearBit(&MiUserPfnBitMap, (ULONG)Page);
}

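/*
 * A minimal sketch of how a legacy caller (for example the page-out path)
 * might walk every page currently marked as a user page with the
 * first/next pair above. The function name is hypothetical and only
 * illustrates the calling pattern; a caller that actually evicts a page
 * would also call MmRemoveLRUUserPage for it.
 */
static VOID
MiExampleWalkUserPages(VOID)
{
    PFN_NUMBER Page;

    /* Start at the first user page; 0 means the bitmap is empty */
    for (Page = MmGetLRUFirstUserPage();
         Page != 0;
         Page = MmGetLRUNextUserPage(Page))
    {
        /* A real caller would try to age or evict the page here */
        DPRINT("User page: %lx\n", Page);
    }
}
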
BOOLEAN
NTAPI
MiIsPfnFree(IN PMMPFN Pfn1)
{
    /* Must be a free or zero page, with no references, linked */
    return ((Pfn1->u3.e1.PageLocation <= StandbyPageList) &&
            (Pfn1->u1.Flink) &&
            (Pfn1->u2.Blink) &&
            !(Pfn1->u3.e2.ReferenceCount));
}

BOOLEAN
NTAPI
MiIsPfnInUse(IN PMMPFN Pfn1)
{
    /* Standby list or higher, unlinked, and with references */
    return !MiIsPfnFree(Pfn1);
}

PMDL
NTAPI
MiAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress,
                      IN PHYSICAL_ADDRESS HighAddress,
                      IN PHYSICAL_ADDRESS SkipBytes,
                      IN SIZE_T TotalBytes,
                      IN MI_PFN_CACHE_ATTRIBUTE CacheAttribute,
                      IN ULONG MdlFlags)
{
    PMDL Mdl;
    PFN_NUMBER PageCount, LowPage, HighPage, SkipPages, PagesFound = 0, Page;
    PPFN_NUMBER MdlPage, LastMdlPage;
    KIRQL OldIrql;
    PPHYSICAL_PAGE Pfn1;
    INT LookForZeroedPages;
    ASSERT(KeGetCurrentIrql() <= APC_LEVEL);

    //
    // Convert the low address into a PFN
    //
    LowPage = (PFN_NUMBER)(LowAddress.QuadPart >> PAGE_SHIFT);

    //
    // Convert, and normalize, the high address into a PFN
    //
    HighPage = (PFN_NUMBER)(HighAddress.QuadPart >> PAGE_SHIFT);
    if (HighPage > MmHighestPhysicalPage) HighPage = MmHighestPhysicalPage;

    //
    // Validate SkipBytes and convert it into pages
    //
    if (BYTE_OFFSET(SkipBytes.LowPart)) return NULL;
    SkipPages = (PFN_NUMBER)(SkipBytes.QuadPart >> PAGE_SHIFT);

    /* This isn't supported at all */
    if (SkipPages) DPRINT1("WARNING: Caller requesting SkipBytes, MDL might be mismatched\n");

    //
    // Now compute the number of pages the MDL will cover
    //
    PageCount = (PFN_NUMBER)ADDRESS_AND_SIZE_TO_SPAN_PAGES(0, TotalBytes);
    do
    {
        //
        // Try creating an MDL for this many pages
        //
        Mdl = MmCreateMdl(NULL, NULL, PageCount << PAGE_SHIFT);
        if (Mdl) break;

        //
        // This function is not required to return the number of pages requested.
        // In fact, it can return as little as 1 page, and callers are supposed
        // to deal with this scenario. So retry the allocation with fewer pages
        // than before (about 1/16th fewer each attempt), and see if it works.
        //
        PageCount -= (PageCount >> 4);
    } while (PageCount);

    //
    // Wow, not even a single page was around!
    //
    if (!Mdl) return NULL;

    //
    // This is where the page array starts....
    //
    MdlPage = (PPFN_NUMBER)(Mdl + 1);

    //
    // Lock the PFN database
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    //
    // Are we looking for any pages, without discriminating?
    //
    if ((LowPage == 0) && (HighPage == MmHighestPhysicalPage))
    {
        //
        // Well then, let's go shopping
        //
        while (PagesFound < PageCount)
        {
            /* Grab a page */
            MI_SET_USAGE(MI_USAGE_MDL);
            MI_SET_PROCESS2("Kernel");
            Page = MiRemoveAnyPage(0);
            if (Page == 0)
            {
                /* This is not good... hopefully we have at least SOME pages */
                ASSERT(PagesFound);
                break;
            }

            /* Grab the page entry for it */
            Pfn1 = MiGetPfnEntry(Page);

            //
            // Make sure it's really free
            //
            ASSERT(Pfn1->u3.e2.ReferenceCount == 0);

            /* Now setup the page and mark it */
            Pfn1->u3.e2.ReferenceCount = 1;
            Pfn1->u2.ShareCount = 1;
            MI_SET_PFN_DELETED(Pfn1);
            Pfn1->u4.PteFrame = 0x1FFEDCB;
            Pfn1->u3.e1.StartOfAllocation = 1;
            Pfn1->u3.e1.EndOfAllocation = 1;
            Pfn1->u4.VerifierAllocation = 0;

            //
            // Save it into the MDL
            //
            *MdlPage++ = MiGetPfnEntryIndex(Pfn1);
            PagesFound++;
        }
    }
    else
    {
        //
        // You want a specific range of pages. We'll do this in two runs
        //
        for (LookForZeroedPages = 1; LookForZeroedPages >= 0; LookForZeroedPages--)
        {
            //
            // Scan the range you specified
            //
            for (Page = LowPage; Page < HighPage; Page++)
            {
                //
                // Get the PFN entry for this page
                //
                Pfn1 = MiGetPfnEntry(Page);
                ASSERT(Pfn1);

                //
                // Make sure it's free and, on the first pass, zeroed
                //
                if (MiIsPfnInUse(Pfn1)) continue;
                if ((Pfn1->u3.e1.PageLocation == ZeroedPageList) != LookForZeroedPages) continue;

                /* Remove the page from the free or zero list */
                ASSERT(Pfn1->u3.e1.ReadInProgress == 0);
                MI_SET_USAGE(MI_USAGE_MDL);
                MI_SET_PROCESS2("Kernel");
                MiUnlinkFreeOrZeroedPage(Pfn1);

                //
                // Sanity checks
                //
                ASSERT(Pfn1->u3.e2.ReferenceCount == 0);

                //
                // Now setup the page and mark it
                //
                Pfn1->u3.e2.ReferenceCount = 1;
                Pfn1->u2.ShareCount = 1;
                MI_SET_PFN_DELETED(Pfn1);
                Pfn1->u4.PteFrame = 0x1FFEDCB;
                Pfn1->u3.e1.StartOfAllocation = 1;
                Pfn1->u3.e1.EndOfAllocation = 1;
                Pfn1->u4.VerifierAllocation = 0;

                //
                // Save this page into the MDL
                //
                *MdlPage++ = Page;
                if (++PagesFound == PageCount) break;
            }

            //
            // If the first pass was enough, don't keep going, otherwise, go again
            //
            if (PagesFound == PageCount) break;
        }
    }

    //
    // Now release the PFN lock
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    //
    // We might've found fewer pages, but not more ;-)
    //
    if (PagesFound != PageCount) ASSERT(PagesFound < PageCount);
    if (!PagesFound)
    {
        //
        // If we didn't find any pages at all, fail
        //
        DPRINT1("NO MDL PAGES!\n");
        ExFreePool(Mdl);
        return NULL;
    }

    //
    // Write out how many pages we found
    //
    Mdl->ByteCount = (ULONG)(PagesFound << PAGE_SHIFT);

    //
    // Terminate the MDL array if some pages are missing
    //
    if (PagesFound != PageCount) *MdlPage = LIST_HEAD;

    //
    // Now go back and loop over all the MDL pages
    //
    MdlPage = (PPFN_NUMBER)(Mdl + 1);
    LastMdlPage = MdlPage + PagesFound;
    while (MdlPage < LastMdlPage)
    {
        //
        // Check if we've reached the end
        //
        Page = *MdlPage++;
        if (Page == LIST_HEAD) break;

        //
        // Get the PFN entry for the page and check if we should zero it out
        //
        Pfn1 = MiGetPfnEntry(Page);
        ASSERT(Pfn1);
        if (Pfn1->u3.e1.PageLocation != ZeroedPageList) MiZeroPhysicalPage(Page);
        Pfn1->u3.e1.PageLocation = ActiveAndValid;
    }

    //
    // We're done, mark the pages as locked
    //
    Mdl->Process = NULL;
    Mdl->MdlFlags |= MDL_PAGES_LOCKED;
    return Mdl;
}

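/*
 * A minimal usage sketch for the worker above, going through the public
 * MmAllocatePagesForMdl wrapper (assumed here to end up in
 * MiAllocatePagesForMdl). Because the allocation may return fewer pages
 * than requested, the caller must check the resulting byte count. The
 * caller name and the requested size are hypothetical.
 */
static VOID
MiExampleAllocateMdlPages(VOID)
{
    PHYSICAL_ADDRESS Low, High, Skip;
    PMDL Mdl;
    SIZE_T BytesWanted = 16 * PAGE_SIZE;

    /* Accept any physical page, with no skip distance */
    Low.QuadPart = 0;
    High.QuadPart = -1;
    Skip.QuadPart = 0;

    Mdl = MmAllocatePagesForMdl(Low, High, Skip, BytesWanted);
    if (!Mdl) return;

    /* The MDL may describe fewer pages than requested */
    if (MmGetMdlByteCount(Mdl) < BytesWanted)
    {
        DPRINT("Only got %lu bytes\n", MmGetMdlByteCount(Mdl));
    }

    /* ... map or program the pages here ... */

    /* Return the pages, then free the MDL structure itself */
    MmFreePagesFromMdl(Mdl);
    ExFreePool(Mdl);
}
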
VOID
NTAPI
MmSetRmapListHeadPage(PFN_NUMBER Pfn, PMM_RMAP_ENTRY ListHead)
{
    KIRQL oldIrql;
    PMMPFN Pfn1;

    oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    Pfn1 = MiGetPfnEntry(Pfn);
    ASSERT(Pfn1);
    ASSERT_IS_ROS_PFN(Pfn1);

    if (ListHead)
    {
        /* Should not be trying to insert an RMAP for a non-active page */
        ASSERT(MiIsPfnInUse(Pfn1) == TRUE);

        /* Set the list head address */
        MI_GET_ROS_DATA(Pfn1)->RmapListHead = ListHead;
    }
    else
    {
        /* ReactOS semantics dictate the page is STILL active right now */
        ASSERT(MiIsPfnInUse(Pfn1) == TRUE);

        /* In this case, the RMAP is actually being removed, so clear the field */
        MI_GET_ROS_DATA(Pfn1)->RmapListHead = NULL;

        /* ReactOS semantics will now release the page, which will make it free and enter a colored list */
    }

    KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
}

PMM_RMAP_ENTRY
NTAPI
MmGetRmapListHeadPage(PFN_NUMBER Pfn)
{
    KIRQL oldIrql;
    PMM_RMAP_ENTRY ListHead;
    PMMPFN Pfn1;

    /* Lock PFN database */
    oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    /* Get the entry */
    Pfn1 = MiGetPfnEntry(Pfn);
    ASSERT(Pfn1);
    ASSERT_IS_ROS_PFN(Pfn1);

    /* Get the list head */
    ListHead = MI_GET_ROS_DATA(Pfn1)->RmapListHead;

    /* Should not have an RMAP for a non-active page */
    ASSERT(MiIsPfnInUse(Pfn1) == TRUE);

    /* Release PFN database and return rmap list head */
    KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
    return ListHead;
}

VOID
NTAPI
MmSetSavedSwapEntryPage(PFN_NUMBER Pfn, SWAPENTRY SwapEntry)
{
    KIRQL oldIrql;
    PPHYSICAL_PAGE Page;

    Page = MiGetPfnEntry(Pfn);
    ASSERT(Page);
    ASSERT_IS_ROS_PFN(Page);

    oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    MI_GET_ROS_DATA(Page)->SwapEntry = SwapEntry;
    KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
}

SWAPENTRY
NTAPI
MmGetSavedSwapEntryPage(PFN_NUMBER Pfn)
{
    SWAPENTRY SwapEntry;
    KIRQL oldIrql;
    PPHYSICAL_PAGE Page;

    Page = MiGetPfnEntry(Pfn);
    ASSERT(Page);
    ASSERT_IS_ROS_PFN(Page);

    oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    SwapEntry = MI_GET_ROS_DATA(Page)->SwapEntry;
    KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);

    return(SwapEntry);
}

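/*
 * A small sketch of the intended round trip for the two accessors above:
 * the legacy swap-out path records where a page's contents live in the
 * paging file, and the page-in path reads the value back. The caller name
 * and the swap entry value are hypothetical.
 */
static VOID
MiExampleSaveAndRestoreSwapEntry(PFN_NUMBER Pfn)
{
    SWAPENTRY Entry = 0x1234;

    /* Remember the swap location for this physical page */
    MmSetSavedSwapEntryPage(Pfn, Entry);

    /* Later, when paging the page back in, retrieve it again */
    Entry = MmGetSavedSwapEntryPage(Pfn);
    ASSERT(Entry == 0x1234);
}
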
VOID
NTAPI
MmReferencePage(PFN_NUMBER Pfn)
{
    PPHYSICAL_PAGE Page;

    DPRINT("MmReferencePage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);

    if (Pfn == 0 || Pfn > MmHighestPhysicalPage)
    {
        return;
    }

    Page = MiGetPfnEntry(Pfn);
    ASSERT(Page);
    ASSERT_IS_ROS_PFN(Page);

    Page->u3.e2.ReferenceCount++;
}

ULONG
NTAPI
MmGetReferenceCountPage(PFN_NUMBER Pfn)
{
    KIRQL oldIrql;
    ULONG RCount;
    PPHYSICAL_PAGE Page;

    DPRINT("MmGetReferenceCountPage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);

    oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    Page = MiGetPfnEntry(Pfn);
    ASSERT(Page);
    ASSERT_IS_ROS_PFN(Page);

    RCount = Page->u3.e2.ReferenceCount;

    KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
    return(RCount);
}

BOOLEAN
NTAPI
MmIsPageInUse(PFN_NUMBER Pfn)
{
    return MiIsPfnInUse(MiGetPfnEntry(Pfn));
}

VOID
NTAPI
MmDereferencePage(PFN_NUMBER Pfn)
{
    PPHYSICAL_PAGE Page;

    DPRINT("MmDereferencePage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);

    Page = MiGetPfnEntry(Pfn);
    ASSERT(Page);
    ASSERT_IS_ROS_PFN(Page);

    Page->u3.e2.ReferenceCount--;
    if (Page->u3.e2.ReferenceCount == 0)
    {
        /* Mark the page temporarily as valid, we're going to make it free soon */
        Page->u3.e1.PageLocation = ActiveAndValid;

        /* It's not a ROS PFN anymore */
        Page->u4.AweAllocation = FALSE;
        ExFreePool(MI_GET_ROS_DATA(Page));
        Page->RosMmData = 0;

        /* Bring it back into the free list */
        DPRINT("Legacy free: %lx\n", Pfn);
        MiInsertPageInFreeList(Pfn);
    }
}

PFN_NUMBER
NTAPI
MmAllocPage(ULONG Type)
{
    PFN_NUMBER PfnOffset;
    PMMPFN Pfn1;

    PfnOffset = MiRemoveZeroPage(MI_GET_NEXT_COLOR());

    if (!PfnOffset)
    {
        DPRINT1("MmAllocPage(): Out of memory\n");
        return 0;
    }

    DPRINT("Legacy allocate: %lx\n", PfnOffset);
    Pfn1 = MiGetPfnEntry(PfnOffset);
    Pfn1->u3.e2.ReferenceCount = 1;
    Pfn1->u3.e1.PageLocation = ActiveAndValid;

    /* This marks the PFN as a ReactOS PFN */
    Pfn1->u4.AweAllocation = TRUE;

    /* Allocate the extra ReactOS Data and zero it out */
    Pfn1->RosMmData = (LONG)ExAllocatePoolWithTag(NonPagedPool, sizeof(MMROSPFN), 'RsPf');
    ASSERT(MI_GET_ROS_DATA(Pfn1) != NULL);
    ASSERT_IS_ROS_PFN(Pfn1);
    MI_GET_ROS_DATA(Pfn1)->SwapEntry = 0;
    MI_GET_ROS_DATA(Pfn1)->RmapListHead = NULL;

    return PfnOffset;
}

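/*
 * A minimal sketch of the legacy allocation lifecycle built on the
 * routines above: MmAllocPage hands out a zeroed, referenced ReactOS PFN,
 * and MmDereferencePage returns it to the free list once the last
 * reference is dropped. The caller shown here is hypothetical.
 */
static VOID
MiExampleLegacyPageLifetime(VOID)
{
    PFN_NUMBER Page;

    /* Grab a zeroed page; 0 means we are out of memory */
    Page = MmAllocPage(0);
    if (Page == 0) return;

    /* An extra reference keeps the page alive across some operation */
    MmReferencePage(Page);

    /* ... use the page ... */

    /* Drop both references; the second release frees the page */
    MmDereferencePage(Page);
    MmDereferencePage(Page);
}
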
/* EOF */