9cff79cb2136109ba036358ad1b9eaabb78ce7d0
[reactos.git] / ntoskrnl / mm / freelist.c
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/mm/freelist.c
5 * PURPOSE: Handle the list of free physical pages
6 *
7 * PROGRAMMERS: David Welch (welch@cwcom.net)
8 * Robert Bergkvist
9 */
10
11 /* INCLUDES ****************************************************************/
12
13 #include <ntoskrnl.h>
14 #define NDEBUG
15 #include <debug.h>
16
17 #define MODULE_INVOLVED_IN_ARM3
18 #include "ARM3/miarm.h"
19
20 #define ASSERT_IS_ROS_PFN(x) ASSERT(MI_IS_ROS_PFN(x) == TRUE);
21
22 /* GLOBALS ****************************************************************/
23
/* The PFN database: one MMPFN entry per physical page frame in the system */
PMMPFN MmPfnDatabase;

/* Physical page availability counters, maintained under the PFN lock */
PFN_NUMBER MmAvailablePages;
PFN_NUMBER MmResidentAvailablePages;
PFN_NUMBER MmResidentAvailableAtInit;

/* System-wide commit charge accounting, broken down by consumer */
SIZE_T MmTotalCommittedPages;
SIZE_T MmSharedCommit;
SIZE_T MmDriverCommit;
SIZE_T MmProcessCommit;
SIZE_T MmPagedPoolCommit;
SIZE_T MmPeakCommitment;
/* NOTE(review): lowercase 't' looks like a typo, but renaming would break any
   extern declaration elsewhere in the tree — confirm all references first */
SIZE_T MmtotalCommitLimitMaximum;

/* One bit per PFN; a set bit marks the page as a ReactOS "user" page,
   scanned by the MmGet/Insert/RemoveLRU*UserPage routines below */
static RTL_BITMAP MiUserPfnBitMap;
39
40 /* FUNCTIONS *************************************************************/
41
42 VOID
43 NTAPI
44 MiInitializeUserPfnBitmap(VOID)
45 {
46 PVOID Bitmap;
47
48 /* Allocate enough buffer for the PFN bitmap and align it on 32-bits */
49 Bitmap = ExAllocatePoolWithTag(NonPagedPool,
50 (((MmHighestPhysicalPage + 1) + 31) / 32) * 4,
51 TAG_MM);
52 ASSERT(Bitmap);
53
54 /* Initialize it and clear all the bits to begin with */
55 RtlInitializeBitMap(&MiUserPfnBitMap,
56 Bitmap,
57 (ULONG)MmHighestPhysicalPage + 1);
58 RtlClearAllBits(&MiUserPfnBitMap);
59 }
60
61 PFN_NUMBER
62 NTAPI
63 MmGetLRUFirstUserPage(VOID)
64 {
65 ULONG Position;
66 KIRQL OldIrql;
67
68 /* Find the first user page */
69 OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
70 Position = RtlFindSetBits(&MiUserPfnBitMap, 1, 0);
71 KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
72 if (Position == 0xFFFFFFFF) return 0;
73
74 /* Return it */
75 ASSERT(Position != 0);
76 ASSERT_IS_ROS_PFN(MiGetPfnEntry(Position));
77 return Position;
78 }
79
80 VOID
81 NTAPI
82 MmInsertLRULastUserPage(PFN_NUMBER Pfn)
83 {
84 KIRQL OldIrql;
85
86 /* Set the page as a user page */
87 ASSERT(Pfn != 0);
88 ASSERT_IS_ROS_PFN(MiGetPfnEntry(Pfn));
89 ASSERT(!RtlCheckBit(&MiUserPfnBitMap, (ULONG)Pfn));
90 OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
91 RtlSetBit(&MiUserPfnBitMap, (ULONG)Pfn);
92 KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
93 }
94
95 PFN_NUMBER
96 NTAPI
97 MmGetLRUNextUserPage(PFN_NUMBER PreviousPfn)
98 {
99 ULONG Position;
100 KIRQL OldIrql;
101
102 /* Find the next user page */
103 OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
104 Position = RtlFindSetBits(&MiUserPfnBitMap, 1, (ULONG)PreviousPfn + 1);
105 KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
106 if (Position == 0xFFFFFFFF) return 0;
107
108 /* Return it */
109 ASSERT(Position != 0);
110 ASSERT_IS_ROS_PFN(MiGetPfnEntry(Position));
111 return Position;
112 }
113
114 VOID
115 NTAPI
116 MmRemoveLRUUserPage(PFN_NUMBER Page)
117 {
118 KIRQL OldIrql;
119
120 /* Unset the page as a user page */
121 ASSERT(Page != 0);
122 ASSERT_IS_ROS_PFN(MiGetPfnEntry(Page));
123 ASSERT(RtlCheckBit(&MiUserPfnBitMap, (ULONG)Page));
124 OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
125 RtlClearBit(&MiUserPfnBitMap, (ULONG)Page);
126 KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
127 }
128
129 BOOLEAN
130 NTAPI
131 MiIsPfnFree(IN PMMPFN Pfn1)
132 {
133 /* Must be a free or zero page, with no references, linked */
134 return ((Pfn1->u3.e1.PageLocation <= StandbyPageList) &&
135 (Pfn1->u1.Flink) &&
136 (Pfn1->u2.Blink) &&
137 !(Pfn1->u3.e2.ReferenceCount));
138 }
139
140 BOOLEAN
141 NTAPI
142 MiIsPfnInUse(IN PMMPFN Pfn1)
143 {
144 /* Standby list or higher, unlinked, and with references */
145 return !MiIsPfnFree(Pfn1);
146 }
147
/*
 * Allocates physical pages and returns them wrapped in an MDL marked
 * MDL_PAGES_LOCKED. Pages come either from the fast any-page path (when the
 * caller accepts the full physical range) or from a two-pass scan of the
 * requested [LowAddress, HighAddress) range (zeroed pages first, then free
 * ones). May return fewer pages than requested — callers must check
 * Mdl->ByteCount. Returns NULL if no MDL or no pages could be obtained.
 *
 * NOTE(review): CacheAttribute and MdlFlags are accepted but never used in
 * this implementation — confirm whether callers rely on them. SkipBytes is
 * only validated, not honored (see the warning DPRINT below).
 */
PMDL
NTAPI
MiAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress,
                      IN PHYSICAL_ADDRESS HighAddress,
                      IN PHYSICAL_ADDRESS SkipBytes,
                      IN SIZE_T TotalBytes,
                      IN MI_PFN_CACHE_ATTRIBUTE CacheAttribute,
                      IN ULONG MdlFlags)
{
    PMDL Mdl;
    PFN_NUMBER PageCount, LowPage, HighPage, SkipPages, PagesFound = 0, Page;
    PPFN_NUMBER MdlPage, LastMdlPage;
    KIRQL OldIrql;
    PMMPFN Pfn1;
    INT LookForZeroedPages;

    /* This routine may block (MmCreateMdl allocates pool) */
    ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
    DPRINT1("ARM3-DEBUG: Being called with %I64x %I64x %I64x %lx %d %lu\n", LowAddress, HighAddress, SkipBytes, TotalBytes, CacheAttribute, MdlFlags);

    //
    // Convert the low address into a PFN
    //
    LowPage = (PFN_NUMBER)(LowAddress.QuadPart >> PAGE_SHIFT);

    //
    // Convert, and normalize, the high address into a PFN
    // (clamp to the highest physical page that actually exists)
    //
    HighPage = (PFN_NUMBER)(HighAddress.QuadPart >> PAGE_SHIFT);
    if (HighPage > MmHighestPhysicalPage) HighPage = MmHighestPhysicalPage;

    //
    // Validate skipbytes (must be page-aligned) and convert them into pages
    //
    if (BYTE_OFFSET(SkipBytes.LowPart)) return NULL;
    SkipPages = (PFN_NUMBER)(SkipBytes.QuadPart >> PAGE_SHIFT);

    /* This isn't supported at all */
    if (SkipPages) DPRINT1("WARNING: Caller requesting SkipBytes, MDL might be mismatched\n");

    //
    // Now compute the number of pages the MDL will cover
    //
    PageCount = (PFN_NUMBER)ADDRESS_AND_SIZE_TO_SPAN_PAGES(0, TotalBytes);
    do
    {
        //
        // Try creating an MDL for these many pages
        //
        Mdl = MmCreateMdl(NULL, NULL, PageCount << PAGE_SHIFT);
        if (Mdl) break;

        //
        // This function is not required to return the amount of pages requested
        // In fact, it can return as little as 1 page, and callers are supposed
        // to deal with this scenario. So re-attempt the allocation with less
        // pages than before (shrink by 1/16th each retry), and see if it
        // worked this time.
        //
        PageCount -= (PageCount >> 4);
    } while (PageCount);

    //
    // Wow, not even a single page was around!
    //
    if (!Mdl) return NULL;

    //
    // This is where the page array starts.... (immediately after the MDL header)
    //
    MdlPage = (PPFN_NUMBER)(Mdl + 1);

    //
    // Lock the PFN database
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    //
    // Are we looking for any pages, without discriminating?
    //
    if ((LowPage == 0) && (HighPage == MmHighestPhysicalPage))
    {
        //
        // Well then, let's go shopping: pull pages off the free/zero lists
        // one at a time until we have enough or run out
        //
        while (PagesFound < PageCount)
        {
            /* Grab a page */
            MI_SET_USAGE(MI_USAGE_MDL);
            MI_SET_PROCESS2("Kernel");

            /* FIXME: This check should be smarter */
            Page = 0;
            if (MmAvailablePages != 0)
                Page = MiRemoveAnyPage(0);

            if (Page == 0)
            {
                /* This is not good... hopefully we have at least SOME pages */
                ASSERT(PagesFound);
                break;
            }

            /* Grab the page entry for it */
            Pfn1 = MiGetPfnEntry(Page);

            //
            // Make sure it's really free
            //
            ASSERT(Pfn1->u3.e2.ReferenceCount == 0);

            /* Now setup the page and mark it: one reference, deleted PTE,
               a recognizable bogus PteFrame, and a one-page allocation span */
            Pfn1->u3.e2.ReferenceCount = 1;
            Pfn1->u2.ShareCount = 1;
            MI_SET_PFN_DELETED(Pfn1);
            Pfn1->u4.PteFrame = 0x1FFEDCB;
            Pfn1->u3.e1.StartOfAllocation = 1;
            Pfn1->u3.e1.EndOfAllocation = 1;
            Pfn1->u4.VerifierAllocation = 0;

            //
            // Save it into the MDL
            //
            *MdlPage++ = MiGetPfnEntryIndex(Pfn1);
            PagesFound++;
        }
    }
    else
    {
        //
        // You want specific range of pages. We'll do this in two runs:
        // pass 1 takes only zeroed pages, pass 2 takes any free page
        //
        for (LookForZeroedPages = 1; LookForZeroedPages >= 0; LookForZeroedPages--)
        {
            //
            // Scan the range you specified
            // NOTE(review): the scan excludes HighPage itself — confirm the
            // upper bound is intentionally exclusive
            //
            for (Page = LowPage; Page < HighPage; Page++)
            {
                //
                // Get the PFN entry for this page
                //
                Pfn1 = MiGetPfnEntry(Page);
                ASSERT(Pfn1);

                //
                // Make sure it's free and if this is our first pass, zeroed
                //
                if (MiIsPfnInUse(Pfn1)) continue;
                if ((Pfn1->u3.e1.PageLocation == ZeroedPageList) != LookForZeroedPages) continue;

                /* Remove the page from the free or zero list */
                ASSERT(Pfn1->u3.e1.ReadInProgress == 0);
                MI_SET_USAGE(MI_USAGE_MDL);
                MI_SET_PROCESS2("Kernel");
                MiUnlinkFreeOrZeroedPage(Pfn1);

                //
                // Sanity checks
                //
                ASSERT(Pfn1->u3.e2.ReferenceCount == 0);

                //
                // Now setup the page and mark it (same marking as the
                // any-page path above)
                //
                Pfn1->u3.e2.ReferenceCount = 1;
                Pfn1->u2.ShareCount = 1;
                MI_SET_PFN_DELETED(Pfn1);
                Pfn1->u4.PteFrame = 0x1FFEDCB;
                Pfn1->u3.e1.StartOfAllocation = 1;
                Pfn1->u3.e1.EndOfAllocation = 1;
                Pfn1->u4.VerifierAllocation = 0;

                //
                // Save this page into the MDL
                //
                *MdlPage++ = Page;
                if (++PagesFound == PageCount) break;
            }

            //
            // If the first pass was enough, don't keep going, otherwise, go again
            //
            if (PagesFound == PageCount) break;
        }
    }

    //
    // Now release the PFN count
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    //
    // We might've found less pages, but not more ;-)
    //
    if (PagesFound != PageCount) ASSERT(PagesFound < PageCount);
    if (!PagesFound)
    {
        //
        // If we didn't find any pages at all, fail
        //
        DPRINT1("NO MDL PAGES!\n");
        ExFreePoolWithTag(Mdl, TAG_MDL);
        return NULL;
    }

    //
    // Write out how many pages we found (callers must read ByteCount back)
    //
    Mdl->ByteCount = (ULONG)(PagesFound << PAGE_SHIFT);

    //
    // Terminate the MDL array if there's certain missing pages
    //
    if (PagesFound != PageCount) *MdlPage = LIST_HEAD;

    //
    // Now go back and loop over all the MDL pages, zeroing any that did not
    // come off the zeroed list, and marking them all active
    //
    MdlPage = (PPFN_NUMBER)(Mdl + 1);
    LastMdlPage = MdlPage + PagesFound;
    while (MdlPage < LastMdlPage)
    {
        //
        // Check if we've reached the end
        //
        Page = *MdlPage++;
        if (Page == LIST_HEAD) break;

        //
        // Get the PFN entry for the page and check if we should zero it out
        //
        Pfn1 = MiGetPfnEntry(Page);
        ASSERT(Pfn1);
        if (Pfn1->u3.e1.PageLocation != ZeroedPageList) MiZeroPhysicalPage(Page);
        Pfn1->u3.e1.PageLocation = ActiveAndValid;
    }

    //
    // We're done, mark the pages as locked
    //
    Mdl->Process = NULL;
    Mdl->MdlFlags |= MDL_PAGES_LOCKED;
    return Mdl;
}
390
391 VOID
392 NTAPI
393 MmSetRmapListHeadPage(PFN_NUMBER Pfn, PMM_RMAP_ENTRY ListHead)
394 {
395 KIRQL oldIrql;
396 PMMPFN Pfn1;
397
398 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
399 Pfn1 = MiGetPfnEntry(Pfn);
400 ASSERT(Pfn1);
401 ASSERT_IS_ROS_PFN(Pfn1);
402
403 if (ListHead)
404 {
405 /* Should not be trying to insert an RMAP for a non-active page */
406 ASSERT(MiIsPfnInUse(Pfn1) == TRUE);
407
408 /* Set the list head address */
409 Pfn1->RmapListHead = ListHead;
410 }
411 else
412 {
413 /* ReactOS semantics dictate the page is STILL active right now */
414 ASSERT(MiIsPfnInUse(Pfn1) == TRUE);
415
416 /* In this case, the RMAP is actually being removed, so clear field */
417 Pfn1->RmapListHead = NULL;
418
419 /* ReactOS semantics will now release the page, which will make it free and enter a colored list */
420 }
421
422 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
423 }
424
425 PMM_RMAP_ENTRY
426 NTAPI
427 MmGetRmapListHeadPage(PFN_NUMBER Pfn)
428 {
429 KIRQL oldIrql;
430 PMM_RMAP_ENTRY ListHead;
431 PMMPFN Pfn1;
432
433 /* Lock PFN database */
434 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
435
436 /* Get the entry */
437 Pfn1 = MiGetPfnEntry(Pfn);
438 ASSERT(Pfn1);
439 ASSERT_IS_ROS_PFN(Pfn1);
440
441 /* Get the list head */
442 ListHead = Pfn1->RmapListHead;
443
444 /* Should not have an RMAP for a non-active page */
445 ASSERT(MiIsPfnInUse(Pfn1) == TRUE);
446
447 /* Release PFN database and return rmap list head */
448 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
449 return ListHead;
450 }
451
452 VOID
453 NTAPI
454 MmSetSavedSwapEntryPage(PFN_NUMBER Pfn, SWAPENTRY SwapEntry)
455 {
456 KIRQL oldIrql;
457 PMMPFN Pfn1;
458
459 Pfn1 = MiGetPfnEntry(Pfn);
460 ASSERT(Pfn1);
461 ASSERT_IS_ROS_PFN(Pfn1);
462
463 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
464 Pfn1->u1.SwapEntry = SwapEntry;
465 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
466 }
467
468 SWAPENTRY
469 NTAPI
470 MmGetSavedSwapEntryPage(PFN_NUMBER Pfn)
471 {
472 SWAPENTRY SwapEntry;
473 KIRQL oldIrql;
474 PMMPFN Pfn1;
475
476 Pfn1 = MiGetPfnEntry(Pfn);
477 ASSERT(Pfn1);
478 ASSERT_IS_ROS_PFN(Pfn1);
479
480 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
481 SwapEntry = Pfn1->u1.SwapEntry;
482 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
483
484 return(SwapEntry);
485 }
486
487 VOID
488 NTAPI
489 MmReferencePage(PFN_NUMBER Pfn)
490 {
491 PMMPFN Pfn1;
492
493 DPRINT("MmReferencePage(PysicalAddress %x)\n", Pfn << PAGE_SHIFT);
494
495 ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
496 ASSERT(Pfn != 0);
497 ASSERT(Pfn <= MmHighestPhysicalPage);
498
499 Pfn1 = MiGetPfnEntry(Pfn);
500 ASSERT(Pfn1);
501 ASSERT_IS_ROS_PFN(Pfn1);
502
503 ASSERT(Pfn1->u3.e2.ReferenceCount != 0);
504 Pfn1->u3.e2.ReferenceCount++;
505 }
506
507 ULONG
508 NTAPI
509 MmGetReferenceCountPage(PFN_NUMBER Pfn)
510 {
511 KIRQL oldIrql;
512 ULONG RCount;
513 PMMPFN Pfn1;
514
515 DPRINT("MmGetReferenceCountPage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
516
517 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
518 Pfn1 = MiGetPfnEntry(Pfn);
519 ASSERT(Pfn1);
520 ASSERT_IS_ROS_PFN(Pfn1);
521
522 RCount = Pfn1->u3.e2.ReferenceCount;
523
524 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
525 return(RCount);
526 }
527
528 BOOLEAN
529 NTAPI
530 MmIsPageInUse(PFN_NUMBER Pfn)
531 {
532 return MiIsPfnInUse(MiGetPfnEntry(Pfn));
533 }
534
535 VOID
536 NTAPI
537 MmDereferencePage(PFN_NUMBER Pfn)
538 {
539 PMMPFN Pfn1;
540 KIRQL OldIrql;
541 DPRINT("MmDereferencePage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
542
543 OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
544
545 Pfn1 = MiGetPfnEntry(Pfn);
546 ASSERT(Pfn1);
547 ASSERT_IS_ROS_PFN(Pfn1);
548
549 ASSERT(Pfn1->u3.e2.ReferenceCount != 0);
550 Pfn1->u3.e2.ReferenceCount--;
551 if (Pfn1->u3.e2.ReferenceCount == 0)
552 {
553 /* Mark the page temporarily as valid, we're going to make it free soon */
554 Pfn1->u3.e1.PageLocation = ActiveAndValid;
555
556 /* It's not a ROS PFN anymore */
557 Pfn1->u4.AweAllocation = FALSE;
558
559 /* Bring it back into the free list */
560 DPRINT("Legacy free: %lx\n", Pfn);
561 MiInsertPageInFreeList(Pfn);
562 }
563
564 KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
565 }
566
567 PFN_NUMBER
568 NTAPI
569 MmAllocPage(ULONG Type)
570 {
571 PFN_NUMBER PfnOffset;
572 PMMPFN Pfn1;
573 KIRQL OldIrql;
574
575 OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
576
577 PfnOffset = MiRemoveZeroPage(MI_GET_NEXT_COLOR());
578 if (!PfnOffset)
579 {
580 KeBugCheck(NO_PAGES_AVAILABLE);
581 }
582
583 DPRINT("Legacy allocate: %lx\n", PfnOffset);
584 Pfn1 = MiGetPfnEntry(PfnOffset);
585 Pfn1->u3.e2.ReferenceCount = 1;
586 Pfn1->u3.e1.PageLocation = ActiveAndValid;
587
588 /* This marks the PFN as a ReactOS PFN */
589 Pfn1->u4.AweAllocation = TRUE;
590
591 /* Allocate the extra ReactOS Data and zero it out */
592 Pfn1->u1.SwapEntry = 0;
593 Pfn1->RmapListHead = NULL;
594
595 KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
596 return PfnOffset;
597 }
598
599 /* EOF */