Sync with trunk r63743.
[reactos.git] / ntoskrnl / mm / freelist.c
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/mm/freelist.c
5 * PURPOSE: Handle the list of free physical pages
6 *
7 * PROGRAMMERS: David Welch (welch@cwcom.net)
8 * Robert Bergkvist
9 */
10
11 /* INCLUDES ****************************************************************/
12
13 #include <ntoskrnl.h>
14 #define NDEBUG
15 #include <debug.h>
16
17 #define MODULE_INVOLVED_IN_ARM3
18 #include "ARM3/miarm.h"
19
20 #define ASSERT_IS_ROS_PFN(x) ASSERT(MI_IS_ROS_PFN(x) == TRUE);
21
22 /* GLOBALS ****************************************************************/
23
/* The PFN database: one MMPFN entry per physical page frame */
PMMPFN MmPfnDatabase;

/* Page availability counters maintained by the page-list code */
PFN_NUMBER MmAvailablePages;
PFN_NUMBER MmResidentAvailablePages;
PFN_NUMBER MmResidentAvailableAtInit;

/* System-wide commit charge accounting */
SIZE_T MmTotalCommittedPages;
SIZE_T MmSharedCommit;
SIZE_T MmDriverCommit;
SIZE_T MmProcessCommit;
SIZE_T MmPagedPoolCommit;
SIZE_T MmPeakCommitment;
/* NOTE(review): lowercase 't' looks like a typo for MmTotalCommitLimitMaximum —
   renaming would change external linkage, so confirm all referencing code first */
SIZE_T MmtotalCommitLimitMaximum;

/* One bit per PFN; a set bit marks a page owned by a legacy (ROS) user mapping.
   Guarded by the PFN queued spinlock. */
static RTL_BITMAP MiUserPfnBitMap;
39
40 /* FUNCTIONS *************************************************************/
41
42 VOID
43 NTAPI
44 MiInitializeUserPfnBitmap(VOID)
45 {
46 PVOID Bitmap;
47
48 /* Allocate enough buffer for the PFN bitmap and align it on 32-bits */
49 Bitmap = ExAllocatePoolWithTag(NonPagedPool,
50 (((MmHighestPhysicalPage + 1) + 31) / 32) * 4,
51 ' mM');
52 ASSERT(Bitmap);
53
54 /* Initialize it and clear all the bits to begin with */
55 RtlInitializeBitMap(&MiUserPfnBitMap,
56 Bitmap,
57 (ULONG)MmHighestPhysicalPage + 1);
58 RtlClearAllBits(&MiUserPfnBitMap);
59 }
60
61 PFN_NUMBER
62 NTAPI
63 MmGetLRUFirstUserPage(VOID)
64 {
65 ULONG Position;
66 KIRQL OldIrql;
67
68 /* Find the first user page */
69 OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
70 Position = RtlFindSetBits(&MiUserPfnBitMap, 1, 0);
71 KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
72 if (Position == 0xFFFFFFFF) return 0;
73
74 /* Return it */
75 ASSERT(Position != 0);
76 ASSERT_IS_ROS_PFN(MiGetPfnEntry(Position));
77 return Position;
78 }
79
80 VOID
81 NTAPI
82 MmInsertLRULastUserPage(PFN_NUMBER Pfn)
83 {
84 KIRQL OldIrql;
85
86 /* Set the page as a user page */
87 ASSERT(Pfn != 0);
88 ASSERT_IS_ROS_PFN(MiGetPfnEntry(Pfn));
89 ASSERT(!RtlCheckBit(&MiUserPfnBitMap, (ULONG)Pfn));
90 OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
91 RtlSetBit(&MiUserPfnBitMap, (ULONG)Pfn);
92 KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
93 }
94
95 PFN_NUMBER
96 NTAPI
97 MmGetLRUNextUserPage(PFN_NUMBER PreviousPfn)
98 {
99 ULONG Position;
100 KIRQL OldIrql;
101
102 /* Find the next user page */
103 OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
104 Position = RtlFindSetBits(&MiUserPfnBitMap, 1, (ULONG)PreviousPfn + 1);
105 KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
106 if (Position == 0xFFFFFFFF) return 0;
107
108 /* Return it */
109 ASSERT(Position != 0);
110 ASSERT_IS_ROS_PFN(MiGetPfnEntry(Position));
111 return Position;
112 }
113
114 VOID
115 NTAPI
116 MmRemoveLRUUserPage(PFN_NUMBER Page)
117 {
118 KIRQL OldIrql;
119
120 /* Unset the page as a user page */
121 ASSERT(Page != 0);
122 ASSERT_IS_ROS_PFN(MiGetPfnEntry(Page));
123 ASSERT(RtlCheckBit(&MiUserPfnBitMap, (ULONG)Page));
124 OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
125 RtlClearBit(&MiUserPfnBitMap, (ULONG)Page);
126 KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
127 }
128
129 BOOLEAN
130 NTAPI
131 MiIsPfnFree(IN PMMPFN Pfn1)
132 {
133 /* Must be a free or zero page, with no references, linked */
134 return ((Pfn1->u3.e1.PageLocation <= StandbyPageList) &&
135 (Pfn1->u1.Flink) &&
136 (Pfn1->u2.Blink) &&
137 !(Pfn1->u3.e2.ReferenceCount));
138 }
139
140 BOOLEAN
141 NTAPI
142 MiIsPfnInUse(IN PMMPFN Pfn1)
143 {
144 /* Standby list or higher, unlinked, and with references */
145 return !MiIsPfnFree(Pfn1);
146 }
147
/*
 * Allocates physical pages within [LowAddress, HighAddress] and returns them
 * described by a nonpaged MDL with MDL_PAGES_LOCKED set.
 *
 * May return FEWER pages than TotalBytes requires — callers must check
 * Mdl->ByteCount. Returns NULL if no MDL pool or no pages were available.
 * SkipBytes is only validated, not honored (warned below).
 * NOTE(review): CacheAttribute and MdlFlags appear unused by this body —
 * confirm whether callers rely on them here.
 */
PMDL
NTAPI
MiAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress,
                      IN PHYSICAL_ADDRESS HighAddress,
                      IN PHYSICAL_ADDRESS SkipBytes,
                      IN SIZE_T TotalBytes,
                      IN MI_PFN_CACHE_ATTRIBUTE CacheAttribute,
                      IN ULONG MdlFlags)
{
    PMDL Mdl;
    PFN_NUMBER PageCount, LowPage, HighPage, SkipPages, PagesFound = 0, Page;
    PPFN_NUMBER MdlPage, LastMdlPage;
    KIRQL OldIrql;
    PMMPFN Pfn1;
    INT LookForZeroedPages;
    ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
    DPRINT1("ARM3-DEBUG: Being called with %I64x %I64x %I64x %lx %d %lu\n", LowAddress, HighAddress, SkipBytes, TotalBytes, CacheAttribute, MdlFlags);

    //
    // Convert the low address into a PFN
    //
    LowPage = (PFN_NUMBER)(LowAddress.QuadPart >> PAGE_SHIFT);

    //
    // Convert, and normalize, the high address into a PFN
    // (clamped to the highest physical page that actually exists)
    //
    HighPage = (PFN_NUMBER)(HighAddress.QuadPart >> PAGE_SHIFT);
    if (HighPage > MmHighestPhysicalPage) HighPage = MmHighestPhysicalPage;

    //
    // Validate that skipbytes is page-aligned, and convert it into pages
    //
    if (BYTE_OFFSET(SkipBytes.LowPart)) return NULL;
    SkipPages = (PFN_NUMBER)(SkipBytes.QuadPart >> PAGE_SHIFT);

    /* This isn't supported at all */
    if (SkipPages) DPRINT1("WARNING: Caller requesting SkipBytes, MDL might be mismatched\n");

    //
    // Now compute the number of pages the MDL will cover
    //
    PageCount = (PFN_NUMBER)ADDRESS_AND_SIZE_TO_SPAN_PAGES(0, TotalBytes);
    do
    {
        //
        // Try creating an MDL for these many pages
        //
        Mdl = MmCreateMdl(NULL, NULL, PageCount << PAGE_SHIFT);
        if (Mdl) break;

        //
        // This function is not required to return the amount of pages requested
        // In fact, it can return as little as 1 page, and callers are supposed
        // to deal with this scenario. So re-attempt the allocation with less
        // pages than before (shrink by 1/16th each iteration), and see if it
        // worked this time.
        //
        PageCount -= (PageCount >> 4);
    } while (PageCount);

    //
    // Wow, not even a single page was around!
    //
    if (!Mdl) return NULL;

    //
    // This is where the page array starts....
    //
    MdlPage = (PPFN_NUMBER)(Mdl + 1);

    //
    // Lock the PFN database for the whole page-grabbing phase
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    //
    // Are we looking for any pages, without discriminating?
    //
    if ((LowPage == 0) && (HighPage == MmHighestPhysicalPage))
    {
        //
        // Well then, let's go shopping
        //
        while (PagesFound < PageCount)
        {
            /* Grab a page from any free/zero list */
            MI_SET_USAGE(MI_USAGE_MDL);
            MI_SET_PROCESS2("Kernel");
            Page = MiRemoveAnyPage(0);
            if (Page == 0)
            {
                /* This is not good... hopefully we have at least SOME pages */
                ASSERT(PagesFound);
                break;
            }

            /* Grab the page entry for it */
            Pfn1 = MiGetPfnEntry(Page);

            //
            // Make sure it's really free
            //
            ASSERT(Pfn1->u3.e2.ReferenceCount == 0);

            /* Now setup the page and mark it: one reference/share, no owning
               PTE (deleted + magic PteFrame), a one-page allocation run */
            Pfn1->u3.e2.ReferenceCount = 1;
            Pfn1->u2.ShareCount = 1;
            MI_SET_PFN_DELETED(Pfn1);
            Pfn1->u4.PteFrame = 0x1FFEDCB;
            Pfn1->u3.e1.StartOfAllocation = 1;
            Pfn1->u3.e1.EndOfAllocation = 1;
            Pfn1->u4.VerifierAllocation = 0;

            //
            // Save it into the MDL
            //
            *MdlPage++ = MiGetPfnEntryIndex(Pfn1);
            PagesFound++;
        }
    }
    else
    {
        //
        // You want a specific range of pages. We'll do this in two runs:
        // first pass takes only zeroed pages, second pass takes any free page
        //
        for (LookForZeroedPages = 1; LookForZeroedPages >= 0; LookForZeroedPages--)
        {
            //
            // Scan the range you specified
            // NOTE(review): loop bound is Page < HighPage, so the clamped
            // HighPage itself is never considered — confirm intended
            //
            for (Page = LowPage; Page < HighPage; Page++)
            {
                //
                // Get the PFN entry for this page
                //
                Pfn1 = MiGetPfnEntry(Page);
                ASSERT(Pfn1);

                //
                // Make sure it's free and, if this is our first pass, zeroed
                //
                if (MiIsPfnInUse(Pfn1)) continue;
                if ((Pfn1->u3.e1.PageLocation == ZeroedPageList) != LookForZeroedPages) continue;

                /* Remove the page from the free or zero list */
                ASSERT(Pfn1->u3.e1.ReadInProgress == 0);
                MI_SET_USAGE(MI_USAGE_MDL);
                MI_SET_PROCESS2("Kernel");
                MiUnlinkFreeOrZeroedPage(Pfn1);

                //
                // Sanity checks
                //
                ASSERT(Pfn1->u3.e2.ReferenceCount == 0);

                //
                // Now setup the page and mark it (same stamp as the
                // any-page path above)
                //
                Pfn1->u3.e2.ReferenceCount = 1;
                Pfn1->u2.ShareCount = 1;
                MI_SET_PFN_DELETED(Pfn1);
                Pfn1->u4.PteFrame = 0x1FFEDCB;
                Pfn1->u3.e1.StartOfAllocation = 1;
                Pfn1->u3.e1.EndOfAllocation = 1;
                Pfn1->u4.VerifierAllocation = 0;

                //
                // Save this page into the MDL
                //
                *MdlPage++ = Page;
                if (++PagesFound == PageCount) break;
            }

            //
            // If the first pass was enough, don't keep going, otherwise, go again
            //
            if (PagesFound == PageCount) break;
        }
    }

    //
    // Now release the PFN lock
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    //
    // We might've found less pages, but not more ;-)
    //
    if (PagesFound != PageCount) ASSERT(PagesFound < PageCount);
    if (!PagesFound)
    {
        //
        // If we didn't find any pages at all, fail
        //
        DPRINT1("NO MDL PAGES!\n");
        ExFreePoolWithTag(Mdl, TAG_MDL);
        return NULL;
    }

    //
    // Write out how many pages we found
    //
    Mdl->ByteCount = (ULONG)(PagesFound << PAGE_SHIFT);

    //
    // Terminate the MDL array if certain pages are missing
    //
    if (PagesFound != PageCount) *MdlPage = LIST_HEAD;

    //
    // Now go back and loop over all the MDL pages, zeroing any page that
    // didn't come from the zeroed list and marking them active
    //
    MdlPage = (PPFN_NUMBER)(Mdl + 1);
    LastMdlPage = MdlPage + PagesFound;
    while (MdlPage < LastMdlPage)
    {
        //
        // Check if we've reached the end
        //
        Page = *MdlPage++;
        if (Page == LIST_HEAD) break;

        //
        // Get the PFN entry for the page and check if we should zero it out
        //
        Pfn1 = MiGetPfnEntry(Page);
        ASSERT(Pfn1);
        if (Pfn1->u3.e1.PageLocation != ZeroedPageList) MiZeroPhysicalPage(Page);
        Pfn1->u3.e1.PageLocation = ActiveAndValid;
    }

    //
    // We're done, mark the pages as locked
    //
    Mdl->Process = NULL;
    Mdl->MdlFlags |= MDL_PAGES_LOCKED;
    return Mdl;
}
385
386 VOID
387 NTAPI
388 MmSetRmapListHeadPage(PFN_NUMBER Pfn, PMM_RMAP_ENTRY ListHead)
389 {
390 KIRQL oldIrql;
391 PMMPFN Pfn1;
392
393 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
394 Pfn1 = MiGetPfnEntry(Pfn);
395 ASSERT(Pfn1);
396 ASSERT_IS_ROS_PFN(Pfn1);
397
398 if (ListHead)
399 {
400 /* Should not be trying to insert an RMAP for a non-active page */
401 ASSERT(MiIsPfnInUse(Pfn1) == TRUE);
402
403 /* Set the list head address */
404 Pfn1->RmapListHead = ListHead;
405 }
406 else
407 {
408 /* ReactOS semantics dictate the page is STILL active right now */
409 ASSERT(MiIsPfnInUse(Pfn1) == TRUE);
410
411 /* In this case, the RMAP is actually being removed, so clear field */
412 Pfn1->RmapListHead = NULL;
413
414 /* ReactOS semantics will now release the page, which will make it free and enter a colored list */
415 }
416
417 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
418 }
419
420 PMM_RMAP_ENTRY
421 NTAPI
422 MmGetRmapListHeadPage(PFN_NUMBER Pfn)
423 {
424 KIRQL oldIrql;
425 PMM_RMAP_ENTRY ListHead;
426 PMMPFN Pfn1;
427
428 /* Lock PFN database */
429 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
430
431 /* Get the entry */
432 Pfn1 = MiGetPfnEntry(Pfn);
433 ASSERT(Pfn1);
434 ASSERT_IS_ROS_PFN(Pfn1);
435
436 /* Get the list head */
437 ListHead = Pfn1->RmapListHead;
438
439 /* Should not have an RMAP for a non-active page */
440 ASSERT(MiIsPfnInUse(Pfn1) == TRUE);
441
442 /* Release PFN database and return rmap list head */
443 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
444 return ListHead;
445 }
446
447 VOID
448 NTAPI
449 MmSetSavedSwapEntryPage(PFN_NUMBER Pfn, SWAPENTRY SwapEntry)
450 {
451 KIRQL oldIrql;
452 PMMPFN Pfn1;
453
454 Pfn1 = MiGetPfnEntry(Pfn);
455 ASSERT(Pfn1);
456 ASSERT_IS_ROS_PFN(Pfn1);
457
458 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
459 Pfn1->u1.SwapEntry = SwapEntry;
460 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
461 }
462
463 SWAPENTRY
464 NTAPI
465 MmGetSavedSwapEntryPage(PFN_NUMBER Pfn)
466 {
467 SWAPENTRY SwapEntry;
468 KIRQL oldIrql;
469 PMMPFN Pfn1;
470
471 Pfn1 = MiGetPfnEntry(Pfn);
472 ASSERT(Pfn1);
473 ASSERT_IS_ROS_PFN(Pfn1);
474
475 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
476 SwapEntry = Pfn1->u1.SwapEntry;
477 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
478
479 return(SwapEntry);
480 }
481
482 VOID
483 NTAPI
484 MmReferencePage(PFN_NUMBER Pfn)
485 {
486 PMMPFN Pfn1;
487
488 DPRINT("MmReferencePage(PysicalAddress %x)\n", Pfn << PAGE_SHIFT);
489
490 ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
491 ASSERT(Pfn != 0);
492 ASSERT(Pfn <= MmHighestPhysicalPage);
493
494 Pfn1 = MiGetPfnEntry(Pfn);
495 ASSERT(Pfn1);
496 ASSERT_IS_ROS_PFN(Pfn1);
497
498 ASSERT(Pfn1->u3.e2.ReferenceCount != 0);
499 Pfn1->u3.e2.ReferenceCount++;
500 }
501
502 ULONG
503 NTAPI
504 MmGetReferenceCountPage(PFN_NUMBER Pfn)
505 {
506 KIRQL oldIrql;
507 ULONG RCount;
508 PMMPFN Pfn1;
509
510 DPRINT("MmGetReferenceCountPage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
511
512 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
513 Pfn1 = MiGetPfnEntry(Pfn);
514 ASSERT(Pfn1);
515 ASSERT_IS_ROS_PFN(Pfn1);
516
517 RCount = Pfn1->u3.e2.ReferenceCount;
518
519 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
520 return(RCount);
521 }
522
523 BOOLEAN
524 NTAPI
525 MmIsPageInUse(PFN_NUMBER Pfn)
526 {
527 return MiIsPfnInUse(MiGetPfnEntry(Pfn));
528 }
529
530 VOID
531 NTAPI
532 MmDereferencePage(PFN_NUMBER Pfn)
533 {
534 PMMPFN Pfn1;
535 DPRINT("MmDereferencePage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
536
537 Pfn1 = MiGetPfnEntry(Pfn);
538 ASSERT(Pfn1);
539 ASSERT_IS_ROS_PFN(Pfn1);
540
541 ASSERT(Pfn1->u3.e2.ReferenceCount != 0);
542 Pfn1->u3.e2.ReferenceCount--;
543 if (Pfn1->u3.e2.ReferenceCount == 0)
544 {
545 /* Mark the page temporarily as valid, we're going to make it free soon */
546 Pfn1->u3.e1.PageLocation = ActiveAndValid;
547
548 /* It's not a ROS PFN anymore */
549 Pfn1->u4.AweAllocation = FALSE;
550
551 /* Bring it back into the free list */
552 DPRINT("Legacy free: %lx\n", Pfn);
553 MiInsertPageInFreeList(Pfn);
554 }
555 }
556
557 PFN_NUMBER
558 NTAPI
559 MmAllocPage(ULONG Type)
560 {
561 PFN_NUMBER PfnOffset;
562 PMMPFN Pfn1;
563
564 PfnOffset = MiRemoveZeroPage(MI_GET_NEXT_COLOR());
565
566 if (!PfnOffset)
567 {
568 DPRINT1("MmAllocPage(): Out of memory\n");
569 return 0;
570 }
571
572 DPRINT("Legacy allocate: %lx\n", PfnOffset);
573 Pfn1 = MiGetPfnEntry(PfnOffset);
574 Pfn1->u3.e2.ReferenceCount = 1;
575 Pfn1->u3.e1.PageLocation = ActiveAndValid;
576
577 /* This marks the PFN as a ReactOS PFN */
578 Pfn1->u4.AweAllocation = TRUE;
579
580 /* Allocate the extra ReactOS Data and zero it out */
581 Pfn1->u1.SwapEntry = 0;
582 Pfn1->RmapListHead = NULL;
583
584 return PfnOffset;
585 }
586
587 /* EOF */