/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/mm/freelist.c
 * PURPOSE:         Handle the list of free physical pages
 *
 * PROGRAMMERS:     David Welch (welch@cwcom.net)
 *                  Robert Bergkvist
 */

/* INCLUDES ****************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#if defined (ALLOC_PRAGMA)
#pragma alloc_text(INIT, MmInitializePageList)
#endif

#define MODULE_INVOLVED_IN_ARM3
#include "ARM3/miarm.h"

/* GLOBALS ****************************************************************/

//
//
// ReactOS to NT Physical Page Descriptor Entry Legacy Mapping Definitions
//
//        REACTOS                       NT
//
#define RmapListHead        AweReferenceCount
#define PHYSICAL_PAGE       MMPFN
#define PPHYSICAL_PAGE      PMMPFN
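
//
// NOTE: RmapListHead is overlaid on the NT AweReferenceCount field, which is
// why the rmap list head pointer is cast to and from LONG when it is stored
// in a PFN entry by MmSetRmapListHeadPage/MmGetRmapListHeadPage below.
//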

PPHYSICAL_PAGE MmPfnDatabase;

PFN_NUMBER MmAvailablePages;
PFN_NUMBER MmResidentAvailablePages;
PFN_NUMBER MmResidentAvailableAtInit;

SIZE_T MmTotalCommitLimit;
SIZE_T MmTotalCommittedPages;
SIZE_T MmSharedCommit;
SIZE_T MmDriverCommit;
SIZE_T MmProcessCommit;
SIZE_T MmPagedPoolCommit;
SIZE_T MmPeakCommitment;
SIZE_T MmTotalCommitLimitMaximum;

static RTL_BITMAP MiUserPfnBitMap;

/* FUNCTIONS *************************************************************/

VOID
NTAPI
MiInitializeUserPfnBitmap(VOID)
{
    PVOID Bitmap;

    /* Allocate enough buffer for the PFN bitmap and align it on 32-bits */
    Bitmap = ExAllocatePoolWithTag(NonPagedPool,
                                   (((MmHighestPhysicalPage + 1) + 31) / 32) * 4,
                                   ' mM');
    ASSERT(Bitmap);

    /* Initialize it and clear all the bits to begin with */
    RtlInitializeBitMap(&MiUserPfnBitMap,
                        Bitmap,
                        MmHighestPhysicalPage + 1);
    RtlClearAllBits(&MiUserPfnBitMap);
}
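
/*
 * Sizing note: the bitmap keeps one bit per possible PFN, rounded up to a
 * whole number of 32-bit ULONGs. For example, with MmHighestPhysicalPage at
 * 0x1FFFF (512 MB of RAM with 4 KB pages), the buffer allocated above is
 * ((0x20000 + 31) / 32) * 4 = 16384 bytes.
 */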

PFN_NUMBER
NTAPI
MmGetLRUFirstUserPage(VOID)
{
    ULONG Position;
    KIRQL OldIrql;

    /* Find the first user page */
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    Position = RtlFindSetBits(&MiUserPfnBitMap, 1, 0);
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
    if (Position == 0xFFFFFFFF) return 0;

    /* Return it */
    return Position;
}

VOID
NTAPI
MmInsertLRULastUserPage(PFN_NUMBER Pfn)
{
    KIRQL OldIrql;

    /* Set the page as a user page */
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    RtlSetBit(&MiUserPfnBitMap, Pfn);
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
}

PFN_NUMBER
NTAPI
MmGetLRUNextUserPage(PFN_NUMBER PreviousPfn)
{
    ULONG Position;
    KIRQL OldIrql;

    /* Find the next user page */
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    Position = RtlFindSetBits(&MiUserPfnBitMap, 1, PreviousPfn + 1);
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
    if (Position == 0xFFFFFFFF) return 0;

    /* Return it */
    return Position;
}
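
/*
 * Usage sketch (hypothetical caller, for illustration only): the routines
 * above walk every page marked as a user page in MiUserPfnBitMap, e.g.
 *
 *     PFN_NUMBER Page = MmGetLRUFirstUserPage();
 *     while (Page)
 *     {
 *         // ... examine or trim the page ...
 *         Page = MmGetLRUNextUserPage(Page);
 *     }
 *
 * Both functions return 0 when no (further) user page is set in the bitmap,
 * so PFN 0 acts as the "no more pages" sentinel.
 */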

VOID
NTAPI
MmRemoveLRUUserPage(PFN_NUMBER Page)
{
    /* Unset the page as a user page */
    RtlClearBit(&MiUserPfnBitMap, Page);
}

BOOLEAN
NTAPI
MiIsPfnFree(IN PMMPFN Pfn1)
{
    /* Must be a free or zero page, with no references, linked */
    return ((Pfn1->u3.e1.PageLocation <= StandbyPageList) &&
            (Pfn1->u1.Flink) &&
            (Pfn1->u2.Blink) &&
            !(Pfn1->u3.e2.ReferenceCount));
}

BOOLEAN
NTAPI
MiIsPfnInUse(IN PMMPFN Pfn1)
{
    /* Standby list or higher, unlinked, and with references */
    return !MiIsPfnFree(Pfn1);
}

PMDL
NTAPI
MiAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress,
                      IN PHYSICAL_ADDRESS HighAddress,
                      IN PHYSICAL_ADDRESS SkipBytes,
                      IN SIZE_T TotalBytes,
                      IN MI_PFN_CACHE_ATTRIBUTE CacheAttribute,
                      IN ULONG MdlFlags)
{
    PMDL Mdl;
    PFN_NUMBER PageCount, LowPage, HighPage, SkipPages, PagesFound = 0, Page;
    PPFN_NUMBER MdlPage, LastMdlPage;
    KIRQL OldIrql;
    PPHYSICAL_PAGE Pfn1;
    INT LookForZeroedPages;
    ASSERT(KeGetCurrentIrql() <= APC_LEVEL);

    //
    // Convert the low address into a PFN
    //
    LowPage = (PFN_NUMBER)(LowAddress.QuadPart >> PAGE_SHIFT);

    //
    // Convert, and normalize, the high address into a PFN
    //
    HighPage = (PFN_NUMBER)(HighAddress.QuadPart >> PAGE_SHIFT);
    if (HighPage > MmHighestPhysicalPage) HighPage = MmHighestPhysicalPage;

    //
    // Validate skip bytes and convert them into pages
    //
    if (BYTE_OFFSET(SkipBytes.LowPart)) return NULL;
    SkipPages = (PFN_NUMBER)(SkipBytes.QuadPart >> PAGE_SHIFT);

    /* This isn't supported at all */
    if (SkipPages) DPRINT1("WARNING: Caller requesting SkipBytes, MDL might be mismatched\n");

    //
    // Now compute the number of pages the MDL will cover
    //
    PageCount = (PFN_NUMBER)ADDRESS_AND_SIZE_TO_SPAN_PAGES(0, TotalBytes);
    do
    {
        //
        // Try creating an MDL for this many pages
        //
        Mdl = MmCreateMdl(NULL, NULL, PageCount << PAGE_SHIFT);
        if (Mdl) break;

        //
        // This function is not required to return the number of pages requested.
        // In fact, it can return as little as 1 page, and callers are supposed
        // to deal with this scenario. So re-attempt the allocation with fewer
        // pages than before, and see if it worked this time.
        //
        PageCount -= (PageCount >> 4);
    } while (PageCount);
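
    /*
     * For example: a 256-page request that cannot get an MDL retries with
     * 240, 225, 211, ... pages, i.e. the request shrinks by 1/16th per pass
     * until MmCreateMdl succeeds or PageCount reaches zero. (Once PageCount
     * drops below 16 the shrink step becomes zero, so at that point the loop
     * relies on MmCreateMdl eventually succeeding.)
     */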

    //
    // Wow, not even a single page was around!
    //
    if (!Mdl) return NULL;

    //
    // This is where the page array starts....
    //
    MdlPage = (PPFN_NUMBER)(Mdl + 1);

    //
    // Lock the PFN database
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    //
    // Are we looking for any pages, without discriminating?
    //
    if ((LowPage == 0) && (HighPage == MmHighestPhysicalPage))
    {
        //
        // Well then, let's go shopping
        //
        while (PagesFound < PageCount)
        {
            /* Grab a page */
            Page = MiRemoveAnyPage(0);
            if (Page == 0)
            {
                /* This is not good... hopefully we have at least SOME pages */
                ASSERT(PagesFound);
                break;
            }

            /* Grab the page entry for it */
            Pfn1 = MiGetPfnEntry(Page);

            //
            // Make sure it's really free
            //
            ASSERT(Pfn1->u3.e2.ReferenceCount == 0);

            //
            // Allocate it and mark it
            //
            Pfn1->u3.e1.StartOfAllocation = 1;
            Pfn1->u3.e1.EndOfAllocation = 1;
            Pfn1->u3.e2.ReferenceCount = 1;

            //
            // Save it into the MDL
            //
            *MdlPage++ = MiGetPfnEntryIndex(Pfn1);
            PagesFound++;
        }
    }
    else
    {
        //
        // You want a specific range of pages. We'll do this in two runs
        //
        for (LookForZeroedPages = 1; LookForZeroedPages >= 0; LookForZeroedPages--)
        {
            //
            // Scan the range you specified
            //
            for (Page = LowPage; Page < HighPage; Page++)
            {
                //
                // Get the PFN entry for this page
                //
                Pfn1 = MiGetPfnEntry(Page);
                ASSERT(Pfn1);

                //
                // Make sure it's free and, on the first pass, zeroed
                //
                if (MiIsPfnInUse(Pfn1)) continue;
                if ((Pfn1->u3.e1.PageLocation == ZeroedPageList) != LookForZeroedPages) continue;

                /* Remove the page from the free or zero list */
                MiUnlinkFreeOrZeroedPage(Pfn1);

                //
                // Sanity checks
                //
                ASSERT(Pfn1->u3.e2.ReferenceCount == 0);

                //
                // Now setup the page and mark it
                //
                Pfn1->u3.e2.ReferenceCount = 1;
                Pfn1->u3.e1.StartOfAllocation = 1;
                Pfn1->u3.e1.EndOfAllocation = 1;

                //
                // Save this page into the MDL
                //
                *MdlPage++ = Page;
                if (++PagesFound == PageCount) break;
            }

            //
            // If the first pass was enough, don't keep going; otherwise, go again
            //
            if (PagesFound == PageCount) break;
        }
    }

    //
    // Now release the PFN lock
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    //
    // We might've found fewer pages, but not more ;-)
    //
    if (PagesFound != PageCount) ASSERT(PagesFound < PageCount);
    if (!PagesFound)
    {
        //
        // If we didn't find any pages at all, fail
        //
        DPRINT1("NO MDL PAGES!\n");
        ExFreePool(Mdl);
        return NULL;
    }

    //
    // Write out how many pages we found
    //
    Mdl->ByteCount = (ULONG)(PagesFound << PAGE_SHIFT);

    //
    // Terminate the MDL array if some pages are missing
    //
    if (PagesFound != PageCount) *MdlPage = -1;

    //
    // Now go back and loop over all the MDL pages
    //
    MdlPage = (PPFN_NUMBER)(Mdl + 1);
    LastMdlPage = MdlPage + PagesFound;
    while (MdlPage < LastMdlPage)
    {
        //
        // Check if we've reached the end
        //
        Page = *MdlPage++;
        if (Page == (PFN_NUMBER)-1) break;

        //
        // Get the PFN entry for the page and check if we should zero it out
        //
        Pfn1 = MiGetPfnEntry(Page);
        ASSERT(Pfn1);
        if (Pfn1->u3.e1.PageLocation != ZeroedPageList) MiZeroPhysicalPage(Page);
        Pfn1->u3.e1.PageLocation = ActiveAndValid;
    }

    //
    // We're done, mark the pages as locked (should we lock them, though???)
    //
    Mdl->Process = NULL;
    Mdl->MdlFlags |= MDL_PAGES_LOCKED;
    return Mdl;
}
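
/*
 * Usage sketch for MiAllocatePagesForMdl (hypothetical caller, for
 * illustration only; MiNotMapped is assumed to be one of the
 * MI_PFN_CACHE_ATTRIBUTE values):
 *
 *     PHYSICAL_ADDRESS Low, High, Skip;
 *     PMDL Mdl;
 *     Low.QuadPart = 0;
 *     Skip.QuadPart = 0;
 *     High.QuadPart = -1;
 *     Mdl = MiAllocatePagesForMdl(Low, High, Skip, 4 * PAGE_SIZE, MiNotMapped, 0);
 *
 * Because the returned MDL may describe fewer pages than requested, a caller
 * has to check Mdl->ByteCount rather than assume TotalBytes was satisfied;
 * the MDL itself comes from pool (note the ExFreePool on the failure path
 * above).
 */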

VOID
NTAPI
MmDumpPfnDatabase(VOID)
{
    ULONG i;
    PPHYSICAL_PAGE Pfn1;
    PCHAR State = "????", Type = "Unknown";
    KIRQL OldIrql;
    ULONG Totals[5] = {0}, FreePages = 0;

    KeRaiseIrql(HIGH_LEVEL, &OldIrql);

    //
    // Loop the PFN database
    //
    for (i = 0; i <= MmHighestPhysicalPage; i++)
    {
        Pfn1 = MiGetPfnEntry(i);
        if (!Pfn1) continue;

        //
        // Get the type
        //
        if (MiIsPfnInUse(Pfn1))
        {
            State = "Used";
        }
        else
        {
            State = "Free";
            Type = "Free";
            FreePages++;
            break;
        }

        //
        // Pretty-print the page
        //
        DbgPrint("0x%08p:\t%04s\t%20s\t(%02d) [%08p]\n",
                 i << PAGE_SHIFT,
                 State,
                 Type,
                 Pfn1->u3.e2.ReferenceCount,
                 Pfn1->RmapListHead);
    }

    DbgPrint("Nonpaged Pool:       %d pages\t[%d KB]\n", Totals[MC_NPPOOL], (Totals[MC_NPPOOL] << PAGE_SHIFT) / 1024);
    DbgPrint("Paged Pool:          %d pages\t[%d KB]\n", Totals[MC_PPOOL], (Totals[MC_PPOOL] << PAGE_SHIFT) / 1024);
    DbgPrint("File System Cache:   %d pages\t[%d KB]\n", Totals[MC_CACHE], (Totals[MC_CACHE] << PAGE_SHIFT) / 1024);
    DbgPrint("Process Working Set: %d pages\t[%d KB]\n", Totals[MC_USER], (Totals[MC_USER] << PAGE_SHIFT) / 1024);
    DbgPrint("System:              %d pages\t[%d KB]\n", Totals[MC_SYSTEM], (Totals[MC_SYSTEM] << PAGE_SHIFT) / 1024);
    DbgPrint("Free:                %d pages\t[%d KB]\n", FreePages, (FreePages << PAGE_SHIFT) / 1024);

    KeLowerIrql(OldIrql);
}

VOID
NTAPI
MmSetRmapListHeadPage(PFN_NUMBER Pfn, struct _MM_RMAP_ENTRY* ListHead)
{
    KIRQL oldIrql;
    PMMPFN Pfn1;

    oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    Pfn1 = MiGetPfnEntry(Pfn);
    if (ListHead)
    {
        /* Should not be trying to insert an RMAP for a non-active page */
        ASSERT(MiIsPfnInUse(Pfn1) == TRUE);

        /* Set the list head address */
        Pfn1->RmapListHead = (LONG)ListHead;

        /* Mark that the page has an actual RMAP, not a residual color link */
        Pfn1->u3.e1.ParityError = TRUE;
    }
    else
    {
        /* ReactOS semantics dictate the page is STILL active right now */
        ASSERT(MiIsPfnInUse(Pfn1) == TRUE);

        /* In this case, the RMAP is actually being removed, so clear field */
        Pfn1->RmapListHead = 0;

        /* Mark that the page has no RMAP, not a residual color link */
        Pfn1->u3.e1.ParityError = FALSE;

        /* ReactOS semantics will now release the page, which will make it free and enter a colored list */
    }

    KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
}
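
//
// NOTE: the u3.e1.ParityError bit is not used for parity errors here; this
// code reuses it as a "this PFN has a real RMAP list" marker so that
// MmGetRmapListHeadPage below can tell a genuine rmap list pointer apart from
// a residual color-list link left in the overlaid field.
//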

struct _MM_RMAP_ENTRY*
NTAPI
MmGetRmapListHeadPage(PFN_NUMBER Pfn)
{
    KIRQL oldIrql;
    struct _MM_RMAP_ENTRY* ListHead;
    PMMPFN Pfn1;

    /* Lock PFN database */
    oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    /* Get the entry */
    Pfn1 = MiGetPfnEntry(Pfn);

    /* Check if the page doesn't really have an RMAP */
    if (Pfn1->u3.e1.ParityError == FALSE)
    {
        KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
        return NULL;
    }

    ListHead = (struct _MM_RMAP_ENTRY*)Pfn1->RmapListHead;

    /* Should not have an RMAP for a non-active page */
    ASSERT(MiIsPfnInUse(Pfn1) == TRUE);

    /* Release PFN database and return rmap list head */
    KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
    return ListHead;
}

VOID
NTAPI
MmSetSavedSwapEntryPage(PFN_NUMBER Pfn, SWAPENTRY SwapEntry)
{
    KIRQL oldIrql;

    oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    MiGetPfnEntry(Pfn)->u1.WsIndex = SwapEntry;
    KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
}

SWAPENTRY
NTAPI
MmGetSavedSwapEntryPage(PFN_NUMBER Pfn)
{
    SWAPENTRY SwapEntry;
    KIRQL oldIrql;

    oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    SwapEntry = MiGetPfnEntry(Pfn)->u1.WsIndex;
    KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);

    return(SwapEntry);
}

VOID
NTAPI
MmReferencePage(PFN_NUMBER Pfn)
{
    PPHYSICAL_PAGE Page;

    DPRINT("MmReferencePage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);

    if (Pfn == 0 || Pfn > MmHighestPhysicalPage)
    {
        return;
    }

    Page = MiGetPfnEntry(Pfn);
    ASSERT(Page);

    Page->u3.e2.ReferenceCount++;
}

ULONG
NTAPI
MmGetReferenceCountPage(PFN_NUMBER Pfn)
{
    KIRQL oldIrql;
    ULONG RCount;
    PPHYSICAL_PAGE Page;

    DPRINT("MmGetReferenceCountPage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);

    oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    Page = MiGetPfnEntry(Pfn);
    ASSERT(Page);

    RCount = Page->u3.e2.ReferenceCount;

    KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
    return(RCount);
}

BOOLEAN
NTAPI
MmIsPageInUse(PFN_NUMBER Pfn)
{
    return MiIsPfnInUse(MiGetPfnEntry(Pfn));
}

VOID
NTAPI
MmDereferencePage(PFN_NUMBER Pfn)
{
    PPHYSICAL_PAGE Page;

    DPRINT("MmDereferencePage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);

    Page = MiGetPfnEntry(Pfn);
    ASSERT(Page);

    Page->u3.e2.ReferenceCount--;
    if (Page->u3.e2.ReferenceCount == 0)
    {
        /* Mark the page temporarily as valid, we're going to make it free soon */
        Page->u3.e1.PageLocation = ActiveAndValid;

        /* Bring it back into the free list */
        DPRINT("Legacy free: %lx\n", Pfn);
        MiInsertPageInFreeList(Pfn);
    }
}

PFN_NUMBER
NTAPI
MmAllocPage(ULONG Type)
{
    PFN_NUMBER PfnOffset;
    PMMPFN Pfn1;

    if (Type != MC_SYSTEM)
    {
        PfnOffset = MiRemoveZeroPage(MI_GET_NEXT_COLOR());
    }
    else
    {
        PfnOffset = MiRemoveAnyPage(MI_GET_NEXT_COLOR());
    }

    if (!PfnOffset)
    {
        DPRINT1("MmAllocPage(): Out of memory\n");
        return 0;
    }

    DPRINT("Legacy allocate: %lx\n", PfnOffset);
    Pfn1 = MiGetPfnEntry(PfnOffset);
    Pfn1->u3.e2.ReferenceCount = 1;
    Pfn1->u3.e1.PageLocation = ActiveAndValid;
    return PfnOffset;
}
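
/*
 * Legacy allocate/release pairing (sketch, hypothetical caller): a page from
 * MmAllocPage starts with a reference count of 1 and is handed back to the
 * free lists by MmDereferencePage once that count drops to zero:
 *
 *     PFN_NUMBER Page = MmAllocPage(MC_USER);
 *     if (Page)
 *     {
 *         // ... map and use the page ...
 *         MmDereferencePage(Page);
 *     }
 */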

/* EOF */