/*
 * Synchronized up to trunk revision r57689.
 * reactos.git: ntoskrnl/mm/freelist.c
 */
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/mm/freelist.c
5 * PURPOSE: Handle the list of free physical pages
6 *
7 * PROGRAMMERS: David Welch (welch@cwcom.net)
8 * Robert Bergkvist
9 */
10
11 /* INCLUDES ****************************************************************/
12
13 #include <ntoskrnl.h>
14 #define NDEBUG
15 #include <debug.h>
16
17 #if defined (ALLOC_PRAGMA)
18 #pragma alloc_text(INIT, MmInitializePageList)
19 #endif
20
21 #define MODULE_INVOLVED_IN_ARM3
22 #include "ARM3/miarm.h"
23
/* GLOBALS ****************************************************************/

/* ReactOS to NT Physical Page Descriptor Entry Legacy Mapping Definitions */
#define PHYSICAL_PAGE MMPFN
#define PPHYSICAL_PAGE PMMPFN

/* Base of the PFN database (one MMPFN descriptor per physical page frame) */
PPHYSICAL_PAGE MmPfnDatabase;

/* Physical-page availability counters maintained by the page-list code */
PFN_NUMBER MmAvailablePages;
PFN_NUMBER MmResidentAvailablePages;
PFN_NUMBER MmResidentAvailableAtInit;

/* System-wide commit accounting */
SIZE_T MmTotalCommittedPages;
SIZE_T MmSharedCommit;
SIZE_T MmDriverCommit;
SIZE_T MmProcessCommit;
SIZE_T MmPagedPoolCommit;
SIZE_T MmPeakCommitment;
/* NOTE(review): lowercase 't' breaks the usual Mm naming convention
   (cf. MmTotalCommittedPages above) — renaming would break external
   references to this global, so it is left as-is */
SIZE_T MmtotalCommitLimitMaximum;

/* One bit per PFN; a set bit marks that page as a user ("LRU") page */
static RTL_BITMAP MiUserPfnBitMap;
45
46 /* FUNCTIONS *************************************************************/
47
48 VOID
49 NTAPI
50 MiInitializeUserPfnBitmap(VOID)
51 {
52 PVOID Bitmap;
53
54 /* Allocate enough buffer for the PFN bitmap and align it on 32-bits */
55 Bitmap = ExAllocatePoolWithTag(NonPagedPool,
56 (((MmHighestPhysicalPage + 1) + 31) / 32) * 4,
57 ' mM');
58 ASSERT(Bitmap);
59
60 /* Initialize it and clear all the bits to begin with */
61 RtlInitializeBitMap(&MiUserPfnBitMap,
62 Bitmap,
63 (ULONG)MmHighestPhysicalPage + 1);
64 RtlClearAllBits(&MiUserPfnBitMap);
65 }
66
67 PFN_NUMBER
68 NTAPI
69 MmGetLRUFirstUserPage(VOID)
70 {
71 ULONG Position;
72 KIRQL OldIrql;
73
74 /* Find the first user page */
75 OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
76 Position = RtlFindSetBits(&MiUserPfnBitMap, 1, 0);
77 KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
78 if (Position == 0xFFFFFFFF) return 0;
79
80 /* Return it */
81 ASSERT(Position != 0);
82 ASSERT_IS_ROS_PFN(MiGetPfnEntry(Position));
83 return Position;
84 }
85
86 VOID
87 NTAPI
88 MmInsertLRULastUserPage(PFN_NUMBER Pfn)
89 {
90 KIRQL OldIrql;
91
92 /* Set the page as a user page */
93 ASSERT(Pfn != 0);
94 ASSERT_IS_ROS_PFN(MiGetPfnEntry(Pfn));
95 ASSERT(!RtlCheckBit(&MiUserPfnBitMap, (ULONG)Pfn));
96 OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
97 RtlSetBit(&MiUserPfnBitMap, (ULONG)Pfn);
98 KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
99 }
100
101 PFN_NUMBER
102 NTAPI
103 MmGetLRUNextUserPage(PFN_NUMBER PreviousPfn)
104 {
105 ULONG Position;
106 KIRQL OldIrql;
107
108 /* Find the next user page */
109 OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
110 Position = RtlFindSetBits(&MiUserPfnBitMap, 1, (ULONG)PreviousPfn + 1);
111 KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
112 if (Position == 0xFFFFFFFF) return 0;
113
114 /* Return it */
115 ASSERT(Position != 0);
116 ASSERT_IS_ROS_PFN(MiGetPfnEntry(Position));
117 return Position;
118 }
119
120 VOID
121 NTAPI
122 MmRemoveLRUUserPage(PFN_NUMBER Page)
123 {
124 KIRQL OldIrql;
125
126 /* Unset the page as a user page */
127 ASSERT(Page != 0);
128 ASSERT_IS_ROS_PFN(MiGetPfnEntry(Page));
129 ASSERT(RtlCheckBit(&MiUserPfnBitMap, (ULONG)Page));
130 OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
131 RtlClearBit(&MiUserPfnBitMap, (ULONG)Page);
132 KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
133 }
134
135 BOOLEAN
136 NTAPI
137 MiIsPfnFree(IN PMMPFN Pfn1)
138 {
139 /* Must be a free or zero page, with no references, linked */
140 return ((Pfn1->u3.e1.PageLocation <= StandbyPageList) &&
141 (Pfn1->u1.Flink) &&
142 (Pfn1->u2.Blink) &&
143 !(Pfn1->u3.e2.ReferenceCount));
144 }
145
146 BOOLEAN
147 NTAPI
148 MiIsPfnInUse(IN PMMPFN Pfn1)
149 {
150 /* Standby list or higher, unlinked, and with references */
151 return !MiIsPfnFree(Pfn1);
152 }
153
/*
 * Builds an MDL describing freshly-allocated physical pages.
 *
 * LowAddress/HighAddress bound the acceptable physical range, SkipBytes is
 * unsupported (only validated), TotalBytes is the requested size. The
 * function may return an MDL covering FEWER pages than requested (possibly
 * as few as one); callers must check Mdl->ByteCount. Returns NULL if no MDL
 * or no pages at all could be obtained. Pages are returned zeroed, marked
 * ActiveAndValid, with reference/share count 1.
 */
PMDL
NTAPI
MiAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress,
                      IN PHYSICAL_ADDRESS HighAddress,
                      IN PHYSICAL_ADDRESS SkipBytes,
                      IN SIZE_T TotalBytes,
                      IN MI_PFN_CACHE_ATTRIBUTE CacheAttribute,
                      IN ULONG MdlFlags)
{
    PMDL Mdl;
    PFN_NUMBER PageCount, LowPage, HighPage, SkipPages, PagesFound = 0, Page;
    PPFN_NUMBER MdlPage, LastMdlPage;
    KIRQL OldIrql;
    PPHYSICAL_PAGE Pfn1;
    INT LookForZeroedPages;
    ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
    /* NOTE(review): PHYSICAL_ADDRESS structs are passed by value to %I64x;
       this relies on LARGE_INTEGER occupying exactly 8 bytes on the varargs
       stack — confirm on all supported architectures */
    DPRINT1("ARM3-DEBUG: Being called with %I64x %I64x %I64x %lx %d %d\n", LowAddress, HighAddress, SkipBytes, TotalBytes, CacheAttribute, MdlFlags);

    //
    // Convert the low address into a PFN
    //
    LowPage = (PFN_NUMBER)(LowAddress.QuadPart >> PAGE_SHIFT);

    //
    // Convert, and normalize, the high address into a PFN
    //
    HighPage = (PFN_NUMBER)(HighAddress.QuadPart >> PAGE_SHIFT);
    if (HighPage > MmHighestPhysicalPage) HighPage = MmHighestPhysicalPage;

    //
    // Validate skipbytes (must be page-aligned) and convert them into pages
    //
    if (BYTE_OFFSET(SkipBytes.LowPart)) return NULL;
    SkipPages = (PFN_NUMBER)(SkipBytes.QuadPart >> PAGE_SHIFT);

    /* This isn't supported at all */
    if (SkipPages) DPRINT1("WARNING: Caller requesting SkipBytes, MDL might be mismatched\n");

    //
    // Now compute the number of pages the MDL will cover
    //
    PageCount = (PFN_NUMBER)ADDRESS_AND_SIZE_TO_SPAN_PAGES(0, TotalBytes);
    do
    {
        //
        // Try creating an MDL for these many pages
        //
        Mdl = MmCreateMdl(NULL, NULL, PageCount << PAGE_SHIFT);
        if (Mdl) break;

        //
        // This function is not required to return the amount of pages requested
        // In fact, it can return as little as 1 page, and callers are supposed
        // to deal with this scenario. So re-attempt the allocation with less
        // pages than before (shrink by 1/16th each try), and see if it worked
        // this time.
        //
        PageCount -= (PageCount >> 4);
    } while (PageCount);

    //
    // Wow, not even a single page was around!
    //
    if (!Mdl) return NULL;

    //
    // This is where the page array starts....
    //
    MdlPage = (PPFN_NUMBER)(Mdl + 1);

    //
    // Lock the PFN database
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    //
    // Are we looking for any pages, without discriminating?
    //
    if ((LowPage == 0) && (HighPage == MmHighestPhysicalPage))
    {
        //
        // Well then, let's go shopping
        //
        while (PagesFound < PageCount)
        {
            /* Grab a page */
            MI_SET_USAGE(MI_USAGE_MDL);
            MI_SET_PROCESS2("Kernel");
            Page = MiRemoveAnyPage(0);
            if (Page == 0)
            {
                /* This is not good... hopefully we have at least SOME pages */
                ASSERT(PagesFound);
                break;
            }

            /* Grab the page entry for it */
            Pfn1 = MiGetPfnEntry(Page);

            //
            // Make sure it's really free
            //
            ASSERT(Pfn1->u3.e2.ReferenceCount == 0);

            /* Now setup the page and mark it: referenced once, PTE-less
               allocation (magic frame marker), single-page allocation run */
            Pfn1->u3.e2.ReferenceCount = 1;
            Pfn1->u2.ShareCount = 1;
            MI_SET_PFN_DELETED(Pfn1);
            Pfn1->u4.PteFrame = 0x1FFEDCB;
            Pfn1->u3.e1.StartOfAllocation = 1;
            Pfn1->u3.e1.EndOfAllocation = 1;
            Pfn1->u4.VerifierAllocation = 0;

            //
            // Save it into the MDL
            //
            *MdlPage++ = MiGetPfnEntryIndex(Pfn1);
            PagesFound++;
        }
    }
    else
    {
        //
        // You want specific range of pages. We'll do this in two runs:
        // pass 1 takes only zeroed pages, pass 0 takes any free page
        //
        for (LookForZeroedPages = 1; LookForZeroedPages >= 0; LookForZeroedPages--)
        {
            //
            // Scan the range you specified
            // NOTE(review): the upper bound is exclusive (Page < HighPage),
            // so the page at HighPage itself is never considered — confirm
            // this is the intended contract
            //
            for (Page = LowPage; Page < HighPage; Page++)
            {
                //
                // Get the PFN entry for this page
                //
                Pfn1 = MiGetPfnEntry(Page);
                ASSERT(Pfn1);

                //
                // Make sure it's free and if this is our first pass, zeroed
                //
                if (MiIsPfnInUse(Pfn1)) continue;
                if ((Pfn1->u3.e1.PageLocation == ZeroedPageList) != LookForZeroedPages) continue;

                /* Remove the page from the free or zero list */
                ASSERT(Pfn1->u3.e1.ReadInProgress == 0);
                MI_SET_USAGE(MI_USAGE_MDL);
                MI_SET_PROCESS2("Kernel");
                MiUnlinkFreeOrZeroedPage(Pfn1);

                //
                // Sanity checks
                //
                ASSERT(Pfn1->u3.e2.ReferenceCount == 0);

                //
                // Now setup the page and mark it (same marking as the
                // non-discriminating path above)
                //
                Pfn1->u3.e2.ReferenceCount = 1;
                Pfn1->u2.ShareCount = 1;
                MI_SET_PFN_DELETED(Pfn1);
                Pfn1->u4.PteFrame = 0x1FFEDCB;
                Pfn1->u3.e1.StartOfAllocation = 1;
                Pfn1->u3.e1.EndOfAllocation = 1;
                Pfn1->u4.VerifierAllocation = 0;

                //
                // Save this page into the MDL
                //
                *MdlPage++ = Page;
                if (++PagesFound == PageCount) break;
            }

            //
            // If the first pass was enough, don't keep going, otherwise, go again
            //
            if (PagesFound == PageCount) break;
        }
    }

    //
    // Now release the PFN count
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    //
    // We might've found less pages, but not more ;-)
    //
    if (PagesFound != PageCount) ASSERT(PagesFound < PageCount);
    if (!PagesFound)
    {
        //
        // If we didn' tfind any pages at all, fail
        //
        DPRINT1("NO MDL PAGES!\n");
        ExFreePoolWithTag(Mdl, TAG_MDL);
        return NULL;
    }

    //
    // Write out how many pages we found
    //
    Mdl->ByteCount = (ULONG)(PagesFound << PAGE_SHIFT);

    //
    // Terminate the MDL array if there's certain missing pages
    //
    if (PagesFound != PageCount) *MdlPage = LIST_HEAD;

    //
    // Now go back and loop over all the MDL pages
    //
    MdlPage = (PPFN_NUMBER)(Mdl + 1);
    LastMdlPage = MdlPage + PagesFound;
    while (MdlPage < LastMdlPage)
    {
        //
        // Check if we've reached the end
        //
        Page = *MdlPage++;
        if (Page == LIST_HEAD) break;

        //
        // Get the PFN entry for the page and check if we should zero it out
        // (pages taken from the zeroed list are already clean)
        //
        Pfn1 = MiGetPfnEntry(Page);
        ASSERT(Pfn1);
        if (Pfn1->u3.e1.PageLocation != ZeroedPageList) MiZeroPhysicalPage(Page);
        Pfn1->u3.e1.PageLocation = ActiveAndValid;
    }

    //
    // We're done, mark the pages as locked
    //
    Mdl->Process = NULL;
    Mdl->MdlFlags |= MDL_PAGES_LOCKED;
    return Mdl;
}
391
392 VOID
393 NTAPI
394 MmSetRmapListHeadPage(PFN_NUMBER Pfn, PMM_RMAP_ENTRY ListHead)
395 {
396 KIRQL oldIrql;
397 PMMPFN Pfn1;
398
399 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
400 Pfn1 = MiGetPfnEntry(Pfn);
401 ASSERT(Pfn1);
402 ASSERT_IS_ROS_PFN(Pfn1);
403
404 if (ListHead)
405 {
406 /* Should not be trying to insert an RMAP for a non-active page */
407 ASSERT(MiIsPfnInUse(Pfn1) == TRUE);
408
409 /* Set the list head address */
410 MI_GET_ROS_DATA(Pfn1)->RmapListHead = ListHead;
411 }
412 else
413 {
414 /* ReactOS semantics dictate the page is STILL active right now */
415 ASSERT(MiIsPfnInUse(Pfn1) == TRUE);
416
417 /* In this case, the RMAP is actually being removed, so clear field */
418 MI_GET_ROS_DATA(Pfn1)->RmapListHead = NULL;
419
420 /* ReactOS semantics will now release the page, which will make it free and enter a colored list */
421 }
422
423 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
424 }
425
426 PMM_RMAP_ENTRY
427 NTAPI
428 MmGetRmapListHeadPage(PFN_NUMBER Pfn)
429 {
430 KIRQL oldIrql;
431 PMM_RMAP_ENTRY ListHead;
432 PMMPFN Pfn1;
433
434 /* Lock PFN database */
435 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
436
437 /* Get the entry */
438 Pfn1 = MiGetPfnEntry(Pfn);
439 ASSERT(Pfn1);
440 ASSERT_IS_ROS_PFN(Pfn1);
441
442 /* Get the list head */
443 ListHead = MI_GET_ROS_DATA(Pfn1)->RmapListHead;
444
445 /* Should not have an RMAP for a non-active page */
446 ASSERT(MiIsPfnInUse(Pfn1) == TRUE);
447
448 /* Release PFN database and return rmap list head */
449 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
450 return ListHead;
451 }
452
453 VOID
454 NTAPI
455 MmSetSavedSwapEntryPage(PFN_NUMBER Pfn, SWAPENTRY SwapEntry)
456 {
457 KIRQL oldIrql;
458 PPHYSICAL_PAGE Page;
459
460 Page = MiGetPfnEntry(Pfn);
461 ASSERT(Page);
462 ASSERT_IS_ROS_PFN(Page);
463
464 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
465 MI_GET_ROS_DATA(Page)->SwapEntry = SwapEntry;
466 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
467 }
468
469 SWAPENTRY
470 NTAPI
471 MmGetSavedSwapEntryPage(PFN_NUMBER Pfn)
472 {
473 SWAPENTRY SwapEntry;
474 KIRQL oldIrql;
475 PPHYSICAL_PAGE Page;
476
477 Page = MiGetPfnEntry(Pfn);
478 ASSERT(Page);
479 ASSERT_IS_ROS_PFN(Page);
480
481 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
482 SwapEntry = MI_GET_ROS_DATA(Page)->SwapEntry;
483 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
484
485 return(SwapEntry);
486 }
487
488 VOID
489 NTAPI
490 MmReferencePage(PFN_NUMBER Pfn)
491 {
492 PPHYSICAL_PAGE Page;
493
494 DPRINT("MmReferencePage(PysicalAddress %x)\n", Pfn << PAGE_SHIFT);
495
496 ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
497 ASSERT(Pfn != 0);
498 ASSERT(Pfn <= MmHighestPhysicalPage);
499
500 Page = MiGetPfnEntry(Pfn);
501 ASSERT(Page);
502 ASSERT_IS_ROS_PFN(Page);
503
504 ASSERT(Page->u3.e2.ReferenceCount != 0);
505 Page->u3.e2.ReferenceCount++;
506 }
507
508 ULONG
509 NTAPI
510 MmGetReferenceCountPage(PFN_NUMBER Pfn)
511 {
512 KIRQL oldIrql;
513 ULONG RCount;
514 PPHYSICAL_PAGE Page;
515
516 DPRINT("MmGetReferenceCountPage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
517
518 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
519 Page = MiGetPfnEntry(Pfn);
520 ASSERT(Page);
521 ASSERT_IS_ROS_PFN(Page);
522
523 RCount = Page->u3.e2.ReferenceCount;
524
525 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
526 return(RCount);
527 }
528
529 BOOLEAN
530 NTAPI
531 MmIsPageInUse(PFN_NUMBER Pfn)
532 {
533 return MiIsPfnInUse(MiGetPfnEntry(Pfn));
534 }
535
536 VOID
537 NTAPI
538 MmDereferencePage(PFN_NUMBER Pfn)
539 {
540 PPHYSICAL_PAGE Page;
541 DPRINT("MmDereferencePage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
542
543 Page = MiGetPfnEntry(Pfn);
544 ASSERT(Page);
545 ASSERT_IS_ROS_PFN(Page);
546
547 ASSERT(Page->u3.e2.ReferenceCount != 0);
548 Page->u3.e2.ReferenceCount--;
549 if (Page->u3.e2.ReferenceCount == 0)
550 {
551 /* Mark the page temporarily as valid, we're going to make it free soon */
552 Page->u3.e1.PageLocation = ActiveAndValid;
553
554 /* It's not a ROS PFN anymore */
555 Page->u4.AweAllocation = FALSE;
556 ExFreePool(MI_GET_ROS_DATA(Page));
557 Page->RosMmData = 0;
558
559 /* Bring it back into the free list */
560 DPRINT("Legacy free: %lx\n", Pfn);
561 MiInsertPageInFreeList(Pfn);
562 }
563 }
564
/*
 * Legacy single-page allocator: pulls one zeroed page from the colored
 * zero-page lists, marks it active with one reference, and attaches the
 * extra ReactOS per-page data (swap entry + RMAP list head). Returns the
 * PFN, or 0 if the system is out of zeroed pages.
 */
PFN_NUMBER
NTAPI
MmAllocPage(ULONG Type)
{
    PFN_NUMBER PfnOffset;
    PMMPFN Pfn1;

    /* Take a zeroed page, round-robining across page colors */
    PfnOffset = MiRemoveZeroPage(MI_GET_NEXT_COLOR());

    if (!PfnOffset)
    {
        DPRINT1("MmAllocPage(): Out of memory\n");
        return 0;
    }

    DPRINT("Legacy allocate: %lx\n", PfnOffset);
    Pfn1 = MiGetPfnEntry(PfnOffset);
    Pfn1->u3.e2.ReferenceCount = 1;
    Pfn1->u3.e1.PageLocation = ActiveAndValid;

    /* This marks the PFN as a ReactOS PFN */
    Pfn1->u4.AweAllocation = TRUE;

    /* Allocate the extra ReactOS Data and initialize its two fields.
       NOTE(review): the allocation is only checked via ASSERT, which is
       compiled out on free builds — a NULL return would be dereferenced
       below; confirm whether callers can tolerate a failure path here.
       NOTE(review): the (LONG) cast of a pointer looks truncating on
       64-bit targets — verify the declared type of RosMmData */
    Pfn1->RosMmData = (LONG)ExAllocatePoolWithTag(NonPagedPool, sizeof(MMROSPFN), 'RsPf');
    ASSERT(MI_GET_ROS_DATA(Pfn1) != NULL);
    ASSERT_IS_ROS_PFN(Pfn1);
    MI_GET_ROS_DATA(Pfn1)->SwapEntry = 0;
    MI_GET_ROS_DATA(Pfn1)->RmapListHead = NULL;

    return PfnOffset;
}
597
598 /* EOF */