[RSHELL]
[reactos.git] / ntoskrnl / mm / freelist.c
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/mm/freelist.c
5 * PURPOSE: Handle the list of free physical pages
6 *
7 * PROGRAMMERS: David Welch (welch@cwcom.net)
8 * Robert Bergkvist
9 */
10
11 /* INCLUDES ****************************************************************/
12
13 #include <ntoskrnl.h>
14 #define NDEBUG
15 #include <debug.h>
16
17 #if defined (ALLOC_PRAGMA)
18 #pragma alloc_text(INIT, MmInitializePageList)
19 #endif
20
21 #define MODULE_INVOLVED_IN_ARM3
22 #include "ARM3/miarm.h"
23
24 #define ASSERT_IS_ROS_PFN(x) ASSERT(MI_IS_ROS_PFN(x) == TRUE);
25
26 /* GLOBALS ****************************************************************/
27
/* Base of the PFN database array (one MMPFN per physical page frame) */
PMMPFN MmPfnDatabase;

/* Running counts of physical-page availability */
PFN_NUMBER MmAvailablePages;
PFN_NUMBER MmResidentAvailablePages;
PFN_NUMBER MmResidentAvailableAtInit;

/* Commit-charge accounting */
SIZE_T MmTotalCommittedPages;
SIZE_T MmSharedCommit;
SIZE_T MmDriverCommit;
SIZE_T MmProcessCommit;
SIZE_T MmPagedPoolCommit;
SIZE_T MmPeakCommitment;
/* NOTE(review): lowercase 't' breaks the usual MmXxx naming convention;
   renaming would affect external references, so left as-is — confirm */
SIZE_T MmtotalCommitLimitMaximum;

/* One bit per PFN: set when the frame is a ReactOS "user" page (legacy LRU) */
static RTL_BITMAP MiUserPfnBitMap;
43
44 /* FUNCTIONS *************************************************************/
45
46 VOID
47 NTAPI
48 MiInitializeUserPfnBitmap(VOID)
49 {
50 PVOID Bitmap;
51
52 /* Allocate enough buffer for the PFN bitmap and align it on 32-bits */
53 Bitmap = ExAllocatePoolWithTag(NonPagedPool,
54 (((MmHighestPhysicalPage + 1) + 31) / 32) * 4,
55 ' mM');
56 ASSERT(Bitmap);
57
58 /* Initialize it and clear all the bits to begin with */
59 RtlInitializeBitMap(&MiUserPfnBitMap,
60 Bitmap,
61 (ULONG)MmHighestPhysicalPage + 1);
62 RtlClearAllBits(&MiUserPfnBitMap);
63 }
64
65 PFN_NUMBER
66 NTAPI
67 MmGetLRUFirstUserPage(VOID)
68 {
69 ULONG Position;
70 KIRQL OldIrql;
71
72 /* Find the first user page */
73 OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
74 Position = RtlFindSetBits(&MiUserPfnBitMap, 1, 0);
75 KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
76 if (Position == 0xFFFFFFFF) return 0;
77
78 /* Return it */
79 ASSERT(Position != 0);
80 ASSERT_IS_ROS_PFN(MiGetPfnEntry(Position));
81 return Position;
82 }
83
84 VOID
85 NTAPI
86 MmInsertLRULastUserPage(PFN_NUMBER Pfn)
87 {
88 KIRQL OldIrql;
89
90 /* Set the page as a user page */
91 ASSERT(Pfn != 0);
92 ASSERT_IS_ROS_PFN(MiGetPfnEntry(Pfn));
93 ASSERT(!RtlCheckBit(&MiUserPfnBitMap, (ULONG)Pfn));
94 OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
95 RtlSetBit(&MiUserPfnBitMap, (ULONG)Pfn);
96 KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
97 }
98
99 PFN_NUMBER
100 NTAPI
101 MmGetLRUNextUserPage(PFN_NUMBER PreviousPfn)
102 {
103 ULONG Position;
104 KIRQL OldIrql;
105
106 /* Find the next user page */
107 OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
108 Position = RtlFindSetBits(&MiUserPfnBitMap, 1, (ULONG)PreviousPfn + 1);
109 KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
110 if (Position == 0xFFFFFFFF) return 0;
111
112 /* Return it */
113 ASSERT(Position != 0);
114 ASSERT_IS_ROS_PFN(MiGetPfnEntry(Position));
115 return Position;
116 }
117
118 VOID
119 NTAPI
120 MmRemoveLRUUserPage(PFN_NUMBER Page)
121 {
122 KIRQL OldIrql;
123
124 /* Unset the page as a user page */
125 ASSERT(Page != 0);
126 ASSERT_IS_ROS_PFN(MiGetPfnEntry(Page));
127 ASSERT(RtlCheckBit(&MiUserPfnBitMap, (ULONG)Page));
128 OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
129 RtlClearBit(&MiUserPfnBitMap, (ULONG)Page);
130 KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
131 }
132
133 BOOLEAN
134 NTAPI
135 MiIsPfnFree(IN PMMPFN Pfn1)
136 {
137 /* Must be a free or zero page, with no references, linked */
138 return ((Pfn1->u3.e1.PageLocation <= StandbyPageList) &&
139 (Pfn1->u1.Flink) &&
140 (Pfn1->u2.Blink) &&
141 !(Pfn1->u3.e2.ReferenceCount));
142 }
143
144 BOOLEAN
145 NTAPI
146 MiIsPfnInUse(IN PMMPFN Pfn1)
147 {
148 /* Standby list or higher, unlinked, and with references */
149 return !MiIsPfnFree(Pfn1);
150 }
151
152 PMDL
153 NTAPI
154 MiAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress,
155 IN PHYSICAL_ADDRESS HighAddress,
156 IN PHYSICAL_ADDRESS SkipBytes,
157 IN SIZE_T TotalBytes,
158 IN MI_PFN_CACHE_ATTRIBUTE CacheAttribute,
159 IN ULONG MdlFlags)
160 {
161 PMDL Mdl;
162 PFN_NUMBER PageCount, LowPage, HighPage, SkipPages, PagesFound = 0, Page;
163 PPFN_NUMBER MdlPage, LastMdlPage;
164 KIRQL OldIrql;
165 PMMPFN Pfn1;
166 INT LookForZeroedPages;
167 ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
168 DPRINT1("ARM3-DEBUG: Being called with %I64x %I64x %I64x %lx %d %lu\n", LowAddress, HighAddress, SkipBytes, TotalBytes, CacheAttribute, MdlFlags);
169
170 //
171 // Convert the low address into a PFN
172 //
173 LowPage = (PFN_NUMBER)(LowAddress.QuadPart >> PAGE_SHIFT);
174
175 //
176 // Convert, and normalize, the high address into a PFN
177 //
178 HighPage = (PFN_NUMBER)(HighAddress.QuadPart >> PAGE_SHIFT);
179 if (HighPage > MmHighestPhysicalPage) HighPage = MmHighestPhysicalPage;
180
181 //
182 // Validate skipbytes and convert them into pages
183 //
184 if (BYTE_OFFSET(SkipBytes.LowPart)) return NULL;
185 SkipPages = (PFN_NUMBER)(SkipBytes.QuadPart >> PAGE_SHIFT);
186
187 /* This isn't supported at all */
188 if (SkipPages) DPRINT1("WARNING: Caller requesting SkipBytes, MDL might be mismatched\n");
189
190 //
191 // Now compute the number of pages the MDL will cover
192 //
193 PageCount = (PFN_NUMBER)ADDRESS_AND_SIZE_TO_SPAN_PAGES(0, TotalBytes);
194 do
195 {
196 //
197 // Try creating an MDL for these many pages
198 //
199 Mdl = MmCreateMdl(NULL, NULL, PageCount << PAGE_SHIFT);
200 if (Mdl) break;
201
202 //
203 // This function is not required to return the amount of pages requested
204 // In fact, it can return as little as 1 page, and callers are supposed
205 // to deal with this scenario. So re-attempt the allocation with less
206 // pages than before, and see if it worked this time.
207 //
208 PageCount -= (PageCount >> 4);
209 } while (PageCount);
210
211 //
212 // Wow, not even a single page was around!
213 //
214 if (!Mdl) return NULL;
215
216 //
217 // This is where the page array starts....
218 //
219 MdlPage = (PPFN_NUMBER)(Mdl + 1);
220
221 //
222 // Lock the PFN database
223 //
224 OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
225
226 //
227 // Are we looking for any pages, without discriminating?
228 //
229 if ((LowPage == 0) && (HighPage == MmHighestPhysicalPage))
230 {
231 //
232 // Well then, let's go shopping
233 //
234 while (PagesFound < PageCount)
235 {
236 /* Grab a page */
237 MI_SET_USAGE(MI_USAGE_MDL);
238 MI_SET_PROCESS2("Kernel");
239 Page = MiRemoveAnyPage(0);
240 if (Page == 0)
241 {
242 /* This is not good... hopefully we have at least SOME pages */
243 ASSERT(PagesFound);
244 break;
245 }
246
247 /* Grab the page entry for it */
248 Pfn1 = MiGetPfnEntry(Page);
249
250 //
251 // Make sure it's really free
252 //
253 ASSERT(Pfn1->u3.e2.ReferenceCount == 0);
254
255 /* Now setup the page and mark it */
256 Pfn1->u3.e2.ReferenceCount = 1;
257 Pfn1->u2.ShareCount = 1;
258 MI_SET_PFN_DELETED(Pfn1);
259 Pfn1->u4.PteFrame = 0x1FFEDCB;
260 Pfn1->u3.e1.StartOfAllocation = 1;
261 Pfn1->u3.e1.EndOfAllocation = 1;
262 Pfn1->u4.VerifierAllocation = 0;
263
264 //
265 // Save it into the MDL
266 //
267 *MdlPage++ = MiGetPfnEntryIndex(Pfn1);
268 PagesFound++;
269 }
270 }
271 else
272 {
273 //
274 // You want specific range of pages. We'll do this in two runs
275 //
276 for (LookForZeroedPages = 1; LookForZeroedPages >= 0; LookForZeroedPages--)
277 {
278 //
279 // Scan the range you specified
280 //
281 for (Page = LowPage; Page < HighPage; Page++)
282 {
283 //
284 // Get the PFN entry for this page
285 //
286 Pfn1 = MiGetPfnEntry(Page);
287 ASSERT(Pfn1);
288
289 //
290 // Make sure it's free and if this is our first pass, zeroed
291 //
292 if (MiIsPfnInUse(Pfn1)) continue;
293 if ((Pfn1->u3.e1.PageLocation == ZeroedPageList) != LookForZeroedPages) continue;
294
295 /* Remove the page from the free or zero list */
296 ASSERT(Pfn1->u3.e1.ReadInProgress == 0);
297 MI_SET_USAGE(MI_USAGE_MDL);
298 MI_SET_PROCESS2("Kernel");
299 MiUnlinkFreeOrZeroedPage(Pfn1);
300
301 //
302 // Sanity checks
303 //
304 ASSERT(Pfn1->u3.e2.ReferenceCount == 0);
305
306 //
307 // Now setup the page and mark it
308 //
309 Pfn1->u3.e2.ReferenceCount = 1;
310 Pfn1->u2.ShareCount = 1;
311 MI_SET_PFN_DELETED(Pfn1);
312 Pfn1->u4.PteFrame = 0x1FFEDCB;
313 Pfn1->u3.e1.StartOfAllocation = 1;
314 Pfn1->u3.e1.EndOfAllocation = 1;
315 Pfn1->u4.VerifierAllocation = 0;
316
317 //
318 // Save this page into the MDL
319 //
320 *MdlPage++ = Page;
321 if (++PagesFound == PageCount) break;
322 }
323
324 //
325 // If the first pass was enough, don't keep going, otherwise, go again
326 //
327 if (PagesFound == PageCount) break;
328 }
329 }
330
331 //
332 // Now release the PFN count
333 //
334 KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
335
336 //
337 // We might've found less pages, but not more ;-)
338 //
339 if (PagesFound != PageCount) ASSERT(PagesFound < PageCount);
340 if (!PagesFound)
341 {
342 //
343 // If we didn' tfind any pages at all, fail
344 //
345 DPRINT1("NO MDL PAGES!\n");
346 ExFreePoolWithTag(Mdl, TAG_MDL);
347 return NULL;
348 }
349
350 //
351 // Write out how many pages we found
352 //
353 Mdl->ByteCount = (ULONG)(PagesFound << PAGE_SHIFT);
354
355 //
356 // Terminate the MDL array if there's certain missing pages
357 //
358 if (PagesFound != PageCount) *MdlPage = LIST_HEAD;
359
360 //
361 // Now go back and loop over all the MDL pages
362 //
363 MdlPage = (PPFN_NUMBER)(Mdl + 1);
364 LastMdlPage = MdlPage + PagesFound;
365 while (MdlPage < LastMdlPage)
366 {
367 //
368 // Check if we've reached the end
369 //
370 Page = *MdlPage++;
371 if (Page == LIST_HEAD) break;
372
373 //
374 // Get the PFN entry for the page and check if we should zero it out
375 //
376 Pfn1 = MiGetPfnEntry(Page);
377 ASSERT(Pfn1);
378 if (Pfn1->u3.e1.PageLocation != ZeroedPageList) MiZeroPhysicalPage(Page);
379 Pfn1->u3.e1.PageLocation = ActiveAndValid;
380 }
381
382 //
383 // We're done, mark the pages as locked
384 //
385 Mdl->Process = NULL;
386 Mdl->MdlFlags |= MDL_PAGES_LOCKED;
387 return Mdl;
388 }
389
390 VOID
391 NTAPI
392 MmSetRmapListHeadPage(PFN_NUMBER Pfn, PMM_RMAP_ENTRY ListHead)
393 {
394 KIRQL oldIrql;
395 PMMPFN Pfn1;
396
397 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
398 Pfn1 = MiGetPfnEntry(Pfn);
399 ASSERT(Pfn1);
400 ASSERT_IS_ROS_PFN(Pfn1);
401
402 if (ListHead)
403 {
404 /* Should not be trying to insert an RMAP for a non-active page */
405 ASSERT(MiIsPfnInUse(Pfn1) == TRUE);
406
407 /* Set the list head address */
408 Pfn1->RmapListHead = ListHead;
409 }
410 else
411 {
412 /* ReactOS semantics dictate the page is STILL active right now */
413 ASSERT(MiIsPfnInUse(Pfn1) == TRUE);
414
415 /* In this case, the RMAP is actually being removed, so clear field */
416 Pfn1->RmapListHead = NULL;
417
418 /* ReactOS semantics will now release the page, which will make it free and enter a colored list */
419 }
420
421 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
422 }
423
424 PMM_RMAP_ENTRY
425 NTAPI
426 MmGetRmapListHeadPage(PFN_NUMBER Pfn)
427 {
428 KIRQL oldIrql;
429 PMM_RMAP_ENTRY ListHead;
430 PMMPFN Pfn1;
431
432 /* Lock PFN database */
433 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
434
435 /* Get the entry */
436 Pfn1 = MiGetPfnEntry(Pfn);
437 ASSERT(Pfn1);
438 ASSERT_IS_ROS_PFN(Pfn1);
439
440 /* Get the list head */
441 ListHead = Pfn1->RmapListHead;
442
443 /* Should not have an RMAP for a non-active page */
444 ASSERT(MiIsPfnInUse(Pfn1) == TRUE);
445
446 /* Release PFN database and return rmap list head */
447 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
448 return ListHead;
449 }
450
451 VOID
452 NTAPI
453 MmSetSavedSwapEntryPage(PFN_NUMBER Pfn, SWAPENTRY SwapEntry)
454 {
455 KIRQL oldIrql;
456 PMMPFN Pfn1;
457
458 Pfn1 = MiGetPfnEntry(Pfn);
459 ASSERT(Pfn1);
460 ASSERT_IS_ROS_PFN(Pfn1);
461
462 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
463 Pfn1->u1.SwapEntry = SwapEntry;
464 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
465 }
466
467 SWAPENTRY
468 NTAPI
469 MmGetSavedSwapEntryPage(PFN_NUMBER Pfn)
470 {
471 SWAPENTRY SwapEntry;
472 KIRQL oldIrql;
473 PMMPFN Pfn1;
474
475 Pfn1 = MiGetPfnEntry(Pfn);
476 ASSERT(Pfn1);
477 ASSERT_IS_ROS_PFN(Pfn1);
478
479 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
480 SwapEntry = Pfn1->u1.SwapEntry;
481 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
482
483 return(SwapEntry);
484 }
485
486 VOID
487 NTAPI
488 MmReferencePage(PFN_NUMBER Pfn)
489 {
490 PMMPFN Pfn1;
491
492 DPRINT("MmReferencePage(PysicalAddress %x)\n", Pfn << PAGE_SHIFT);
493
494 ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
495 ASSERT(Pfn != 0);
496 ASSERT(Pfn <= MmHighestPhysicalPage);
497
498 Pfn1 = MiGetPfnEntry(Pfn);
499 ASSERT(Pfn1);
500 ASSERT_IS_ROS_PFN(Pfn1);
501
502 ASSERT(Pfn1->u3.e2.ReferenceCount != 0);
503 Pfn1->u3.e2.ReferenceCount++;
504 }
505
506 ULONG
507 NTAPI
508 MmGetReferenceCountPage(PFN_NUMBER Pfn)
509 {
510 KIRQL oldIrql;
511 ULONG RCount;
512 PMMPFN Pfn1;
513
514 DPRINT("MmGetReferenceCountPage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
515
516 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
517 Pfn1 = MiGetPfnEntry(Pfn);
518 ASSERT(Pfn1);
519 ASSERT_IS_ROS_PFN(Pfn1);
520
521 RCount = Pfn1->u3.e2.ReferenceCount;
522
523 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
524 return(RCount);
525 }
526
527 BOOLEAN
528 NTAPI
529 MmIsPageInUse(PFN_NUMBER Pfn)
530 {
531 return MiIsPfnInUse(MiGetPfnEntry(Pfn));
532 }
533
534 VOID
535 NTAPI
536 MmDereferencePage(PFN_NUMBER Pfn)
537 {
538 PMMPFN Pfn1;
539 DPRINT("MmDereferencePage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
540
541 Pfn1 = MiGetPfnEntry(Pfn);
542 ASSERT(Pfn1);
543 ASSERT_IS_ROS_PFN(Pfn1);
544
545 ASSERT(Pfn1->u3.e2.ReferenceCount != 0);
546 Pfn1->u3.e2.ReferenceCount--;
547 if (Pfn1->u3.e2.ReferenceCount == 0)
548 {
549 /* Mark the page temporarily as valid, we're going to make it free soon */
550 Pfn1->u3.e1.PageLocation = ActiveAndValid;
551
552 /* It's not a ROS PFN anymore */
553 Pfn1->u4.AweAllocation = FALSE;
554
555 /* Bring it back into the free list */
556 DPRINT("Legacy free: %lx\n", Pfn);
557 MiInsertPageInFreeList(Pfn);
558 }
559 }
560
561 PFN_NUMBER
562 NTAPI
563 MmAllocPage(ULONG Type)
564 {
565 PFN_NUMBER PfnOffset;
566 PMMPFN Pfn1;
567
568 PfnOffset = MiRemoveZeroPage(MI_GET_NEXT_COLOR());
569
570 if (!PfnOffset)
571 {
572 DPRINT1("MmAllocPage(): Out of memory\n");
573 return 0;
574 }
575
576 DPRINT("Legacy allocate: %lx\n", PfnOffset);
577 Pfn1 = MiGetPfnEntry(PfnOffset);
578 Pfn1->u3.e2.ReferenceCount = 1;
579 Pfn1->u3.e1.PageLocation = ActiveAndValid;
580
581 /* This marks the PFN as a ReactOS PFN */
582 Pfn1->u4.AweAllocation = TRUE;
583
584 /* Allocate the extra ReactOS Data and zero it out */
585 Pfn1->u1.SwapEntry = 0;
586 Pfn1->RmapListHead = NULL;
587
588 return PfnOffset;
589 }
590
591 /* EOF */