[NTOS]: Read almost all the Memory Management variables into the system configuration...
[reactos.git] / reactos / ntoskrnl / mm / freelist.c
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/mm/freelist.c
5 * PURPOSE: Handle the list of free physical pages
6 *
7 * PROGRAMMERS: David Welch (welch@cwcom.net)
8 * Robert Bergkvist
9 */
10
11 /* INCLUDES ****************************************************************/
12
13 #include <ntoskrnl.h>
14 #define NDEBUG
15 #include <debug.h>
16
17 #if defined (ALLOC_PRAGMA)
18 #pragma alloc_text(INIT, MmInitializePageList)
19 #endif
20
21 #define MODULE_INVOLVED_IN_ARM3
22 #include "ARM3/miarm.h"
23
24 /* GLOBALS ****************************************************************/
25
//
//
// ReactOS to NT Physical Page Descriptor Entry Legacy Mapping Definitions
//
//        REACTOS                   NT
//
// The legacy ReactOS rmap list head is stored in the NT AWE reference count
// field of the MMPFN; PHYSICAL_PAGE is the legacy name for the NT MMPFN.
#define RmapListHead AweReferenceCount
#define PHYSICAL_PAGE MMPFN
#define PPHYSICAL_PAGE PMMPFN

/* The first array contains ReactOS PFNs, the second contains ARM3 PFNs */
PPHYSICAL_PAGE MmPfnDatabase[2];

/* Page availability counters (pages, not bytes) */
PFN_NUMBER MmAvailablePages;
PFN_NUMBER MmResidentAvailablePages;
PFN_NUMBER MmResidentAvailableAtInit;

/* Commit charge accounting (in pages, tracked as SIZE_T) */
SIZE_T MmTotalCommitLimit;
SIZE_T MmTotalCommittedPages;
SIZE_T MmSharedCommit;
SIZE_T MmDriverCommit;
SIZE_T MmProcessCommit;
SIZE_T MmPagedPoolCommit;
SIZE_T MmPeakCommitment;
/* NOTE(review): lowercase 't' breaks the MmTotal... naming convention;
   renaming would break external references — confirm before fixing */
SIZE_T MmtotalCommitLimitMaximum;

/* Signaled when the zero-page thread should wake and drain the free list */
static KEVENT ZeroPageThreadEvent;
static BOOLEAN ZeroPageThreadShouldTerminate = FALSE;
/* One bit per PFN; set bits mark pages handed out to user-mode consumers */
static RTL_BITMAP MiUserPfnBitMap;
56 /* FUNCTIONS *************************************************************/
57
58 VOID
59 NTAPI
60 MiInitializeUserPfnBitmap(VOID)
61 {
62 PVOID Bitmap;
63
64 /* Allocate enough buffer for the PFN bitmap and align it on 32-bits */
65 Bitmap = ExAllocatePoolWithTag(NonPagedPool,
66 (((MmHighestPhysicalPage + 1) + 31) / 32) * 4,
67 ' mM');
68 ASSERT(Bitmap);
69
70 /* Initialize it and clear all the bits to begin with */
71 RtlInitializeBitMap(&MiUserPfnBitMap,
72 Bitmap,
73 MmHighestPhysicalPage + 1);
74 RtlClearAllBits(&MiUserPfnBitMap);
75 }
76
77 PFN_TYPE
78 NTAPI
79 MmGetLRUFirstUserPage(VOID)
80 {
81 ULONG Position;
82 KIRQL OldIrql;
83
84 /* Find the first user page */
85 OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
86 Position = RtlFindSetBits(&MiUserPfnBitMap, 1, 0);
87 KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
88 if (Position == 0xFFFFFFFF) return 0;
89
90 /* Return it */
91 return Position;
92 }
93
94 VOID
95 NTAPI
96 MmInsertLRULastUserPage(PFN_TYPE Pfn)
97 {
98 KIRQL OldIrql;
99
100 /* Set the page as a user page */
101 OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
102 RtlSetBit(&MiUserPfnBitMap, Pfn);
103 KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
104 }
105
106 PFN_TYPE
107 NTAPI
108 MmGetLRUNextUserPage(PFN_TYPE PreviousPfn)
109 {
110 ULONG Position;
111 KIRQL OldIrql;
112
113 /* Find the next user page */
114 OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
115 Position = RtlFindSetBits(&MiUserPfnBitMap, 1, PreviousPfn + 1);
116 KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
117 if (Position == 0xFFFFFFFF) return 0;
118
119 /* Return it */
120 return Position;
121 }
122
/* Clear the user-page bit for the given PFN. */
VOID
NTAPI
MmRemoveLRUUserPage(PFN_TYPE Page)
{
    /* Unset the page as a user page.
       NOTE(review): unlike the other LRU helpers, no PFN lock is taken here —
       presumably the caller already holds it; confirm at call sites */
    RtlClearBit(&MiUserPfnBitMap, Page);
}
130
131 BOOLEAN
132 NTAPI
133 MiIsPfnInUse(IN PMMPFN Pfn1)
134 {
135 return ((Pfn1->u3.e1.PageLocation != FreePageList) &&
136 (Pfn1->u3.e1.PageLocation != ZeroedPageList));
137 }
138
/**
 * MiFindContiguousPages
 *
 * Searches the physical memory runs for SizeInPages contiguous free (or
 * zeroed) pages within [LowestPfn, HighestPfn]. If BoundaryPfn is nonzero,
 * the candidate range must not cross a BoundaryPfn-aligned boundary. On
 * success the pages are claimed under the PFN lock (unlinked, referenced,
 * zeroed if necessary, marked ActiveAndValid, with Start/EndOfAllocation
 * flags on the edges) and the first PFN of the range is returned; 0 means
 * no suitable run was found.
 *
 * NOTE(review): the CacheType parameter is not used anywhere in this body —
 * confirm whether callers expect it to matter.
 */
PFN_NUMBER
NTAPI
MiFindContiguousPages(IN PFN_NUMBER LowestPfn,
                      IN PFN_NUMBER HighestPfn,
                      IN PFN_NUMBER BoundaryPfn,
                      IN PFN_NUMBER SizeInPages,
                      IN MEMORY_CACHING_TYPE CacheType)
{
    PFN_NUMBER Page, PageCount, LastPage, Length, BoundaryMask;
    ULONG i = 0;
    PMMPFN Pfn1, EndPfn;
    KIRQL OldIrql;
    PAGED_CODE ();
    ASSERT(SizeInPages != 0);

    //
    // Convert the boundary PFN into an alignment mask
    //
    BoundaryMask = ~(BoundaryPfn - 1);

    //
    // Loop all the physical memory blocks
    //
    do
    {
        //
        // Capture the base page and length of this memory block
        //
        Page = MmPhysicalMemoryBlock->Run[i].BasePage;
        PageCount = MmPhysicalMemoryBlock->Run[i].PageCount;

        //
        // Check how far this memory block will go
        //
        LastPage = Page + PageCount;

        //
        // Trim it down to only the PFNs we're actually interested in
        //
        if ((LastPage - 1) > HighestPfn) LastPage = HighestPfn + 1;
        if (Page < LowestPfn) Page = LowestPfn;

        //
        // Skip this run if it's empty or fails to contain all the pages we need
        //
        if (!(PageCount) || ((Page + SizeInPages) > LastPage)) continue;

        //
        // Now scan all the relevant PFNs in this run.
        // NOTE(review): this first scan runs WITHOUT the PFN lock; the locked
        // inner loop below re-validates the candidate range and restarts if
        // anything changed in the meantime.
        //
        Length = 0;
        for (Pfn1 = MiGetPfnEntry(Page); Page < LastPage; Page++, Pfn1++)
        {
            //
            // If this PFN is in use, ignore it
            //
            if (MiIsPfnInUse(Pfn1)) continue;

            //
            // If we haven't chosen a start PFN yet and the caller specified an
            // alignment, make sure the page matches the alignment restriction
            //
            if ((!(Length) && (BoundaryPfn)) &&
                (((Page ^ (Page + SizeInPages - 1)) & BoundaryMask)))
            {
                //
                // It does not, so bail out
                //
                continue;
            }

            //
            // Increase the number of valid pages, and check if we have enough
            //
            if (++Length == SizeInPages)
            {
                //
                // It appears we've amassed enough legitimate pages, rollback
                //
                Pfn1 -= (Length - 1);
                Page -= (Length - 1);

                //
                // Acquire the PFN lock
                //
                OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
                do
                {
                    //
                    // Things might've changed for us. Is the page still free?
                    //
                    if (MiIsPfnInUse(Pfn1)) break;

                    //
                    // So far so good. Is this the last confirmed valid page?
                    //
                    if (!--Length)
                    {
                        //
                        // Sanity check that we didn't go out of bounds
                        //
                        ASSERT(i != MmPhysicalMemoryBlock->NumberOfRuns);

                        //
                        // Loop until all PFN entries have been processed
                        // (walking BACKWARD from the last page to the first)
                        //
                        EndPfn = Pfn1 - SizeInPages + 1;
                        do
                        {
                            //
                            // This PFN is now a used page, set it up
                            //
                            MiUnlinkFreeOrZeroedPage(Pfn1);
                            Pfn1->u3.e2.ReferenceCount = 1;

                            //
                            // Check if it was already zeroed
                            //
                            if (Pfn1->u3.e1.PageLocation != ZeroedPageList)
                            {
                                //
                                // It wasn't, so zero it
                                //
                                MiZeroPage(MiGetPfnEntryIndex(Pfn1));
                            }

                            //
                            // Mark it in use
                            //
                            Pfn1->u3.e1.PageLocation = ActiveAndValid;

                            //
                            // Check if this is the last PFN, otherwise go on
                            //
                            if (Pfn1 == EndPfn) break;
                            Pfn1--;
                        } while (TRUE);

                        //
                        // Mark the first and last PFN so we can find them later
                        // (Pfn1 now points at the FIRST page of the range)
                        //
                        Pfn1->u3.e1.StartOfAllocation = 1;
                        (Pfn1 + SizeInPages - 1)->u3.e1.EndOfAllocation = 1;

                        //
                        // Now it's safe to let go of the PFN lock
                        //
                        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

                        //
                        // Quick sanity check that the last PFN is consistent
                        //
                        EndPfn = Pfn1 + SizeInPages;
                        ASSERT(EndPfn == MiGetPfnEntry(Page + 1));

                        //
                        // Compute the first page, and make sure it's consistent
                        //
                        Page -= SizeInPages - 1;
                        ASSERT(Pfn1 == MiGetPfnEntry(Page));
                        ASSERT(Page != 0);
                        return Page;
                    }

                    //
                    // Keep going. The purpose of this loop is to reconfirm that
                    // after acquiring the PFN lock these pages are still usable
                    //
                    Pfn1++;
                    Page++;
                } while (TRUE);

                //
                // If we got here, something changed while we hadn't acquired
                // the PFN lock yet, so we'll have to restart
                //
                KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
                Length = 0;
            }
        }
    } while (++i != MmPhysicalMemoryBlock->NumberOfRuns);

    //
    // And if we get here, it means no suitable physical memory runs were found
    //
    return 0;
}
326
/**
 * MiAllocatePagesForMdl
 *
 * Builds an MDL describing up to TotalBytes worth of physical pages taken
 * from [LowAddress, HighAddress]. May return FEWER pages than requested
 * (callers must check Mdl->ByteCount); returns NULL if no MDL or no pages
 * at all could be obtained. Pages are referenced, flagged as one-page
 * allocations, zeroed if not already, and marked ActiveAndValid.
 *
 * NOTE(review): SkipBytes is validated but the skip stride is never applied
 * during the scan, and CacheAttribute/MdlFlags are unused — confirm intent.
 */
PMDL
NTAPI
MiAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress,
                      IN PHYSICAL_ADDRESS HighAddress,
                      IN PHYSICAL_ADDRESS SkipBytes,
                      IN SIZE_T TotalBytes,
                      IN MI_PFN_CACHE_ATTRIBUTE CacheAttribute,
                      IN ULONG MdlFlags)
{
    PMDL Mdl;
    PFN_NUMBER PageCount, LowPage, HighPage, SkipPages, PagesFound = 0, Page;
    PPFN_NUMBER MdlPage, LastMdlPage;
    KIRQL OldIrql;
    PPHYSICAL_PAGE Pfn1;
    INT LookForZeroedPages;
    ASSERT (KeGetCurrentIrql() <= APC_LEVEL);

    //
    // Convert the low address into a PFN
    //
    LowPage = (PFN_NUMBER)(LowAddress.QuadPart >> PAGE_SHIFT);

    //
    // Convert, and normalize, the high address into a PFN
    //
    HighPage = (PFN_NUMBER)(HighAddress.QuadPart >> PAGE_SHIFT);
    if (HighPage > MmHighestPhysicalPage) HighPage = MmHighestPhysicalPage;

    //
    // Validate skipbytes and convert them into pages
    //
    if (BYTE_OFFSET(SkipBytes.LowPart)) return NULL;
    SkipPages = (PFN_NUMBER)(SkipBytes.QuadPart >> PAGE_SHIFT);

    //
    // Now compute the number of pages the MDL will cover
    //
    PageCount = (PFN_NUMBER)ADDRESS_AND_SIZE_TO_SPAN_PAGES(0, TotalBytes);
    do
    {
        //
        // Try creating an MDL for these many pages
        //
        Mdl = MmCreateMdl(NULL, NULL, PageCount << PAGE_SHIFT);
        if (Mdl) break;

        //
        // This function is not required to return the amount of pages requested
        // In fact, it can return as little as 1 page, and callers are supposed
        // to deal with this scenario. So re-attempt the allocation with less
        // pages than before, and see if it worked this time.
        //
        PageCount -= (PageCount >> 4);
    } while (PageCount);

    //
    // Wow, not even a single page was around!
    //
    if (!Mdl) return NULL;

    //
    // This is where the page array starts....
    //
    MdlPage = (PPFN_NUMBER)(Mdl + 1);

    //
    // Lock the PFN database
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    //
    // Are we looking for any pages, without discriminating?
    //
    if ((LowPage == 0) && (HighPage == MmHighestPhysicalPage))
    {
        //
        // Well then, let's go shopping
        //
        while (PagesFound < PageCount)
        {
            //
            // Do we have zeroed pages?
            //
            if (MmZeroedPageListHead.Total)
            {
                //
                // Grab a zero page
                //
                Pfn1 = MiRemoveHeadList(&MmZeroedPageListHead);
            }
            else if (MmFreePageListHead.Total)
            {
                //
                // Nope, grab an unzeroed page
                //
                Pfn1 = MiRemoveHeadList(&MmFreePageListHead);
            }
            else
            {
                //
                // This is not good... hopefully we have at least SOME pages
                //
                ASSERT(PagesFound);
                break;
            }

            //
            // Make sure it's really free
            //
            ASSERT(MiIsPfnInUse(Pfn1) == FALSE);
            ASSERT(Pfn1->u3.e2.ReferenceCount == 0);

            //
            // Allocate it and mark it
            //
            Pfn1->u3.e1.StartOfAllocation = 1;
            Pfn1->u3.e1.EndOfAllocation = 1;
            Pfn1->u3.e2.ReferenceCount = 1;

            //
            // Decrease available pages
            //
            MmAvailablePages--;

            //
            // Save it into the MDL
            //
            *MdlPage++ = MiGetPfnEntryIndex(Pfn1);
            PagesFound++;
        }
    }
    else
    {
        //
        // You want specific range of pages. We'll do this in two runs
        // (first pass takes only zeroed pages, second pass takes any free one)
        //
        for (LookForZeroedPages = 1; LookForZeroedPages >= 0; LookForZeroedPages--)
        {
            //
            // Scan the range you specified
            // NOTE(review): '<' excludes HighPage itself even though it was
            // normalized above as an inclusive bound — confirm intended.
            //
            for (Page = LowPage; Page < HighPage; Page++)
            {
                //
                // Get the PFN entry for this page
                //
                Pfn1 = MiGetPfnEntry(Page);
                ASSERT(Pfn1);

                //
                // Make sure it's free and if this is our first pass, zeroed
                //
                if (MiIsPfnInUse(Pfn1)) continue;
                if ((Pfn1->u3.e1.PageLocation == ZeroedPageList) != LookForZeroedPages) continue;

                //
                // Sanity checks
                //
                ASSERT(Pfn1->u3.e2.ReferenceCount == 0);

                //
                // Now setup the page and mark it
                //
                Pfn1->u3.e2.ReferenceCount = 1;
                Pfn1->u3.e1.StartOfAllocation = 1;
                Pfn1->u3.e1.EndOfAllocation = 1;

                //
                // Decrease available pages
                //
                MmAvailablePages--;

                //
                // Save this page into the MDL
                //
                *MdlPage++ = Page;
                if (++PagesFound == PageCount) break;
            }

            //
            // If the first pass was enough, don't keep going, otherwise, go again
            //
            if (PagesFound == PageCount) break;
        }
    }

    //
    // Now release the PFN count
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    //
    // We might've found less pages, but not more ;-)
    //
    if (PagesFound != PageCount) ASSERT(PagesFound < PageCount);
    if (!PagesFound)
    {
        //
        // If we didn't find any pages at all, fail
        //
        DPRINT1("NO MDL PAGES!\n");
        ExFreePool(Mdl);
        return NULL;
    }

    //
    // Write out how many pages we found
    //
    Mdl->ByteCount = (ULONG)(PagesFound << PAGE_SHIFT);

    //
    // Terminate the MDL array if there's certain missing pages
    //
    if (PagesFound != PageCount) *MdlPage = -1;

    //
    // Now go back and loop over all the MDL pages
    //
    MdlPage = (PPFN_NUMBER)(Mdl + 1);
    LastMdlPage = MdlPage + PagesFound;
    while (MdlPage < LastMdlPage)
    {
        //
        // Check if we've reached the end
        //
        Page = *MdlPage++;
        if (Page == (PFN_NUMBER)-1) break;

        //
        // Get the PFN entry for the page and check if we should zero it out
        //
        Pfn1 = MiGetPfnEntry(Page);
        ASSERT(Pfn1);
        if (Pfn1->u3.e1.PageLocation != ZeroedPageList) MiZeroPage(Page);
        Pfn1->u3.e1.PageLocation = ActiveAndValid;
    }

    //
    // We're done, mark the pages as locked (should we lock them, though???)
    //
    Mdl->Process = NULL;
    Mdl->MdlFlags |= MDL_PAGES_LOCKED;
    return Mdl;
}
571
572 VOID
573 NTAPI
574 MmDumpPfnDatabase(VOID)
575 {
576 ULONG i;
577 PPHYSICAL_PAGE Pfn1;
578 PCHAR State = "????", Type = "Unknown";
579 KIRQL OldIrql;
580 ULONG Totals[5] = {0}, FreePages = 0;
581
582 KeRaiseIrql(HIGH_LEVEL, &OldIrql);
583
584 //
585 // Loop the PFN database
586 //
587 for (i = 0; i <= MmHighestPhysicalPage; i++)
588 {
589 Pfn1 = MiGetPfnEntry(i);
590 if (!Pfn1) continue;
591
592 //
593 // Get the type
594 //
595 if (MiIsPfnInUse(Pfn1))
596 {
597 State = "Used";
598 }
599 else
600 {
601 State = "Free";
602 Type = "Free";
603 FreePages++;
604 break;
605 }
606
607 //
608 // Pretty-print the page
609 //
610 DbgPrint("0x%08p:\t%04s\t%20s\t(%02d) [%08p])\n",
611 i << PAGE_SHIFT,
612 State,
613 Type,
614 Pfn1->u3.e2.ReferenceCount,
615 Pfn1->RmapListHead);
616 }
617
618 DbgPrint("Nonpaged Pool: %d pages\t[%d KB]\n", Totals[MC_NPPOOL], (Totals[MC_NPPOOL] << PAGE_SHIFT) / 1024);
619 DbgPrint("Paged Pool: %d pages\t[%d KB]\n", Totals[MC_PPOOL], (Totals[MC_PPOOL] << PAGE_SHIFT) / 1024);
620 DbgPrint("File System Cache: %d pages\t[%d KB]\n", Totals[MC_CACHE], (Totals[MC_CACHE] << PAGE_SHIFT) / 1024);
621 DbgPrint("Process Working Set: %d pages\t[%d KB]\n", Totals[MC_USER], (Totals[MC_USER] << PAGE_SHIFT) / 1024);
622 DbgPrint("System: %d pages\t[%d KB]\n", Totals[MC_SYSTEM], (Totals[MC_SYSTEM] << PAGE_SHIFT) / 1024);
623 DbgPrint("Free: %d pages\t[%d KB]\n", FreePages, (FreePages << PAGE_SHIFT) / 1024);
624
625 KeLowerIrql(OldIrql);
626 }
627
/**
 * MmInitializePageList
 *
 * Builds the initial state of the legacy (ReactOS-side) PFN database from
 * the loader's memory descriptor list: free descriptor types go on the free
 * page list, bad/firmware blocks get no PFN entries at all, and everything
 * else is marked as an in-use system page. The pages that the PFN database
 * itself consumed (between MxOldFreeDescriptor's base and the advanced
 * MxFreeDescriptor base) are marked used as well. Finally the zero-page
 * thread event is initialized (signaled) and the balancer is seeded with
 * the page counts.
 */
VOID
NTAPI
MmInitializePageList(VOID)
{
    ULONG i;
    PHYSICAL_PAGE UsedPage;
    PMEMORY_ALLOCATION_DESCRIPTOR Md;
    PLIST_ENTRY NextEntry;
    ULONG NrSystemPages = 0;

    /* This is what a used page looks like */
    RtlZeroMemory(&UsedPage, sizeof(UsedPage));
    UsedPage.u3.e1.PageLocation = ActiveAndValid;
    UsedPage.u3.e2.ReferenceCount = 1;

    /* Loop the memory descriptors */
    for (NextEntry = KeLoaderBlock->MemoryDescriptorListHead.Flink;
         NextEntry != &KeLoaderBlock->MemoryDescriptorListHead;
         NextEntry = NextEntry->Flink)
    {
        /* Get the descriptor */
        Md = CONTAINING_RECORD(NextEntry,
                               MEMORY_ALLOCATION_DESCRIPTOR,
                               ListEntry);

        /* Skip bad memory */
        if ((Md->MemoryType == LoaderFirmwarePermanent) ||
            (Md->MemoryType == LoaderBBTMemory) ||
            (Md->MemoryType == LoaderSpecialMemory) ||
            (Md->MemoryType == LoaderBad))
        {
            //
            // We do not build PFN entries for this
            //
            continue;
        }
        else if ((Md->MemoryType == LoaderFree) ||
                 (Md->MemoryType == LoaderLoadedProgram) ||
                 (Md->MemoryType == LoaderFirmwareTemporary) ||
                 (Md->MemoryType == LoaderOsloaderStack))
        {
            /* Loop every page part of the block */
            for (i = 0; i < Md->PageCount; i++)
            {
                /* Mark it as a free page */
                MmPfnDatabase[0][Md->BasePage + i].u3.e1.PageLocation = FreePageList;
                MiInsertInListTail(&MmFreePageListHead,
                                   &MmPfnDatabase[0][Md->BasePage + i]);
                MmAvailablePages++;
            }
        }
        else
        {
            /* Loop every page part of the block */
            for (i = 0; i < Md->PageCount; i++)
            {
                /* Everything else is used memory */
                MmPfnDatabase[0][Md->BasePage + i] = UsedPage;
                NrSystemPages++;
            }
        }
    }

    /* Finally handle the pages describing the PFN database themselves */
    for (i = MxOldFreeDescriptor.BasePage; i < MxFreeDescriptor->BasePage; i++)
    {
        /* Mark it as used kernel memory */
        MmPfnDatabase[0][i] = UsedPage;
        NrSystemPages++;
    }

    /* Event starts signaled so the zero thread drains the free list once up */
    KeInitializeEvent(&ZeroPageThreadEvent, NotificationEvent, TRUE);
    DPRINT("Pages: %x %x\n", MmAvailablePages, NrSystemPages);
    MmInitializeBalancer(MmAvailablePages, NrSystemPages);
}
703
704 VOID
705 NTAPI
706 MmSetRmapListHeadPage(PFN_TYPE Pfn, struct _MM_RMAP_ENTRY* ListHead)
707 {
708 KIRQL oldIrql;
709
710 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
711 MiGetPfnEntry(Pfn)->RmapListHead = (LONG)ListHead;
712 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
713 }
714
715 struct _MM_RMAP_ENTRY*
716 NTAPI
717 MmGetRmapListHeadPage(PFN_TYPE Pfn)
718 {
719 KIRQL oldIrql;
720 struct _MM_RMAP_ENTRY* ListHead;
721
722 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
723 ListHead = (struct _MM_RMAP_ENTRY*)MiGetPfnEntry(Pfn)->RmapListHead;
724 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
725
726 return(ListHead);
727 }
728
729 VOID
730 NTAPI
731 MmSetSavedSwapEntryPage(PFN_TYPE Pfn, SWAPENTRY SwapEntry)
732 {
733 KIRQL oldIrql;
734
735 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
736 MiGetPfnEntry(Pfn)->u1.WsIndex = SwapEntry;
737 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
738 }
739
740 SWAPENTRY
741 NTAPI
742 MmGetSavedSwapEntryPage(PFN_TYPE Pfn)
743 {
744 SWAPENTRY SwapEntry;
745 KIRQL oldIrql;
746
747 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
748 SwapEntry = MiGetPfnEntry(Pfn)->u1.WsIndex;
749 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
750
751 return(SwapEntry);
752 }
753
754 VOID
755 NTAPI
756 MmReferencePage(PFN_TYPE Pfn)
757 {
758 PPHYSICAL_PAGE Page;
759
760 DPRINT("MmReferencePage(PysicalAddress %x)\n", Pfn << PAGE_SHIFT);
761
762 if (Pfn == 0 || Pfn > MmHighestPhysicalPage)
763 {
764 return;
765 }
766
767 Page = MiGetPfnEntry(Pfn);
768 ASSERT(Page);
769
770 Page->u3.e2.ReferenceCount++;
771 }
772
773 ULONG
774 NTAPI
775 MmGetReferenceCountPage(PFN_TYPE Pfn)
776 {
777 KIRQL oldIrql;
778 ULONG RCount;
779 PPHYSICAL_PAGE Page;
780
781 DPRINT("MmGetReferenceCountPage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
782
783 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
784 Page = MiGetPfnEntry(Pfn);
785 ASSERT(Page);
786
787 RCount = Page->u3.e2.ReferenceCount;
788
789 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
790 return(RCount);
791 }
792
793 BOOLEAN
794 NTAPI
795 MmIsPageInUse(PFN_TYPE Pfn)
796 {
797 return MiIsPfnInUse(MiGetPfnEntry(Pfn));
798 }
799
800 VOID
801 NTAPI
802 MiSetConsumer(IN PFN_TYPE Pfn,
803 IN ULONG Type)
804 {
805 MiGetPfnEntry(Pfn)->u3.e1.PageLocation = ActiveAndValid;
806 }
807
808 VOID
809 NTAPI
810 MmDereferencePage(PFN_TYPE Pfn)
811 {
812 PPHYSICAL_PAGE Page;
813
814 DPRINT("MmDereferencePage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
815
816 Page = MiGetPfnEntry(Pfn);
817 ASSERT(Page);
818
819 Page->u3.e2.ReferenceCount--;
820 if (Page->u3.e2.ReferenceCount == 0)
821 {
822 MmAvailablePages++;
823 Page->u3.e1.PageLocation = FreePageList;
824 MiInsertInListTail(&MmFreePageListHead, Page);
825 if (MmFreePageListHead.Total > 8 && 0 == KeReadStateEvent(&ZeroPageThreadEvent))
826 {
827 KeSetEvent(&ZeroPageThreadEvent, IO_NO_INCREMENT, FALSE);
828 }
829 }
830 }
831
832 PFN_TYPE
833 NTAPI
834 MmAllocPage(ULONG Type)
835 {
836 PFN_TYPE PfnOffset;
837 PPHYSICAL_PAGE PageDescriptor;
838 BOOLEAN NeedClear = FALSE;
839
840 DPRINT("MmAllocPage()\n");
841
842 if (MmZeroedPageListHead.Total == 0)
843 {
844 if (MmFreePageListHead.Total == 0)
845 {
846 /* Check if this allocation is for the PFN DB itself */
847 if (MmNumberOfPhysicalPages == 0)
848 {
849 ASSERT(FALSE);
850 }
851
852 DPRINT1("MmAllocPage(): Out of memory\n");
853 return 0;
854 }
855 PageDescriptor = MiRemoveHeadList(&MmFreePageListHead);
856
857 NeedClear = TRUE;
858 }
859 else
860 {
861 PageDescriptor = MiRemoveHeadList(&MmZeroedPageListHead);
862 }
863
864 PageDescriptor->u3.e2.ReferenceCount = 1;
865
866 MmAvailablePages--;
867
868 PfnOffset = PageDescriptor - MmPfnDatabase[0];
869 if ((NeedClear) && (Type != MC_SYSTEM))
870 {
871 MiZeroPage(PfnOffset);
872 }
873
874 PageDescriptor->u3.e1.PageLocation = ActiveAndValid;
875 return PfnOffset;
876 }
877
878 NTSTATUS
879 NTAPI
880 MiZeroPage(PFN_TYPE Page)
881 {
882 KIRQL Irql;
883 PVOID TempAddress;
884
885 Irql = KeRaiseIrqlToDpcLevel();
886 TempAddress = MiMapPageToZeroInHyperSpace(Page);
887 if (TempAddress == NULL)
888 {
889 return(STATUS_NO_MEMORY);
890 }
891 memset(TempAddress, 0, PAGE_SIZE);
892 MiUnmapPagesInZeroSpace(TempAddress, 1);
893 KeLowerIrql(Irql);
894 return(STATUS_SUCCESS);
895 }
896
/**
 * MmZeroPageThreadMain
 *
 * Worker loop for the zero-page thread: waits on ZeroPageThreadEvent, then
 * drains the free page list, zeroing each page and moving it to the zeroed
 * list (pages that fail to zero go back on the free list). Runs at base
 * priority 0 until ZeroPageThreadShouldTerminate is set.
 *
 * NOTE(review): the KeWaitForSingleObject status is never checked, and the
 * final 'return' after the while(1) loop is unreachable.
 */
NTSTATUS
NTAPI
MmZeroPageThreadMain(PVOID Ignored)
{
    NTSTATUS Status;
    KIRQL oldIrql;
    PPHYSICAL_PAGE PageDescriptor;
    PFN_TYPE Pfn;
    ULONG Count;

    /* Free initial kernel memory */
    //MiFreeInitMemory();

    /* Set our priority to 0 */
    KeGetCurrentThread()->BasePriority = 0;
    KeSetPriorityThread(KeGetCurrentThread(), 0);

    while(1)
    {
        Status = KeWaitForSingleObject(&ZeroPageThreadEvent,
                                       0,
                                       KernelMode,
                                       FALSE,
                                       NULL);

        if (ZeroPageThreadShouldTerminate)
        {
            DPRINT1("ZeroPageThread: Terminating\n");
            return STATUS_SUCCESS;
        }
        Count = 0;
        oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
        while (MmFreePageListHead.Total)
        {
            PageDescriptor = MiRemoveHeadList(&MmFreePageListHead);
            /* We set the page to used, because MmCreateVirtualMapping failed with unused pages */
            /* Drop the PFN lock while zeroing; re-acquire before touching the lists */
            KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
            Pfn = PageDescriptor - MmPfnDatabase[0];
            Status = MiZeroPage(Pfn);

            oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
            if (NT_SUCCESS(Status))
            {
                MiInsertZeroListAtBack(Pfn);
                Count++;
            }
            else
            {
                /* Zeroing failed: put the page back on the free list */
                MiInsertInListTail(&MmFreePageListHead, PageDescriptor);
                PageDescriptor->u3.e1.PageLocation = FreePageList;
            }

        }
        DPRINT("Zeroed %d pages.\n", Count);
        /* NOTE(review): event is reset while the PFN lock is still held */
        KeResetEvent(&ZeroPageThreadEvent);
        KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
    }

    return STATUS_SUCCESS;
}
957
958 /* EOF */