/*
 * Merge from amd64 branch:
 * reactos.git / reactos / ntoskrnl / mm / freelist.c
 */
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/mm/freelist.c
5 * PURPOSE: Handle the list of free physical pages
6 *
7 * PROGRAMMERS: David Welch (welch@cwcom.net)
8 * Robert Bergkvist
9 */
10
11 /* INCLUDES ****************************************************************/
12
13 #include <ntoskrnl.h>
14 #define NDEBUG
15 #include <debug.h>
16
17 #if defined (ALLOC_PRAGMA)
18 #pragma alloc_text(INIT, MmInitializePageList)
19 #endif
20
21 #define MODULE_INVOLVED_IN_ARM3
22 #include "ARM3/miarm.h"
23
/* GLOBALS ****************************************************************/

//
//
// ReactOS to NT Physical Page Descriptor Entry Legacy Mapping Definitions
//
//        REACTOS                 NT
//
#define RmapListHead         AweReferenceCount
#define PHYSICAL_PAGE        MMPFN
#define PPHYSICAL_PAGE       PMMPFN

/* The first array contains ReactOS PFNs, the second contains ARM3 PFNs */
PPHYSICAL_PAGE MmPfnDatabase[2];

/* Pages currently on the free or zeroed lists, i.e. available for use */
ULONG MmAvailablePages;
ULONG MmResidentAvailablePages;

/* System-wide commit accounting (all counted in pages) */
SIZE_T MmTotalCommitLimit;
SIZE_T MmTotalCommittedPages;
SIZE_T MmSharedCommit;
SIZE_T MmDriverCommit;
SIZE_T MmProcessCommit;
SIZE_T MmPagedPoolCommit;
SIZE_T MmPeakCommitment;
/* NOTE(review): lowercase 't' looks like a typo for MmTotalCommitLimitMaximum,
   but the symbol may be referenced elsewhere -- confirm before renaming */
SIZE_T MmtotalCommitLimitMaximum;

/* Signaled to wake the zero-page thread when free pages accumulate */
static KEVENT ZeroPageThreadEvent;
static BOOLEAN ZeroPageThreadShouldTerminate = FALSE;
/* One bit per PFN; a set bit marks a page handed out to user-mode */
static RTL_BITMAP MiUserPfnBitMap;
55 /* FUNCTIONS *************************************************************/
56
57 VOID
58 NTAPI
59 MiInitializeUserPfnBitmap(VOID)
60 {
61 PVOID Bitmap;
62
63 /* Allocate enough buffer for the PFN bitmap and align it on 32-bits */
64 Bitmap = ExAllocatePoolWithTag(NonPagedPool,
65 (((MmHighestPhysicalPage + 1) + 31) / 32) * 4,
66 ' mM');
67 ASSERT(Bitmap);
68
69 /* Initialize it and clear all the bits to begin with */
70 RtlInitializeBitMap(&MiUserPfnBitMap,
71 Bitmap,
72 MmHighestPhysicalPage + 1);
73 RtlClearAllBits(&MiUserPfnBitMap);
74 }
75
76 PFN_TYPE
77 NTAPI
78 MmGetLRUFirstUserPage(VOID)
79 {
80 ULONG Position;
81 KIRQL OldIrql;
82
83 /* Find the first user page */
84 OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
85 Position = RtlFindSetBits(&MiUserPfnBitMap, 1, 0);
86 KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
87 if (Position == 0xFFFFFFFF) return 0;
88
89 /* Return it */
90 return Position;
91 }
92
93 VOID
94 NTAPI
95 MmInsertLRULastUserPage(PFN_TYPE Pfn)
96 {
97 KIRQL OldIrql;
98
99 /* Set the page as a user page */
100 OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
101 RtlSetBit(&MiUserPfnBitMap, Pfn);
102 KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
103 }
104
105 PFN_TYPE
106 NTAPI
107 MmGetLRUNextUserPage(PFN_TYPE PreviousPfn)
108 {
109 ULONG Position;
110 KIRQL OldIrql;
111
112 /* Find the next user page */
113 OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
114 Position = RtlFindSetBits(&MiUserPfnBitMap, 1, PreviousPfn + 1);
115 KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
116 if (Position == 0xFFFFFFFF) return 0;
117
118 /* Return it */
119 return Position;
120 }
121
VOID
NTAPI
MmRemoveLRUUserPage(PFN_TYPE Page)
{
    /* Unset the page as a user page */
    /* NOTE(review): unlike MmInsertLRULastUserPage, no PFN lock is acquired
       here; presumably every caller already holds it -- confirm at the call
       sites before relying on this */
    RtlClearBit(&MiUserPfnBitMap, Page);
}
129
130 BOOLEAN
131 NTAPI
132 MiIsPfnInUse(IN PMMPFN Pfn1)
133 {
134 return ((Pfn1->u3.e1.PageLocation != FreePageList) &&
135 (Pfn1->u3.e1.PageLocation != ZeroedPageList));
136 }
137
/*
 * Scans the system's physical memory runs for SizeInPages contiguous free
 * (or zeroed) pages within [LowestPfn, HighestPfn], optionally constrained
 * so the range does not cross a BoundaryPfn-aligned boundary. On success the
 * pages are unlinked from their lists, zeroed if not already zeroed, marked
 * ActiveAndValid with reference count 1, and the first/last entries are
 * tagged StartOfAllocation/EndOfAllocation. Returns the first PFN of the
 * range, or 0 if no suitable run was found.
 * NOTE(review): CacheType is accepted but never used in this body -- confirm
 * whether callers expect it to matter here.
 */
PFN_NUMBER
NTAPI
MiFindContiguousPages(IN PFN_NUMBER LowestPfn,
                      IN PFN_NUMBER HighestPfn,
                      IN PFN_NUMBER BoundaryPfn,
                      IN PFN_NUMBER SizeInPages,
                      IN MEMORY_CACHING_TYPE CacheType)
{
    PFN_NUMBER Page, PageCount, LastPage, Length, BoundaryMask;
    ULONG i = 0;
    PMMPFN Pfn1, EndPfn;
    KIRQL OldIrql;
    PAGED_CODE ();
    ASSERT(SizeInPages != 0);

    //
    // Convert the boundary PFN into an alignment mask
    // (only meaningful when BoundaryPfn != 0; the mask is guarded below)
    //
    BoundaryMask = ~(BoundaryPfn - 1);

    //
    // Loop all the physical memory blocks
    //
    do
    {
        //
        // Capture the base page and length of this memory block
        //
        Page = MmPhysicalMemoryBlock->Run[i].BasePage;
        PageCount = MmPhysicalMemoryBlock->Run[i].PageCount;

        //
        // Check how far this memory block will go
        //
        LastPage = Page + PageCount;

        //
        // Trim it down to only the PFNs we're actually interested in
        //
        if ((LastPage - 1) > HighestPfn) LastPage = HighestPfn + 1;
        if (Page < LowestPfn) Page = LowestPfn;

        //
        // Skip this run if it's empty or fails to contain all the pages we need
        // (continue jumps to the loop condition, so i is still advanced)
        //
        if (!(PageCount) || ((Page + SizeInPages) > LastPage)) continue;

        //
        // Now scan all the relevant PFNs in this run
        //
        Length = 0;
        for (Pfn1 = MiGetPfnEntry(Page); Page < LastPage; Page++, Pfn1++)
        {
            //
            // If this PFN is in use, ignore it
            //
            if (MiIsPfnInUse(Pfn1)) continue;

            //
            // If we haven't chosen a start PFN yet and the caller specified an
            // alignment, make sure the page matches the alignment restriction
            //
            if ((!(Length) && (BoundaryPfn)) &&
                (((Page ^ (Page + SizeInPages - 1)) & BoundaryMask)))
            {
                //
                // It does not, so bail out
                //
                continue;
            }

            //
            // Increase the number of valid pages, and check if we have enough
            //
            if (++Length == SizeInPages)
            {
                //
                // It appears we've amassed enough legitimate pages, rollback
                // to the first page of the candidate range
                //
                Pfn1 -= (Length - 1);
                Page -= (Length - 1);

                //
                // Acquire the PFN lock
                //
                OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
                do
                {
                    //
                    // Things might've changed for us. Is the page still free?
                    //
                    if (MiIsPfnInUse(Pfn1)) break;

                    //
                    // So far so good. Is this the last confirmed valid page?
                    //
                    if (!--Length)
                    {
                        //
                        // Sanity check that we didn't go out of bounds
                        //
                        ASSERT(i != MmPhysicalMemoryBlock->NumberOfRuns);

                        //
                        // Loop until all PFN entries have been processed
                        // (walking backwards from the last page of the range)
                        //
                        EndPfn = Pfn1 - SizeInPages + 1;
                        do
                        {
                            //
                            // This PFN is now a used page, set it up
                            //
                            MiUnlinkFreeOrZeroedPage(Pfn1);
                            Pfn1->u3.e2.ReferenceCount = 1;

                            //
                            // Check if it was already zeroed
                            //
                            if (Pfn1->u3.e1.PageLocation != ZeroedPageList)
                            {
                                //
                                // It wasn't, so zero it
                                //
                                MiZeroPage(MiGetPfnEntryIndex(Pfn1));
                            }

                            //
                            // Mark it in use
                            //
                            Pfn1->u3.e1.PageLocation = ActiveAndValid;

                            //
                            // Check if this is the last PFN, otherwise go on
                            //
                            if (Pfn1 == EndPfn) break;
                            Pfn1--;
                        } while (TRUE);

                        //
                        // Mark the first and last PFN so we can find them later
                        //
                        Pfn1->u3.e1.StartOfAllocation = 1;
                        (Pfn1 + SizeInPages - 1)->u3.e1.EndOfAllocation = 1;

                        //
                        // Now it's safe to let go of the PFN lock
                        //
                        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

                        //
                        // Quick sanity check that the last PFN is consistent
                        //
                        EndPfn = Pfn1 + SizeInPages;
                        ASSERT(EndPfn == MiGetPfnEntry(Page + 1));

                        //
                        // Compute the first page, and make sure it's consistent
                        //
                        Page -= SizeInPages - 1;
                        ASSERT(Pfn1 == MiGetPfnEntry(Page));
                        ASSERT(Page != 0);
                        return Page;
                    }

                    //
                    // Keep going. The purpose of this loop is to reconfirm that
                    // after acquiring the PFN lock these pages are still usable
                    //
                    Pfn1++;
                    Page++;
                } while (TRUE);

                //
                // If we got here, something changed while we hadn't acquired
                // the PFN lock yet, so we'll have to restart
                //
                KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
                Length = 0;
            }
        }
    } while (++i != MmPhysicalMemoryBlock->NumberOfRuns);

    //
    // And if we get here, it means no suitable physical memory runs were found
    //
    return 0;
}
325
/*
 * Allocates physical pages in the range [LowAddress, HighAddress] and wraps
 * them in an MDL. May return fewer pages than requested (down to 1); callers
 * must check Mdl->ByteCount. Returns NULL if no MDL or no pages could be
 * obtained. Pages not already zeroed are zeroed before return.
 * NOTE(review): SkipBytes is validated and converted to SkipPages but never
 * used afterwards, and CacheAttribute/MdlFlags are ignored in this body --
 * confirm whether that is intentional at this stage of the merge.
 */
PMDL
NTAPI
MiAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress,
                      IN PHYSICAL_ADDRESS HighAddress,
                      IN PHYSICAL_ADDRESS SkipBytes,
                      IN SIZE_T TotalBytes,
                      IN MI_PFN_CACHE_ATTRIBUTE CacheAttribute,
                      IN ULONG MdlFlags)
{
    PMDL Mdl;
    PFN_NUMBER PageCount, LowPage, HighPage, SkipPages, PagesFound = 0, Page;
    PPFN_NUMBER MdlPage, LastMdlPage;
    KIRQL OldIrql;
    PPHYSICAL_PAGE Pfn1;
    INT LookForZeroedPages;
    ASSERT (KeGetCurrentIrql() <= APC_LEVEL);

    //
    // Convert the low address into a PFN
    //
    LowPage = (PFN_NUMBER)(LowAddress.QuadPart >> PAGE_SHIFT);

    //
    // Convert, and normalize, the high address into a PFN
    //
    HighPage = (PFN_NUMBER)(HighAddress.QuadPart >> PAGE_SHIFT);
    if (HighPage > MmHighestPhysicalPage) HighPage = MmHighestPhysicalPage;

    //
    // Validate skipbytes and convert them into pages
    //
    if (BYTE_OFFSET(SkipBytes.LowPart)) return NULL;
    SkipPages = (PFN_NUMBER)(SkipBytes.QuadPart >> PAGE_SHIFT);

    //
    // Now compute the number of pages the MDL will cover
    //
    PageCount = (PFN_NUMBER)ADDRESS_AND_SIZE_TO_SPAN_PAGES(0, TotalBytes);
    do
    {
        //
        // Try creating an MDL for these many pages
        //
        Mdl = MmCreateMdl(NULL, NULL, PageCount << PAGE_SHIFT);
        if (Mdl) break;

        //
        // This function is not required to return the amount of pages requested
        // In fact, it can return as little as 1 page, and callers are supposed
        // to deal with this scenario. So re-attempt the allocation with less
        // pages than before, and see if it worked this time.
        //
        PageCount -= (PageCount >> 4);
    } while (PageCount);

    //
    // Wow, not even a single page was around!
    //
    if (!Mdl) return NULL;

    //
    // This is where the page array starts....
    //
    MdlPage = (PPFN_NUMBER)(Mdl + 1);

    //
    // Lock the PFN database
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    //
    // Are we looking for any pages, without discriminating?
    //
    if ((LowPage == 0) && (HighPage == MmHighestPhysicalPage))
    {
        //
        // Well then, let's go shopping
        //
        while (PagesFound < PageCount)
        {
            //
            // Do we have zeroed pages?
            //
            if (MmZeroedPageListHead.Total)
            {
                //
                // Grab a zero page
                //
                Pfn1 = MiRemoveHeadList(&MmZeroedPageListHead);
            }
            else if (MmFreePageListHead.Total)
            {
                //
                // Nope, grab an unzeroed page
                //
                Pfn1 = MiRemoveHeadList(&MmFreePageListHead);
            }
            else
            {
                //
                // This is not good... hopefully we have at least SOME pages
                //
                ASSERT(PagesFound);
                break;
            }

            //
            // Make sure it's really free
            //
            ASSERT(MiIsPfnInUse(Pfn1) == FALSE);
            ASSERT(Pfn1->u3.e2.ReferenceCount == 0);

            //
            // Allocate it and mark it
            //
            Pfn1->u3.e1.StartOfAllocation = 1;
            Pfn1->u3.e1.EndOfAllocation = 1;
            Pfn1->u3.e2.ReferenceCount = 1;

            //
            // Decrease available pages
            //
            MmAvailablePages--;

            //
            // Save it into the MDL
            //
            *MdlPage++ = MiGetPfnEntryIndex(Pfn1);
            PagesFound++;
        }
    }
    else
    {
        //
        // You want specific range of pages. We'll do this in two runs:
        // first pass takes only zeroed pages, second pass takes free ones
        //
        for (LookForZeroedPages = 1; LookForZeroedPages >= 0; LookForZeroedPages--)
        {
            //
            // Scan the range you specified
            // NOTE(review): the loop bound is exclusive (Page < HighPage), so
            // HighPage itself is never considered -- confirm whether the range
            // is meant to be inclusive
            //
            for (Page = LowPage; Page < HighPage; Page++)
            {
                //
                // Get the PFN entry for this page
                //
                Pfn1 = MiGetPfnEntry(Page);
                ASSERT(Pfn1);

                //
                // Make sure it's free and if this is our first pass, zeroed
                //
                if (MiIsPfnInUse(Pfn1)) continue;
                if ((Pfn1->u3.e1.PageLocation == ZeroedPageList) != LookForZeroedPages) continue;

                //
                // Sanity checks
                //
                ASSERT(Pfn1->u3.e2.ReferenceCount == 0);

                //
                // Now setup the page and mark it
                //
                Pfn1->u3.e2.ReferenceCount = 1;
                Pfn1->u3.e1.StartOfAllocation = 1;
                Pfn1->u3.e1.EndOfAllocation = 1;

                //
                // Decrease available pages
                //
                MmAvailablePages--;

                //
                // Save this page into the MDL
                //
                *MdlPage++ = Page;
                if (++PagesFound == PageCount) break;
            }

            //
            // If the first pass was enough, don't keep going, otherwise, go again
            //
            if (PagesFound == PageCount) break;
        }
    }

    //
    // Now release the PFN count
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    //
    // We might've found less pages, but not more ;-)
    //
    if (PagesFound != PageCount) ASSERT(PagesFound < PageCount);
    if (!PagesFound)
    {
        //
        // If we didn't find any pages at all, fail
        //
        DPRINT1("NO MDL PAGES!\n");
        ExFreePool(Mdl);
        return NULL;
    }

    //
    // Write out how many pages we found
    //
    Mdl->ByteCount = (ULONG)(PagesFound << PAGE_SHIFT);

    //
    // Terminate the MDL array if there's certain missing pages
    //
    if (PagesFound != PageCount) *MdlPage = -1;

    //
    // Now go back and loop over all the MDL pages
    //
    MdlPage = (PPFN_NUMBER)(Mdl + 1);
    LastMdlPage = MdlPage + PagesFound;
    while (MdlPage < LastMdlPage)
    {
        //
        // Check if we've reached the end
        //
        Page = *MdlPage++;
        if (Page == (PFN_NUMBER)-1) break;

        //
        // Get the PFN entry for the page and check if we should zero it out
        //
        Pfn1 = MiGetPfnEntry(Page);
        ASSERT(Pfn1);
        if (Pfn1->u3.e1.PageLocation != ZeroedPageList) MiZeroPage(Page);
        Pfn1->u3.e1.PageLocation = ActiveAndValid;
    }

    //
    // We're done, mark the pages as locked (should we lock them, though???)
    //
    Mdl->Process = NULL;
    Mdl->MdlFlags |= MDL_PAGES_LOCKED;
    return Mdl;
}
570
571 VOID
572 NTAPI
573 MmDumpPfnDatabase(VOID)
574 {
575 ULONG i;
576 PPHYSICAL_PAGE Pfn1;
577 PCHAR State = "????", Type = "Unknown";
578 KIRQL OldIrql;
579 ULONG Totals[5] = {0}, FreePages = 0;
580
581 KeRaiseIrql(HIGH_LEVEL, &OldIrql);
582
583 //
584 // Loop the PFN database
585 //
586 for (i = 0; i <= MmHighestPhysicalPage; i++)
587 {
588 Pfn1 = MiGetPfnEntry(i);
589 if (!Pfn1) continue;
590
591 //
592 // Get the type
593 //
594 if (MiIsPfnInUse(Pfn1))
595 {
596 State = "Used";
597 }
598 else
599 {
600 State = "Free";
601 Type = "Free";
602 FreePages++;
603 break;
604 }
605
606 //
607 // Pretty-print the page
608 //
609 DbgPrint("0x%08p:\t%04s\t%20s\t(%02d) [%08p])\n",
610 i << PAGE_SHIFT,
611 State,
612 Type,
613 Pfn1->u3.e2.ReferenceCount,
614 Pfn1->RmapListHead);
615 }
616
617 DbgPrint("Nonpaged Pool: %d pages\t[%d KB]\n", Totals[MC_NPPOOL], (Totals[MC_NPPOOL] << PAGE_SHIFT) / 1024);
618 DbgPrint("Paged Pool: %d pages\t[%d KB]\n", Totals[MC_PPOOL], (Totals[MC_PPOOL] << PAGE_SHIFT) / 1024);
619 DbgPrint("File System Cache: %d pages\t[%d KB]\n", Totals[MC_CACHE], (Totals[MC_CACHE] << PAGE_SHIFT) / 1024);
620 DbgPrint("Process Working Set: %d pages\t[%d KB]\n", Totals[MC_USER], (Totals[MC_USER] << PAGE_SHIFT) / 1024);
621 DbgPrint("System: %d pages\t[%d KB]\n", Totals[MC_SYSTEM], (Totals[MC_SYSTEM] << PAGE_SHIFT) / 1024);
622 DbgPrint("Free: %d pages\t[%d KB]\n", FreePages, (FreePages << PAGE_SHIFT) / 1024);
623
624 KeLowerIrql(OldIrql);
625 }
626
/*
 * INIT-time routine: builds the initial state of the ReactOS PFN database
 * from the loader's memory descriptor list. Reclaimable descriptor types go
 * onto MmFreePageListHead; everything else is stamped as an in-use system
 * page. The pages backing the PFN database itself are then marked used too.
 */
VOID
NTAPI
MmInitializePageList(VOID)
{
    ULONG i;
    PHYSICAL_PAGE UsedPage;
    PMEMORY_ALLOCATION_DESCRIPTOR Md;
    PLIST_ENTRY NextEntry;
    ULONG NrSystemPages = 0;

    /* This is what a used page looks like */
    RtlZeroMemory(&UsedPage, sizeof(UsedPage));
    UsedPage.u3.e1.PageLocation = ActiveAndValid;
    UsedPage.u3.e2.ReferenceCount = 1;

    /* Loop the memory descriptors */
    for (NextEntry = KeLoaderBlock->MemoryDescriptorListHead.Flink;
         NextEntry != &KeLoaderBlock->MemoryDescriptorListHead;
         NextEntry = NextEntry->Flink)
    {
        /* Get the descriptor */
        Md = CONTAINING_RECORD(NextEntry,
                               MEMORY_ALLOCATION_DESCRIPTOR,
                               ListEntry);

        /* Skip bad memory */
        if ((Md->MemoryType == LoaderFirmwarePermanent) ||
            (Md->MemoryType == LoaderBBTMemory) ||
            (Md->MemoryType == LoaderSpecialMemory) ||
            (Md->MemoryType == LoaderBad))
        {
            //
            // We do not build PFN entries for this
            //
            continue;
        }
        else if ((Md->MemoryType == LoaderFree) ||
                 (Md->MemoryType == LoaderLoadedProgram) ||
                 (Md->MemoryType == LoaderFirmwareTemporary) ||
                 (Md->MemoryType == LoaderOsloaderStack))
        {
            /* Loop every page part of the block */
            for (i = 0; i < Md->PageCount; i++)
            {
                /* Mark it as a free page */
                MmPfnDatabase[0][Md->BasePage + i].u3.e1.PageLocation = FreePageList;
                MiInsertInListTail(&MmFreePageListHead,
                                   &MmPfnDatabase[0][Md->BasePage + i]);
                MmAvailablePages++;
            }
        }
        else
        {
            /* Loop every page part of the block */
            for (i = 0; i < Md->PageCount; i++)
            {
                /* Everything else is used memory */
                MmPfnDatabase[0][Md->BasePage + i] = UsedPage;
                NrSystemPages++;
            }
        }
    }

    /* Finally handle the pages describing the PFN database themselves */
    for (i = MxOldFreeDescriptor.BasePage; i < MxFreeDescriptor->BasePage; i++)
    {
        /* Mark it as used kernel memory */
        MmPfnDatabase[0][i] = UsedPage;
        NrSystemPages++;
    }

    /* The zero-page event starts signaled so the zero thread runs once */
    KeInitializeEvent(&ZeroPageThreadEvent, NotificationEvent, TRUE);
    DPRINT("Pages: %x %x\n", MmAvailablePages, NrSystemPages);
    MmInitializeBalancer(MmAvailablePages, NrSystemPages);
}
702
VOID
NTAPI
MmSetRmapListHeadPage(PFN_TYPE Pfn, struct _MM_RMAP_ENTRY* ListHead)
{
    KIRQL oldIrql;

    /* Stash the rmap list head in the PFN entry, under the PFN lock */
    oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    /* NOTE(review): RmapListHead maps to the LONG-typed AweReferenceCount
       field (see the legacy #define above), so this cast truncates the
       pointer on 64-bit targets -- confirm for the amd64 branch */
    MiGetPfnEntry(Pfn)->RmapListHead = (LONG)ListHead;
    KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
}
713
struct _MM_RMAP_ENTRY*
NTAPI
MmGetRmapListHeadPage(PFN_TYPE Pfn)
{
    KIRQL oldIrql;
    struct _MM_RMAP_ENTRY* ListHead;

    /* Read the rmap list head back out of the PFN entry, under the PFN lock */
    oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    /* NOTE(review): the value was stored through a LONG field (see
       MmSetRmapListHeadPage), so on 64-bit targets this round-trip loses the
       upper pointer bits -- confirm for the amd64 branch */
    ListHead = (struct _MM_RMAP_ENTRY*)MiGetPfnEntry(Pfn)->RmapListHead;
    KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);

    return(ListHead);
}
727
728 VOID
729 NTAPI
730 MmSetSavedSwapEntryPage(PFN_TYPE Pfn, SWAPENTRY SwapEntry)
731 {
732 KIRQL oldIrql;
733
734 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
735 MiGetPfnEntry(Pfn)->u1.WsIndex = SwapEntry;
736 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
737 }
738
739 SWAPENTRY
740 NTAPI
741 MmGetSavedSwapEntryPage(PFN_TYPE Pfn)
742 {
743 SWAPENTRY SwapEntry;
744 KIRQL oldIrql;
745
746 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
747 SwapEntry = MiGetPfnEntry(Pfn)->u1.WsIndex;
748 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
749
750 return(SwapEntry);
751 }
752
VOID
NTAPI
MmReferencePage(PFN_TYPE Pfn)
{
    PPHYSICAL_PAGE Page;

    DPRINT("MmReferencePage(PysicalAddress %x)\n", Pfn << PAGE_SHIFT);

    /* Silently ignore PFN 0 and out-of-range PFNs */
    if (Pfn == 0 || Pfn > MmHighestPhysicalPage)
    {
        return;
    }

    Page = MiGetPfnEntry(Pfn);
    ASSERT(Page);

    /* NOTE(review): the increment is done without the PFN lock, while
       MmGetReferenceCountPage takes it; presumably callers already hold the
       lock here -- confirm at the call sites */
    Page->u3.e2.ReferenceCount++;
}
771
772 ULONG
773 NTAPI
774 MmGetReferenceCountPage(PFN_TYPE Pfn)
775 {
776 KIRQL oldIrql;
777 ULONG RCount;
778 PPHYSICAL_PAGE Page;
779
780 DPRINT("MmGetReferenceCountPage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
781
782 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
783 Page = MiGetPfnEntry(Pfn);
784 ASSERT(Page);
785
786 RCount = Page->u3.e2.ReferenceCount;
787
788 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
789 return(RCount);
790 }
791
792 BOOLEAN
793 NTAPI
794 MmIsPageInUse(PFN_TYPE Pfn)
795 {
796 return MiIsPfnInUse(MiGetPfnEntry(Pfn));
797 }
798
VOID
NTAPI
MiSetConsumer(IN PFN_TYPE Pfn,
              IN ULONG Type)
{
    /* Mark the page active. NOTE(review): the consumer Type parameter is
       accepted but ignored in this body -- confirm whether per-consumer
       accounting was intended here */
    MiGetPfnEntry(Pfn)->u3.e1.PageLocation = ActiveAndValid;
}
806
VOID
NTAPI
MmDereferencePage(PFN_TYPE Pfn)
{
    PPHYSICAL_PAGE Page;

    DPRINT("MmDereferencePage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);

    Page = MiGetPfnEntry(Pfn);
    ASSERT(Page);

    /* NOTE(review): decrement and list manipulation happen without taking
       the PFN lock here; presumably callers hold it -- confirm at the call
       sites. There is also no underflow check for an already-zero count. */
    Page->u3.e2.ReferenceCount--;
    if (Page->u3.e2.ReferenceCount == 0)
    {
        /* Last reference gone: return the page to the free list */
        MmAvailablePages++;
        Page->u3.e1.PageLocation = FreePageList;
        MiInsertInListTail(&MmFreePageListHead, Page);
        /* Wake the zero-page thread once enough free pages have piled up */
        if (MmFreePageListHead.Total > 8 && 0 == KeReadStateEvent(&ZeroPageThreadEvent))
        {
            KeSetEvent(&ZeroPageThreadEvent, IO_NO_INCREMENT, FALSE);
        }
    }
}
830
/*
 * Allocates one physical page for consumer Type: prefers the zeroed list,
 * falls back to the free list (zeroing the page unless Type is MC_SYSTEM).
 * Returns the PFN, or 0 when out of memory.
 * NOTE(review): no PFN lock is acquired in this body; presumably the caller
 * holds it -- confirm at the call sites.
 */
PFN_TYPE
NTAPI
MmAllocPage(ULONG Type)
{
    PFN_TYPE PfnOffset;
    PPHYSICAL_PAGE PageDescriptor;
    BOOLEAN NeedClear = FALSE;

    DPRINT("MmAllocPage()\n");

    if (MmZeroedPageListHead.Total == 0)
    {
        if (MmFreePageListHead.Total == 0)
        {
            /* Check if this allocation is for the PFN DB itself */
            if (MmNumberOfPhysicalPages == 0)
            {
                ASSERT(FALSE);
            }

            DPRINT1("MmAllocPage(): Out of memory\n");
            return 0;
        }
        PageDescriptor = MiRemoveHeadList(&MmFreePageListHead);

        /* The page came off the unzeroed list, so it must be cleared below */
        NeedClear = TRUE;
    }
    else
    {
        PageDescriptor = MiRemoveHeadList(&MmZeroedPageListHead);
    }

    PageDescriptor->u3.e2.ReferenceCount = 1;

    MmAvailablePages--;

    /* The PFN is the descriptor's index within the ReactOS PFN array */
    PfnOffset = PageDescriptor - MmPfnDatabase[0];
    if ((NeedClear) && (Type != MC_SYSTEM))
    {
        MiZeroPage(PfnOffset);
    }

    PageDescriptor->u3.e1.PageLocation = ActiveAndValid;
    return PfnOffset;
}
876
877 NTSTATUS
878 NTAPI
879 MiZeroPage(PFN_TYPE Page)
880 {
881 KIRQL Irql;
882 PVOID TempAddress;
883
884 Irql = KeRaiseIrqlToDpcLevel();
885 TempAddress = MiMapPageToZeroInHyperSpace(Page);
886 if (TempAddress == NULL)
887 {
888 return(STATUS_NO_MEMORY);
889 }
890 memset(TempAddress, 0, PAGE_SIZE);
891 MiUnmapPagesInZeroSpace(TempAddress, 1);
892 KeLowerIrql(Irql);
893 return(STATUS_SUCCESS);
894 }
895
/*
 * Zero-page thread entry point: waits on ZeroPageThreadEvent, then drains
 * the free list, zeroing each page and moving it to the zeroed list. Pages
 * that fail to zero are returned to the free list. Runs until
 * ZeroPageThreadShouldTerminate is set.
 */
NTSTATUS
NTAPI
MmZeroPageThreadMain(PVOID Ignored)
{
    NTSTATUS Status;
    KIRQL oldIrql;
    PPHYSICAL_PAGE PageDescriptor;
    PFN_TYPE Pfn;
    ULONG Count;

    /* Free initial kernel memory */
    //MiFreeInitMemory();

    /* Set our priority to 0 */
    KeGetCurrentThread()->BasePriority = 0;
    KeSetPriorityThread(KeGetCurrentThread(), 0);

    while(1)
    {
        /* NOTE(review): the wait status is never checked -- presumably the
           wait cannot fail for this event; confirm */
        Status = KeWaitForSingleObject(&ZeroPageThreadEvent,
                                       0,
                                       KernelMode,
                                       FALSE,
                                       NULL);

        if (ZeroPageThreadShouldTerminate)
        {
            DPRINT1("ZeroPageThread: Terminating\n");
            return STATUS_SUCCESS;
        }
        Count = 0;
        oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
        while (MmFreePageListHead.Total)
        {
            PageDescriptor = MiRemoveHeadList(&MmFreePageListHead);
            /* We set the page to used, because MmCreateVirtualMapping failed with unused pages */
            /* Drop the PFN lock while zeroing; the page is already unlinked */
            KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
            Pfn = PageDescriptor - MmPfnDatabase[0];
            Status = MiZeroPage(Pfn);

            oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
            if (NT_SUCCESS(Status))
            {
                /* Zeroed successfully: move it onto the zeroed list */
                MiInsertZeroListAtBack(Pfn);
                Count++;
            }
            else
            {
                /* Zeroing failed: put the page back on the free list */
                MiInsertInListTail(&MmFreePageListHead, PageDescriptor);
                PageDescriptor->u3.e1.PageLocation = FreePageList;
            }

        }
        DPRINT("Zeroed %d pages.\n", Count);
        KeResetEvent(&ZeroPageThreadEvent);
        KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
    }

    /* NOTE(review): unreachable -- the loop above only exits via return */
    return STATUS_SUCCESS;
}
956
957 /* EOF */