Use upper-case ASSERT macros.
[reactos.git] / reactos / ntoskrnl / mm / freelist.c
/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/mm/freelist.c
 * PURPOSE:         Handle the list of free physical pages
 * PROGRAMMER:      David Welch (welch@cwcom.net)
 * UPDATE HISTORY:
 *                  27/05/98: Created
 *                  18/08/98: Added a fix from Robert Bergkvist
 */
11
12 /* INCLUDES ****************************************************************/
13
14 #include <ntoskrnl.h>
15 #define NDEBUG
16 #include <internal/debug.h>
17
/* TYPES *******************************************************************/

/* Values for PHYSICAL_PAGE.Flags.Type */
#define MM_PHYSICAL_PAGE_FREE    (0x1)  /* page is on one of the free lists */
#define MM_PHYSICAL_PAGE_USED    (0x2)  /* page is owned by a consumer */
#define MM_PHYSICAL_PAGE_BIOS    (0x3)  /* page is reserved (BIOS/hardware) */

/* Per-physical-page bookkeeping entry; MmPageArray holds one per PFN. */
typedef struct _PHYSICAL_PAGE
{
   union
   {
      struct
      {
         ULONG Type: 2;      /* MM_PHYSICAL_PAGE_* state */
         ULONG Consumer: 3;  /* MC_* consumer that owns the page */
         ULONG Zero: 1;      /* set when the page is known zero-filled */
      }
      Flags;
      ULONG AllFlags;        /* raw access to all flag bits at once */
   };

   LIST_ENTRY ListEntry;     /* link in the used/free/bios page list */
   ULONG ReferenceCount;     /* references keeping the page in USED state */
   SWAPENTRY SavedSwapEntry; /* swap entry backing the page, if any */
   ULONG LockCount;          /* outstanding MmLockPage calls */
   ULONG MapCount;           /* number of virtual mappings of the page */
   struct _MM_RMAP_ENTRY* RmapListHead; /* reverse-map list for this page */
}
PHYSICAL_PAGE, *PPHYSICAL_PAGE;
46
47
/* GLOBALS ****************************************************************/

/* One PHYSICAL_PAGE descriptor per physical page frame. */
static PPHYSICAL_PAGE MmPageArray;
/* Number of entries in MmPageArray. */
ULONG MmPageArraySize;

/* Protects MmPageArray contents and all of the page lists below. */
static KSPIN_LOCK PageListLock;
/* Per-consumer lists of in-use pages (MC_USER list doubles as LRU order). */
static LIST_ENTRY UsedPageListHeads[MC_MAXIMUM];
static LIST_ENTRY FreeZeroedPageListHead;   /* free pages known to be zeroed */
static LIST_ENTRY FreeUnzeroedPageListHead; /* free pages with stale contents */
static LIST_ENTRY BiosPageListHead;         /* reserved/BIOS pages */

/* Background thread that zeroes pages from the unzeroed free list. */
static HANDLE ZeroPageThreadHandle;
static CLIENT_ID ZeroPageThreadId;
static KEVENT ZeroPageThreadEvent;          /* wakes the zero-page thread */

/* Count of pages currently on FreeUnzeroedPageListHead. */
static ULONG UnzeroedPageCount = 0;
64
65 /* FUNCTIONS *************************************************************/
66
/*
 * Move an in-use page with exactly one reference from its current
 * consumer's used-page list onto NewConsumer's list and retag it.
 * Bugchecks if the page is mapped, not in the USED state, or has a
 * reference count other than 1.
 * NOTE(review): the trailing MiZeroPage wipes the page's contents after
 * the transfer — callers appear to expect a cleared page; confirm
 * against call sites.
 */
VOID
MmTransferOwnershipPage(PFN_TYPE Pfn, ULONG NewConsumer)
{
   KIRQL oldIrql;

   KeAcquireSpinLock(&PageListLock, &oldIrql);
   if (MmPageArray[Pfn].MapCount != 0)
   {
      DbgPrint("Transfering mapped page.\n");
      KEBUGCHECK(0);
   }
   if (MmPageArray[Pfn].Flags.Type != MM_PHYSICAL_PAGE_USED)
   {
      DPRINT1("Type: %d\n", MmPageArray[Pfn].Flags.Type);
      KEBUGCHECK(0);
   }
   if (MmPageArray[Pfn].ReferenceCount != 1)
   {
      DPRINT1("ReferenceCount: %d\n", MmPageArray[Pfn].ReferenceCount);
      KEBUGCHECK(0);
   }
   /* Relink the descriptor onto the new consumer's used list. */
   RemoveEntryList(&MmPageArray[Pfn].ListEntry);
   InsertTailList(&UsedPageListHeads[NewConsumer],
                  &MmPageArray[Pfn].ListEntry);
   MmPageArray[Pfn].Flags.Consumer = NewConsumer;
   KeReleaseSpinLock(&PageListLock, oldIrql);
   MiZeroPage(Pfn);
}
95
96 PFN_TYPE
97 MmGetLRUFirstUserPage(VOID)
98 {
99 PLIST_ENTRY NextListEntry;
100 PHYSICAL_PAGE* PageDescriptor;
101 KIRQL oldIrql;
102
103 KeAcquireSpinLock(&PageListLock, &oldIrql);
104 NextListEntry = UsedPageListHeads[MC_USER].Flink;
105 if (NextListEntry == &UsedPageListHeads[MC_USER])
106 {
107 KeReleaseSpinLock(&PageListLock, oldIrql);
108 return 0;
109 }
110 PageDescriptor = CONTAINING_RECORD(NextListEntry, PHYSICAL_PAGE, ListEntry);
111 KeReleaseSpinLock(&PageListLock, oldIrql);
112 return PageDescriptor - MmPageArray;
113 }
114
115 VOID
116 MmSetLRULastPage(PFN_TYPE Pfn)
117 {
118 KIRQL oldIrql;
119
120 ASSERT(Pfn < MmPageArraySize);
121 KeAcquireSpinLock(&PageListLock, &oldIrql);
122 if (MmPageArray[Pfn].Flags.Type == MM_PHYSICAL_PAGE_USED &&
123 MmPageArray[Pfn].Flags.Consumer == MC_USER)
124 {
125 RemoveEntryList(&MmPageArray[Pfn].ListEntry);
126 InsertTailList(&UsedPageListHeads[MC_USER],
127 &MmPageArray[Pfn].ListEntry);
128 }
129 KeReleaseSpinLock(&PageListLock, oldIrql);
130 }
131
132 PFN_TYPE
133 MmGetLRUNextUserPage(PFN_TYPE PreviousPfn)
134 {
135 PLIST_ENTRY NextListEntry;
136 PHYSICAL_PAGE* PageDescriptor;
137 KIRQL oldIrql;
138
139 KeAcquireSpinLock(&PageListLock, &oldIrql);
140 if (MmPageArray[PreviousPfn].Flags.Type != MM_PHYSICAL_PAGE_USED ||
141 MmPageArray[PreviousPfn].Flags.Consumer != MC_USER)
142 {
143 NextListEntry = UsedPageListHeads[MC_USER].Flink;
144 }
145 else
146 {
147 NextListEntry = MmPageArray[PreviousPfn].ListEntry.Flink;
148 }
149 if (NextListEntry == &UsedPageListHeads[MC_USER])
150 {
151 KeReleaseSpinLock(&PageListLock, oldIrql);
152 return 0;
153 }
154 PageDescriptor = CONTAINING_RECORD(NextListEntry, PHYSICAL_PAGE, ListEntry);
155 KeReleaseSpinLock(&PageListLock, oldIrql);
156 return PageDescriptor - MmPageArray;
157 }
158
159 PFN_TYPE
160 MmGetContinuousPages(ULONG NumberOfBytes,
161 PHYSICAL_ADDRESS LowestAcceptableAddress,
162 PHYSICAL_ADDRESS HighestAcceptableAddress,
163 ULONG Alignment)
164 {
165 ULONG NrPages;
166 ULONG i;
167 ULONG start;
168 ULONG length;
169 KIRQL oldIrql;
170
171 NrPages = PAGE_ROUND_UP(NumberOfBytes) / PAGE_SIZE;
172
173 KeAcquireSpinLock(&PageListLock, &oldIrql);
174
175 start = -1;
176 length = 0;
177 for (i = (LowestAcceptableAddress.QuadPart / PAGE_SIZE); i < (HighestAcceptableAddress.QuadPart / PAGE_SIZE); )
178 {
179 if (MmPageArray[i].Flags.Type == MM_PHYSICAL_PAGE_FREE)
180 {
181 if (start == -1)
182 {
183 start = i;
184 length = 1;
185 }
186 else
187 {
188 length++;
189 }
190 i++;
191 if (length == NrPages)
192 {
193 break;
194 }
195 }
196 else
197 {
198 start = -1;
199 /*
200 * Fast forward to the base of the next aligned region
201 */
202 i = ROUND_UP((i + 1), (Alignment / PAGE_SIZE));
203 }
204 }
205 if (start == -1 || length != NrPages)
206 {
207 KeReleaseSpinLock(&PageListLock, oldIrql);
208 return 0;
209 }
210 for (i = start; i < (start + length); i++)
211 {
212 RemoveEntryList(&MmPageArray[i].ListEntry);
213 if (MmPageArray[i].Flags.Zero == 0)
214 {
215 UnzeroedPageCount--;
216 }
217 MmStats.NrFreePages--;
218 MmStats.NrSystemPages++;
219 MmPageArray[i].Flags.Type = MM_PHYSICAL_PAGE_USED;
220 MmPageArray[i].Flags.Consumer = MC_NPPOOL;
221 MmPageArray[i].ReferenceCount = 1;
222 MmPageArray[i].LockCount = 0;
223 MmPageArray[i].MapCount = 0;
224 MmPageArray[i].SavedSwapEntry = 0;
225 InsertTailList(&UsedPageListHeads[MC_NPPOOL],
226 &MmPageArray[i].ListEntry);
227 }
228 KeReleaseSpinLock(&PageListLock, oldIrql);
229 for (i = start; i < (start + length); i++)
230 {
231 if (MmPageArray[i].Flags.Zero == 0)
232 {
233 MiZeroPage(i);
234 }
235 else
236 {
237 MmPageArray[i].Flags.Zero = 0;
238 }
239 }
240
241 return start;
242 }
243
244
245 BOOLEAN
246 MiIsPfnRam(PADDRESS_RANGE BIOSMemoryMap,
247 ULONG AddressRangeCount,
248 PFN_TYPE Pfn)
249 {
250 BOOLEAN IsUsable;
251 LARGE_INTEGER BaseAddress;
252 LARGE_INTEGER EndAddress;
253 ULONG i;
254 if (BIOSMemoryMap != NULL && AddressRangeCount > 0)
255 {
256 IsUsable = FALSE;
257 for (i = 0; i < AddressRangeCount; i++)
258 {
259 BaseAddress.u.LowPart = BIOSMemoryMap[i].BaseAddrLow;
260 BaseAddress.u.HighPart = BIOSMemoryMap[i].BaseAddrHigh;
261 EndAddress.u.LowPart = BIOSMemoryMap[i].LengthLow;
262 EndAddress.u.HighPart = BIOSMemoryMap[i].LengthHigh;
263 EndAddress.QuadPart += BaseAddress.QuadPart;
264 BaseAddress.QuadPart = PAGE_ROUND_DOWN(BaseAddress.QuadPart);
265 EndAddress.QuadPart = PAGE_ROUND_UP(EndAddress.QuadPart);
266
267 if ((BaseAddress.QuadPart >> PAGE_SHIFT) <= Pfn &&
268 Pfn < (EndAddress.QuadPart >> PAGE_SHIFT))
269 {
270 if (BIOSMemoryMap[i].Type == 1)
271 {
272 IsUsable = TRUE;
273 }
274 else
275 {
276 return FALSE;
277 }
278 }
279 }
280 return IsUsable;
281 }
282 return TRUE;
283 }
284
285
PVOID INIT_FUNCTION
MmInitializePageList(PVOID FirstPhysKernelAddress,
                     PVOID LastPhysKernelAddress,
                     ULONG MemorySizeInPages,
                     ULONG LastKernelAddress,
                     PADDRESS_RANGE BIOSMemoryMap,
                     ULONG AddressRangeCount)
/*
 * FUNCTION: Initializes the page list with all pages free
 * except those known to be reserved and those used by the kernel
 * ARGUMENTS:
 *         FirstKernelAddress = First physical address used by the kernel
 *         LastKernelAddress = Last physical address used by the kernel
 * RETURNS: First kernel virtual address available after the page array.
 * NOTE: Also carves out the physical pages backing MmPageArray itself,
 *       preferring pages taken from the upper end of RAM.
 */
{
   ULONG i;
   ULONG Reserved;
   NTSTATUS Status;
   PFN_TYPE LastPage;
   PFN_TYPE FirstUninitializedPage;

   DPRINT("MmInitializePageList(FirstPhysKernelAddress %x, "
          "LastPhysKernelAddress %x, "
          "MemorySizeInPages %x, LastKernelAddress %x)\n",
          FirstPhysKernelAddress,
          LastPhysKernelAddress,
          MemorySizeInPages,
          LastKernelAddress);

   /* Set up the empty page lists and the lock that guards them. */
   for (i = 0; i < MC_MAXIMUM; i++)
   {
      InitializeListHead(&UsedPageListHeads[i]);
   }
   KeInitializeSpinLock(&PageListLock);
   InitializeListHead(&FreeUnzeroedPageListHead);
   InitializeListHead(&FreeZeroedPageListHead);
   InitializeListHead(&BiosPageListHead);

   LastKernelAddress = PAGE_ROUND_UP(LastKernelAddress);

   /* The page array lives directly after the kernel image; Reserved is
    * the number of pages the array itself occupies. */
   MmPageArraySize = MemorySizeInPages;
   Reserved =
      PAGE_ROUND_UP((MmPageArraySize * sizeof(PHYSICAL_PAGE))) / PAGE_SIZE;
   MmPageArray = (PHYSICAL_PAGE *)LastKernelAddress;

   DPRINT("Reserved %d\n", Reserved);

   /* Account for the page array in both the virtual and the physical
    * end-of-kernel addresses. */
   LastKernelAddress = PAGE_ROUND_UP(LastKernelAddress);
   LastKernelAddress = ((ULONG)LastKernelAddress + (Reserved * PAGE_SIZE));
   LastPhysKernelAddress = (PVOID)PAGE_ROUND_UP(LastPhysKernelAddress);
   LastPhysKernelAddress = (char*)LastPhysKernelAddress + (Reserved * PAGE_SIZE);

   MmStats.NrTotalPages = 0;
   MmStats.NrSystemPages = 0;
   MmStats.NrUserPages = 0;
   MmStats.NrReservedPages = 0;
   MmStats.NrFreePages = 0;
   MmStats.NrLockedPages = 0;

   /* Preinitialize the Balancer because we need some pages for pte's */
   MmInitializeBalancer(MemorySizeInPages, 0);

   FirstUninitializedPage = (ULONG_PTR)LastPhysKernelAddress / PAGE_SIZE;
   LastPage = MmPageArraySize;
   /* Map and initialize each page of the page array itself. */
   for (i = 0; i < Reserved; i++)
   {
      PVOID Address = (char*)(ULONG)MmPageArray + (i * PAGE_SIZE);
      ULONG j, start, end;
      if (!MmIsPagePresent(NULL, Address))
      {
         PFN_TYPE Pfn;
         Pfn = 0;
         while (Pfn == 0 && LastPage > FirstUninitializedPage)
         {
            /* Allocate the page from the upper end of the RAM */
            if (MiIsPfnRam(BIOSMemoryMap, AddressRangeCount, --LastPage))
            {
               Pfn = LastPage;
            }
         }
         if (Pfn == 0)
         {
            /* No uninitialized RAM left; fall back to the balancer. */
            Pfn = MmAllocPage(MC_NPPOOL, 0);
            if (Pfn == 0)
            {
               KEBUGCHECK(0);
            }
         }
         Status = MmCreateVirtualMappingForKernel(Address,
                                                  PAGE_READWRITE,
                                                  &Pfn,
                                                  1);
         if (!NT_SUCCESS(Status))
         {
            DbgPrint("Unable to create virtual mapping\n");
            KEBUGCHECK(0);
         }
      }
      memset(Address, 0, PAGE_SIZE);

      /* Range of PFNs whose descriptors live inside this array page. */
      start = ((ULONG_PTR)Address - (ULONG_PTR)MmPageArray) / sizeof(PHYSICAL_PAGE);
      end = ((ULONG_PTR)Address - (ULONG_PTR)MmPageArray + PAGE_SIZE) / sizeof(PHYSICAL_PAGE);

      for (j = start; j < end && j < LastPage; j++)
      {
         if (MiIsPfnRam(BIOSMemoryMap, AddressRangeCount, j))
         {
            if (j == 0)
            {
               /*
                * Page zero is reserved
                */
               MmPageArray[0].Flags.Type = MM_PHYSICAL_PAGE_BIOS;
               MmPageArray[0].Flags.Consumer = MC_NPPOOL;
               MmPageArray[0].Flags.Zero = 0;
               MmPageArray[0].ReferenceCount = 0;
               InsertTailList(&BiosPageListHead,
                              &MmPageArray[0].ListEntry);
               MmStats.NrReservedPages++;
            }
            else if (j == 1)
            {

               /*
                * Page one is reserved for the initial KPCR
                */
               MmPageArray[1].Flags.Type = MM_PHYSICAL_PAGE_BIOS;
               MmPageArray[1].Flags.Consumer = MC_NPPOOL;
               MmPageArray[1].Flags.Zero = 0;
               MmPageArray[1].ReferenceCount = 0;
               InsertTailList(&BiosPageListHead,
                              &MmPageArray[1].ListEntry);
               MmStats.NrReservedPages++;
            }
            else if (j >= 0xa0000 / PAGE_SIZE && j < 0x100000 / PAGE_SIZE)
            {
               /* Legacy BIOS/VGA area (0xA0000-0xFFFFF) is reserved. */
               MmPageArray[j].Flags.Type = MM_PHYSICAL_PAGE_BIOS;
               MmPageArray[j].Flags.Zero = 0;
               MmPageArray[j].Flags.Consumer = MC_NPPOOL;
               MmPageArray[j].ReferenceCount = 1;
               InsertTailList(&BiosPageListHead,
                              &MmPageArray[j].ListEntry);
               MmStats.NrReservedPages++;
            }
            else if (j >= (ULONG)FirstPhysKernelAddress/PAGE_SIZE &&
                     j < (ULONG)LastPhysKernelAddress/PAGE_SIZE)
            {
               /* Pages backing the kernel image and the page array. */
               MmPageArray[j].Flags.Type = MM_PHYSICAL_PAGE_USED;
               MmPageArray[j].Flags.Zero = 0;
               MmPageArray[j].Flags.Consumer = MC_NPPOOL;
               MmPageArray[j].ReferenceCount = 1;
               MmPageArray[j].MapCount = 1;
               InsertTailList(&UsedPageListHeads[MC_NPPOOL],
                              &MmPageArray[j].ListEntry);
               MmStats.NrSystemPages++;
            }
            else
            {
               /* Ordinary RAM: free, contents not yet zeroed. */
               MmPageArray[j].Flags.Type = MM_PHYSICAL_PAGE_FREE;
               MmPageArray[j].Flags.Zero = 0;
               MmPageArray[j].ReferenceCount = 0;
               InsertTailList(&FreeUnzeroedPageListHead,
                              &MmPageArray[j].ListEntry);
               UnzeroedPageCount++;
               MmStats.NrFreePages++;
            }
         }
         else
         {
            /* Not reported as usable RAM in the BIOS map: reserve it. */
            MmPageArray[j].Flags.Type = MM_PHYSICAL_PAGE_BIOS;
            MmPageArray[j].Flags.Consumer = MC_NPPOOL;
            MmPageArray[j].Flags.Zero = 0;
            MmPageArray[j].ReferenceCount = 0;
            InsertTailList(&BiosPageListHead,
                           &MmPageArray[j].ListEntry);
            MmStats.NrReservedPages++;
         }
      }
      FirstUninitializedPage = j;

   }

   /* Add the pages from the upper end to the list */
   for (i = LastPage; i < MmPageArraySize; i++)
   {
      if (MiIsPfnRam(BIOSMemoryMap, AddressRangeCount, i))
      {
         /* These pages were taken above to back the page array itself. */
         MmPageArray[i].Flags.Type = MM_PHYSICAL_PAGE_USED;
         MmPageArray[i].Flags.Zero = 0;
         MmPageArray[i].Flags.Consumer = MC_NPPOOL;
         MmPageArray[i].ReferenceCount = 1;
         MmPageArray[i].MapCount = 1;
         InsertTailList(&UsedPageListHeads[MC_NPPOOL],
                        &MmPageArray[i].ListEntry);
         MmStats.NrSystemPages++;
      }
      else
      {
         MmPageArray[i].Flags.Type = MM_PHYSICAL_PAGE_BIOS;
         MmPageArray[i].Flags.Consumer = MC_NPPOOL;
         MmPageArray[i].Flags.Zero = 0;
         MmPageArray[i].ReferenceCount = 0;
         InsertTailList(&BiosPageListHead,
                        &MmPageArray[i].ListEntry);
         MmStats.NrReservedPages++;
      }
   }



   /* Signalled initially so the zero-page thread starts work at once. */
   KeInitializeEvent(&ZeroPageThreadEvent, NotificationEvent, TRUE);

   MmStats.NrTotalPages = MmStats.NrFreePages + MmStats.NrSystemPages +
                          MmStats.NrReservedPages + MmStats.NrUserPages;
   MmInitializeBalancer(MmStats.NrFreePages, MmStats.NrSystemPages + MmStats.NrReservedPages);
   return((PVOID)LastKernelAddress);
}
503
504 VOID
505 MmSetFlagsPage(PFN_TYPE Pfn, ULONG Flags)
506 {
507 KIRQL oldIrql;
508
509 ASSERT(Pfn < MmPageArraySize);
510 KeAcquireSpinLock(&PageListLock, &oldIrql);
511 MmPageArray[Pfn].AllFlags = Flags;
512 KeReleaseSpinLock(&PageListLock, oldIrql);
513 }
514
/* Set the head of the reverse-map (rmap) list for a page.
 * NOTE(review): no PageListLock and no bounds ASSERT here, unlike the
 * other accessors — presumably callers serialize via the rmap
 * machinery; confirm against call sites. */
VOID
MmSetRmapListHeadPage(PFN_TYPE Pfn, struct _MM_RMAP_ENTRY* ListHead)
{
   MmPageArray[Pfn].RmapListHead = ListHead;
}
520
/* Return the head of the reverse-map (rmap) list for a page.
 * NOTE(review): unsynchronized read — see MmSetRmapListHeadPage. */
struct _MM_RMAP_ENTRY*
MmGetRmapListHeadPage(PFN_TYPE Pfn)
{
   return(MmPageArray[Pfn].RmapListHead);
}
526
527 VOID
528 MmMarkPageMapped(PFN_TYPE Pfn)
529 {
530 KIRQL oldIrql;
531
532 if (Pfn < MmPageArraySize)
533 {
534 KeAcquireSpinLock(&PageListLock, &oldIrql);
535 if (MmPageArray[Pfn].Flags.Type == MM_PHYSICAL_PAGE_FREE)
536 {
537 DbgPrint("Mapping non-used page\n");
538 KEBUGCHECK(0);
539 }
540 MmPageArray[Pfn].MapCount++;
541 KeReleaseSpinLock(&PageListLock, oldIrql);
542 }
543 }
544
545 VOID
546 MmMarkPageUnmapped(PFN_TYPE Pfn)
547 {
548 KIRQL oldIrql;
549
550 if (Pfn < MmPageArraySize)
551 {
552 KeAcquireSpinLock(&PageListLock, &oldIrql);
553 if (MmPageArray[Pfn].Flags.Type == MM_PHYSICAL_PAGE_FREE)
554 {
555 DbgPrint("Unmapping non-used page\n");
556 KEBUGCHECK(0);
557 }
558 if (MmPageArray[Pfn].MapCount == 0)
559 {
560 DbgPrint("Unmapping not mapped page\n");
561 KEBUGCHECK(0);
562 }
563 MmPageArray[Pfn].MapCount--;
564 KeReleaseSpinLock(&PageListLock, oldIrql);
565 }
566 }
567
568 ULONG
569 MmGetFlagsPage(PFN_TYPE Pfn)
570 {
571 KIRQL oldIrql;
572 ULONG Flags;
573
574 ASSERT(Pfn < MmPageArraySize);
575 KeAcquireSpinLock(&PageListLock, &oldIrql);
576 Flags = MmPageArray[Pfn].AllFlags;
577 KeReleaseSpinLock(&PageListLock, oldIrql);
578
579 return(Flags);
580 }
581
582
583 VOID
584 MmSetSavedSwapEntryPage(PFN_TYPE Pfn, SWAPENTRY SavedSwapEntry)
585 {
586 KIRQL oldIrql;
587
588 ASSERT(Pfn < MmPageArraySize);
589 KeAcquireSpinLock(&PageListLock, &oldIrql);
590 MmPageArray[Pfn].SavedSwapEntry = SavedSwapEntry;
591 KeReleaseSpinLock(&PageListLock, oldIrql);
592 }
593
594 SWAPENTRY
595 MmGetSavedSwapEntryPage(PFN_TYPE Pfn)
596 {
597 SWAPENTRY SavedSwapEntry;
598 KIRQL oldIrql;
599
600 ASSERT(Pfn < MmPageArraySize);
601 KeAcquireSpinLock(&PageListLock, &oldIrql);
602 SavedSwapEntry = MmPageArray[Pfn].SavedSwapEntry;
603 KeReleaseSpinLock(&PageListLock, oldIrql);
604
605 return(SavedSwapEntry);
606 }
607
608 VOID
609 MmReferencePage(PFN_TYPE Pfn)
610 {
611 KIRQL oldIrql;
612
613 DPRINT("MmReferencePage(PysicalAddress %x)\n", Pfn << PAGE_SHIFT);
614
615 if (Pfn == 0 || Pfn >= MmPageArraySize)
616 {
617 KEBUGCHECK(0);
618 }
619
620 KeAcquireSpinLock(&PageListLock, &oldIrql);
621
622 if (MmPageArray[Pfn].Flags.Type != MM_PHYSICAL_PAGE_USED)
623 {
624 DbgPrint("Referencing non-used page\n");
625 KEBUGCHECK(0);
626 }
627
628 MmPageArray[Pfn].ReferenceCount++;
629 KeReleaseSpinLock(&PageListLock, oldIrql);
630 }
631
632 ULONG
633 MmGetReferenceCountPage(PFN_TYPE Pfn)
634 {
635 KIRQL oldIrql;
636 ULONG RCount;
637
638 DPRINT("MmGetReferenceCountPage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
639
640 if (Pfn == 0 || Pfn >= MmPageArraySize)
641 {
642 KEBUGCHECK(0);
643 }
644
645 KeAcquireSpinLock(&PageListLock, &oldIrql);
646
647 if (MmPageArray[Pfn].Flags.Type != MM_PHYSICAL_PAGE_USED)
648 {
649 DbgPrint("Getting reference count for free page\n");
650 KEBUGCHECK(0);
651 }
652
653 RCount = MmPageArray[Pfn].ReferenceCount;
654
655 KeReleaseSpinLock(&PageListLock, oldIrql);
656 return(RCount);
657 }
658
659 BOOLEAN
660 MmIsUsablePage(PFN_TYPE Pfn)
661 {
662
663 DPRINT("MmIsUsablePage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
664
665 if (Pfn == 0 || Pfn >= MmPageArraySize)
666 {
667 KEBUGCHECK(0);
668 }
669
670 if (MmPageArray[Pfn].Flags.Type != MM_PHYSICAL_PAGE_USED &&
671 MmPageArray[Pfn].Flags.Type != MM_PHYSICAL_PAGE_BIOS)
672 {
673 return(FALSE);
674 }
675
676 return(TRUE);
677 }
678
679 VOID
680 MmDereferencePage(PFN_TYPE Pfn)
681 {
682 KIRQL oldIrql;
683
684 DPRINT("MmDereferencePage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
685
686 if (Pfn == 0 || Pfn >= MmPageArraySize)
687 {
688 KEBUGCHECK(0);
689 }
690
691 KeAcquireSpinLock(&PageListLock, &oldIrql);
692
693 if (MmPageArray[Pfn].Flags.Type != MM_PHYSICAL_PAGE_USED)
694 {
695 DbgPrint("Dereferencing free page\n");
696 KEBUGCHECK(0);
697 }
698 if (MmPageArray[Pfn].ReferenceCount == 0)
699 {
700 DbgPrint("Derefrencing page with reference count 0\n");
701 KEBUGCHECK(0);
702 }
703
704 MmPageArray[Pfn].ReferenceCount--;
705 if (MmPageArray[Pfn].ReferenceCount == 0)
706 {
707 MmStats.NrFreePages++;
708 MmStats.NrSystemPages--;
709 RemoveEntryList(&MmPageArray[Pfn].ListEntry);
710 if (MmPageArray[Pfn].RmapListHead != NULL)
711 {
712 DbgPrint("Freeing page with rmap entries.\n");
713 KEBUGCHECK(0);
714 }
715 if (MmPageArray[Pfn].MapCount != 0)
716 {
717 DbgPrint("Freeing mapped page (0x%x count %d)\n",
718 Pfn << PAGE_SHIFT, MmPageArray[Pfn].MapCount);
719 KEBUGCHECK(0);
720 }
721 if (MmPageArray[Pfn].LockCount > 0)
722 {
723 DbgPrint("Freeing locked page\n");
724 KEBUGCHECK(0);
725 }
726 if (MmPageArray[Pfn].SavedSwapEntry != 0)
727 {
728 DbgPrint("Freeing page with swap entry.\n");
729 KEBUGCHECK(0);
730 }
731 if (MmPageArray[Pfn].Flags.Type != MM_PHYSICAL_PAGE_USED)
732 {
733 DbgPrint("Freeing page with flags %x\n",
734 MmPageArray[Pfn].Flags.Type);
735 KEBUGCHECK(0);
736 }
737 MmPageArray[Pfn].Flags.Type = MM_PHYSICAL_PAGE_FREE;
738 MmPageArray[Pfn].Flags.Consumer = MC_MAXIMUM;
739 InsertTailList(&FreeUnzeroedPageListHead,
740 &MmPageArray[Pfn].ListEntry);
741 UnzeroedPageCount++;
742 if (UnzeroedPageCount > 8 && 0 == KeReadStateEvent(&ZeroPageThreadEvent))
743 {
744 KeSetEvent(&ZeroPageThreadEvent, IO_NO_INCREMENT, FALSE);
745 }
746 }
747 KeReleaseSpinLock(&PageListLock, oldIrql);
748 }
749
750 ULONG
751 MmGetLockCountPage(PFN_TYPE Pfn)
752 {
753 KIRQL oldIrql;
754 ULONG LockCount;
755
756 DPRINT("MmGetLockCountPage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
757
758 if (Pfn == 0 || Pfn >= MmPageArraySize)
759 {
760 KEBUGCHECK(0);
761 }
762
763 KeAcquireSpinLock(&PageListLock, &oldIrql);
764
765 if (MmPageArray[Pfn].Flags.Type != MM_PHYSICAL_PAGE_USED)
766 {
767 DbgPrint("Getting lock count for free page\n");
768 KEBUGCHECK(0);
769 }
770
771 LockCount = MmPageArray[Pfn].LockCount;
772 KeReleaseSpinLock(&PageListLock, oldIrql);
773
774 return(LockCount);
775 }
776
777 VOID
778 MmLockPage(PFN_TYPE Pfn)
779 {
780 KIRQL oldIrql;
781
782 DPRINT("MmLockPage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
783
784 if (Pfn == 0 || Pfn >= MmPageArraySize)
785 {
786 KEBUGCHECK(0);
787 }
788
789 KeAcquireSpinLock(&PageListLock, &oldIrql);
790
791 if (MmPageArray[Pfn].Flags.Type != MM_PHYSICAL_PAGE_USED)
792 {
793 DbgPrint("Locking free page\n");
794 KEBUGCHECK(0);
795 }
796
797 MmPageArray[Pfn].LockCount++;
798 KeReleaseSpinLock(&PageListLock, oldIrql);
799 }
800
801 VOID
802 MmUnlockPage(PFN_TYPE Pfn)
803 {
804 KIRQL oldIrql;
805
806 DPRINT("MmUnlockPage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
807
808 if (Pfn == 0 || Pfn >= MmPageArraySize)
809 {
810 KEBUGCHECK(0);
811 }
812
813 KeAcquireSpinLock(&PageListLock, &oldIrql);
814
815 if (MmPageArray[Pfn].Flags.Type != MM_PHYSICAL_PAGE_USED)
816 {
817 DbgPrint("Unlocking free page\n");
818 KEBUGCHECK(0);
819 }
820
821 MmPageArray[Pfn].LockCount--;
822 KeReleaseSpinLock(&PageListLock, oldIrql);
823 }
824
/*
 * Allocate one physical page for the given consumer, preferring a
 * pre-zeroed page and zeroing an unzeroed one on demand.
 * RETURNS: the PFN of the allocated page, or 0 if no free pages remain.
 */
PFN_TYPE
MmAllocPage(ULONG Consumer, SWAPENTRY SavedSwapEntry)
{
   PFN_TYPE PfnOffset;
   PLIST_ENTRY ListEntry;
   PPHYSICAL_PAGE PageDescriptor;
   KIRQL oldIrql;
   BOOLEAN NeedClear = FALSE;

   DPRINT("MmAllocPage()\n");

   KeAcquireSpinLock(&PageListLock, &oldIrql);
   if (IsListEmpty(&FreeZeroedPageListHead))
   {
      if (IsListEmpty(&FreeUnzeroedPageListHead))
      {
         /* Both free lists are empty: out of physical memory. */
         DPRINT1("MmAllocPage(): Out of memory\n");
         KeReleaseSpinLock(&PageListLock, oldIrql);
         return 0;
      }
      ListEntry = RemoveTailList(&FreeUnzeroedPageListHead);
      UnzeroedPageCount--;

      PageDescriptor = CONTAINING_RECORD(ListEntry, PHYSICAL_PAGE, ListEntry);

      /* Page holds stale data; zero it after dropping the lock. */
      NeedClear = TRUE;
   }
   else
   {
      ListEntry = RemoveTailList(&FreeZeroedPageListHead);

      PageDescriptor = CONTAINING_RECORD(ListEntry, PHYSICAL_PAGE, ListEntry);
   }

   /* Sanity: the page must be free, unmapped and unreferenced. */
   if (PageDescriptor->Flags.Type != MM_PHYSICAL_PAGE_FREE)
   {
      DbgPrint("Got non-free page from freelist\n");
      KEBUGCHECK(0);
   }
   if (PageDescriptor->MapCount != 0)
   {
      DbgPrint("Got mapped page from freelist\n");
      KEBUGCHECK(0);
   }
   if (PageDescriptor->ReferenceCount != 0)
   {
      DPRINT1("%d\n", PageDescriptor->ReferenceCount);
      KEBUGCHECK(0);
   }
   PageDescriptor->Flags.Type = MM_PHYSICAL_PAGE_USED;
   PageDescriptor->Flags.Consumer = Consumer;
   PageDescriptor->ReferenceCount = 1;
   PageDescriptor->LockCount = 0;
   PageDescriptor->MapCount = 0;
   PageDescriptor->SavedSwapEntry = SavedSwapEntry;
   InsertTailList(&UsedPageListHeads[Consumer], ListEntry);

   /* NOTE(review): NrSystemPages is incremented for every consumer,
    * including MC_USER — confirm this accounting is intended. */
   MmStats.NrSystemPages++;
   MmStats.NrFreePages--;

   KeReleaseSpinLock(&PageListLock, oldIrql);

   PfnOffset = PageDescriptor - MmPageArray;
   if (NeedClear)
   {
      MiZeroPage(PfnOffset);
   }
   /* NOTE(review): this MapCount check runs after the lock is released,
    * so the descriptor is read unsynchronized here. */
   if (PageDescriptor->MapCount != 0)
   {
      DbgPrint("Returning mapped page.\n");
      KEBUGCHECK(0);
   }
   return PfnOffset;
}
899
900
/*
 * Body of the zero-page system thread: waits on ZeroPageThreadEvent,
 * then drains FreeUnzeroedPageListHead, zeroing each page through a
 * temporary kernel mapping and moving it to FreeZeroedPageListHead.
 * Runs forever; only returns (STATUS_UNSUCCESSFUL) if the wait fails.
 */
NTSTATUS STDCALL
MmZeroPageThreadMain(PVOID Ignored)
{
   NTSTATUS Status;
   KIRQL oldIrql;
   PLIST_ENTRY ListEntry;
   PPHYSICAL_PAGE PageDescriptor;
   PFN_TYPE Pfn;
   /* Kernel VA reused to map each page that is being zeroed. */
   static PVOID Address = NULL;
   ULONG Count;

   while(1)
   {
      Status = KeWaitForSingleObject(&ZeroPageThreadEvent,
                                     0,
                                     KernelMode,
                                     FALSE,
                                     NULL);
      if (!NT_SUCCESS(Status))
      {
         DbgPrint("ZeroPageThread: Wait failed\n");
         KEBUGCHECK(0);
         return(STATUS_UNSUCCESSFUL);
      }

      Count = 0;
      KeAcquireSpinLock(&PageListLock, &oldIrql);
      while (!IsListEmpty(&FreeUnzeroedPageListHead))
      {
         ListEntry = RemoveTailList(&FreeUnzeroedPageListHead);
         UnzeroedPageCount--;
         PageDescriptor = CONTAINING_RECORD(ListEntry, PHYSICAL_PAGE, ListEntry);
         /* We set the page to used, because MmCreateVirtualMapping failed with unused pages */
         PageDescriptor->Flags.Type = MM_PHYSICAL_PAGE_USED;
         /* Drop the lock while zeroing: the page is on no list now. */
         KeReleaseSpinLock(&PageListLock, oldIrql);
         Count++;
         Pfn = PageDescriptor - MmPageArray;
         if (Address == NULL)
         {
            /* First pass: get a kernel VA already mapped to this page. */
            Address = ExAllocatePageWithPhysPage(Pfn);
         }
         else
         {
            Status = MmCreateVirtualMapping(NULL,
                                            Address,
                                            PAGE_READWRITE | PAGE_SYSTEM,
                                            &Pfn,
                                            1);
            if (!NT_SUCCESS(Status))
            {
               DbgPrint("Unable to create virtual mapping\n");
               KEBUGCHECK(0);
            }
         }
         memset(Address, 0, PAGE_SIZE);
         MmDeleteVirtualMapping(NULL, (PVOID)Address, FALSE, NULL, NULL);
         KeAcquireSpinLock(&PageListLock, &oldIrql);
         if (PageDescriptor->MapCount != 0)
         {
            DbgPrint("Mapped page on freelist.\n");
            KEBUGCHECK(0);
         }
         PageDescriptor->Flags.Zero = 1;
         PageDescriptor->Flags.Type = MM_PHYSICAL_PAGE_FREE;
         InsertHeadList(&FreeZeroedPageListHead, ListEntry);
      }
      DPRINT("Zeroed %d pages.\n", Count);
      /* NOTE(review): the event is reset while PageListLock is still
       * held — confirm KeResetEvent is legal at this raised IRQL. */
      KeResetEvent(&ZeroPageThreadEvent);
      KeReleaseSpinLock(&PageListLock, oldIrql);
   }
}
972
973 NTSTATUS INIT_FUNCTION
974 MmInitZeroPageThread(VOID)
975 {
976 KPRIORITY Priority;
977 NTSTATUS Status;
978
979 Status = PsCreateSystemThread(&ZeroPageThreadHandle,
980 THREAD_ALL_ACCESS,
981 NULL,
982 NULL,
983 &ZeroPageThreadId,
984 (PKSTART_ROUTINE) MmZeroPageThreadMain,
985 NULL);
986 if (!NT_SUCCESS(Status))
987 {
988 return(Status);
989 }
990
991 Priority = 1;
992 NtSetInformationThread(ZeroPageThreadHandle,
993 ThreadPriority,
994 &Priority,
995 sizeof(Priority));
996
997 return(STATUS_SUCCESS);
998 }
999
1000 /* EOF */