/* $Id$
 *
 * COPYRIGHT:   See COPYING in the top level directory
 * PROJECT:     ReactOS kernel
 * FILE:        ntoskrnl/mm/freelist.c
 * PURPOSE:     Handle the list of free physical pages
 *
 * PROGRAMMERS: David Welch (welch@cwcom.net)
 *              Robert Bergkvist
 */

/* INCLUDES ****************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <internal/debug.h>

/* TYPES *******************************************************************/

#define MM_PHYSICAL_PAGE_FREE    (0x1)
#define MM_PHYSICAL_PAGE_USED    (0x2)
#define MM_PHYSICAL_PAGE_BIOS    (0x3)

typedef struct _PHYSICAL_PAGE
{
   union
   {
      struct
      {
         ULONG Type: 2;
         ULONG Consumer: 3;
         ULONG Zero: 1;
      }
      Flags;
      ULONG AllFlags;
   };

   LIST_ENTRY ListEntry;
   ULONG ReferenceCount;
   SWAPENTRY SavedSwapEntry;
   ULONG LockCount;
   ULONG MapCount;
   struct _MM_RMAP_ENTRY* RmapListHead;
}
PHYSICAL_PAGE, *PPHYSICAL_PAGE;


/* GLOBALS ****************************************************************/

static PPHYSICAL_PAGE MmPageArray;
ULONG MmPageArraySize;

static KSPIN_LOCK PageListLock;
static LIST_ENTRY UsedPageListHeads[MC_MAXIMUM];
static LIST_ENTRY FreeZeroedPageListHead;
static LIST_ENTRY FreeUnzeroedPageListHead;
static LIST_ENTRY BiosPageListHead;

static PETHREAD ZeroPageThread;
static CLIENT_ID ZeroPageThreadId;
static KEVENT ZeroPageThreadEvent;
static BOOLEAN ZeroPageThreadShouldTerminate = FALSE;

static ULONG UnzeroedPageCount = 0;

/* FUNCTIONS *************************************************************/

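/*
 * FUNCTION: Move a used page onto the used-page list of NewConsumer and
 * update its consumer field. The page must be unmapped, in use and have
 * a reference count of exactly one; it is zeroed via MiZeroPage after
 * the transfer.
 */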
VOID
MmTransferOwnershipPage(PFN_TYPE Pfn, ULONG NewConsumer)
{
   KIRQL oldIrql;

   KeAcquireSpinLock(&PageListLock, &oldIrql);
   if (MmPageArray[Pfn].MapCount != 0)
   {
      DbgPrint("Transferring mapped page.\n");
      KEBUGCHECK(0);
   }
   if (MmPageArray[Pfn].Flags.Type != MM_PHYSICAL_PAGE_USED)
   {
      DPRINT1("Type: %d\n", MmPageArray[Pfn].Flags.Type);
      KEBUGCHECK(0);
   }
   if (MmPageArray[Pfn].ReferenceCount != 1)
   {
      DPRINT1("ReferenceCount: %d\n", MmPageArray[Pfn].ReferenceCount);
      KEBUGCHECK(0);
   }
   RemoveEntryList(&MmPageArray[Pfn].ListEntry);
   InsertTailList(&UsedPageListHeads[NewConsumer],
                  &MmPageArray[Pfn].ListEntry);
   MmPageArray[Pfn].Flags.Consumer = NewConsumer;
   KeReleaseSpinLock(&PageListLock, oldIrql);
   MiZeroPage(Pfn);
}

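/*
 * FUNCTION: Return the PFN of the first (least recently added) page on
 * the MC_USER used-page list, or 0 if the list is empty.
 */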
PFN_TYPE
MmGetLRUFirstUserPage(VOID)
{
   PLIST_ENTRY NextListEntry;
   PHYSICAL_PAGE* PageDescriptor;
   KIRQL oldIrql;

   KeAcquireSpinLock(&PageListLock, &oldIrql);
   NextListEntry = UsedPageListHeads[MC_USER].Flink;
   if (NextListEntry == &UsedPageListHeads[MC_USER])
   {
      KeReleaseSpinLock(&PageListLock, oldIrql);
      return 0;
   }
   PageDescriptor = CONTAINING_RECORD(NextListEntry, PHYSICAL_PAGE, ListEntry);
   KeReleaseSpinLock(&PageListLock, oldIrql);
   return PageDescriptor - MmPageArray;
}

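/*
 * FUNCTION: Move a used MC_USER page to the tail of its list, marking it
 * as the most recently used page.
 */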
VOID
MmSetLRULastPage(PFN_TYPE Pfn)
{
   KIRQL oldIrql;

   ASSERT(Pfn < MmPageArraySize);
   KeAcquireSpinLock(&PageListLock, &oldIrql);
   if (MmPageArray[Pfn].Flags.Type == MM_PHYSICAL_PAGE_USED &&
       MmPageArray[Pfn].Flags.Consumer == MC_USER)
   {
      RemoveEntryList(&MmPageArray[Pfn].ListEntry);
      InsertTailList(&UsedPageListHeads[MC_USER],
                     &MmPageArray[Pfn].ListEntry);
   }
   KeReleaseSpinLock(&PageListLock, oldIrql);
}

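/*
 * FUNCTION: Return the PFN of the MC_USER page following PreviousPfn on
 * the used-page list, or 0 at the end of the list. If PreviousPfn is no
 * longer a used MC_USER page, restart from the head of the list.
 */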
PFN_TYPE
MmGetLRUNextUserPage(PFN_TYPE PreviousPfn)
{
   PLIST_ENTRY NextListEntry;
   PHYSICAL_PAGE* PageDescriptor;
   KIRQL oldIrql;

   KeAcquireSpinLock(&PageListLock, &oldIrql);
   if (MmPageArray[PreviousPfn].Flags.Type != MM_PHYSICAL_PAGE_USED ||
       MmPageArray[PreviousPfn].Flags.Consumer != MC_USER)
   {
      NextListEntry = UsedPageListHeads[MC_USER].Flink;
   }
   else
   {
      NextListEntry = MmPageArray[PreviousPfn].ListEntry.Flink;
   }
   if (NextListEntry == &UsedPageListHeads[MC_USER])
   {
      KeReleaseSpinLock(&PageListLock, oldIrql);
      return 0;
   }
   PageDescriptor = CONTAINING_RECORD(NextListEntry, PHYSICAL_PAGE, ListEntry);
   KeReleaseSpinLock(&PageListLock, oldIrql);
   return PageDescriptor - MmPageArray;
}

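/*
 * FUNCTION: Find and allocate a physically contiguous, aligned run of
 * free pages large enough for NumberOfBytes within the given physical
 * address range. Returns the first PFN of the run, or 0 on failure.
 * Pages that were not already zeroed are zeroed before returning.
 */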
PFN_TYPE
MmGetContinuousPages(ULONG NumberOfBytes,
                     PHYSICAL_ADDRESS LowestAcceptableAddress,
                     PHYSICAL_ADDRESS HighestAcceptableAddress,
                     ULONG Alignment)
{
   ULONG NrPages;
   ULONG i;
   ULONG start;
   ULONG length;
   KIRQL oldIrql;

   NrPages = PAGE_ROUND_UP(NumberOfBytes) / PAGE_SIZE;

   KeAcquireSpinLock(&PageListLock, &oldIrql);

   start = -1;
   length = 0;
   for (i = (LowestAcceptableAddress.QuadPart / PAGE_SIZE); i < (HighestAcceptableAddress.QuadPart / PAGE_SIZE); )
   {
      if (MmPageArray[i].Flags.Type == MM_PHYSICAL_PAGE_FREE)
      {
         if (start == -1)
         {
            start = i;
            length = 1;
         }
         else
         {
            length++;
         }
         i++;
         if (length == NrPages)
         {
            break;
         }
      }
      else
      {
         start = -1;
         /*
          * Fast forward to the base of the next aligned region
          */
         i = ROUND_UP((i + 1), (Alignment / PAGE_SIZE));
      }
   }
   if (start == -1 || length != NrPages)
   {
      KeReleaseSpinLock(&PageListLock, oldIrql);
      return 0;
   }
   for (i = start; i < (start + length); i++)
   {
      RemoveEntryList(&MmPageArray[i].ListEntry);
      if (MmPageArray[i].Flags.Zero == 0)
      {
         UnzeroedPageCount--;
      }
      MmStats.NrFreePages--;
      MmStats.NrSystemPages++;
      MmPageArray[i].Flags.Type = MM_PHYSICAL_PAGE_USED;
      MmPageArray[i].Flags.Consumer = MC_NPPOOL;
      MmPageArray[i].ReferenceCount = 1;
      MmPageArray[i].LockCount = 0;
      MmPageArray[i].MapCount = 0;
      MmPageArray[i].SavedSwapEntry = 0;
      InsertTailList(&UsedPageListHeads[MC_NPPOOL],
                     &MmPageArray[i].ListEntry);
   }
   KeReleaseSpinLock(&PageListLock, oldIrql);
   for (i = start; i < (start + length); i++)
   {
      if (MmPageArray[i].Flags.Zero == 0)
      {
         MiZeroPage(i);
      }
      else
      {
         MmPageArray[i].Flags.Zero = 0;
      }
   }

   return start;
}


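/*
 * FUNCTION: Check against the BIOS memory map whether the page with the
 * given PFN lies in usable RAM (map type 1). Returns TRUE if no memory
 * map was supplied.
 */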
BOOLEAN
MiIsPfnRam(PADDRESS_RANGE BIOSMemoryMap,
           ULONG AddressRangeCount,
           PFN_TYPE Pfn)
{
   BOOLEAN IsUsable;
   LARGE_INTEGER BaseAddress;
   LARGE_INTEGER EndAddress;
   ULONG i;
   if (BIOSMemoryMap != NULL && AddressRangeCount > 0)
   {
      IsUsable = FALSE;
      for (i = 0; i < AddressRangeCount; i++)
      {
         BaseAddress.u.LowPart = BIOSMemoryMap[i].BaseAddrLow;
         BaseAddress.u.HighPart = BIOSMemoryMap[i].BaseAddrHigh;
         EndAddress.u.LowPart = BIOSMemoryMap[i].LengthLow;
         EndAddress.u.HighPart = BIOSMemoryMap[i].LengthHigh;
         EndAddress.QuadPart += BaseAddress.QuadPart;
         BaseAddress.QuadPart = PAGE_ROUND_DOWN(BaseAddress.QuadPart);
         EndAddress.QuadPart = PAGE_ROUND_UP(EndAddress.QuadPart);

         if ((BaseAddress.QuadPart >> PAGE_SHIFT) <= Pfn &&
             Pfn < (EndAddress.QuadPart >> PAGE_SHIFT))
         {
            if (BIOSMemoryMap[i].Type == 1)
            {
               IsUsable = TRUE;
            }
            else
            {
               return FALSE;
            }
         }
      }
      return IsUsable;
   }
   return TRUE;
}


PVOID INIT_FUNCTION
MmInitializePageList(ULONG_PTR FirstPhysKernelAddress,
                     ULONG_PTR LastPhysKernelAddress,
                     ULONG MemorySizeInPages,
                     ULONG_PTR LastKernelAddress,
                     PADDRESS_RANGE BIOSMemoryMap,
                     ULONG AddressRangeCount)
/*
 * FUNCTION: Initializes the page list with all pages free
 * except those known to be reserved and those used by the kernel
 * ARGUMENTS:
 *         FirstPhysKernelAddress = First physical address used by the kernel
 *         LastPhysKernelAddress = Last physical address used by the kernel
 */
{
   ULONG i;
   ULONG Reserved;
   NTSTATUS Status;
   PFN_TYPE LastPage;
   PFN_TYPE FirstUninitializedPage;

   DPRINT("MmInitializePageList(FirstPhysKernelAddress %x, "
          "LastPhysKernelAddress %x, "
          "MemorySizeInPages %x, LastKernelAddress %x)\n",
          FirstPhysKernelAddress,
          LastPhysKernelAddress,
          MemorySizeInPages,
          LastKernelAddress);

   for (i = 0; i < MC_MAXIMUM; i++)
   {
      InitializeListHead(&UsedPageListHeads[i]);
   }
   KeInitializeSpinLock(&PageListLock);
   InitializeListHead(&FreeUnzeroedPageListHead);
   InitializeListHead(&FreeZeroedPageListHead);
   InitializeListHead(&BiosPageListHead);

   LastKernelAddress = PAGE_ROUND_UP(LastKernelAddress);

   MmPageArraySize = MemorySizeInPages;
   Reserved =
      PAGE_ROUND_UP((MmPageArraySize * sizeof(PHYSICAL_PAGE))) / PAGE_SIZE;
   MmPageArray = (PHYSICAL_PAGE *)LastKernelAddress;

   DPRINT("Reserved %d\n", Reserved);

   LastKernelAddress = PAGE_ROUND_UP(LastKernelAddress);
   LastKernelAddress = ((ULONG_PTR)LastKernelAddress + (Reserved * PAGE_SIZE));
   LastPhysKernelAddress = (ULONG_PTR)PAGE_ROUND_UP(LastPhysKernelAddress);
   LastPhysKernelAddress = (ULONG_PTR)LastPhysKernelAddress + (Reserved * PAGE_SIZE);

   MmStats.NrTotalPages = 0;
   MmStats.NrSystemPages = 0;
   MmStats.NrUserPages = 0;
   MmStats.NrReservedPages = 0;
   MmStats.NrFreePages = 0;
   MmStats.NrLockedPages = 0;

   /* Preinitialize the Balancer because we need some pages for pte's */
   MmInitializeBalancer(MemorySizeInPages, 0);

   FirstUninitializedPage = (ULONG_PTR)LastPhysKernelAddress / PAGE_SIZE;
   LastPage = MmPageArraySize;
   for (i = 0; i < Reserved; i++)
   {
      PVOID Address = (char*)MmPageArray + (i * PAGE_SIZE);
      ULONG j, start, end;
      if (!MmIsPagePresent(NULL, Address))
      {
         PFN_TYPE Pfn;
         Pfn = 0;
         while (Pfn == 0 && LastPage > FirstUninitializedPage)
         {
            /* Allocate the page from the upper end of the RAM */
            if (MiIsPfnRam(BIOSMemoryMap, AddressRangeCount, --LastPage))
            {
               Pfn = LastPage;
            }
         }
         if (Pfn == 0)
         {
            Pfn = MmAllocPage(MC_NPPOOL, 0);
            if (Pfn == 0)
            {
               KEBUGCHECK(0);
            }
         }
         Status = MmCreateVirtualMappingForKernel(Address,
                                                  PAGE_READWRITE,
                                                  &Pfn,
                                                  1);
         if (!NT_SUCCESS(Status))
         {
            DbgPrint("Unable to create virtual mapping\n");
            KEBUGCHECK(0);
         }
      }
      else
      {
         /* Setting the page protection is necessary to set the global bit on IA32 */
         MmSetPageProtect(NULL, Address, PAGE_READWRITE);
      }
      memset(Address, 0, PAGE_SIZE);

      start = ((ULONG_PTR)Address - (ULONG_PTR)MmPageArray) / sizeof(PHYSICAL_PAGE);
      end = ((ULONG_PTR)Address - (ULONG_PTR)MmPageArray + PAGE_SIZE) / sizeof(PHYSICAL_PAGE);

      for (j = start; j < end && j < LastPage; j++)
      {
         if (MiIsPfnRam(BIOSMemoryMap, AddressRangeCount, j))
         {
            if (j == 0)
            {
               /*
                * Page zero is reserved
                */
               MmPageArray[0].Flags.Type = MM_PHYSICAL_PAGE_BIOS;
               MmPageArray[0].Flags.Consumer = MC_NPPOOL;
               MmPageArray[0].Flags.Zero = 0;
               MmPageArray[0].ReferenceCount = 0;
               InsertTailList(&BiosPageListHead,
                              &MmPageArray[0].ListEntry);
               MmStats.NrReservedPages++;
            }
            else if (j == 1)
            {
               /*
                * Page one is reserved for the initial KPCR
                */
               MmPageArray[1].Flags.Type = MM_PHYSICAL_PAGE_BIOS;
               MmPageArray[1].Flags.Consumer = MC_NPPOOL;
               MmPageArray[1].Flags.Zero = 0;
               MmPageArray[1].ReferenceCount = 0;
               InsertTailList(&BiosPageListHead,
                              &MmPageArray[1].ListEntry);
               MmStats.NrReservedPages++;
            }
            /* Protect the Page Directory. This will be changed in r3 */
            else if (j >= (KeLoaderBlock.PageDirectoryStart / PAGE_SIZE) && j < (KeLoaderBlock.PageDirectoryEnd / PAGE_SIZE))
            {
               MmPageArray[j].Flags.Type = MM_PHYSICAL_PAGE_BIOS;
               MmPageArray[j].Flags.Zero = 0;
               MmPageArray[j].Flags.Consumer = MC_NPPOOL;
               MmPageArray[j].ReferenceCount = 1;
               InsertTailList(&BiosPageListHead,
                              &MmPageArray[j].ListEntry);
               MmStats.NrReservedPages++;
            }
            else if (j >= 0xa0000 / PAGE_SIZE && j < 0x100000 / PAGE_SIZE)
            {
               MmPageArray[j].Flags.Type = MM_PHYSICAL_PAGE_BIOS;
               MmPageArray[j].Flags.Zero = 0;
               MmPageArray[j].Flags.Consumer = MC_NPPOOL;
               MmPageArray[j].ReferenceCount = 1;
               InsertTailList(&BiosPageListHead,
                              &MmPageArray[j].ListEntry);
               MmStats.NrReservedPages++;
            }
            else if (j >= (ULONG)FirstPhysKernelAddress/PAGE_SIZE &&
                     j < (ULONG)LastPhysKernelAddress/PAGE_SIZE)
            {
               MmPageArray[j].Flags.Type = MM_PHYSICAL_PAGE_USED;
               MmPageArray[j].Flags.Zero = 0;
               MmPageArray[j].Flags.Consumer = MC_NPPOOL;
               MmPageArray[j].ReferenceCount = 1;
               MmPageArray[j].MapCount = 1;
               InsertTailList(&UsedPageListHeads[MC_NPPOOL],
                              &MmPageArray[j].ListEntry);
               MmStats.NrSystemPages++;
            }
            else
            {
               MmPageArray[j].Flags.Type = MM_PHYSICAL_PAGE_FREE;
               MmPageArray[j].Flags.Zero = 0;
               MmPageArray[j].ReferenceCount = 0;
               InsertTailList(&FreeUnzeroedPageListHead,
                              &MmPageArray[j].ListEntry);
               UnzeroedPageCount++;
               MmStats.NrFreePages++;
            }
         }
         else
         {
            MmPageArray[j].Flags.Type = MM_PHYSICAL_PAGE_BIOS;
            MmPageArray[j].Flags.Consumer = MC_NPPOOL;
            MmPageArray[j].Flags.Zero = 0;
            MmPageArray[j].ReferenceCount = 0;
            InsertTailList(&BiosPageListHead,
                           &MmPageArray[j].ListEntry);
            MmStats.NrReservedPages++;
         }
      }
      FirstUninitializedPage = j;
   }

   /* Add the pages from the upper end to the list */
   for (i = LastPage; i < MmPageArraySize; i++)
   {
      if (MiIsPfnRam(BIOSMemoryMap, AddressRangeCount, i))
      {
         MmPageArray[i].Flags.Type = MM_PHYSICAL_PAGE_USED;
         MmPageArray[i].Flags.Zero = 0;
         MmPageArray[i].Flags.Consumer = MC_NPPOOL;
         MmPageArray[i].ReferenceCount = 1;
         MmPageArray[i].MapCount = 1;
         InsertTailList(&UsedPageListHeads[MC_NPPOOL],
                        &MmPageArray[i].ListEntry);
         MmStats.NrSystemPages++;
      }
      else
      {
         MmPageArray[i].Flags.Type = MM_PHYSICAL_PAGE_BIOS;
         MmPageArray[i].Flags.Consumer = MC_NPPOOL;
         MmPageArray[i].Flags.Zero = 0;
         MmPageArray[i].ReferenceCount = 0;
         InsertTailList(&BiosPageListHead,
                        &MmPageArray[i].ListEntry);
         MmStats.NrReservedPages++;
      }
   }

   KeInitializeEvent(&ZeroPageThreadEvent, NotificationEvent, TRUE);

   MmStats.NrTotalPages = MmStats.NrFreePages + MmStats.NrSystemPages +
                          MmStats.NrReservedPages + MmStats.NrUserPages;
   MmInitializeBalancer(MmStats.NrFreePages, MmStats.NrSystemPages + MmStats.NrReservedPages);
   return((PVOID)LastKernelAddress);
}

VOID
MmSetFlagsPage(PFN_TYPE Pfn, ULONG Flags)
{
   KIRQL oldIrql;

   ASSERT(Pfn < MmPageArraySize);
   KeAcquireSpinLock(&PageListLock, &oldIrql);
   MmPageArray[Pfn].AllFlags = Flags;
   KeReleaseSpinLock(&PageListLock, oldIrql);
}

VOID
MmSetRmapListHeadPage(PFN_TYPE Pfn, struct _MM_RMAP_ENTRY* ListHead)
{
   MmPageArray[Pfn].RmapListHead = ListHead;
}

struct _MM_RMAP_ENTRY*
MmGetRmapListHeadPage(PFN_TYPE Pfn)
{
   return(MmPageArray[Pfn].RmapListHead);
}

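/*
 * FUNCTION: Increment the map count of a page that is being entered into
 * a page table; MmMarkPageUnmapped below is the matching decrement.
 */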
VOID
MmMarkPageMapped(PFN_TYPE Pfn)
{
   KIRQL oldIrql;

   if (Pfn < MmPageArraySize)
   {
      KeAcquireSpinLock(&PageListLock, &oldIrql);
      if (MmPageArray[Pfn].Flags.Type == MM_PHYSICAL_PAGE_FREE)
      {
         DbgPrint("Mapping non-used page\n");
         KEBUGCHECK(0);
      }
      MmPageArray[Pfn].MapCount++;
      KeReleaseSpinLock(&PageListLock, oldIrql);
   }
}

VOID
MmMarkPageUnmapped(PFN_TYPE Pfn)
{
   KIRQL oldIrql;

   if (Pfn < MmPageArraySize)
   {
      KeAcquireSpinLock(&PageListLock, &oldIrql);
      if (MmPageArray[Pfn].Flags.Type == MM_PHYSICAL_PAGE_FREE)
      {
         DbgPrint("Unmapping non-used page\n");
         KEBUGCHECK(0);
      }
      if (MmPageArray[Pfn].MapCount == 0)
      {
         DbgPrint("Unmapping page that is not mapped\n");
         KEBUGCHECK(0);
      }
      MmPageArray[Pfn].MapCount--;
      KeReleaseSpinLock(&PageListLock, oldIrql);
   }
}

ULONG
MmGetFlagsPage(PFN_TYPE Pfn)
{
   KIRQL oldIrql;
   ULONG Flags;

   ASSERT(Pfn < MmPageArraySize);
   KeAcquireSpinLock(&PageListLock, &oldIrql);
   Flags = MmPageArray[Pfn].AllFlags;
   KeReleaseSpinLock(&PageListLock, oldIrql);

   return(Flags);
}


VOID
MmSetSavedSwapEntryPage(PFN_TYPE Pfn, SWAPENTRY SavedSwapEntry)
{
   KIRQL oldIrql;

   ASSERT(Pfn < MmPageArraySize);
   KeAcquireSpinLock(&PageListLock, &oldIrql);
   MmPageArray[Pfn].SavedSwapEntry = SavedSwapEntry;
   KeReleaseSpinLock(&PageListLock, oldIrql);
}

SWAPENTRY
MmGetSavedSwapEntryPage(PFN_TYPE Pfn)
{
   SWAPENTRY SavedSwapEntry;
   KIRQL oldIrql;

   ASSERT(Pfn < MmPageArraySize);
   KeAcquireSpinLock(&PageListLock, &oldIrql);
   SavedSwapEntry = MmPageArray[Pfn].SavedSwapEntry;
   KeReleaseSpinLock(&PageListLock, oldIrql);

   return(SavedSwapEntry);
}

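/*
 * FUNCTION: Increment the reference count of a used page. The Unsafe
 * variant silently ignores PFNs outside the page array; MmReferencePage
 * bugchecks on them instead.
 */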
VOID
MmReferencePageUnsafe(PFN_TYPE Pfn)
{
   KIRQL oldIrql;

   DPRINT("MmReferencePageUnsafe(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);

   if (Pfn == 0 || Pfn >= MmPageArraySize)
   {
      return;
   }

   KeAcquireSpinLock(&PageListLock, &oldIrql);

   if (MmPageArray[Pfn].Flags.Type != MM_PHYSICAL_PAGE_USED)
   {
      DbgPrint("Referencing non-used page\n");
      KEBUGCHECK(0);
   }

   MmPageArray[Pfn].ReferenceCount++;
   KeReleaseSpinLock(&PageListLock, oldIrql);
}

VOID
MmReferencePage(PFN_TYPE Pfn)
{
   DPRINT("MmReferencePage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);

   if (Pfn == 0 || Pfn >= MmPageArraySize)
   {
      KEBUGCHECK(0);
   }

   MmReferencePageUnsafe(Pfn);
}

ULONG
MmGetReferenceCountPage(PFN_TYPE Pfn)
{
   KIRQL oldIrql;
   ULONG RCount;

   DPRINT("MmGetReferenceCountPage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);

   if (Pfn == 0 || Pfn >= MmPageArraySize)
   {
      KEBUGCHECK(0);
   }

   KeAcquireSpinLock(&PageListLock, &oldIrql);

   if (MmPageArray[Pfn].Flags.Type != MM_PHYSICAL_PAGE_USED)
   {
      DbgPrint("Getting reference count for free page\n");
      KEBUGCHECK(0);
   }

   RCount = MmPageArray[Pfn].ReferenceCount;

   KeReleaseSpinLock(&PageListLock, oldIrql);
   return(RCount);
}

BOOLEAN
MmIsUsablePage(PFN_TYPE Pfn)
{

   DPRINT("MmIsUsablePage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);

   if (Pfn == 0 || Pfn >= MmPageArraySize)
   {
      KEBUGCHECK(0);
   }

   if (MmPageArray[Pfn].Flags.Type != MM_PHYSICAL_PAGE_USED &&
       MmPageArray[Pfn].Flags.Type != MM_PHYSICAL_PAGE_BIOS)
   {
      return(FALSE);
   }

   return(TRUE);
}

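/*
 * FUNCTION: Drop one reference on a used page. When the count reaches
 * zero the page must be unmapped, unlocked and free of rmap and swap
 * entries; it is then returned to the unzeroed free list, and the zero
 * page thread is woken once enough unzeroed pages have accumulated.
 */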
VOID
MmDereferencePage(PFN_TYPE Pfn)
{
   KIRQL oldIrql;

   DPRINT("MmDereferencePage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);

   if (Pfn == 0 || Pfn >= MmPageArraySize)
   {
      KEBUGCHECK(0);
   }

   KeAcquireSpinLock(&PageListLock, &oldIrql);

   if (MmPageArray[Pfn].Flags.Type != MM_PHYSICAL_PAGE_USED)
   {
      DbgPrint("Dereferencing free page\n");
      KEBUGCHECK(0);
   }
   if (MmPageArray[Pfn].ReferenceCount == 0)
   {
      DbgPrint("Dereferencing page with reference count 0\n");
      KEBUGCHECK(0);
   }

   MmPageArray[Pfn].ReferenceCount--;
   if (MmPageArray[Pfn].ReferenceCount == 0)
   {
      MmStats.NrFreePages++;
      MmStats.NrSystemPages--;
      RemoveEntryList(&MmPageArray[Pfn].ListEntry);
      if (MmPageArray[Pfn].RmapListHead != NULL)
      {
         DbgPrint("Freeing page with rmap entries.\n");
         KEBUGCHECK(0);
      }
      if (MmPageArray[Pfn].MapCount != 0)
      {
         DbgPrint("Freeing mapped page (0x%x count %d)\n",
                  Pfn << PAGE_SHIFT, MmPageArray[Pfn].MapCount);
         KEBUGCHECK(0);
      }
      if (MmPageArray[Pfn].LockCount > 0)
      {
         DbgPrint("Freeing locked page\n");
         KEBUGCHECK(0);
      }
      if (MmPageArray[Pfn].SavedSwapEntry != 0)
      {
         DbgPrint("Freeing page with swap entry.\n");
         KEBUGCHECK(0);
      }
      if (MmPageArray[Pfn].Flags.Type != MM_PHYSICAL_PAGE_USED)
      {
         DbgPrint("Freeing page with flags %x\n",
                  MmPageArray[Pfn].Flags.Type);
         KEBUGCHECK(0);
      }
      MmPageArray[Pfn].Flags.Type = MM_PHYSICAL_PAGE_FREE;
      MmPageArray[Pfn].Flags.Consumer = MC_MAXIMUM;
      InsertTailList(&FreeUnzeroedPageListHead,
                     &MmPageArray[Pfn].ListEntry);
      UnzeroedPageCount++;
      if (UnzeroedPageCount > 8 && 0 == KeReadStateEvent(&ZeroPageThreadEvent))
      {
         KeSetEvent(&ZeroPageThreadEvent, IO_NO_INCREMENT, FALSE);
      }
   }
   KeReleaseSpinLock(&PageListLock, oldIrql);
}

ULONG
MmGetLockCountPage(PFN_TYPE Pfn)
{
   KIRQL oldIrql;
   ULONG LockCount;

   DPRINT("MmGetLockCountPage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);

   if (Pfn == 0 || Pfn >= MmPageArraySize)
   {
      KEBUGCHECK(0);
   }

   KeAcquireSpinLock(&PageListLock, &oldIrql);

   if (MmPageArray[Pfn].Flags.Type != MM_PHYSICAL_PAGE_USED)
   {
      DbgPrint("Getting lock count for free page\n");
      KEBUGCHECK(0);
   }

   LockCount = MmPageArray[Pfn].LockCount;
   KeReleaseSpinLock(&PageListLock, oldIrql);

   return(LockCount);
}

VOID
MmLockPageUnsafe(PFN_TYPE Pfn)
{
   KIRQL oldIrql;

   DPRINT("MmLockPageUnsafe(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);

   if (Pfn == 0 || Pfn >= MmPageArraySize)
   {
      return;
   }

   KeAcquireSpinLock(&PageListLock, &oldIrql);

   if (MmPageArray[Pfn].Flags.Type != MM_PHYSICAL_PAGE_USED)
   {
      DbgPrint("Locking free page\n");
      KEBUGCHECK(0);
   }

   MmPageArray[Pfn].LockCount++;
   KeReleaseSpinLock(&PageListLock, oldIrql);
}

VOID
MmLockPage(PFN_TYPE Pfn)
{
   DPRINT("MmLockPage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);

   if (Pfn == 0 || Pfn >= MmPageArraySize)
   {
      KEBUGCHECK(0);
   }

   MmLockPageUnsafe(Pfn);
}

VOID
MmUnlockPage(PFN_TYPE Pfn)
{
   KIRQL oldIrql;

   DPRINT("MmUnlockPage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);

   if (Pfn == 0 || Pfn >= MmPageArraySize)
   {
      KEBUGCHECK(0);
   }

   KeAcquireSpinLock(&PageListLock, &oldIrql);

   if (MmPageArray[Pfn].Flags.Type != MM_PHYSICAL_PAGE_USED)
   {
      DbgPrint("Unlocking free page\n");
      KEBUGCHECK(0);
   }

   MmPageArray[Pfn].LockCount--;
   KeReleaseSpinLock(&PageListLock, oldIrql);
}

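/*
 * FUNCTION: Allocate a single physical page for the given consumer,
 * preferring the zeroed free list and falling back to the unzeroed list
 * (in which case the page is zeroed here). Returns the PFN, or 0 if no
 * free pages are available.
 */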
PFN_TYPE
MmAllocPage(ULONG Consumer, SWAPENTRY SavedSwapEntry)
{
   PFN_TYPE PfnOffset;
   PLIST_ENTRY ListEntry;
   PPHYSICAL_PAGE PageDescriptor;
   KIRQL oldIrql;
   BOOLEAN NeedClear = FALSE;

   DPRINT("MmAllocPage()\n");

   KeAcquireSpinLock(&PageListLock, &oldIrql);
   if (IsListEmpty(&FreeZeroedPageListHead))
   {
      if (IsListEmpty(&FreeUnzeroedPageListHead))
      {
         DPRINT1("MmAllocPage(): Out of memory\n");
         KeReleaseSpinLock(&PageListLock, oldIrql);
         return 0;
      }
      ListEntry = RemoveTailList(&FreeUnzeroedPageListHead);
      UnzeroedPageCount--;

      PageDescriptor = CONTAINING_RECORD(ListEntry, PHYSICAL_PAGE, ListEntry);

      NeedClear = TRUE;
   }
   else
   {
      ListEntry = RemoveTailList(&FreeZeroedPageListHead);

      PageDescriptor = CONTAINING_RECORD(ListEntry, PHYSICAL_PAGE, ListEntry);
   }

   if (PageDescriptor->Flags.Type != MM_PHYSICAL_PAGE_FREE)
   {
      DbgPrint("Got non-free page from freelist\n");
      KEBUGCHECK(0);
   }
   if (PageDescriptor->MapCount != 0)
   {
      DbgPrint("Got mapped page from freelist\n");
      KEBUGCHECK(0);
   }
   if (PageDescriptor->ReferenceCount != 0)
   {
      DPRINT1("%d\n", PageDescriptor->ReferenceCount);
      KEBUGCHECK(0);
   }
   PageDescriptor->Flags.Type = MM_PHYSICAL_PAGE_USED;
   PageDescriptor->Flags.Consumer = Consumer;
   PageDescriptor->ReferenceCount = 1;
   PageDescriptor->LockCount = 0;
   PageDescriptor->MapCount = 0;
   PageDescriptor->SavedSwapEntry = SavedSwapEntry;
   InsertTailList(&UsedPageListHeads[Consumer], ListEntry);

   MmStats.NrSystemPages++;
   MmStats.NrFreePages--;

   KeReleaseSpinLock(&PageListLock, oldIrql);

   PfnOffset = PageDescriptor - MmPageArray;
   if (NeedClear)
   {
      MiZeroPage(PfnOffset);
   }
   if (PageDescriptor->MapCount != 0)
   {
      DbgPrint("Returning mapped page.\n");
      KEBUGCHECK(0);
   }
   return PfnOffset;
}

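/*
 * FUNCTION: Allocate up to NumberOfPages single pages whose physical
 * addresses fall between LowestAddress and HighestAddress, storing the
 * PFNs in Pages. The pages need not be contiguous. Returns the number
 * of pages actually allocated, or -1 if none could be allocated.
 */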
LONG
MmAllocPagesSpecifyRange(ULONG Consumer,
                         PHYSICAL_ADDRESS LowestAddress,
                         PHYSICAL_ADDRESS HighestAddress,
                         ULONG NumberOfPages,
                         PPFN_TYPE Pages)
{
   PPHYSICAL_PAGE PageDescriptor;
   KIRQL oldIrql;
   PFN_TYPE LowestPage, HighestPage;
   PFN_TYPE pfn;
   ULONG NumberOfPagesFound = 0;
   ULONG i;

   DPRINT("MmAllocPagesSpecifyRange()\n"
          "    LowestAddress = 0x%08x%08x\n"
          "    HighestAddress = 0x%08x%08x\n"
          "    NumberOfPages = %d\n",
          LowestAddress.u.HighPart, LowestAddress.u.LowPart,
          HighestAddress.u.HighPart, HighestAddress.u.LowPart,
          NumberOfPages);

   if (NumberOfPages == 0)
      return 0;

   LowestPage = LowestAddress.QuadPart / PAGE_SIZE;
   HighestPage = HighestAddress.QuadPart / PAGE_SIZE;
   if ((HighestAddress.u.LowPart % PAGE_SIZE) != 0)
      HighestPage++;

   if (LowestPage >= MmPageArraySize)
   {
      DPRINT1("MmAllocPagesSpecifyRange(): Out of memory\n");
      return -1;
   }
   if (HighestPage > MmPageArraySize)
      HighestPage = MmPageArraySize;

   KeAcquireSpinLock(&PageListLock, &oldIrql);
   if (LowestPage == 0 && HighestPage == MmPageArraySize)
   {
      PLIST_ENTRY ListEntry;
      while (NumberOfPagesFound < NumberOfPages)
      {
         if (!IsListEmpty(&FreeZeroedPageListHead))
         {
            ListEntry = RemoveTailList(&FreeZeroedPageListHead);
         }
         else if (!IsListEmpty(&FreeUnzeroedPageListHead))
         {
            ListEntry = RemoveTailList(&FreeUnzeroedPageListHead);
            UnzeroedPageCount--;
         }
         else
         {
            if (NumberOfPagesFound == 0)
            {
               KeReleaseSpinLock(&PageListLock, oldIrql);
               DPRINT1("MmAllocPagesSpecifyRange(): Out of memory\n");
               return -1;
            }
            else
            {
               break;
            }
         }
         PageDescriptor = CONTAINING_RECORD(ListEntry, PHYSICAL_PAGE, ListEntry);

         ASSERT(PageDescriptor->Flags.Type == MM_PHYSICAL_PAGE_FREE);
         ASSERT(PageDescriptor->MapCount == 0);
         ASSERT(PageDescriptor->ReferenceCount == 0);

         /* Allocate the page */
         PageDescriptor->Flags.Type = MM_PHYSICAL_PAGE_USED;
         PageDescriptor->Flags.Consumer = Consumer;
         PageDescriptor->ReferenceCount = 1;
         PageDescriptor->LockCount = 0;
         PageDescriptor->MapCount = 0;
         PageDescriptor->SavedSwapEntry = 0; /* FIXME: Do we need swap entries? */
         InsertTailList(&UsedPageListHeads[Consumer], &PageDescriptor->ListEntry);

         MmStats.NrSystemPages++;
         MmStats.NrFreePages--;

         /* Remember the page */
         pfn = PageDescriptor - MmPageArray;
         Pages[NumberOfPagesFound++] = pfn;
      }
   }
   else
   {
      INT LookForZeroedPages;
      for (LookForZeroedPages = 1; LookForZeroedPages >= 0; LookForZeroedPages--)
      {
         for (pfn = LowestPage; pfn < HighestPage; pfn++)
         {
            PageDescriptor = MmPageArray + pfn;

            if (PageDescriptor->Flags.Type != MM_PHYSICAL_PAGE_FREE)
               continue;
            if (PageDescriptor->Flags.Zero != LookForZeroedPages)
               continue;

            ASSERT(PageDescriptor->MapCount == 0);
            ASSERT(PageDescriptor->ReferenceCount == 0);

            /* Allocate the page */
            PageDescriptor->Flags.Type = MM_PHYSICAL_PAGE_USED;
            PageDescriptor->Flags.Consumer = Consumer;
            PageDescriptor->ReferenceCount = 1;
            PageDescriptor->LockCount = 0;
            PageDescriptor->MapCount = 0;
            PageDescriptor->SavedSwapEntry = 0; /* FIXME: Do we need swap entries? */
            RemoveEntryList(&PageDescriptor->ListEntry);
            InsertTailList(&UsedPageListHeads[Consumer], &PageDescriptor->ListEntry);

            if (!PageDescriptor->Flags.Zero)
               UnzeroedPageCount--;
            MmStats.NrSystemPages++;
            MmStats.NrFreePages--;

            /* Remember the page */
            Pages[NumberOfPagesFound++] = pfn;
            if (NumberOfPagesFound == NumberOfPages)
               break;
         }
         if (NumberOfPagesFound == NumberOfPages)
            break;
      }
   }
   KeReleaseSpinLock(&PageListLock, oldIrql);

   /* Zero unzero-ed pages */
   for (i = 0; i < NumberOfPagesFound; i++)
   {
      pfn = Pages[i];
      if (MmPageArray[pfn].Flags.Zero == 0)
      {
         MiZeroPage(pfn);
      }
      else
      {
         MmPageArray[pfn].Flags.Zero = 0;
      }
   }

   return NumberOfPagesFound;
}

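/*
 * FUNCTION: Body of the zero page thread. Waits on ZeroPageThreadEvent,
 * then drains the unzeroed free list, zeroing each page and moving it to
 * the zeroed free list, until asked to terminate.
 */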
VOID STDCALL
MmZeroPageThreadMain(PVOID Ignored)
{
   NTSTATUS Status;
   KIRQL oldIrql;
   PLIST_ENTRY ListEntry;
   PPHYSICAL_PAGE PageDescriptor;
   PFN_TYPE Pfn;
   ULONG Count;

   while(1)
   {
      Status = KeWaitForSingleObject(&ZeroPageThreadEvent,
                                     0,
                                     KernelMode,
                                     FALSE,
                                     NULL);
      if (!NT_SUCCESS(Status))
      {
         DbgPrint("ZeroPageThread: Wait failed\n");
         KEBUGCHECK(0);
         return;
      }

      if (ZeroPageThreadShouldTerminate)
      {
         DbgPrint("ZeroPageThread: Terminating\n");
         return;
      }
      Count = 0;
      KeAcquireSpinLock(&PageListLock, &oldIrql);
      while (!IsListEmpty(&FreeUnzeroedPageListHead))
      {
         ListEntry = RemoveTailList(&FreeUnzeroedPageListHead);
         UnzeroedPageCount--;
         PageDescriptor = CONTAINING_RECORD(ListEntry, PHYSICAL_PAGE, ListEntry);
         /* We set the page to used, because MmCreateVirtualMapping failed with unused pages */
         PageDescriptor->Flags.Type = MM_PHYSICAL_PAGE_USED;
         KeReleaseSpinLock(&PageListLock, oldIrql);
         Pfn = PageDescriptor - MmPageArray;
         Status = MiZeroPage(Pfn);

         KeAcquireSpinLock(&PageListLock, &oldIrql);
         if (PageDescriptor->MapCount != 0)
         {
            DbgPrint("Mapped page on freelist.\n");
            KEBUGCHECK(0);
         }
         PageDescriptor->Flags.Zero = 1;
         PageDescriptor->Flags.Type = MM_PHYSICAL_PAGE_FREE;
         if (NT_SUCCESS(Status))
         {
            InsertHeadList(&FreeZeroedPageListHead, ListEntry);
            Count++;
         }
         else
         {
            InsertHeadList(&FreeUnzeroedPageListHead, ListEntry);
            UnzeroedPageCount++;
         }
      }
      DPRINT("Zeroed %d pages.\n", Count);
      KeResetEvent(&ZeroPageThreadEvent);
      KeReleaseSpinLock(&PageListLock, oldIrql);
   }
}

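/*
 * FUNCTION: Create and start the zero page system thread at low priority
 * and keep a referenced pointer to it in ZeroPageThread.
 */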
NTSTATUS INIT_FUNCTION
MmInitZeroPageThread(VOID)
{
   NTSTATUS Status;
   HANDLE ThreadHandle;

   ZeroPageThreadShouldTerminate = FALSE;
   Status = PsCreateSystemThread(&ThreadHandle,
                                 THREAD_ALL_ACCESS,
                                 NULL,
                                 NULL,
                                 &ZeroPageThreadId,
                                 MmZeroPageThreadMain,
                                 NULL);
   if (!NT_SUCCESS(Status))
   {
      KEBUGCHECK(0);
   }

   Status = ObReferenceObjectByHandle(ThreadHandle,
                                      THREAD_ALL_ACCESS,
                                      PsThreadType,
                                      KernelMode,
                                      (PVOID*)&ZeroPageThread,
                                      NULL);
   if (!NT_SUCCESS(Status))
   {
      KEBUGCHECK(0);
   }

   KeSetPriorityThread(&ZeroPageThread->Tcb, LOW_PRIORITY);
   NtClose(ThreadHandle);
   return STATUS_SUCCESS;
}

/* EOF */