- Fix compile issues caused by previous patch.
[reactos.git] / reactos / ntoskrnl / mm / freelist.c
1 /* $Id$
2 *
3 * COPYRIGHT: See COPYING in the top level directory
4 * PROJECT: ReactOS kernel
5 * FILE: ntoskrnl/mm/freelist.c
6 * PURPOSE: Handle the list of free physical pages
7 *
8 * PROGRAMMERS: David Welch (welch@cwcom.net)
9 * Robert Bergkvist
10 */
11
12 /* INCLUDES ****************************************************************/
13
14 #include <ntoskrnl.h>
15 #define NDEBUG
16 #include <internal/debug.h>
17
18 /* TYPES *******************************************************************/
19
/* Values for PHYSICAL_PAGE.Flags.Type (a 2-bit field, so 0..3 only). */
#define MM_PHYSICAL_PAGE_FREE (0x1)  /* page is on one of the free lists */
#define MM_PHYSICAL_PAGE_USED (0x2)  /* page is allocated to a consumer */
#define MM_PHYSICAL_PAGE_BIOS (0x3)  /* page is reserved (BIOS/non-RAM/boot structures) */

/* Per-page-frame descriptor; MmPageArray holds one of these for every
 * physical page in the system, indexed by PFN. */
typedef struct _PHYSICAL_PAGE
{
   union
   {
      struct
      {
         ULONG Type: 2;      /* one of MM_PHYSICAL_PAGE_* */
         ULONG Consumer: 3;  /* MC_* tag of the owner; NOTE(review): assumes
                              * all MC_* values (incl. MC_MAXIMUM) fit in
                              * 3 bits — confirm against mm.h */
         ULONG Zero: 1;      /* set when the page contents are known zero */
      }
      Flags;
      ULONG AllFlags;        /* raw view used by Mm{Get,Set}FlagsPage */
   };

   LIST_ENTRY ListEntry;     /* link in the used/free/bios list the page is on */
   ULONG ReferenceCount;     /* owner count; page returns to free list at 0 */
   SWAPENTRY SavedSwapEntry; /* backing swap slot, 0 if none */
   ULONG LockCount;          /* MmLockPage/MmUnlockPage nesting count */
   ULONG MapCount;           /* number of virtual mappings of this frame */
   struct _MM_RMAP_ENTRY* RmapListHead; /* reverse-map list for this frame */
}
PHYSICAL_PAGE, *PPHYSICAL_PAGE;
46
47
48 /* GLOBALS ****************************************************************/
49
/* Descriptor array covering every physical page; MmPageArraySize entries. */
static PPHYSICAL_PAGE MmPageArray;
ULONG MmPageArraySize;

/* PageListLock protects MmPageArray descriptor contents and all of the
 * page lists below. */
static KSPIN_LOCK PageListLock;
static LIST_ENTRY UsedPageListHeads[MC_MAXIMUM]; /* in-use pages, one list per consumer */
static LIST_ENTRY FreeZeroedPageListHead;        /* free pages already zero-filled */
static LIST_ENTRY FreeUnzeroedPageListHead;      /* free pages with stale contents */
static LIST_ENTRY BiosPageListHead;              /* reserved / non-RAM pages */

/* State for the background thread that zeroes free pages. */
static PETHREAD ZeroPageThread;
static CLIENT_ID ZeroPageThreadId;
static KEVENT ZeroPageThreadEvent;
static BOOLEAN ZeroPageThreadShouldTerminate = FALSE;

/* Number of pages currently on FreeUnzeroedPageListHead; used to decide
 * when to wake the zero-page thread. */
static ULONG UnzeroedPageCount = 0;
65
66 /* FUNCTIONS *************************************************************/
67
/*
 * Re-tag a used, singly-referenced, unmapped page from its current consumer
 * to NewConsumer and move it onto that consumer's used list.
 * Bugchecks if the page is mapped, not in the USED state, or has a
 * reference count other than 1.
 */
VOID
NTAPI
MmTransferOwnershipPage(PFN_TYPE Pfn, ULONG NewConsumer)
{
   KIRQL oldIrql;

   KeAcquireSpinLock(&PageListLock, &oldIrql);
   if (MmPageArray[Pfn].MapCount != 0)
   {
      DbgPrint("Transfering mapped page.\n");
      KEBUGCHECK(0);
   }
   if (MmPageArray[Pfn].Flags.Type != MM_PHYSICAL_PAGE_USED)
   {
      DPRINT1("Type: %d\n", MmPageArray[Pfn].Flags.Type);
      KEBUGCHECK(0);
   }
   if (MmPageArray[Pfn].ReferenceCount != 1)
   {
      DPRINT1("ReferenceCount: %d\n", MmPageArray[Pfn].ReferenceCount);
      KEBUGCHECK(0);
   }
   /* Unlink from the old consumer's used list and append to the new one. */
   RemoveEntryList(&MmPageArray[Pfn].ListEntry);
   InsertTailList(&UsedPageListHeads[NewConsumer],
                  &MmPageArray[Pfn].ListEntry);
   MmPageArray[Pfn].Flags.Consumer = NewConsumer;
   KeReleaseSpinLock(&PageListLock, oldIrql);
   /* NOTE(review): zeroing here destroys the page's previous contents right
    * after the ownership transfer — confirm all callers expect a scrubbed
    * page rather than a content-preserving transfer. */
   MiZeroPage(Pfn);
}
97
98 PFN_TYPE
99 NTAPI
100 MmGetLRUFirstUserPage(VOID)
101 {
102 PLIST_ENTRY NextListEntry;
103 PHYSICAL_PAGE* PageDescriptor;
104 KIRQL oldIrql;
105
106 KeAcquireSpinLock(&PageListLock, &oldIrql);
107 NextListEntry = UsedPageListHeads[MC_USER].Flink;
108 if (NextListEntry == &UsedPageListHeads[MC_USER])
109 {
110 KeReleaseSpinLock(&PageListLock, oldIrql);
111 return 0;
112 }
113 PageDescriptor = CONTAINING_RECORD(NextListEntry, PHYSICAL_PAGE, ListEntry);
114 KeReleaseSpinLock(&PageListLock, oldIrql);
115 return PageDescriptor - MmPageArray;
116 }
117
118 VOID
119 NTAPI
120 MmSetLRULastPage(PFN_TYPE Pfn)
121 {
122 KIRQL oldIrql;
123
124 ASSERT(Pfn < MmPageArraySize);
125 KeAcquireSpinLock(&PageListLock, &oldIrql);
126 if (MmPageArray[Pfn].Flags.Type == MM_PHYSICAL_PAGE_USED &&
127 MmPageArray[Pfn].Flags.Consumer == MC_USER)
128 {
129 RemoveEntryList(&MmPageArray[Pfn].ListEntry);
130 InsertTailList(&UsedPageListHeads[MC_USER],
131 &MmPageArray[Pfn].ListEntry);
132 }
133 KeReleaseSpinLock(&PageListLock, oldIrql);
134 }
135
136 PFN_TYPE
137 NTAPI
138 MmGetLRUNextUserPage(PFN_TYPE PreviousPfn)
139 {
140 PLIST_ENTRY NextListEntry;
141 PHYSICAL_PAGE* PageDescriptor;
142 KIRQL oldIrql;
143
144 KeAcquireSpinLock(&PageListLock, &oldIrql);
145 if (MmPageArray[PreviousPfn].Flags.Type != MM_PHYSICAL_PAGE_USED ||
146 MmPageArray[PreviousPfn].Flags.Consumer != MC_USER)
147 {
148 NextListEntry = UsedPageListHeads[MC_USER].Flink;
149 }
150 else
151 {
152 NextListEntry = MmPageArray[PreviousPfn].ListEntry.Flink;
153 }
154 if (NextListEntry == &UsedPageListHeads[MC_USER])
155 {
156 KeReleaseSpinLock(&PageListLock, oldIrql);
157 return 0;
158 }
159 PageDescriptor = CONTAINING_RECORD(NextListEntry, PHYSICAL_PAGE, ListEntry);
160 KeReleaseSpinLock(&PageListLock, oldIrql);
161 return PageDescriptor - MmPageArray;
162 }
163
/*
 * Find and allocate a physically contiguous, zeroed run of pages for the
 * nonpaged pool, honouring the caller's lowest/highest physical address
 * limits and an optional boundary multiple the run must not cross.
 * Returns the first PFN of the run, or 0 on failure.
 */
PFN_TYPE
NTAPI
MmGetContinuousPages(ULONG NumberOfBytes,
                     PHYSICAL_ADDRESS LowestAcceptableAddress,
                     PHYSICAL_ADDRESS HighestAcceptableAddress,
                     PHYSICAL_ADDRESS BoundaryAddressMultiple)
{
   ULONG NrPages;
   ULONG i, j;
   ULONG start;
   ULONG last;
   ULONG length;
   ULONG boundary;
   KIRQL oldIrql;

   NrPages = PAGE_ROUND_UP(NumberOfBytes) / PAGE_SIZE;

   KeAcquireSpinLock(&PageListLock, &oldIrql);

   /* Clamp the scan to the descriptor array and convert limits to PFNs. */
   last = min(HighestAcceptableAddress.QuadPart / PAGE_SIZE, MmPageArraySize - 1);
   boundary = BoundaryAddressMultiple.QuadPart / PAGE_SIZE;

   for (j = 0; j < 2; j++)
   {
      start = -1;
      length = 0;
      /* First try to allocate the pages above the 16MB area. This may fail
       * because there are not enough continuous pages or we cannot allocate
       * pages above the 16MB area because the caller has specify an upper limit.
       * The second try uses the specified lower limit.
       */
      /* NOTE(review): 0x100000 is 1MB, not 16MB (0x1000000) — the comment
       * above and the constant disagree; confirm which bound was intended. */
      for (i = j == 0 ? 0x100000 / PAGE_SIZE : LowestAcceptableAddress.QuadPart / PAGE_SIZE; i <= last; )
      {
         if (MmPageArray[i].Flags.Type == MM_PHYSICAL_PAGE_FREE)
         {
            if (start == (ULONG)-1)
            {
               /* Begin a new candidate run at this free page. */
               start = i;
               length = 1;
            }
            else
            {
               length++;
               if (boundary)
               {
                  /* Restart the run when it would straddle a boundary
                   * multiple. */
                  if (start / boundary != i / boundary)
                  {
                     start = i;
                     length = 1;
                  }
               }
            }
            if (length == NrPages)
            {
               break;
            }
         }
         else
         {
            /* A non-free page breaks the run. */
            start = (ULONG)-1;
         }
         i++;
      }

      if (start != (ULONG)-1 && length == NrPages)
      {
         /* Claim every page in the run for the nonpaged pool. */
         for (i = start; i < (start + length); i++)
         {
            RemoveEntryList(&MmPageArray[i].ListEntry);
            if (MmPageArray[i].Flags.Zero == 0)
            {
               UnzeroedPageCount--;
            }
            MmStats.NrFreePages--;
            MmStats.NrSystemPages++;
            MmPageArray[i].Flags.Type = MM_PHYSICAL_PAGE_USED;
            MmPageArray[i].Flags.Consumer = MC_NPPOOL;
            MmPageArray[i].ReferenceCount = 1;
            MmPageArray[i].LockCount = 0;
            MmPageArray[i].MapCount = 0;
            MmPageArray[i].SavedSwapEntry = 0;
            InsertTailList(&UsedPageListHeads[MC_NPPOOL],
                           &MmPageArray[i].ListEntry);
         }
         KeReleaseSpinLock(&PageListLock, oldIrql);
         /* Hand out zeroed memory. NOTE(review): Flags.Zero is read and
          * cleared here after dropping PageListLock — appears safe only
          * because the pages are already USED and owned by us; confirm. */
         for (i = start; i < (start + length); i++)
         {
            if (MmPageArray[i].Flags.Zero == 0)
            {
               MiZeroPage(i);
            }
            else
            {
               MmPageArray[i].Flags.Zero = 0;
            }
         }
         return start;
      }
   }
   KeReleaseSpinLock(&PageListLock, oldIrql);
   return 0;
}
266
267
268 BOOLEAN
269 NTAPI
270 MiIsPfnRam(PADDRESS_RANGE BIOSMemoryMap,
271 ULONG AddressRangeCount,
272 PFN_TYPE Pfn)
273 {
274 BOOLEAN IsUsable;
275 LARGE_INTEGER BaseAddress;
276 LARGE_INTEGER EndAddress;
277 ULONG i;
278 if (BIOSMemoryMap != NULL && AddressRangeCount > 0)
279 {
280 IsUsable = FALSE;
281 for (i = 0; i < AddressRangeCount; i++)
282 {
283 BaseAddress.u.LowPart = BIOSMemoryMap[i].BaseAddrLow;
284 BaseAddress.u.HighPart = BIOSMemoryMap[i].BaseAddrHigh;
285 EndAddress.u.LowPart = BIOSMemoryMap[i].LengthLow;
286 EndAddress.u.HighPart = BIOSMemoryMap[i].LengthHigh;
287 EndAddress.QuadPart += BaseAddress.QuadPart;
288 BaseAddress.QuadPart = PAGE_ROUND_DOWN(BaseAddress.QuadPart);
289 EndAddress.QuadPart = PAGE_ROUND_UP(EndAddress.QuadPart);
290
291 if ((BaseAddress.QuadPart >> PAGE_SHIFT) <= Pfn &&
292 Pfn < (EndAddress.QuadPart >> PAGE_SHIFT))
293 {
294 if (BIOSMemoryMap[i].Type == 1)
295 {
296 IsUsable = TRUE;
297 }
298 else
299 {
300 return FALSE;
301 }
302 }
303 }
304 return IsUsable;
305 }
306 return TRUE;
307 }
308
309
PVOID
INIT_FUNCTION
NTAPI
MmInitializePageList(ULONG_PTR FirstPhysKernelAddress,
                     ULONG_PTR LastPhysKernelAddress,
                     ULONG MemorySizeInPages,
                     ULONG_PTR LastKernelAddress,
                     PADDRESS_RANGE BIOSMemoryMap,
                     ULONG AddressRangeCount)
/*
 * FUNCTION: Initializes the page list with all pages free
 * except those known to be reserved and those used by the kernel
 * ARGUMENTS:
 *         FirstKernelAddress = First physical address used by the kernel
 *         LastKernelAddress = Last physical address used by the kernel
 *         MemorySizeInPages = Total number of page frames to describe
 *         BIOSMemoryMap/AddressRangeCount = E820-style map used to tell
 *             RAM from reserved ranges (may be NULL/0)
 * RETURNS: First free kernel virtual address after the descriptor array.
 */
{
   ULONG i;
   ULONG Reserved;
   NTSTATUS Status;
   PFN_TYPE LastPage;
   PFN_TYPE FirstUninitializedPage;

   DPRINT("MmInitializePageList(FirstPhysKernelAddress %x, "
          "LastPhysKernelAddress %x, "
          "MemorySizeInPages %x, LastKernelAddress %x)\n",
          FirstPhysKernelAddress,
          LastPhysKernelAddress,
          MemorySizeInPages,
          LastKernelAddress);

   /* Set up the lock and all of the page lists. */
   for (i = 0; i < MC_MAXIMUM; i++)
   {
      InitializeListHead(&UsedPageListHeads[i]);
   }
   KeInitializeSpinLock(&PageListLock);
   InitializeListHead(&FreeUnzeroedPageListHead);
   InitializeListHead(&FreeZeroedPageListHead);
   InitializeListHead(&BiosPageListHead);

   LastKernelAddress = PAGE_ROUND_UP(LastKernelAddress);

   /* The descriptor array is placed virtually right after the kernel;
    * Reserved is its size in pages. */
   MmPageArraySize = MemorySizeInPages;
   Reserved =
      PAGE_ROUND_UP((MmPageArraySize * sizeof(PHYSICAL_PAGE))) / PAGE_SIZE;
   MmPageArray = (PHYSICAL_PAGE *)LastKernelAddress;

   DPRINT("Reserved %d\n", Reserved);

   /* Advance both the virtual and the physical end-of-kernel addresses
    * past the descriptor array. */
   LastKernelAddress = PAGE_ROUND_UP(LastKernelAddress);
   LastKernelAddress = ((ULONG_PTR)LastKernelAddress + (Reserved * PAGE_SIZE));
   LastPhysKernelAddress = (ULONG_PTR)PAGE_ROUND_UP(LastPhysKernelAddress);
   LastPhysKernelAddress = (ULONG_PTR)LastPhysKernelAddress + (Reserved * PAGE_SIZE);

   MmStats.NrTotalPages = 0;
   MmStats.NrSystemPages = 0;
   MmStats.NrUserPages = 0;
   MmStats.NrReservedPages = 0;
   MmStats.NrFreePages = 0;
   MmStats.NrLockedPages = 0;

   /* Preinitialize the Balancer because we need some pages for pte's */
   MmInitializeBalancer(MemorySizeInPages, 0);

   /* Descriptors below FirstUninitializedPage are filled in as the array
    * pages are mapped; frames taken from the top (>= LastPage) back the
    * array itself and are accounted for separately below. */
   FirstUninitializedPage = (ULONG_PTR)LastPhysKernelAddress / PAGE_SIZE;
   LastPage = MmPageArraySize;
   for (i = 0; i < Reserved; i++)
   {
      PVOID Address = (char*)MmPageArray + (i * PAGE_SIZE);
      ULONG j, start, end;
      if (!MmIsPagePresent(NULL, Address))
      {
         PFN_TYPE Pfn;
         Pfn = 0;
         while (Pfn == 0 && LastPage > FirstUninitializedPage)
         {
            /* Allocate the page from the upper end of the RAM */
            if (MiIsPfnRam(BIOSMemoryMap, AddressRangeCount, --LastPage))
            {
               Pfn = LastPage;
            }
         }
         if (Pfn == 0)
         {
            /* Upper-end frames exhausted: fall back to the normal
             * allocator (usable once enough descriptors exist). */
            Pfn = MmAllocPage(MC_NPPOOL, 0);
            if (Pfn == 0)
            {
               KEBUGCHECK(0);
            }
         }
         Status = MmCreateVirtualMappingForKernel(Address,
                                                  PAGE_READWRITE,
                                                  &Pfn,
                                                  1);
         if (!NT_SUCCESS(Status))
         {
            DbgPrint("Unable to create virtual mapping\n");
            KEBUGCHECK(0);
         }
      }
      else
      {
         /* Setting the page protection is necessary to set the global bit on IA32 */
         MmSetPageProtect(NULL, Address, PAGE_READWRITE);
      }
      memset(Address, 0, PAGE_SIZE);

      /* Range of PFN descriptors that live in this page of the array. */
      start = ((ULONG_PTR)Address - (ULONG_PTR)MmPageArray) / sizeof(PHYSICAL_PAGE);
      end = ((ULONG_PTR)Address - (ULONG_PTR)MmPageArray + PAGE_SIZE) / sizeof(PHYSICAL_PAGE);

      for (j = start; j < end && j < LastPage; j++)
      {
         if (MiIsPfnRam(BIOSMemoryMap, AddressRangeCount, j))
         {
            if (j == 0)
            {
               /*
                * Page zero is reserved
                */
               MmPageArray[0].Flags.Type = MM_PHYSICAL_PAGE_BIOS;
               MmPageArray[0].Flags.Consumer = MC_NPPOOL;
               MmPageArray[0].Flags.Zero = 0;
               MmPageArray[0].ReferenceCount = 0;
               InsertTailList(&BiosPageListHead,
                              &MmPageArray[0].ListEntry);
               MmStats.NrReservedPages++;
            }
            else if (j == 1)
            {

               /*
                * Page one is reserved for the initial KPCR
                */
               MmPageArray[1].Flags.Type = MM_PHYSICAL_PAGE_BIOS;
               MmPageArray[1].Flags.Consumer = MC_NPPOOL;
               MmPageArray[1].Flags.Zero = 0;
               MmPageArray[1].ReferenceCount = 0;
               InsertTailList(&BiosPageListHead,
                              &MmPageArray[1].ListEntry);
               MmStats.NrReservedPages++;
            }
            /* Protect the Page Directory. This will be changed in r3 */
            else if (j >= (KeLoaderBlock.PageDirectoryStart / PAGE_SIZE) && j < (KeLoaderBlock.PageDirectoryEnd / PAGE_SIZE))
            {
               MmPageArray[j].Flags.Type = MM_PHYSICAL_PAGE_BIOS;
               MmPageArray[j].Flags.Zero = 0;
               MmPageArray[j].Flags.Consumer = MC_NPPOOL;
               MmPageArray[j].ReferenceCount = 1;
               InsertTailList(&BiosPageListHead,
                              &MmPageArray[j].ListEntry);
               MmStats.NrReservedPages++;
            }
            else if (j >= 0xa0000 / PAGE_SIZE && j < 0x100000 / PAGE_SIZE)
            {
               /* Legacy VGA/BIOS hole (640KB..1MB): always reserved. */
               MmPageArray[j].Flags.Type = MM_PHYSICAL_PAGE_BIOS;
               MmPageArray[j].Flags.Zero = 0;
               MmPageArray[j].Flags.Consumer = MC_NPPOOL;
               MmPageArray[j].ReferenceCount = 1;
               InsertTailList(&BiosPageListHead,
                              &MmPageArray[j].ListEntry);
               MmStats.NrReservedPages++;
            }
            else if (j >= (ULONG)FirstPhysKernelAddress/PAGE_SIZE &&
                     j < (ULONG)LastPhysKernelAddress/PAGE_SIZE)
            {
               /* Frames backing the kernel image and this array. */
               MmPageArray[j].Flags.Type = MM_PHYSICAL_PAGE_USED;
               MmPageArray[j].Flags.Zero = 0;
               MmPageArray[j].Flags.Consumer = MC_NPPOOL;
               MmPageArray[j].ReferenceCount = 1;
               MmPageArray[j].MapCount = 1;
               InsertTailList(&UsedPageListHeads[MC_NPPOOL],
                              &MmPageArray[j].ListEntry);
               MmStats.NrSystemPages++;
            }
            else
            {
               /* Ordinary RAM: free, contents unknown, so unzeroed. */
               MmPageArray[j].Flags.Type = MM_PHYSICAL_PAGE_FREE;
               MmPageArray[j].Flags.Zero = 0;
               MmPageArray[j].ReferenceCount = 0;
               InsertTailList(&FreeUnzeroedPageListHead,
                              &MmPageArray[j].ListEntry);
               UnzeroedPageCount++;
               MmStats.NrFreePages++;
            }
         }
         else
         {
            /* Not RAM per the BIOS map: permanently reserved. */
            MmPageArray[j].Flags.Type = MM_PHYSICAL_PAGE_BIOS;
            MmPageArray[j].Flags.Consumer = MC_NPPOOL;
            MmPageArray[j].Flags.Zero = 0;
            MmPageArray[j].ReferenceCount = 0;
            InsertTailList(&BiosPageListHead,
                           &MmPageArray[j].ListEntry);
            MmStats.NrReservedPages++;
         }
      }
      FirstUninitializedPage = j;

   }

   /* Add the pages from the upper end to the list */
   for (i = LastPage; i < MmPageArraySize; i++)
   {
      if (MiIsPfnRam(BIOSMemoryMap, AddressRangeCount, i))
      {
         /* These frames were consumed above to back the array itself. */
         MmPageArray[i].Flags.Type = MM_PHYSICAL_PAGE_USED;
         MmPageArray[i].Flags.Zero = 0;
         MmPageArray[i].Flags.Consumer = MC_NPPOOL;
         MmPageArray[i].ReferenceCount = 1;
         MmPageArray[i].MapCount = 1;
         InsertTailList(&UsedPageListHeads[MC_NPPOOL],
                        &MmPageArray[i].ListEntry);
         MmStats.NrSystemPages++;
      }
      else
      {
         MmPageArray[i].Flags.Type = MM_PHYSICAL_PAGE_BIOS;
         MmPageArray[i].Flags.Consumer = MC_NPPOOL;
         MmPageArray[i].Flags.Zero = 0;
         MmPageArray[i].ReferenceCount = 0;
         InsertTailList(&BiosPageListHead,
                        &MmPageArray[i].ListEntry);
         MmStats.NrReservedPages++;
      }
   }



   /* Signalled initially: the zero-page thread starts a pass right away. */
   KeInitializeEvent(&ZeroPageThreadEvent, NotificationEvent, TRUE);

   MmStats.NrTotalPages = MmStats.NrFreePages + MmStats.NrSystemPages +
                          MmStats.NrReservedPages + MmStats.NrUserPages;
   MmInitializeBalancer(MmStats.NrFreePages, MmStats.NrSystemPages + MmStats.NrReservedPages);
   return((PVOID)LastKernelAddress);
}
545
546 VOID
547 NTAPI
548 MmSetFlagsPage(PFN_TYPE Pfn, ULONG Flags)
549 {
550 KIRQL oldIrql;
551
552 ASSERT(Pfn < MmPageArraySize);
553 KeAcquireSpinLock(&PageListLock, &oldIrql);
554 MmPageArray[Pfn].AllFlags = Flags;
555 KeReleaseSpinLock(&PageListLock, oldIrql);
556 }
557
/* Set the reverse-map list head for a page frame.
 * NOTE(review): unlike most accessors in this file this takes no lock —
 * presumably callers serialize rmap access themselves; confirm against
 * rmap.c before relying on it. */
VOID
NTAPI
MmSetRmapListHeadPage(PFN_TYPE Pfn, struct _MM_RMAP_ENTRY* ListHead)
{
   MmPageArray[Pfn].RmapListHead = ListHead;
}
564
/* Return the reverse-map list head for a page frame (unsynchronized read;
 * see the note on MmSetRmapListHeadPage). */
struct _MM_RMAP_ENTRY*
NTAPI
MmGetRmapListHeadPage(PFN_TYPE Pfn)
{
   return(MmPageArray[Pfn].RmapListHead);
}
571
572 VOID
573 NTAPI
574 MmMarkPageMapped(PFN_TYPE Pfn)
575 {
576 KIRQL oldIrql;
577
578 if (Pfn < MmPageArraySize)
579 {
580 KeAcquireSpinLock(&PageListLock, &oldIrql);
581 if (MmPageArray[Pfn].Flags.Type == MM_PHYSICAL_PAGE_FREE)
582 {
583 DbgPrint("Mapping non-used page\n");
584 KEBUGCHECK(0);
585 }
586 MmPageArray[Pfn].MapCount++;
587 KeReleaseSpinLock(&PageListLock, oldIrql);
588 }
589 }
590
591 VOID
592 NTAPI
593 MmMarkPageUnmapped(PFN_TYPE Pfn)
594 {
595 KIRQL oldIrql;
596
597 if (Pfn < MmPageArraySize)
598 {
599 KeAcquireSpinLock(&PageListLock, &oldIrql);
600 if (MmPageArray[Pfn].Flags.Type == MM_PHYSICAL_PAGE_FREE)
601 {
602 DbgPrint("Unmapping non-used page\n");
603 KEBUGCHECK(0);
604 }
605 if (MmPageArray[Pfn].MapCount == 0)
606 {
607 DbgPrint("Unmapping not mapped page\n");
608 KEBUGCHECK(0);
609 }
610 MmPageArray[Pfn].MapCount--;
611 KeReleaseSpinLock(&PageListLock, oldIrql);
612 }
613 }
614
615 ULONG
616 NTAPI
617 MmGetFlagsPage(PFN_TYPE Pfn)
618 {
619 KIRQL oldIrql;
620 ULONG Flags;
621
622 ASSERT(Pfn < MmPageArraySize);
623 KeAcquireSpinLock(&PageListLock, &oldIrql);
624 Flags = MmPageArray[Pfn].AllFlags;
625 KeReleaseSpinLock(&PageListLock, oldIrql);
626
627 return(Flags);
628 }
629
630
631 VOID
632 NTAPI
633 MmSetSavedSwapEntryPage(PFN_TYPE Pfn, SWAPENTRY SavedSwapEntry)
634 {
635 KIRQL oldIrql;
636
637 ASSERT(Pfn < MmPageArraySize);
638 KeAcquireSpinLock(&PageListLock, &oldIrql);
639 MmPageArray[Pfn].SavedSwapEntry = SavedSwapEntry;
640 KeReleaseSpinLock(&PageListLock, oldIrql);
641 }
642
643 SWAPENTRY
644 NTAPI
645 MmGetSavedSwapEntryPage(PFN_TYPE Pfn)
646 {
647 SWAPENTRY SavedSwapEntry;
648 KIRQL oldIrql;
649
650 ASSERT(Pfn < MmPageArraySize);
651 KeAcquireSpinLock(&PageListLock, &oldIrql);
652 SavedSwapEntry = MmPageArray[Pfn].SavedSwapEntry;
653 KeReleaseSpinLock(&PageListLock, oldIrql);
654
655 return(SavedSwapEntry);
656 }
657
658 VOID
659 NTAPI
660 MmReferencePageUnsafe(PFN_TYPE Pfn)
661 {
662 KIRQL oldIrql;
663
664 DPRINT("MmReferencePageUnsafe(PysicalAddress %x)\n", Pfn << PAGE_SHIFT);
665
666 if (Pfn == 0 || Pfn >= MmPageArraySize)
667 {
668 return;
669 }
670
671 KeAcquireSpinLock(&PageListLock, &oldIrql);
672
673 if (MmPageArray[Pfn].Flags.Type != MM_PHYSICAL_PAGE_USED)
674 {
675 DbgPrint("Referencing non-used page\n");
676 KEBUGCHECK(0);
677 }
678
679 MmPageArray[Pfn].ReferenceCount++;
680 KeReleaseSpinLock(&PageListLock, oldIrql);
681 }
682
683 VOID
684 NTAPI
685 MmReferencePage(PFN_TYPE Pfn)
686 {
687 DPRINT("MmReferencePage(PysicalAddress %x)\n", Pfn << PAGE_SHIFT);
688
689 if (Pfn == 0 || Pfn >= MmPageArraySize)
690 {
691 KEBUGCHECK(0);
692 }
693
694 MmReferencePageUnsafe(Pfn);
695 }
696
697 ULONG
698 NTAPI
699 MmGetReferenceCountPage(PFN_TYPE Pfn)
700 {
701 KIRQL oldIrql;
702 ULONG RCount;
703
704 DPRINT("MmGetReferenceCountPage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
705
706 if (Pfn == 0 || Pfn >= MmPageArraySize)
707 {
708 KEBUGCHECK(0);
709 }
710
711 KeAcquireSpinLock(&PageListLock, &oldIrql);
712
713 if (MmPageArray[Pfn].Flags.Type != MM_PHYSICAL_PAGE_USED)
714 {
715 DbgPrint("Getting reference count for free page\n");
716 KEBUGCHECK(0);
717 }
718
719 RCount = MmPageArray[Pfn].ReferenceCount;
720
721 KeReleaseSpinLock(&PageListLock, oldIrql);
722 return(RCount);
723 }
724
725 BOOLEAN
726 NTAPI
727 MmIsUsablePage(PFN_TYPE Pfn)
728 {
729
730 DPRINT("MmIsUsablePage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
731
732 if (Pfn == 0 || Pfn >= MmPageArraySize)
733 {
734 KEBUGCHECK(0);
735 }
736
737 if (MmPageArray[Pfn].Flags.Type != MM_PHYSICAL_PAGE_USED &&
738 MmPageArray[Pfn].Flags.Type != MM_PHYSICAL_PAGE_BIOS)
739 {
740 return(FALSE);
741 }
742
743 return(TRUE);
744 }
745
/*
 * Drop one reference to a USED page frame. When the count reaches zero
 * the page is sanity-checked (no mappings, rmaps, locks, or swap entry)
 * and returned to the unzeroed free list; the zero-page thread is woken
 * once enough unzeroed pages accumulate.
 */
VOID
NTAPI
MmDereferencePage(PFN_TYPE Pfn)
{
   KIRQL oldIrql;

   DPRINT("MmDereferencePage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);

   if (Pfn == 0 || Pfn >= MmPageArraySize)
   {
      KEBUGCHECK(0);
   }

   KeAcquireSpinLock(&PageListLock, &oldIrql);

   if (MmPageArray[Pfn].Flags.Type != MM_PHYSICAL_PAGE_USED)
   {
      DbgPrint("Dereferencing free page\n");
      KEBUGCHECK(0);
   }
   if (MmPageArray[Pfn].ReferenceCount == 0)
   {
      DbgPrint("Derefrencing page with reference count 0\n");
      KEBUGCHECK(0);
   }

   MmPageArray[Pfn].ReferenceCount--;
   if (MmPageArray[Pfn].ReferenceCount == 0)
   {
      /* Last reference gone: the page goes back to the free pool. */
      /* NOTE(review): NrSystemPages is decremented regardless of the
       * page's Consumer tag — confirm user pages are never freed through
       * this path, or the user/system counters will drift. */
      MmStats.NrFreePages++;
      MmStats.NrSystemPages--;
      RemoveEntryList(&MmPageArray[Pfn].ListEntry);
      if (MmPageArray[Pfn].RmapListHead != NULL)
      {
         DbgPrint("Freeing page with rmap entries.\n");
         KEBUGCHECK(0);
      }
      if (MmPageArray[Pfn].MapCount != 0)
      {
         DbgPrint("Freeing mapped page (0x%x count %d)\n",
                  Pfn << PAGE_SHIFT, MmPageArray[Pfn].MapCount);
         KEBUGCHECK(0);
      }
      if (MmPageArray[Pfn].LockCount > 0)
      {
         DbgPrint("Freeing locked page\n");
         KEBUGCHECK(0);
      }
      if (MmPageArray[Pfn].SavedSwapEntry != 0)
      {
         DbgPrint("Freeing page with swap entry.\n");
         KEBUGCHECK(0);
      }
      if (MmPageArray[Pfn].Flags.Type != MM_PHYSICAL_PAGE_USED)
      {
         DbgPrint("Freeing page with flags %x\n",
                  MmPageArray[Pfn].Flags.Type);
         KEBUGCHECK(0);
      }
      /* MC_MAXIMUM marks "no consumer" for a free page. */
      MmPageArray[Pfn].Flags.Type = MM_PHYSICAL_PAGE_FREE;
      MmPageArray[Pfn].Flags.Consumer = MC_MAXIMUM;
      InsertTailList(&FreeUnzeroedPageListHead,
                     &MmPageArray[Pfn].ListEntry);
      UnzeroedPageCount++;
      /* Wake the zero-page thread when a backlog builds up and it is not
       * already signalled. */
      if (UnzeroedPageCount > 8 && 0 == KeReadStateEvent(&ZeroPageThreadEvent))
      {
         KeSetEvent(&ZeroPageThreadEvent, IO_NO_INCREMENT, FALSE);
      }
   }
   KeReleaseSpinLock(&PageListLock, oldIrql);
}
817
818 ULONG
819 NTAPI
820 MmGetLockCountPage(PFN_TYPE Pfn)
821 {
822 KIRQL oldIrql;
823 ULONG LockCount;
824
825 DPRINT("MmGetLockCountPage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
826
827 if (Pfn == 0 || Pfn >= MmPageArraySize)
828 {
829 KEBUGCHECK(0);
830 }
831
832 KeAcquireSpinLock(&PageListLock, &oldIrql);
833
834 if (MmPageArray[Pfn].Flags.Type != MM_PHYSICAL_PAGE_USED)
835 {
836 DbgPrint("Getting lock count for free page\n");
837 KEBUGCHECK(0);
838 }
839
840 LockCount = MmPageArray[Pfn].LockCount;
841 KeReleaseSpinLock(&PageListLock, oldIrql);
842
843 return(LockCount);
844 }
845
846 VOID
847 NTAPI
848 MmLockPageUnsafe(PFN_TYPE Pfn)
849 {
850 KIRQL oldIrql;
851
852 DPRINT("MmLockPageUnsafe(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
853
854 if (Pfn == 0 || Pfn >= MmPageArraySize)
855 {
856 return;
857 }
858
859 KeAcquireSpinLock(&PageListLock, &oldIrql);
860
861 if (MmPageArray[Pfn].Flags.Type != MM_PHYSICAL_PAGE_USED)
862 {
863 DbgPrint("Locking free page\n");
864 KEBUGCHECK(0);
865 }
866
867 MmPageArray[Pfn].LockCount++;
868 KeReleaseSpinLock(&PageListLock, oldIrql);
869 }
870
871 VOID
872 NTAPI
873 MmLockPage(PFN_TYPE Pfn)
874 {
875 DPRINT("MmLockPage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
876
877 if (Pfn == 0 || Pfn >= MmPageArraySize)
878 {
879 KEBUGCHECK(0);
880 }
881
882 MmLockPageUnsafe(Pfn);
883 }
884
885 VOID
886 NTAPI
887 MmUnlockPage(PFN_TYPE Pfn)
888 {
889 KIRQL oldIrql;
890
891 DPRINT("MmUnlockPage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
892
893 if (Pfn == 0 || Pfn >= MmPageArraySize)
894 {
895 KEBUGCHECK(0);
896 }
897
898 KeAcquireSpinLock(&PageListLock, &oldIrql);
899
900 if (MmPageArray[Pfn].Flags.Type != MM_PHYSICAL_PAGE_USED)
901 {
902 DbgPrint("Unlocking free page\n");
903 KEBUGCHECK(0);
904 }
905
906 MmPageArray[Pfn].LockCount--;
907 KeReleaseSpinLock(&PageListLock, oldIrql);
908 }
909
/*
 * Allocate one zeroed page for the given consumer, preferring the
 * pre-zeroed free list and falling back to the unzeroed list (zeroing
 * on the spot). Optionally records a swap entry on the new page.
 * Returns the PFN, or 0 when no free pages remain.
 */
PFN_TYPE
NTAPI
MmAllocPage(ULONG Consumer, SWAPENTRY SavedSwapEntry)
{
   PFN_TYPE PfnOffset;
   PLIST_ENTRY ListEntry;
   PPHYSICAL_PAGE PageDescriptor;
   KIRQL oldIrql;
   BOOLEAN NeedClear = FALSE;

   DPRINT("MmAllocPage()\n");

   KeAcquireSpinLock(&PageListLock, &oldIrql);
   if (IsListEmpty(&FreeZeroedPageListHead))
   {
      if (IsListEmpty(&FreeUnzeroedPageListHead))
      {
         /* Both free lists empty: genuinely out of pages. */
         DPRINT1("MmAllocPage(): Out of memory\n");
         KeReleaseSpinLock(&PageListLock, oldIrql);
         return 0;
      }
      ListEntry = RemoveTailList(&FreeUnzeroedPageListHead);
      UnzeroedPageCount--;

      PageDescriptor = CONTAINING_RECORD(ListEntry, PHYSICAL_PAGE, ListEntry);

      /* Stale contents: must be scrubbed after we drop the lock. */
      NeedClear = TRUE;
   }
   else
   {
      ListEntry = RemoveTailList(&FreeZeroedPageListHead);

      PageDescriptor = CONTAINING_RECORD(ListEntry, PHYSICAL_PAGE, ListEntry);
   }

   /* Sanity checks: free-list pages must be FREE, unmapped, unreferenced. */
   if (PageDescriptor->Flags.Type != MM_PHYSICAL_PAGE_FREE)
   {
      DbgPrint("Got non-free page from freelist\n");
      KEBUGCHECK(0);
   }
   if (PageDescriptor->MapCount != 0)
   {
      DbgPrint("Got mapped page from freelist\n");
      KEBUGCHECK(0);
   }
   if (PageDescriptor->ReferenceCount != 0)
   {
      DPRINT1("%d\n", PageDescriptor->ReferenceCount);
      KEBUGCHECK(0);
   }
   PageDescriptor->Flags.Type = MM_PHYSICAL_PAGE_USED;
   PageDescriptor->Flags.Consumer = Consumer;
   PageDescriptor->ReferenceCount = 1;
   PageDescriptor->LockCount = 0;
   PageDescriptor->MapCount = 0;
   PageDescriptor->SavedSwapEntry = SavedSwapEntry;
   InsertTailList(&UsedPageListHeads[Consumer], ListEntry);

   /* NOTE(review): NrSystemPages is incremented for every consumer,
    * including MC_USER — mirrors the unconditional decrement in
    * MmDereferencePage; confirm this accounting is intended. */
   MmStats.NrSystemPages++;
   MmStats.NrFreePages--;

   KeReleaseSpinLock(&PageListLock, oldIrql);

   PfnOffset = PageDescriptor - MmPageArray;
   if (NeedClear)
   {
      MiZeroPage(PfnOffset);
   }
   /* NOTE(review): this MapCount re-check happens after the lock was
    * released, so it reads the descriptor unsynchronized — at this point
    * the page is owned by us, so it appears benign; confirm. */
   if (PageDescriptor->MapCount != 0)
   {
      DbgPrint("Returning mapped page.\n");
      KEBUGCHECK(0);
   }
   return PfnOffset;
}
985
/*
 * Allocate up to NumberOfPages (not necessarily contiguous) pages whose
 * physical addresses fall in [LowestAddress, HighestAddress), storing
 * their PFNs in Pages[]. All returned pages are zeroed.
 * RETURNS: the number of pages actually allocated (may be fewer than
 *          requested), or -1 when none could be allocated.
 */
LONG
NTAPI
MmAllocPagesSpecifyRange(ULONG Consumer,
                         PHYSICAL_ADDRESS LowestAddress,
                         PHYSICAL_ADDRESS HighestAddress,
                         ULONG NumberOfPages,
                         PPFN_TYPE Pages)
{
   PPHYSICAL_PAGE PageDescriptor;
   KIRQL oldIrql;
   PFN_TYPE LowestPage, HighestPage;
   PFN_TYPE pfn;
   ULONG NumberOfPagesFound = 0;
   ULONG i;

   DPRINT("MmAllocPagesSpecifyRange()\n"
          "    LowestAddress = 0x%08x%08x\n"
          "    HighestAddress = 0x%08x%08x\n"
          "    NumberOfPages = %d\n",
          LowestAddress.u.HighPart, LowestAddress.u.LowPart,
          HighestAddress.u.HighPart, HighestAddress.u.LowPart,
          NumberOfPages);

   if (NumberOfPages == 0)
      return 0;

   /* Convert the byte range to an inclusive/exclusive PFN range, rounding
    * a partial top page up so it is still eligible. */
   LowestPage = LowestAddress.QuadPart / PAGE_SIZE;
   HighestPage = HighestAddress.QuadPart / PAGE_SIZE;
   if ((HighestAddress.u.LowPart % PAGE_SIZE) != 0)
      HighestPage++;

   if (LowestPage >= MmPageArraySize)
   {
      DPRINT1("MmAllocPagesSpecifyRange(): Out of memory\n");
      return -1;
   }
   if (HighestPage > MmPageArraySize)
      HighestPage = MmPageArraySize;

   KeAcquireSpinLock(&PageListLock, &oldIrql);
   if (LowestPage == 0 && HighestPage == MmPageArraySize)
   {
      /* Unconstrained range: take pages straight off the free lists,
       * zeroed list first. */
      PLIST_ENTRY ListEntry;
      while (NumberOfPagesFound < NumberOfPages)
      {
         if (!IsListEmpty(&FreeZeroedPageListHead))
         {
            ListEntry = RemoveTailList(&FreeZeroedPageListHead);
         }
         else if (!IsListEmpty(&FreeUnzeroedPageListHead))
         {
            ListEntry = RemoveTailList(&FreeUnzeroedPageListHead);
            UnzeroedPageCount--;
         }
         else
         {
            /* Nothing left: fail outright only if we got nothing at all. */
            if (NumberOfPagesFound == 0)
            {
               KeReleaseSpinLock(&PageListLock, oldIrql);
               DPRINT1("MmAllocPagesSpecifyRange(): Out of memory\n");
               return -1;
            }
            else
            {
               break;
            }
         }
         PageDescriptor = CONTAINING_RECORD(ListEntry, PHYSICAL_PAGE, ListEntry);

         ASSERT(PageDescriptor->Flags.Type == MM_PHYSICAL_PAGE_FREE);
         ASSERT(PageDescriptor->MapCount == 0);
         ASSERT(PageDescriptor->ReferenceCount == 0);

         /* Allocate the page */
         PageDescriptor->Flags.Type = MM_PHYSICAL_PAGE_USED;
         PageDescriptor->Flags.Consumer = Consumer;
         PageDescriptor->ReferenceCount = 1;
         PageDescriptor->LockCount = 0;
         PageDescriptor->MapCount = 0;
         PageDescriptor->SavedSwapEntry = 0; /* FIXME: Do we need swap entries? */
         InsertTailList(&UsedPageListHeads[Consumer], &PageDescriptor->ListEntry);

         MmStats.NrSystemPages++;
         MmStats.NrFreePages--;

         /* Remember the page */
         pfn = PageDescriptor - MmPageArray;
         Pages[NumberOfPagesFound++] = pfn;
      }
   }
   else
   {
      /* Constrained range: scan the descriptor array directly, preferring
       * already-zeroed pages on the first pass. */
      INT LookForZeroedPages;
      for (LookForZeroedPages = 1; LookForZeroedPages >= 0; LookForZeroedPages--)
      {
         for (pfn = LowestPage; pfn < HighestPage; pfn++)
         {
            PageDescriptor = MmPageArray + pfn;

            if (PageDescriptor->Flags.Type != MM_PHYSICAL_PAGE_FREE)
               continue;
            if (PageDescriptor->Flags.Zero != LookForZeroedPages)
               continue;

            ASSERT(PageDescriptor->MapCount == 0);
            ASSERT(PageDescriptor->ReferenceCount == 0);

            /* Allocate the page */
            PageDescriptor->Flags.Type = MM_PHYSICAL_PAGE_USED;
            PageDescriptor->Flags.Consumer = Consumer;
            PageDescriptor->ReferenceCount = 1;
            PageDescriptor->LockCount = 0;
            PageDescriptor->MapCount = 0;
            PageDescriptor->SavedSwapEntry = 0; /* FIXME: Do we need swap entries? */
            RemoveEntryList(&PageDescriptor->ListEntry);
            InsertTailList(&UsedPageListHeads[Consumer], &PageDescriptor->ListEntry);

            /* Unzeroed pages leave the unzeroed free list, so keep the
             * counter in sync. */
            if (!PageDescriptor->Flags.Zero)
               UnzeroedPageCount--;
            MmStats.NrSystemPages++;
            MmStats.NrFreePages--;

            /* Remember the page */
            Pages[NumberOfPagesFound++] = pfn;
            if (NumberOfPagesFound == NumberOfPages)
               break;
         }
         if (NumberOfPagesFound == NumberOfPages)
            break;
      }
   }
   KeReleaseSpinLock(&PageListLock, oldIrql);

   /* Zero unzero-ed pages */
   for (i = 0; i < NumberOfPagesFound; i++)
   {
      pfn = Pages[i];
      if (MmPageArray[pfn].Flags.Zero == 0)
      {
         MiZeroPage(pfn);
      }
      else
      {
         MmPageArray[pfn].Flags.Zero = 0;
      }
   }

   return NumberOfPagesFound;
}
1135
/*
 * Body of the background zero-page thread. Waits on ZeroPageThreadEvent,
 * then drains the unzeroed free list: each page is temporarily marked
 * USED, zeroed with the list lock dropped, and moved to the zeroed free
 * list (or put back on failure). Exits when
 * ZeroPageThreadShouldTerminate is set.
 */
VOID STDCALL
MmZeroPageThreadMain(PVOID Ignored)
{
   NTSTATUS Status;
   KIRQL oldIrql;
   PLIST_ENTRY ListEntry;
   PPHYSICAL_PAGE PageDescriptor;
   PFN_TYPE Pfn;
   ULONG Count;

   while(1)
   {
      Status = KeWaitForSingleObject(&ZeroPageThreadEvent,
                                     0,
                                     KernelMode,
                                     FALSE,
                                     NULL);
      if (!NT_SUCCESS(Status))
      {
         DbgPrint("ZeroPageThread: Wait failed\n");
         KEBUGCHECK(0);
         return;
      }

      if (ZeroPageThreadShouldTerminate)
      {
         DbgPrint("ZeroPageThread: Terminating\n");
         return;
      }
      Count = 0;
      KeAcquireSpinLock(&PageListLock, &oldIrql);
      while (!IsListEmpty(&FreeUnzeroedPageListHead))
      {
         ListEntry = RemoveTailList(&FreeUnzeroedPageListHead);
         UnzeroedPageCount--;
         PageDescriptor = CONTAINING_RECORD(ListEntry, PHYSICAL_PAGE, ListEntry);
         /* We set the page to used, because MmCreateVirtualMapping failed with unused pages */
         PageDescriptor->Flags.Type = MM_PHYSICAL_PAGE_USED;
         /* Drop the lock while zeroing; the page is off every list and
          * marked USED, so nobody else can claim it meanwhile. */
         KeReleaseSpinLock(&PageListLock, oldIrql);
         Pfn = PageDescriptor - MmPageArray;
         Status = MiZeroPage(Pfn);

         KeAcquireSpinLock(&PageListLock, &oldIrql);
         if (PageDescriptor->MapCount != 0)
         {
            DbgPrint("Mapped page on freelist.\n");
            KEBUGCHECK(0);
         }
         PageDescriptor->Flags.Zero = 1;
         PageDescriptor->Flags.Type = MM_PHYSICAL_PAGE_FREE;
         if (NT_SUCCESS(Status))
         {
            InsertHeadList(&FreeZeroedPageListHead, ListEntry);
            Count++;
         }
         else
         {
            /* Zeroing failed: put the page back where it came from.
             * NOTE(review): Flags.Zero was already set to 1 above even on
             * this failure path — confirm that is intended. */
            InsertHeadList(&FreeUnzeroedPageListHead, ListEntry);
            UnzeroedPageCount++;
         }

      }
      DPRINT("Zeroed %d pages.\n", Count);
      /* Reset while still holding the lock so a concurrent
       * MmDereferencePage wake-up cannot be lost between drain and reset. */
      KeResetEvent(&ZeroPageThreadEvent);
      KeReleaseSpinLock(&PageListLock, oldIrql);
   }
}
1203
1204 NTSTATUS
1205 INIT_FUNCTION
1206 NTAPI
1207 MmInitZeroPageThread(VOID)
1208 {
1209 NTSTATUS Status;
1210 HANDLE ThreadHandle;
1211
1212 ZeroPageThreadShouldTerminate = FALSE;
1213 Status = PsCreateSystemThread(&ThreadHandle,
1214 THREAD_ALL_ACCESS,
1215 NULL,
1216 NULL,
1217 &ZeroPageThreadId,
1218 MmZeroPageThreadMain,
1219 NULL);
1220 if (!NT_SUCCESS(Status))
1221 {
1222 KEBUGCHECK(0);
1223 }
1224
1225 Status = ObReferenceObjectByHandle(ThreadHandle,
1226 THREAD_ALL_ACCESS,
1227 PsThreadType,
1228 KernelMode,
1229 (PVOID*)&ZeroPageThread,
1230 NULL);
1231 if (!NT_SUCCESS(Status))
1232 {
1233 KEBUGCHECK(0);
1234 }
1235
1236 KeSetPriorityThread(&ZeroPageThread->Tcb, LOW_PRIORITY);
1237 NtClose(ThreadHandle);
1238 return STATUS_SUCCESS;
1239 }
1240
1241 /* EOF */