Commit: Initialize the used page count for the non-paged pool in MmInitializeBalancer.
Path: reactos.git / reactos / ntoskrnl / mm / freelist.c
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/mm/freelist.c
5 * PURPOSE: Handle the list of free physical pages
6 * PROGRAMMER: David Welch (welch@cwcom.net)
7 * UPDATE HISTORY:
8 * 27/05/98: Created
9 * 18/08/98: Added a fix from Robert Bergkvist
10 */
11
12 /* INCLUDES ****************************************************************/
13
14 #include <ddk/ntddk.h>
15 #include <internal/mm.h>
16 #include <internal/ntoskrnl.h>
17
18 #define NDEBUG
19 #include <internal/debug.h>
20
21 /* TYPES *******************************************************************/
22
/* Values for PHYSICAL_PAGE.Flags.Type: disposition of a physical page. */
#define MM_PHYSICAL_PAGE_FREE    (0x1)  /* On one of the free lists */
#define MM_PHYSICAL_PAGE_USED    (0x2)  /* Allocated to a consumer */
#define MM_PHYSICAL_PAGE_BIOS    (0x3)  /* BIOS/low-memory reserved, never reused */

/*
 * Descriptor for one physical page frame.  MmPageArray holds one descriptor
 * per page; a descriptor's index in that array is its page frame number.
 */
typedef struct _PHYSICAL_PAGE
{
  union
  {
    struct
    {
      ULONG Type:2;      /* One of the MM_PHYSICAL_PAGE_* values above */
      ULONG Consumer:3;  /* MC_* consumer owning the page while Type is USED */
    }Flags;
    ULONG AllFlags;      /* Raw access to both bit-fields at once */
  };

  LIST_ENTRY ListEntry;      /* Links the page into a free/used/bios list */
  ULONG ReferenceCount;      /* Outstanding references; page freed when it hits 0 */
  SWAPENTRY SavedSwapEntry;  /* Swap slot backing this page, 0 if none */
  ULONG LockCount;           /* MmLockPage/MmUnlockPage nesting count */
  ULONG MapCount;            /* Number of virtual mappings of this page */
  struct _MM_RMAP_ENTRY* RmapListHead; /* Reverse-map entries for this page */
} PHYSICAL_PAGE, *PPHYSICAL_PAGE;
/* GLOBALS ****************************************************************/

/* One PHYSICAL_PAGE descriptor per physical page, indexed by PFN. */
static PPHYSICAL_PAGE MmPageArray;
/* Number of entries in MmPageArray (pages under management). */
static ULONG MmPageArraySize;

/* Protects all page lists and the descriptor fields they link. */
static KSPIN_LOCK PageListLock;
/* Per-consumer lists of in-use pages, indexed by MC_*; MC_USER is LRU-ordered. */
static LIST_ENTRY UsedPageListHeads[MC_MAXIMUM];
/* Free pages already zeroed and ready for immediate reuse. */
static LIST_ENTRY FreeZeroedPageListHead;
/* Free pages with stale contents; must be zeroed before handing out. */
static LIST_ENTRY FreeUnzeroedPageListHead;
/* Pages permanently reserved for BIOS/low-memory regions. */
static LIST_ENTRY BiosPageListHead;

static HANDLE ZeroPageThreadHandle;
static CLIENT_ID ZeroPageThreadId;
/* Signalled to wake the zero-page thread when unzeroed pages accumulate. */
static KEVENT ZeroPageThreadEvent;

/* Current length of FreeUnzeroedPageListHead. */
static ULONG UnzeroedPageCount = 0;
64 /* FUNCTIONS *************************************************************/
65
VOID
MmTransferOwnershipPage(PHYSICAL_ADDRESS PhysicalAddress, ULONG NewConsumer)
/*
 * FUNCTION: Reassign an in-use physical page to a different consumer.
 * ARGUMENTS:
 *     PhysicalAddress = Physical address of the page to transfer
 *     NewConsumer = MC_* constant identifying the new owner
 * NOTE: Bugchecks if the page is still mapped anywhere.  The page is
 *       zeroed after the transfer so the new consumer never sees the old
 *       contents (done after the lock is dropped, since zeroing may map).
 */
{
  ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
  KIRQL oldIrql;

  KeAcquireSpinLock(&PageListLock, &oldIrql);
  if (MmPageArray[Start].MapCount != 0)
    {
      DbgPrint("Transfering mapped page.\n");
      KEBUGCHECK(0);
    }
  /* Move the descriptor onto the new consumer's used list. */
  RemoveEntryList(&MmPageArray[Start].ListEntry);
  InsertTailList(&UsedPageListHeads[NewConsumer],
                 &MmPageArray[Start].ListEntry);
  MmPageArray[Start].Flags.Consumer = NewConsumer;
  KeReleaseSpinLock(&PageListLock, oldIrql);
  MiZeroPage(PhysicalAddress);
}
85
PHYSICAL_ADDRESS
MmGetLRUFirstUserPage(VOID)
/*
 * FUNCTION: Return the physical address of the least-recently-used page
 * owned by MC_USER.
 * RETURNS: The page's physical address, or 0 if no user pages exist.
 */
{
  PLIST_ENTRY NextListEntry;
  PHYSICAL_ADDRESS Next;
  PHYSICAL_PAGE* PageDescriptor;
  KIRQL oldIrql;

  KeAcquireSpinLock(&PageListLock, &oldIrql);
  /* The MC_USER list head's Flink is the LRU end of the list. */
  NextListEntry = UsedPageListHeads[MC_USER].Flink;
  if (NextListEntry == &UsedPageListHeads[MC_USER])
    {
      /* Empty list: there are no user pages at all. */
      KeReleaseSpinLock(&PageListLock, oldIrql);
      return((LARGE_INTEGER)0LL);
    }
  PageDescriptor = CONTAINING_RECORD(NextListEntry, PHYSICAL_PAGE, ListEntry);
  /* Convert the descriptor's index within MmPageArray back to a
   * physical address (index * PAGE_SIZE). */
  Next.QuadPart = (ULONG)((ULONG)PageDescriptor - (ULONG)MmPageArray);
  Next.QuadPart = (Next.QuadPart / sizeof(PHYSICAL_PAGE)) * PAGE_SIZE;
  KeReleaseSpinLock(&PageListLock, oldIrql);
  return(Next);
}
107
108 VOID
109 MmSetLRULastPage(PHYSICAL_ADDRESS PhysicalAddress)
110 {
111 ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
112 KIRQL oldIrql;
113
114 KeAcquireSpinLock(&PageListLock, &oldIrql);
115 if (MmPageArray[Start].Flags.Type == MM_PHYSICAL_PAGE_USED &&
116 MmPageArray[Start].Flags.Consumer == MC_USER)
117 {
118 RemoveEntryList(&MmPageArray[Start].ListEntry);
119 InsertTailList(&UsedPageListHeads[MC_USER],
120 &MmPageArray[Start].ListEntry);
121 }
122 KeReleaseSpinLock(&PageListLock, oldIrql);
123 }
124
PHYSICAL_ADDRESS
MmGetLRUNextUserPage(PHYSICAL_ADDRESS PreviousPhysicalAddress)
/*
 * FUNCTION: Return the next user page after the given one in LRU order.
 * ARGUMENTS:
 *     PreviousPhysicalAddress = Page to continue the walk from; if it is
 *         no longer a user page the walk restarts at the LRU end.
 * RETURNS: The next page's physical address, or 0 at the end of the list.
 */
{
  ULONG Start = PreviousPhysicalAddress.u.LowPart / PAGE_SIZE;
  PLIST_ENTRY NextListEntry;
  PHYSICAL_ADDRESS Next;
  PHYSICAL_PAGE* PageDescriptor;
  KIRQL oldIrql;

  KeAcquireSpinLock(&PageListLock, &oldIrql);
  if (MmPageArray[Start].Flags.Type != MM_PHYSICAL_PAGE_USED ||
      MmPageArray[Start].Flags.Consumer != MC_USER)
    {
      /* The previous page was freed/transferred meanwhile: restart. */
      NextListEntry = UsedPageListHeads[MC_USER].Flink;
    }
  else
    {
      NextListEntry = MmPageArray[Start].ListEntry.Flink;
    }
  if (NextListEntry == &UsedPageListHeads[MC_USER])
    {
      /* Reached the list head: no more user pages. */
      KeReleaseSpinLock(&PageListLock, oldIrql);
      return((LARGE_INTEGER)0LL);
    }
  PageDescriptor = CONTAINING_RECORD(NextListEntry, PHYSICAL_PAGE, ListEntry);
  /* Descriptor index in MmPageArray -> physical address. */
  Next.QuadPart = (ULONG)((ULONG)PageDescriptor - (ULONG)MmPageArray);
  Next.QuadPart = (Next.QuadPart / sizeof(PHYSICAL_PAGE)) * PAGE_SIZE;
  KeReleaseSpinLock(&PageListLock, oldIrql);
  return(Next);
}
155
156 PHYSICAL_ADDRESS
157 MmGetContinuousPages(ULONG NumberOfBytes,
158 PHYSICAL_ADDRESS HighestAcceptableAddress,
159 ULONG Alignment)
160 {
161 ULONG NrPages;
162 ULONG i;
163 LONG start;
164 ULONG length;
165 KIRQL oldIrql;
166
167 NrPages = PAGE_ROUND_UP(NumberOfBytes) / PAGE_SIZE;
168
169 KeAcquireSpinLock(&PageListLock, &oldIrql);
170
171 start = -1;
172 length = 0;
173 for (i = 0; i < (HighestAcceptableAddress.QuadPart / PAGE_SIZE); )
174 {
175 if (MmPageArray[i].Flags.Type == MM_PHYSICAL_PAGE_FREE)
176 {
177 if (start == -1)
178 {
179 start = i;
180 length = 1;
181 }
182 else
183 {
184 length++;
185 }
186 i++;
187 if (length == NrPages)
188 {
189 break;
190 }
191 }
192 else
193 {
194 start = -1;
195 /*
196 * Fast forward to the base of the next aligned region
197 */
198 i = ROUND_UP((i + 1), (Alignment / PAGE_SIZE));
199 }
200 }
201 if (start == -1 || length != NrPages)
202 {
203 KeReleaseSpinLock(&PageListLock, oldIrql);
204 return((LARGE_INTEGER)(LONGLONG)0);
205 }
206 for (i = start; i < (start + length); i++)
207 {
208 RemoveEntryList(&MmPageArray[i].ListEntry);
209 MmPageArray[i].Flags.Type = MM_PHYSICAL_PAGE_USED;
210 MmPageArray[i].Flags.Consumer = MC_NPPOOL;
211 MmPageArray[i].ReferenceCount = 1;
212 MmPageArray[i].LockCount = 0;
213 MmPageArray[i].MapCount = 0;
214 MmPageArray[i].SavedSwapEntry = 0;
215 InsertTailList(&UsedPageListHeads[MC_NPPOOL],
216 &MmPageArray[i].ListEntry);
217 }
218 KeReleaseSpinLock(&PageListLock, oldIrql);
219 return((LARGE_INTEGER)((LONGLONG)start * PAGE_SIZE));
220 }
221
VOID INIT_FUNCTION
MiParseRangeToFreeList(PADDRESS_RANGE Range)
/*
 * FUNCTION: Add every not-yet-typed page of a usable BIOS memory range
 * to the unzeroed free list.
 */
{
  ULONG i, first, last;

  /* FIXME: Not 64-bit ready */

  DPRINT("Range going to free list (Base 0x%X, Length 0x%X, Type 0x%X)\n",
         Range->BaseAddrLow,
         Range->LengthLow,
         Range->Type);

  /* Round the base up so a partial leading page is never handed out.
   * NOTE(review): 'last' adds the rounded-up length to the rounded-up
   * base, so it can reach one page beyond the range's true end; the
   * MmPageArraySize bound below keeps the array access in range. */
  first = (Range->BaseAddrLow + PAGE_SIZE - 1) / PAGE_SIZE;
  last = first + ((Range->LengthLow + PAGE_SIZE - 1) / PAGE_SIZE);
  for (i = first; i < last && i < MmPageArraySize; i++)
    {
      /* Only claim pages that have not already been classified. */
      if (MmPageArray[i].Flags.Type == 0)
        {
          MmPageArray[i].Flags.Type = MM_PHYSICAL_PAGE_FREE;
          MmPageArray[i].ReferenceCount = 0;
          InsertTailList(&FreeUnzeroedPageListHead,
                         &MmPageArray[i].ListEntry);
          UnzeroedPageCount++;
        }
    }
}
248
VOID INIT_FUNCTION
MiParseRangeToBiosList(PADDRESS_RANGE Range)
/*
 * FUNCTION: Mark every page of a reserved BIOS memory range as
 * MM_PHYSICAL_PAGE_BIOS, pulling it off the free list if necessary.
 * NOTE(review): the base is rounded UP to a page boundary, so a partial
 * first page of a reserved range stays unreserved — verify intent.
 */
{
  ULONG i, first, last;

  /* FIXME: Not 64-bit ready */

  DPRINT("Range going to bios list (Base 0x%X, Length 0x%X, Type 0x%X)\n",
         Range->BaseAddrLow,
         Range->LengthLow,
         Range->Type);

  first = (Range->BaseAddrLow + PAGE_SIZE - 1) / PAGE_SIZE;
  last = first + ((Range->LengthLow + PAGE_SIZE - 1) / PAGE_SIZE);
  for (i = first; i < last && i < MmPageArraySize; i++)
    {
      /* Remove the page from the free list if it is there */
      if (MmPageArray[i].Flags.Type == MM_PHYSICAL_PAGE_FREE)
        {
          RemoveEntryList(&MmPageArray[i].ListEntry);
        }

      /* Skip pages already on the BIOS list to avoid double insertion. */
      if (MmPageArray[i].Flags.Type != MM_PHYSICAL_PAGE_BIOS)
        {
          MmPageArray[i].Flags.Type = MM_PHYSICAL_PAGE_BIOS;
          MmPageArray[i].Flags.Consumer = MC_NPPOOL;
          MmPageArray[i].ReferenceCount = 1;
          InsertTailList(&BiosPageListHead,
                         &MmPageArray[i].ListEntry);
        }
    }
}
281
282 VOID INIT_FUNCTION
283 MiParseBIOSMemoryMap(PADDRESS_RANGE BIOSMemoryMap,
284 ULONG AddressRangeCount)
285 {
286 PADDRESS_RANGE p;
287 ULONG i;
288
289 p = BIOSMemoryMap;
290 for (i = 0; i < AddressRangeCount; i++, p++)
291 {
292 if (p->Type == 1)
293 {
294 MiParseRangeToFreeList(p);
295 }
296 else
297 {
298 MiParseRangeToBiosList(p);
299 }
300 }
301 }
302
PVOID INIT_FUNCTION
MmInitializePageList(PVOID FirstPhysKernelAddress,
                     PVOID LastPhysKernelAddress,
                     ULONG MemorySizeInPages,
                     ULONG LastKernelAddress,
                     PADDRESS_RANGE BIOSMemoryMap,
                     ULONG AddressRangeCount)
/*
 * FUNCTION: Initializes the page list with all pages free
 * except those known to be reserved and those used by the kernel
 * ARGUMENTS:
 *     FirstPhysKernelAddress = First physical address used by the kernel
 *     LastPhysKernelAddress = Last physical address used by the kernel
 *     MemorySizeInPages = Total number of physical pages to manage
 *     LastKernelAddress = First free virtual address after the kernel;
 *         the page descriptor array is placed here
 *     BIOSMemoryMap/AddressRangeCount = Optional e820-style memory map
 * RETURNS: The new last kernel virtual address, i.e. the end of the
 *     page descriptor array.
 */
{
  ULONG i;
  ULONG Reserved;
  NTSTATUS Status;

  DPRINT("MmInitializePageList(FirstPhysKernelAddress %x, "
         "LastPhysKernelAddress %x, "
         "MemorySizeInPages %x, LastKernelAddress %x)\n",
         FirstPhysKernelAddress,
         LastPhysKernelAddress,
         MemorySizeInPages,
         LastKernelAddress);

  /* Set up the empty lists and the lock that guards them. */
  for (i = 0; i < MC_MAXIMUM; i++)
    {
      InitializeListHead(&UsedPageListHeads[i]);
    }
  KeInitializeSpinLock(&PageListLock);
  InitializeListHead(&FreeUnzeroedPageListHead);
  InitializeListHead(&FreeZeroedPageListHead);
  InitializeListHead(&BiosPageListHead);

  LastKernelAddress = PAGE_ROUND_UP(LastKernelAddress);

  /* The descriptor array lives directly after the kernel image;
   * 'Reserved' is its size in pages. */
  MmPageArraySize = MemorySizeInPages;
  Reserved =
    PAGE_ROUND_UP((MmPageArraySize * sizeof(PHYSICAL_PAGE))) / PAGE_SIZE;
  MmPageArray = (PHYSICAL_PAGE *)LastKernelAddress;

  DPRINT("Reserved %d\n", Reserved);

  /* Advance both the virtual and physical kernel limits past the array. */
  LastKernelAddress = PAGE_ROUND_UP(LastKernelAddress);
  LastKernelAddress = ((ULONG)LastKernelAddress + (Reserved * PAGE_SIZE));
  LastPhysKernelAddress = (PVOID)PAGE_ROUND_UP(LastPhysKernelAddress);
  LastPhysKernelAddress = LastPhysKernelAddress + (Reserved * PAGE_SIZE);

  MmStats.NrTotalPages = 0;
  MmStats.NrSystemPages = 0;
  MmStats.NrUserPages = 0;
  MmStats.NrReservedPages = 0;
  MmStats.NrFreePages = 0;
  MmStats.NrLockedPages = 0;

  /* Map (if needed) and zero the pages backing the descriptor array. */
  for (i = 0; i < Reserved; i++)
    {
      PVOID Address = (PVOID)(ULONG)MmPageArray + (i * PAGE_SIZE);
      if (!MmIsPagePresent(NULL, Address))
        {
          ULONG PhysicalAddress;
          PhysicalAddress = (ULONG)LastPhysKernelAddress -
            (Reserved * PAGE_SIZE) + (i * PAGE_SIZE);
          Status =
            MmCreateVirtualMappingUnsafe(NULL,
                                         Address,
                                         PAGE_READWRITE,
                                         (PHYSICAL_ADDRESS)(LONGLONG)PhysicalAddress,
                                         FALSE);
          if (!NT_SUCCESS(Status))
            {
              DbgPrint("Unable to create virtual mapping\n");
              KEBUGCHECK(0);
            }
        }
      memset((PVOID)MmPageArray + (i * PAGE_SIZE), 0, PAGE_SIZE);
    }


  /*
   * Page zero is reserved
   */
  MmPageArray[0].Flags.Type = MM_PHYSICAL_PAGE_BIOS;
  MmPageArray[0].Flags.Consumer = MC_NPPOOL;
  MmPageArray[0].ReferenceCount = 0;
  InsertTailList(&BiosPageListHead,
                 &MmPageArray[0].ListEntry);

  /*
   * Page one is reserved for the initial KPCR
   */
  MmPageArray[1].Flags.Type = MM_PHYSICAL_PAGE_BIOS;
  MmPageArray[1].Flags.Consumer = MC_NPPOOL;
  MmPageArray[1].ReferenceCount = 0;
  InsertTailList(&BiosPageListHead,
                 &MmPageArray[1].ListEntry);

  /* Classify the remaining pages.  Two layouts are handled, depending on
   * whether the kernel was loaded below the 0xa0000 VGA/BIOS hole. */
  i = 2;
  if ((ULONG)FirstPhysKernelAddress < 0xa0000)
    {
      /* Free pages below the kernel image. */
      MmStats.NrFreePages += (((ULONG)FirstPhysKernelAddress/PAGE_SIZE) - 2);
      for (; i<((ULONG)FirstPhysKernelAddress/PAGE_SIZE); i++)
        {
          MmPageArray[i].Flags.Type = MM_PHYSICAL_PAGE_FREE;
          MmPageArray[i].ReferenceCount = 0;
          InsertTailList(&FreeUnzeroedPageListHead,
                         &MmPageArray[i].ListEntry);
          UnzeroedPageCount++;
        }
      /* Pages occupied by the kernel image and descriptor array. */
      MmStats.NrSystemPages +=
        ((((ULONG)LastPhysKernelAddress) / PAGE_SIZE) - i);
      for (; i<((ULONG)LastPhysKernelAddress / PAGE_SIZE); i++)
        {
          MmPageArray[i].Flags.Type = MM_PHYSICAL_PAGE_USED;
          MmPageArray[i].Flags.Consumer = MC_NPPOOL;
          MmPageArray[i].ReferenceCount = 1;
          MmPageArray[i].MapCount = 1;
          InsertTailList(&UsedPageListHeads[MC_NPPOOL],
                         &MmPageArray[i].ListEntry);
        }
      /* Free pages between the kernel and the start of the BIOS hole. */
      MmStats.NrFreePages += ((0xa0000/PAGE_SIZE) - i);
      for (; i<(0xa0000/PAGE_SIZE); i++)
        {
          MmPageArray[i].Flags.Type = MM_PHYSICAL_PAGE_FREE;
          MmPageArray[i].ReferenceCount = 0;
          InsertTailList(&FreeUnzeroedPageListHead,
                         &MmPageArray[i].ListEntry);
          UnzeroedPageCount++;
        }
      /* The 0xa0000-0xfffff BIOS/VGA hole is reserved. */
      MmStats.NrReservedPages += ((0x100000/PAGE_SIZE) - i);
      for (; i<(0x100000 / PAGE_SIZE); i++)
        {
          MmPageArray[i].Flags.Type = MM_PHYSICAL_PAGE_BIOS;
          MmPageArray[i].Flags.Consumer = MC_NPPOOL;
          MmPageArray[i].ReferenceCount = 1;
          InsertTailList(&BiosPageListHead,
                         &MmPageArray[i].ListEntry);
        }
    }
  else
    {
      /* Kernel above 1MB: conventional memory below the hole is free. */
      MmStats.NrFreePages += ((0xa0000 / PAGE_SIZE) - 2);
      for (; i<(0xa0000 / PAGE_SIZE); i++)
        {
          MmPageArray[i].Flags.Type = MM_PHYSICAL_PAGE_FREE;
          MmPageArray[i].ReferenceCount = 0;
          InsertTailList(&FreeUnzeroedPageListHead,
                         &MmPageArray[i].ListEntry);
          UnzeroedPageCount++;
        }
      /* The 0xa0000-0xfffff BIOS/VGA hole is reserved. */
      MmStats.NrReservedPages += (0x60000 / PAGE_SIZE);
      for (; i<(0x100000 / PAGE_SIZE); i++)
        {
          MmPageArray[i].Flags.Type = MM_PHYSICAL_PAGE_BIOS;
          MmPageArray[i].Flags.Consumer = MC_NPPOOL;
          MmPageArray[i].ReferenceCount = 1;
          InsertTailList(&BiosPageListHead,
                         &MmPageArray[i].ListEntry);
        }
      /* Free pages between 1MB and the kernel image. */
      MmStats.NrFreePages += (((ULONG)FirstPhysKernelAddress/PAGE_SIZE) - i);
      for (; i<((ULONG)FirstPhysKernelAddress/PAGE_SIZE); i++)
        {
          MmPageArray[i].Flags.Type = MM_PHYSICAL_PAGE_FREE;
          MmPageArray[i].ReferenceCount = 0;
          InsertTailList(&FreeUnzeroedPageListHead,
                         &MmPageArray[i].ListEntry);
          UnzeroedPageCount++;
        }
      /* Pages occupied by the kernel image and descriptor array. */
      MmStats.NrSystemPages +=
        (((ULONG)LastPhysKernelAddress/PAGE_SIZE) - i);
      for (; i<((ULONG)LastPhysKernelAddress/PAGE_SIZE); i++)
        {
          MmPageArray[i].Flags.Type = MM_PHYSICAL_PAGE_USED;
          MmPageArray[i].Flags.Consumer = MC_NPPOOL;
          MmPageArray[i].ReferenceCount = 1;
          MmPageArray[i].MapCount = 1;
          InsertTailList(&UsedPageListHeads[MC_NPPOOL],
                         &MmPageArray[i].ListEntry);
        }
    }

  /* Everything above the kernel up to the end of memory is free. */
  MmStats.NrFreePages += (MemorySizeInPages - i);
  for (; i<MemorySizeInPages; i++)
    {
      MmPageArray[i].Flags.Type = MM_PHYSICAL_PAGE_FREE;
      MmPageArray[i].ReferenceCount = 0;
      InsertTailList(&FreeUnzeroedPageListHead,
                     &MmPageArray[i].ListEntry);
      UnzeroedPageCount++;
    }

  /* Let the BIOS memory map override the default classification. */
  if ((BIOSMemoryMap != NULL) && (AddressRangeCount > 0))
    {
      MiParseBIOSMemoryMap(
        BIOSMemoryMap,
        AddressRangeCount);
    }

  KeInitializeEvent(&ZeroPageThreadEvent, NotificationEvent, TRUE);


  MmStats.NrTotalPages = MmStats.NrFreePages + MmStats.NrSystemPages +
    MmStats.NrReservedPages + MmStats.NrUserPages;
  /* Tell the balancer how many pages are available and how many are
   * already consumed (system + reserved count as used by NPPOOL here). */
  MmInitializeBalancer(MmStats.NrFreePages, MmStats.NrSystemPages + MmStats.NrReservedPages);
  return((PVOID)LastKernelAddress);
}
512
513 VOID
514 MmSetFlagsPage(PHYSICAL_ADDRESS PhysicalAddress, ULONG Flags)
515 {
516 ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
517 KIRQL oldIrql;
518
519 KeAcquireSpinLock(&PageListLock, &oldIrql);
520 MmPageArray[Start].AllFlags = Flags;
521 KeReleaseSpinLock(&PageListLock, oldIrql);
522 }
523
524 VOID
525 MmSetRmapListHeadPage(PHYSICAL_ADDRESS PhysicalAddress,
526 struct _MM_RMAP_ENTRY* ListHead)
527 {
528 ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
529
530 MmPageArray[Start].RmapListHead = ListHead;
531 }
532
533 struct _MM_RMAP_ENTRY*
534 MmGetRmapListHeadPage(PHYSICAL_ADDRESS PhysicalAddress)
535 {
536 ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
537
538 return(MmPageArray[Start].RmapListHead);
539 }
540
541 VOID
542 MmMarkPageMapped(PHYSICAL_ADDRESS PhysicalAddress)
543 {
544 ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
545 KIRQL oldIrql;
546
547 if (Start < MmPageArraySize)
548 {
549 KeAcquireSpinLock(&PageListLock, &oldIrql);
550 if (MmPageArray[Start].Flags.Type == MM_PHYSICAL_PAGE_FREE)
551 {
552 DbgPrint("Mapping non-used page\n");
553 KEBUGCHECK(0);
554 }
555 MmPageArray[Start].MapCount++;
556 KeReleaseSpinLock(&PageListLock, oldIrql);
557 }
558 }
559
560 VOID
561 MmMarkPageUnmapped(PHYSICAL_ADDRESS PhysicalAddress)
562 {
563 ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
564 KIRQL oldIrql;
565
566 if (Start < MmPageArraySize)
567 {
568 KeAcquireSpinLock(&PageListLock, &oldIrql);
569 if (MmPageArray[Start].Flags.Type == MM_PHYSICAL_PAGE_FREE)
570 {
571 DbgPrint("Unmapping non-used page\n");
572 KEBUGCHECK(0);
573 }
574 if (MmPageArray[Start].MapCount == 0)
575 {
576 DbgPrint("Unmapping not mapped page\n");
577 KEBUGCHECK(0);
578 }
579 MmPageArray[Start].MapCount--;
580 KeReleaseSpinLock(&PageListLock, oldIrql);
581 }
582 }
583
584 ULONG
585 MmGetFlagsPage(PHYSICAL_ADDRESS PhysicalAddress)
586 {
587 ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
588 KIRQL oldIrql;
589 ULONG Flags;
590
591 KeAcquireSpinLock(&PageListLock, &oldIrql);
592 Flags = MmPageArray[Start].AllFlags;
593 KeReleaseSpinLock(&PageListLock, oldIrql);
594
595 return(Flags);
596 }
597
598
599 VOID
600 MmSetSavedSwapEntryPage(PHYSICAL_ADDRESS PhysicalAddress,
601 SWAPENTRY SavedSwapEntry)
602 {
603 ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
604 KIRQL oldIrql;
605
606 KeAcquireSpinLock(&PageListLock, &oldIrql);
607 MmPageArray[Start].SavedSwapEntry = SavedSwapEntry;
608 KeReleaseSpinLock(&PageListLock, oldIrql);
609 }
610
611 SWAPENTRY
612 MmGetSavedSwapEntryPage(PHYSICAL_ADDRESS PhysicalAddress)
613 {
614 ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
615 SWAPENTRY SavedSwapEntry;
616 KIRQL oldIrql;
617
618 KeAcquireSpinLock(&PageListLock, &oldIrql);
619 SavedSwapEntry = MmPageArray[Start].SavedSwapEntry;
620 KeReleaseSpinLock(&PageListLock, oldIrql);
621
622 return(SavedSwapEntry);
623 }
624
625 VOID
626 MmReferencePage(PHYSICAL_ADDRESS PhysicalAddress)
627 {
628 ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
629 KIRQL oldIrql;
630
631 DPRINT("MmReferencePage(PhysicalAddress %x)\n", PhysicalAddress);
632
633 if (PhysicalAddress.u.LowPart == 0)
634 {
635 KEBUGCHECK(0);
636 }
637
638 KeAcquireSpinLock(&PageListLock, &oldIrql);
639
640 if (MmPageArray[Start].Flags.Type != MM_PHYSICAL_PAGE_USED)
641 {
642 DbgPrint("Referencing non-used page\n");
643 KEBUGCHECK(0);
644 }
645
646 MmPageArray[Start].ReferenceCount++;
647 KeReleaseSpinLock(&PageListLock, oldIrql);
648 }
649
650 ULONG
651 MmGetReferenceCountPage(PHYSICAL_ADDRESS PhysicalAddress)
652 {
653 ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
654 KIRQL oldIrql;
655 ULONG RCount;
656
657 DPRINT("MmGetReferenceCountPage(PhysicalAddress %x)\n", PhysicalAddress);
658
659 if (PhysicalAddress.u.LowPart == 0)
660 {
661 KEBUGCHECK(0);
662 }
663
664 KeAcquireSpinLock(&PageListLock, &oldIrql);
665
666 if (MmPageArray[Start].Flags.Type != MM_PHYSICAL_PAGE_USED)
667 {
668 DbgPrint("Getting reference count for free page\n");
669 KEBUGCHECK(0);
670 }
671
672 RCount = MmPageArray[Start].ReferenceCount;
673
674 KeReleaseSpinLock(&PageListLock, oldIrql);
675 return(RCount);
676 }
677
678 BOOLEAN
679 MmIsUsablePage(PHYSICAL_ADDRESS PhysicalAddress)
680 {
681 ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
682
683 DPRINT("MmGetReferenceCountPage(PhysicalAddress %x)\n", PhysicalAddress);
684
685 if (PhysicalAddress.u.LowPart == 0)
686 {
687 KEBUGCHECK(0);
688 }
689
690 if (MmPageArray[Start].Flags.Type != MM_PHYSICAL_PAGE_USED &&
691 MmPageArray[Start].Flags.Type != MM_PHYSICAL_PAGE_BIOS)
692 {
693 return(FALSE);
694 }
695
696 return(TRUE);
697 }
698
VOID
MmDereferencePage(PHYSICAL_ADDRESS PhysicalAddress)
/*
 * FUNCTION: Drop one reference on a used physical page; when the count
 * reaches zero, validate the descriptor and return the page to the
 * unzeroed free list, waking the zero-page thread if enough unzeroed
 * pages have accumulated.
 * NOTE: Bugchecks on the reserved zero page or on a page not in use.
 */
{
  ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
  KIRQL oldIrql;

  DPRINT("MmDereferencePage(PhysicalAddress %I64x)\n", PhysicalAddress);

  if (PhysicalAddress.u.LowPart == 0)
    {
      KEBUGCHECK(0);
    }

  KeAcquireSpinLock(&PageListLock, &oldIrql);


  if (MmPageArray[Start].Flags.Type != MM_PHYSICAL_PAGE_USED)
    {
      DbgPrint("Dereferencing free page\n");
      KEBUGCHECK(0);
    }

  MmPageArray[Start].ReferenceCount--;
  if (MmPageArray[Start].ReferenceCount == 0)
    {
      /* NOTE(review): NrSystemPages is decremented for every consumer,
       * mirroring the unconditional increment in MmAllocPage — verify
       * that this accounting is intentional. */
      MmStats.NrFreePages++;
      MmStats.NrSystemPages--;
      RemoveEntryList(&MmPageArray[Start].ListEntry);
      /* A page must be fully disentangled before it can be freed:
       * no rmaps, no mappings, no locks, no saved swap entry. */
      if (MmPageArray[Start].RmapListHead != NULL)
        {
          DbgPrint("Freeing page with rmap entries.\n");
          KEBUGCHECK(0);
        }
      if (MmPageArray[Start].MapCount != 0)
        {
          DbgPrint("Freeing mapped page (0x%I64x count %d)\n",
                   PhysicalAddress, MmPageArray[Start].MapCount);
          KEBUGCHECK(0);
        }
      if (MmPageArray[Start].LockCount > 0)
        {
          DbgPrint("Freeing locked page\n");
          KEBUGCHECK(0);
        }
      if (MmPageArray[Start].SavedSwapEntry != 0)
        {
          DbgPrint("Freeing page with swap entry.\n");
          KEBUGCHECK(0);
        }
      if (MmPageArray[Start].Flags.Type != MM_PHYSICAL_PAGE_USED)
        {
          DbgPrint("Freeing page with flags %x\n",
                   MmPageArray[Start].Flags.Type);
          KEBUGCHECK(0);
        }
      MmPageArray[Start].Flags.Type = MM_PHYSICAL_PAGE_FREE;
      InsertTailList(&FreeUnzeroedPageListHead,
                     &MmPageArray[Start].ListEntry);
      UnzeroedPageCount++;
      /* Kick the zero-page thread once a small backlog builds up and
       * it is not already signalled. */
      if (UnzeroedPageCount > 8 && 0 == KeReadStateEvent(&ZeroPageThreadEvent))
        {
          KeSetEvent(&ZeroPageThreadEvent, IO_NO_INCREMENT, FALSE);
        }
    }
  KeReleaseSpinLock(&PageListLock, oldIrql);
}
765
766 ULONG
767 MmGetLockCountPage(PHYSICAL_ADDRESS PhysicalAddress)
768 {
769 ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
770 KIRQL oldIrql;
771 ULONG LockCount;
772
773 DPRINT("MmGetLockCountPage(PhysicalAddress %x)\n", PhysicalAddress);
774
775 if (PhysicalAddress.u.LowPart == 0)
776 {
777 KEBUGCHECK(0);
778 }
779
780 KeAcquireSpinLock(&PageListLock, &oldIrql);
781
782 if (MmPageArray[Start].Flags.Type != MM_PHYSICAL_PAGE_USED)
783 {
784 DbgPrint("Getting lock count for free page\n");
785 KEBUGCHECK(0);
786 }
787
788 LockCount = MmPageArray[Start].LockCount;
789 KeReleaseSpinLock(&PageListLock, oldIrql);
790
791 return(LockCount);
792 }
793
794 VOID
795 MmLockPage(PHYSICAL_ADDRESS PhysicalAddress)
796 {
797 ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
798 KIRQL oldIrql;
799
800 DPRINT("MmLockPage(PhysicalAddress %x)\n", PhysicalAddress);
801
802 if (PhysicalAddress.u.LowPart == 0)
803 {
804 KEBUGCHECK(0);
805 }
806
807 KeAcquireSpinLock(&PageListLock, &oldIrql);
808
809 if (MmPageArray[Start].Flags.Type != MM_PHYSICAL_PAGE_USED)
810 {
811 DbgPrint("Locking free page\n");
812 KEBUGCHECK(0);
813 }
814
815 MmPageArray[Start].LockCount++;
816 KeReleaseSpinLock(&PageListLock, oldIrql);
817 }
818
819 VOID
820 MmUnlockPage(PHYSICAL_ADDRESS PhysicalAddress)
821 {
822 ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
823 KIRQL oldIrql;
824
825 DPRINT("MmUnlockPage(PhysicalAddress %llx)\n", PhysicalAddress);
826
827 if (PhysicalAddress.u.LowPart == 0)
828 {
829 KEBUGCHECK(0);
830 }
831
832 KeAcquireSpinLock(&PageListLock, &oldIrql);
833
834 if (MmPageArray[Start].Flags.Type != MM_PHYSICAL_PAGE_USED)
835 {
836 DbgPrint("Unlocking free page\n");
837 KEBUGCHECK(0);
838 }
839
840 MmPageArray[Start].LockCount--;
841 KeReleaseSpinLock(&PageListLock, oldIrql);
842 }
843
PHYSICAL_ADDRESS
MmAllocPage(ULONG Consumer, SWAPENTRY SavedSwapEntry)
/*
 * FUNCTION: Allocate one physical page for the given consumer, preferring
 * already-zeroed pages and zeroing an unzeroed page when necessary.
 * ARGUMENTS:
 *     Consumer = MC_* constant the page is charged to
 *     SavedSwapEntry = Swap slot to record in the descriptor (0 if none)
 * RETURNS: The page's physical address, or 0 when no free pages remain.
 */
{
  PHYSICAL_ADDRESS PageOffset;
  PLIST_ENTRY ListEntry;
  PPHYSICAL_PAGE PageDescriptor;
  KIRQL oldIrql;
  BOOLEAN NeedClear = FALSE;

  DPRINT("MmAllocPage()\n");

  KeAcquireSpinLock(&PageListLock, &oldIrql);
  if (IsListEmpty(&FreeZeroedPageListHead))
    {
      if (IsListEmpty(&FreeUnzeroedPageListHead))
        {
          /* Both free lists are exhausted. */
          DPRINT1("MmAllocPage(): Out of memory\n");
          KeReleaseSpinLock(&PageListLock, oldIrql);
          return((PHYSICAL_ADDRESS)0LL);
        }
      /* Fall back to an unzeroed page; it must be cleared before return. */
      ListEntry = RemoveTailList(&FreeUnzeroedPageListHead);
      UnzeroedPageCount--;

      PageDescriptor = CONTAINING_RECORD(ListEntry, PHYSICAL_PAGE, ListEntry);

      NeedClear = TRUE;
    }
  else
    {
      ListEntry = RemoveTailList(&FreeZeroedPageListHead);

      PageDescriptor = CONTAINING_RECORD(ListEntry, PHYSICAL_PAGE, ListEntry);
    }

  /* Sanity: a page coming off a free list must be free and unmapped. */
  if (PageDescriptor->Flags.Type != MM_PHYSICAL_PAGE_FREE)
    {
      DbgPrint("Got non-free page from freelist\n");
      KEBUGCHECK(0);
    }
  if (PageDescriptor->MapCount != 0)
    {
      DbgPrint("Got mapped page from freelist\n");
      KEBUGCHECK(0);
    }
  /* Initialize the descriptor for its new owner. */
  PageDescriptor->Flags.Type = MM_PHYSICAL_PAGE_USED;
  PageDescriptor->Flags.Consumer = Consumer;
  PageDescriptor->ReferenceCount = 1;
  PageDescriptor->LockCount = 0;
  PageDescriptor->MapCount = 0;
  PageDescriptor->SavedSwapEntry = SavedSwapEntry;
  InsertTailList(&UsedPageListHeads[Consumer], ListEntry);

  /* NOTE(review): NrSystemPages is incremented regardless of Consumer
   * (even for MC_USER); MmDereferencePage decrements it symmetrically —
   * verify this accounting is intentional. */
  MmStats.NrSystemPages++;
  MmStats.NrFreePages--;

  KeReleaseSpinLock(&PageListLock, oldIrql);

  /* Descriptor index in MmPageArray -> physical address. */
  PageOffset.QuadPart = (ULONG)((ULONG)PageDescriptor - (ULONG)MmPageArray);
  PageOffset.QuadPart =
    (PageOffset.QuadPart / sizeof(PHYSICAL_PAGE)) * PAGE_SIZE;
  if (NeedClear)
    {
      /* Zero outside the lock: MiZeroPage may map the page. */
      MiZeroPage(PageOffset);
    }
  if (PageDescriptor->MapCount != 0)
    {
      DbgPrint("Returning mapped page.\n");
      KEBUGCHECK(0);
    }
  return(PageOffset);
}
915
916
NTSTATUS STDCALL
MmZeroPageThreadMain(PVOID Ignored)
/*
 * FUNCTION: Body of the zero-page thread.  Waits on ZeroPageThreadEvent,
 * then drains the unzeroed free list, zeroing each page and moving it to
 * the zeroed free list.  Never returns under normal operation.
 */
{
  NTSTATUS Status;
  KIRQL oldIrql;
  PLIST_ENTRY ListEntry;
  PPHYSICAL_PAGE PageDescriptor;
  PHYSICAL_ADDRESS PhysPage;
  static PVOID Address = NULL;  /* Reused kernel VA for mapping pages */
  ULONG Count;

  while(1)
    {
      Status = KeWaitForSingleObject(&ZeroPageThreadEvent,
                                     0,
                                     KernelMode,
                                     FALSE,
                                     NULL);
      if (!NT_SUCCESS(Status))
        {
          DbgPrint("ZeroPageThread: Wait failed\n");
          KEBUGCHECK(0);
          return(STATUS_UNSUCCESSFUL);
        }

      Count = 0;
      KeAcquireSpinLock(&PageListLock, &oldIrql);
      while (!IsListEmpty(&FreeUnzeroedPageListHead))
        {
          ListEntry = RemoveTailList(&FreeUnzeroedPageListHead);
          UnzeroedPageCount--;
          PageDescriptor = CONTAINING_RECORD(ListEntry, PHYSICAL_PAGE, ListEntry);
          /* We set the page to used, because MmCreateVirtualMapping failed with unused pages */
          PageDescriptor->Flags.Type = MM_PHYSICAL_PAGE_USED;
          /* Drop the lock while zeroing: mapping/memset cannot be done
           * while holding a spinlock at raised IRQL for long. */
          KeReleaseSpinLock(&PageListLock, oldIrql);
          Count++;
          /* Descriptor index -> physical address of the page to zero. */
          PhysPage.QuadPart = (ULONG)((ULONG)PageDescriptor - (ULONG)MmPageArray);
          PhysPage.QuadPart = (PhysPage.QuadPart / sizeof(PHYSICAL_PAGE)) * PAGE_SIZE;
          if (Address == NULL)
            {
              /* First iteration: reserve a kernel VA we keep reusing. */
              Address = ExAllocatePageWithPhysPage(PhysPage);
            }
          else
            {
              Status = MmCreateVirtualMapping(NULL,
                                              Address,
                                              PAGE_READWRITE | PAGE_SYSTEM,
                                              PhysPage,
                                              FALSE);
              if (!NT_SUCCESS(Status))
                {
                  DbgPrint("Unable to create virtual mapping\n");
                  KEBUGCHECK(0);
                }
            }
          memset(Address, 0, PAGE_SIZE);
          MmDeleteVirtualMapping(NULL, (PVOID)Address, FALSE, NULL, NULL);
          KeAcquireSpinLock(&PageListLock, &oldIrql);
          if (PageDescriptor->MapCount != 0)
            {
              DbgPrint("Mapped page on freelist.\n");
              KEBUGCHECK(0);
            }
          /* Mark free again and file it under the zeroed list. */
          PageDescriptor->Flags.Type = MM_PHYSICAL_PAGE_FREE;
          InsertHeadList(&FreeZeroedPageListHead, ListEntry);
        }
      DPRINT("Zeroed %d pages.\n", Count);
      /* NOTE(review): KeResetEvent is called while still holding
       * PageListLock at raised IRQL — confirm this ordering is safe. */
      KeResetEvent(&ZeroPageThreadEvent);
      KeReleaseSpinLock(&PageListLock, oldIrql);
    }
}
988
989 NTSTATUS INIT_FUNCTION
990 MmInitZeroPageThread(VOID)
991 {
992 KPRIORITY Priority;
993 NTSTATUS Status;
994
995 Status = PsCreateSystemThread(&ZeroPageThreadHandle,
996 THREAD_ALL_ACCESS,
997 NULL,
998 NULL,
999 &ZeroPageThreadId,
1000 (PKSTART_ROUTINE) MmZeroPageThreadMain,
1001 NULL);
1002 if (!NT_SUCCESS(Status))
1003 {
1004 return(Status);
1005 }
1006
1007 Priority = 1;
1008 NtSetInformationThread(ZeroPageThreadHandle,
1009 ThreadPriority,
1010 &Priority,
1011 sizeof(Priority));
1012
1013 return(STATUS_SUCCESS);
1014 }
1015
1016 /* EOF */