- Fixed deadlock in pager thread.
[reactos.git] / reactos / ntoskrnl / mm / freelist.c
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/mm/freelist.c
5 * PURPOSE: Handle the list of free physical pages
6 * PROGRAMMER: David Welch (welch@cwcom.net)
7 * UPDATE HISTORY:
8 * 27/05/98: Created
9 * 18/08/98: Added a fix from Robert Bergkvist
10 */
11
12 /* INCLUDES ****************************************************************/
13
14 #include <ddk/ntddk.h>
15 #include <internal/mm.h>
16 #include <internal/ntoskrnl.h>
17
18 #define NDEBUG
19 #include <internal/debug.h>
20
21 /* TYPES *******************************************************************/
22
/* Values for PHYSICAL_PAGE.Flags.Type (2-bit field). Zero means the page
   descriptor has not been initialized yet. */
#define MM_PHYSICAL_PAGE_FREE (0x1)  /* page is on a free list */
#define MM_PHYSICAL_PAGE_USED (0x2)  /* page is allocated to a consumer */
#define MM_PHYSICAL_PAGE_BIOS (0x3)  /* page is reserved (BIOS/low memory) */

/*
 * Descriptor for one physical page frame. MmPageArray holds one of these
 * per page of physical memory, indexed by page frame number.
 */
typedef struct _PHYSICAL_PAGE
{
   union
   {
      struct
      {
         ULONG Type:2;       /* MM_PHYSICAL_PAGE_* state of this page */
         ULONG Consumer:3;   /* MC_* owner of the page when Type == USED */
      }Flags;
      ULONG AllFlags;        /* raw access to both bit-fields at once */
   };

   LIST_ENTRY ListEntry;     /* link in the free/used/bios list matching Type */
   ULONG ReferenceCount;     /* outstanding references; page freed when 0 */
   SWAPENTRY SavedSwapEntry; /* swap slot backing this page, 0 if none */
   ULONG LockCount;          /* MmLockPage/MmUnlockPage nesting count */
   ULONG MapCount;           /* number of virtual mappings of this page */
   struct _MM_RMAP_ENTRY* RmapListHead; /* reverse-map entries for this page */
} PHYSICAL_PAGE, *PPHYSICAL_PAGE;
46
47 /* GLOBALS ****************************************************************/
48
static PPHYSICAL_PAGE MmPageArray;   /* one PHYSICAL_PAGE per physical page */
static ULONG MmPageArraySize;        /* number of entries in MmPageArray */

/* PageListLock protects MmPageArray contents and all of the lists below. */
static KSPIN_LOCK PageListLock;
static LIST_ENTRY UsedPageListHeads[MC_MAXIMUM]; /* used pages, per consumer */
static LIST_ENTRY FreeZeroedPageListHead;        /* free pages already zeroed */
static LIST_ENTRY FreeUnzeroedPageListHead;      /* free pages needing zeroing */
static LIST_ENTRY BiosPageListHead;              /* reserved/BIOS pages */

static HANDLE ZeroPageThreadHandle;  /* system thread that pre-zeroes pages */
static CLIENT_ID ZeroPageThreadId;
static KEVENT ZeroPageThreadEvent;   /* signalled to wake the zeroing thread */

/* Count of pages on FreeUnzeroedPageListHead; used to decide when to wake
   the zero page thread. */
static ULONG UnzeroedPageCount = 0;
63
64 /* FUNCTIONS *************************************************************/
65
66 VOID
67 MmTransferOwnershipPage(PHYSICAL_ADDRESS PhysicalAddress, ULONG NewConsumer)
68 {
69 ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
70 KIRQL oldIrql;
71
72 KeAcquireSpinLock(&PageListLock, &oldIrql);
73 if (MmPageArray[Start].MapCount != 0)
74 {
75 DbgPrint("Transfering mapped page.\n");
76 KeBugCheck(0);
77 }
78 RemoveEntryList(&MmPageArray[Start].ListEntry);
79 InsertTailList(&UsedPageListHeads[NewConsumer],
80 &MmPageArray[Start].ListEntry);
81 MmPageArray[Start].Flags.Consumer = NewConsumer;
82 KeReleaseSpinLock(&PageListLock, oldIrql);
83 MiZeroPage(PhysicalAddress);
84 }
85
86 PHYSICAL_ADDRESS
87 MmGetLRUFirstUserPage(VOID)
88 {
89 PLIST_ENTRY NextListEntry;
90 PHYSICAL_ADDRESS Next;
91 PHYSICAL_PAGE* PageDescriptor;
92 KIRQL oldIrql;
93
94 KeAcquireSpinLock(&PageListLock, &oldIrql);
95 NextListEntry = UsedPageListHeads[MC_USER].Flink;
96 if (NextListEntry == &UsedPageListHeads[MC_USER])
97 {
98 KeReleaseSpinLock(&PageListLock, oldIrql);
99 return((LARGE_INTEGER)0LL);
100 }
101 PageDescriptor = CONTAINING_RECORD(NextListEntry, PHYSICAL_PAGE, ListEntry);
102 Next.QuadPart = (ULONG)((ULONG)PageDescriptor - (ULONG)MmPageArray);
103 Next.QuadPart = (Next.QuadPart / sizeof(PHYSICAL_PAGE)) * PAGE_SIZE;
104 KeReleaseSpinLock(&PageListLock, oldIrql);
105 return(Next);
106 }
107
108 VOID
109 MmSetLRULastPage(PHYSICAL_ADDRESS PhysicalAddress)
110 {
111 ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
112 KIRQL oldIrql;
113
114 KeAcquireSpinLock(&PageListLock, &oldIrql);
115 if (MmPageArray[Start].Flags.Type == MM_PHYSICAL_PAGE_USED &&
116 MmPageArray[Start].Flags.Consumer == MC_USER)
117 {
118 RemoveEntryList(&MmPageArray[Start].ListEntry);
119 InsertTailList(&UsedPageListHeads[MC_USER],
120 &MmPageArray[Start].ListEntry);
121 }
122 KeReleaseSpinLock(&PageListLock, oldIrql);
123 }
124
125 PHYSICAL_ADDRESS
126 MmGetLRUNextUserPage(PHYSICAL_ADDRESS PreviousPhysicalAddress)
127 {
128 ULONG Start = PreviousPhysicalAddress.u.LowPart / PAGE_SIZE;
129 PLIST_ENTRY NextListEntry;
130 PHYSICAL_ADDRESS Next;
131 PHYSICAL_PAGE* PageDescriptor;
132 KIRQL oldIrql;
133
134 KeAcquireSpinLock(&PageListLock, &oldIrql);
135 if (MmPageArray[Start].Flags.Type != MM_PHYSICAL_PAGE_USED ||
136 MmPageArray[Start].Flags.Consumer != MC_USER)
137 {
138 NextListEntry = UsedPageListHeads[MC_USER].Flink;
139 }
140 else
141 {
142 NextListEntry = MmPageArray[Start].ListEntry.Flink;
143 }
144 if (NextListEntry == &UsedPageListHeads[MC_USER])
145 {
146 KeReleaseSpinLock(&PageListLock, oldIrql);
147 return((LARGE_INTEGER)0LL);
148 }
149 PageDescriptor = CONTAINING_RECORD(NextListEntry, PHYSICAL_PAGE, ListEntry);
150 Next.QuadPart = (ULONG)((ULONG)PageDescriptor - (ULONG)MmPageArray);
151 Next.QuadPart = (Next.QuadPart / sizeof(PHYSICAL_PAGE)) * PAGE_SIZE;
152 KeReleaseSpinLock(&PageListLock, oldIrql);
153 return(Next);
154 }
155
156 PHYSICAL_ADDRESS
157 MmGetContinuousPages(ULONG NumberOfBytes,
158 PHYSICAL_ADDRESS HighestAcceptableAddress,
159 ULONG Alignment)
160 {
161 ULONG NrPages;
162 ULONG i;
163 LONG start;
164 ULONG length;
165 KIRQL oldIrql;
166
167 NrPages = PAGE_ROUND_UP(NumberOfBytes) / PAGE_SIZE;
168
169 KeAcquireSpinLock(&PageListLock, &oldIrql);
170
171 start = -1;
172 length = 0;
173 for (i = 0; i < (HighestAcceptableAddress.QuadPart / PAGE_SIZE); )
174 {
175 if (MmPageArray[i].Flags.Type == MM_PHYSICAL_PAGE_FREE)
176 {
177 if (start == -1)
178 {
179 start = i;
180 length = 1;
181 }
182 else
183 {
184 length++;
185 }
186 i++;
187 if (length == NrPages)
188 {
189 break;
190 }
191 }
192 else
193 {
194 start = -1;
195 /*
196 * Fast forward to the base of the next aligned region
197 */
198 i = ROUND_UP((i + 1), (Alignment / PAGE_SIZE));
199 }
200 }
201 if (start == -1 || length != NrPages)
202 {
203 KeReleaseSpinLock(&PageListLock, oldIrql);
204 return((LARGE_INTEGER)(LONGLONG)0);
205 }
206 for (i = start; i < (start + length); i++)
207 {
208 RemoveEntryList(&MmPageArray[i].ListEntry);
209 MmPageArray[i].Flags.Type = MM_PHYSICAL_PAGE_USED;
210 MmPageArray[i].Flags.Consumer = MC_NPPOOL;
211 MmPageArray[i].ReferenceCount = 1;
212 MmPageArray[i].LockCount = 0;
213 MmPageArray[i].MapCount = 0;
214 MmPageArray[i].SavedSwapEntry = 0;
215 InsertTailList(&UsedPageListHeads[MC_NPPOOL],
216 &MmPageArray[i].ListEntry);
217 }
218 KeReleaseSpinLock(&PageListLock, oldIrql);
219 return((LARGE_INTEGER)((LONGLONG)start * PAGE_SIZE));
220 }
221
222 VOID
223 MiParseRangeToFreeList(PADDRESS_RANGE Range)
224 {
225 ULONG i, first, last;
226
227 /* FIXME: Not 64-bit ready */
228
229 DPRINT("Range going to free list (Base 0x%X, Length 0x%X, Type 0x%X)\n",
230 Range->BaseAddrLow,
231 Range->LengthLow,
232 Range->Type);
233
234 first = (Range->BaseAddrLow + PAGE_SIZE - 1) / PAGE_SIZE;
235 last = first + ((Range->LengthLow + PAGE_SIZE - 1) / PAGE_SIZE);
236 for (i = first; i < last; i++)
237 {
238 if (MmPageArray[i].Flags.Type == 0)
239 {
240 MmPageArray[i].Flags.Type = MM_PHYSICAL_PAGE_FREE;
241 MmPageArray[i].ReferenceCount = 0;
242 InsertTailList(&FreeUnzeroedPageListHead,
243 &MmPageArray[i].ListEntry);
244 UnzeroedPageCount++;
245 }
246 }
247 }
248
249 VOID
250 MiParseRangeToBiosList(PADDRESS_RANGE Range)
251 {
252 ULONG i, first, last;
253
254 /* FIXME: Not 64-bit ready */
255
256 DPRINT("Range going to bios list (Base 0x%X, Length 0x%X, Type 0x%X)\n",
257 Range->BaseAddrLow,
258 Range->LengthLow,
259 Range->Type);
260
261 first = (Range->BaseAddrLow + PAGE_SIZE - 1) / PAGE_SIZE;
262 last = first + ((Range->LengthLow + PAGE_SIZE - 1) / PAGE_SIZE);
263 for (i = first; i < last; i++)
264 {
265 /* Remove the page from the free list if it is there */
266 if (MmPageArray[i].Flags.Type == MM_PHYSICAL_PAGE_FREE)
267 {
268 RemoveEntryList(&MmPageArray[i].ListEntry);
269 }
270
271 if (MmPageArray[i].Flags.Type != MM_PHYSICAL_PAGE_BIOS)
272 {
273 MmPageArray[i].Flags.Type = MM_PHYSICAL_PAGE_BIOS;
274 MmPageArray[i].Flags.Consumer = MC_NPPOOL;
275 MmPageArray[i].ReferenceCount = 1;
276 InsertTailList(&BiosPageListHead,
277 &MmPageArray[i].ListEntry);
278 }
279 }
280 }
281
282 VOID
283 MiParseBIOSMemoryMap(ULONG MemorySizeInPages,
284 PADDRESS_RANGE BIOSMemoryMap,
285 ULONG AddressRangeCount)
286 {
287 PADDRESS_RANGE p;
288 ULONG i;
289
290 p = BIOSMemoryMap;
291 for (i = 0; i < AddressRangeCount; i++)
292 {
293 if (((p->BaseAddrLow + PAGE_SIZE - 1) / PAGE_SIZE) < MemorySizeInPages)
294 {
295 if (p->Type == 1)
296 {
297 MiParseRangeToFreeList(p);
298 }
299 else
300 {
301 MiParseRangeToBiosList(p);
302 }
303 }
304 p += 1;
305 }
306 }
307
PVOID
MmInitializePageList(PVOID FirstPhysKernelAddress,
                     PVOID LastPhysKernelAddress,
                     ULONG MemorySizeInPages,
                     ULONG LastKernelAddress,
                     PADDRESS_RANGE BIOSMemoryMap,
                     ULONG AddressRangeCount)
/*
 * FUNCTION: Initializes the page list with all pages free
 * except those known to be reserved and those used by the kernel
 * ARGUMENTS:
 *         FirstPhysKernelAddress = First physical address used by the kernel
 *         LastPhysKernelAddress = Last physical address used by the kernel
 *         MemorySizeInPages = Number of physical pages to manage
 *         LastKernelAddress = First free kernel virtual address; the page
 *                             descriptor array is placed here
 *         BIOSMemoryMap/AddressRangeCount = optional BIOS memory map
 * RETURNS: The first kernel virtual address after the descriptor array.
 * NOTES: Called once at boot before any other routine in this file.
 */
{
   ULONG i;
   ULONG Reserved;
   NTSTATUS Status;

   DPRINT("MmInitializePageList(FirstPhysKernelAddress %x, "
          "LastPhysKernelAddress %x, "
          "MemorySizeInPages %x, LastKernelAddress %x)\n",
          FirstPhysKernelAddress,
          LastPhysKernelAddress,
          MemorySizeInPages,
          LastKernelAddress);

   /* Set up the lists and the lock that guards them. */
   for (i = 0; i < MC_MAXIMUM; i++)
   {
      InitializeListHead(&UsedPageListHeads[i]);
   }
   KeInitializeSpinLock(&PageListLock);
   InitializeListHead(&FreeUnzeroedPageListHead);
   InitializeListHead(&FreeZeroedPageListHead);
   InitializeListHead(&BiosPageListHead);

   LastKernelAddress = PAGE_ROUND_UP(LastKernelAddress);

   /* Place the page descriptor array directly after the kernel image and
      account for the pages it occupies. */
   MmPageArraySize = MemorySizeInPages;
   Reserved =
      PAGE_ROUND_UP((MmPageArraySize * sizeof(PHYSICAL_PAGE))) / PAGE_SIZE;
   MmPageArray = (PHYSICAL_PAGE *)LastKernelAddress;

   DPRINT("Reserved %d\n", Reserved);

   LastKernelAddress = PAGE_ROUND_UP(LastKernelAddress);
   LastKernelAddress = ((ULONG)LastKernelAddress + (Reserved * PAGE_SIZE));
   LastPhysKernelAddress = (PVOID)PAGE_ROUND_UP(LastPhysKernelAddress);
   LastPhysKernelAddress = LastPhysKernelAddress + (Reserved * PAGE_SIZE);

   MmStats.NrTotalPages = 0;
   MmStats.NrSystemPages = 0;
   MmStats.NrUserPages = 0;
   MmStats.NrReservedPages = 0;
   MmStats.NrFreePages = 0;
   MmStats.NrLockedPages = 0;

   /* Map and zero the descriptor array itself, page by page, backing it
      with the physical pages just past the kernel image. */
   for (i = 0; i < Reserved; i++)
   {
      PVOID Address = (PVOID)(ULONG)MmPageArray + (i * PAGE_SIZE);
      if (!MmIsPagePresent(NULL, Address))
      {
         ULONG PhysicalAddress;
         PhysicalAddress = (ULONG)LastPhysKernelAddress -
            (Reserved * PAGE_SIZE) + (i * PAGE_SIZE);
         Status =
            MmCreateVirtualMappingUnsafe(NULL,
                                         Address,
                                         PAGE_READWRITE,
                                         (PHYSICAL_ADDRESS)(LONGLONG)PhysicalAddress,
                                         FALSE);
         if (!NT_SUCCESS(Status))
         {
            DbgPrint("Unable to create virtual mapping\n");
            KeBugCheck(0);
         }
      }
      memset((PVOID)MmPageArray + (i * PAGE_SIZE), 0, PAGE_SIZE);
   }


   /*
    * Page zero is reserved
    */
   MmPageArray[0].Flags.Type = MM_PHYSICAL_PAGE_BIOS;
   MmPageArray[0].Flags.Consumer = MC_NPPOOL;
   MmPageArray[0].ReferenceCount = 0;
   InsertTailList(&BiosPageListHead,
                  &MmPageArray[0].ListEntry);

   /*
    * Page one is reserved for the initial KPCR
    */
   MmPageArray[1].Flags.Type = MM_PHYSICAL_PAGE_BIOS;
   MmPageArray[1].Flags.Consumer = MC_NPPOOL;
   MmPageArray[1].ReferenceCount = 0;
   InsertTailList(&BiosPageListHead,
                  &MmPageArray[1].ListEntry);

   /* Classify the rest of physical memory. The layout differs depending
      on whether the kernel was loaded below the 640K VGA hole (0xa0000)
      or above 1MB. */
   i = 2;
   if ((ULONG)FirstPhysKernelAddress < 0xa0000)
   {
      /* Kernel in low memory: free pages up to the kernel ... */
      MmStats.NrFreePages += (((ULONG)FirstPhysKernelAddress/PAGE_SIZE) - 2);
      for (; i<((ULONG)FirstPhysKernelAddress/PAGE_SIZE); i++)
      {
         MmPageArray[i].Flags.Type = MM_PHYSICAL_PAGE_FREE;
         MmPageArray[i].ReferenceCount = 0;
         InsertTailList(&FreeUnzeroedPageListHead,
                        &MmPageArray[i].ListEntry);
         UnzeroedPageCount++;
      }
      /* ... the kernel image itself (used, mapped, NPPOOL) ... */
      MmStats.NrSystemPages +=
         ((((ULONG)LastPhysKernelAddress) / PAGE_SIZE) - i);
      for (; i<((ULONG)LastPhysKernelAddress / PAGE_SIZE); i++)
      {
         MmPageArray[i].Flags.Type = MM_PHYSICAL_PAGE_USED;
         MmPageArray[i].Flags.Consumer = MC_NPPOOL;
         MmPageArray[i].ReferenceCount = 1;
         MmPageArray[i].MapCount = 1;
         InsertTailList(&UsedPageListHeads[MC_NPPOOL],
                        &MmPageArray[i].ListEntry);
      }
      /* ... free pages from the kernel up to the VGA hole ... */
      MmStats.NrFreePages += ((0xa0000/PAGE_SIZE) - i);
      for (; i<(0xa0000/PAGE_SIZE); i++)
      {
         MmPageArray[i].Flags.Type = MM_PHYSICAL_PAGE_FREE;
         MmPageArray[i].ReferenceCount = 0;
         InsertTailList(&FreeUnzeroedPageListHead,
                        &MmPageArray[i].ListEntry);
         UnzeroedPageCount++;
      }
      /* ... and the VGA/BIOS area 0xa0000-0xfffff as reserved. */
      MmStats.NrReservedPages += ((0x100000/PAGE_SIZE) - i);
      for (; i<(0x100000 / PAGE_SIZE); i++)
      {
         MmPageArray[i].Flags.Type = MM_PHYSICAL_PAGE_BIOS;
         MmPageArray[i].Flags.Consumer = MC_NPPOOL;
         MmPageArray[i].ReferenceCount = 1;
         InsertTailList(&BiosPageListHead,
                        &MmPageArray[i].ListEntry);
      }
   }
   else
   {
      /* Kernel above 1MB: free pages up to the VGA hole ... */
      MmStats.NrFreePages += ((0xa0000 / PAGE_SIZE) - 2);
      for (; i<(0xa0000 / PAGE_SIZE); i++)
      {
         MmPageArray[i].Flags.Type = MM_PHYSICAL_PAGE_FREE;
         MmPageArray[i].ReferenceCount = 0;
         InsertTailList(&FreeUnzeroedPageListHead,
                        &MmPageArray[i].ListEntry);
         UnzeroedPageCount++;
      }
      /* ... the VGA/BIOS area 0xa0000-0xfffff as reserved ... */
      MmStats.NrReservedPages += (0x60000 / PAGE_SIZE);
      for (; i<(0x100000 / PAGE_SIZE); i++)
      {
         MmPageArray[i].Flags.Type = MM_PHYSICAL_PAGE_BIOS;
         MmPageArray[i].Flags.Consumer = MC_NPPOOL;
         MmPageArray[i].ReferenceCount = 1;
         InsertTailList(&BiosPageListHead,
                        &MmPageArray[i].ListEntry);
      }
      /* ... free pages from 1MB up to the kernel ... */
      MmStats.NrFreePages += (((ULONG)FirstPhysKernelAddress/PAGE_SIZE) - i);
      for (; i<((ULONG)FirstPhysKernelAddress/PAGE_SIZE); i++)
      {
         MmPageArray[i].Flags.Type = MM_PHYSICAL_PAGE_FREE;
         MmPageArray[i].ReferenceCount = 0;
         InsertTailList(&FreeUnzeroedPageListHead,
                        &MmPageArray[i].ListEntry);
         UnzeroedPageCount++;
      }
      /* ... and the kernel image itself (used, mapped, NPPOOL). */
      MmStats.NrSystemPages +=
         (((ULONG)LastPhysKernelAddress/PAGE_SIZE) - i);
      for (; i<((ULONG)LastPhysKernelAddress/PAGE_SIZE); i++)
      {
         MmPageArray[i].Flags.Type = MM_PHYSICAL_PAGE_USED;
         MmPageArray[i].Flags.Consumer = MC_NPPOOL;
         MmPageArray[i].ReferenceCount = 1;
         MmPageArray[i].MapCount = 1;
         InsertTailList(&UsedPageListHeads[MC_NPPOOL],
                        &MmPageArray[i].ListEntry);
      }
   }

   /* Everything beyond the kernel up to the end of memory is free. */
   MmStats.NrFreePages += (MemorySizeInPages - i);
   for (; i<MemorySizeInPages; i++)
   {
      MmPageArray[i].Flags.Type = MM_PHYSICAL_PAGE_FREE;
      MmPageArray[i].ReferenceCount = 0;
      InsertTailList(&FreeUnzeroedPageListHead,
                     &MmPageArray[i].ListEntry);
      UnzeroedPageCount++;
   }

   /* Let the BIOS memory map override the default classification. */
   if ((BIOSMemoryMap != NULL) && (AddressRangeCount > 0))
   {
      MiParseBIOSMemoryMap(
         MemorySizeInPages,
         BIOSMemoryMap,
         AddressRangeCount);
   }

   /* Start signalled so the zero page thread runs once immediately. */
   KeInitializeEvent(&ZeroPageThreadEvent, NotificationEvent, TRUE);


   MmStats.NrTotalPages = MmStats.NrFreePages + MmStats.NrSystemPages +
      MmStats.NrReservedPages + MmStats.NrUserPages;
   MmInitializeBalancer(MmStats.NrFreePages);
   return((PVOID)LastKernelAddress);
}
518
519 VOID
520 MmSetFlagsPage(PHYSICAL_ADDRESS PhysicalAddress, ULONG Flags)
521 {
522 ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
523 KIRQL oldIrql;
524
525 KeAcquireSpinLock(&PageListLock, &oldIrql);
526 MmPageArray[Start].AllFlags = Flags;
527 KeReleaseSpinLock(&PageListLock, oldIrql);
528 }
529
530 VOID
531 MmSetRmapListHeadPage(PHYSICAL_ADDRESS PhysicalAddress,
532 struct _MM_RMAP_ENTRY* ListHead)
533 {
534 ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
535
536 MmPageArray[Start].RmapListHead = ListHead;
537 }
538
539 struct _MM_RMAP_ENTRY*
540 MmGetRmapListHeadPage(PHYSICAL_ADDRESS PhysicalAddress)
541 {
542 ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
543
544 return(MmPageArray[Start].RmapListHead);
545 }
546
547 VOID
548 MmMarkPageMapped(PHYSICAL_ADDRESS PhysicalAddress)
549 {
550 ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
551 KIRQL oldIrql;
552
553 if (Start < MmPageArraySize)
554 {
555 KeAcquireSpinLock(&PageListLock, &oldIrql);
556 if (MmPageArray[Start].Flags.Type == MM_PHYSICAL_PAGE_FREE)
557 {
558 DbgPrint("Mapping non-used page\n");
559 KeBugCheck(0);
560 }
561 MmPageArray[Start].MapCount++;
562 KeReleaseSpinLock(&PageListLock, oldIrql);
563 }
564 }
565
566 VOID
567 MmMarkPageUnmapped(PHYSICAL_ADDRESS PhysicalAddress)
568 {
569 ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
570 KIRQL oldIrql;
571
572 if (Start < MmPageArraySize)
573 {
574 KeAcquireSpinLock(&PageListLock, &oldIrql);
575 if (MmPageArray[Start].Flags.Type == MM_PHYSICAL_PAGE_FREE)
576 {
577 DbgPrint("Unmapping non-used page\n");
578 KeBugCheck(0);
579 }
580 if (MmPageArray[Start].MapCount == 0)
581 {
582 DbgPrint("Unmapping not mapped page\n");
583 KeBugCheck(0);
584 }
585 MmPageArray[Start].MapCount--;
586 KeReleaseSpinLock(&PageListLock, oldIrql);
587 }
588 }
589
590 ULONG
591 MmGetFlagsPage(PHYSICAL_ADDRESS PhysicalAddress)
592 {
593 ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
594 KIRQL oldIrql;
595 ULONG Flags;
596
597 KeAcquireSpinLock(&PageListLock, &oldIrql);
598 Flags = MmPageArray[Start].AllFlags;
599 KeReleaseSpinLock(&PageListLock, oldIrql);
600
601 return(Flags);
602 }
603
604
605 VOID
606 MmSetSavedSwapEntryPage(PHYSICAL_ADDRESS PhysicalAddress,
607 SWAPENTRY SavedSwapEntry)
608 {
609 ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
610 KIRQL oldIrql;
611
612 KeAcquireSpinLock(&PageListLock, &oldIrql);
613 MmPageArray[Start].SavedSwapEntry = SavedSwapEntry;
614 KeReleaseSpinLock(&PageListLock, oldIrql);
615 }
616
617 SWAPENTRY
618 MmGetSavedSwapEntryPage(PHYSICAL_ADDRESS PhysicalAddress)
619 {
620 ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
621 SWAPENTRY SavedSwapEntry;
622 KIRQL oldIrql;
623
624 KeAcquireSpinLock(&PageListLock, &oldIrql);
625 SavedSwapEntry = MmPageArray[Start].SavedSwapEntry;
626 KeReleaseSpinLock(&PageListLock, oldIrql);
627
628 return(SavedSwapEntry);
629 }
630
631 VOID
632 MmReferencePage(PHYSICAL_ADDRESS PhysicalAddress)
633 {
634 ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
635 KIRQL oldIrql;
636
637 DPRINT("MmReferencePage(PhysicalAddress %x)\n", PhysicalAddress);
638
639 if (PhysicalAddress.u.LowPart == 0)
640 {
641 KeBugCheck(0);
642 }
643
644 KeAcquireSpinLock(&PageListLock, &oldIrql);
645
646 if (MmPageArray[Start].Flags.Type != MM_PHYSICAL_PAGE_USED)
647 {
648 DbgPrint("Referencing non-used page\n");
649 KeBugCheck(0);
650 }
651
652 MmPageArray[Start].ReferenceCount++;
653 KeReleaseSpinLock(&PageListLock, oldIrql);
654 }
655
656 ULONG
657 MmGetReferenceCountPage(PHYSICAL_ADDRESS PhysicalAddress)
658 {
659 ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
660 KIRQL oldIrql;
661 ULONG RCount;
662
663 DPRINT("MmGetReferenceCountPage(PhysicalAddress %x)\n", PhysicalAddress);
664
665 if (PhysicalAddress.u.LowPart == 0)
666 {
667 KeBugCheck(0);
668 }
669
670 KeAcquireSpinLock(&PageListLock, &oldIrql);
671
672 if (MmPageArray[Start].Flags.Type != MM_PHYSICAL_PAGE_USED)
673 {
674 DbgPrint("Getting reference count for free page\n");
675 KeBugCheck(0);
676 }
677
678 RCount = MmPageArray[Start].ReferenceCount;
679
680 KeReleaseSpinLock(&PageListLock, oldIrql);
681 return(RCount);
682 }
683
684 BOOLEAN
685 MmIsUsablePage(PHYSICAL_ADDRESS PhysicalAddress)
686 {
687 ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
688
689 DPRINT("MmGetReferenceCountPage(PhysicalAddress %x)\n", PhysicalAddress);
690
691 if (PhysicalAddress.u.LowPart == 0)
692 {
693 KeBugCheck(0);
694 }
695
696 if (MmPageArray[Start].Flags.Type != MM_PHYSICAL_PAGE_USED &&
697 MmPageArray[Start].Flags.Type != MM_PHYSICAL_PAGE_BIOS)
698 {
699 return(FALSE);
700 }
701
702 return(TRUE);
703 }
704
/*
 * Drop one reference on a used physical page. When the count reaches
 * zero the page is sanity-checked and returned to the unzeroed free
 * list, and the zero page thread is woken if enough unzeroed pages have
 * accumulated.
 */
VOID
MmDereferencePage(PHYSICAL_ADDRESS PhysicalAddress)
{
   ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
   KIRQL oldIrql;

   DPRINT("MmDereferencePage(PhysicalAddress %I64x)\n", PhysicalAddress);

   /* Page zero is permanently reserved; dereferencing it is a bug. */
   if (PhysicalAddress.u.LowPart == 0)
   {
      KeBugCheck(0);
   }

   KeAcquireSpinLock(&PageListLock, &oldIrql);


   if (MmPageArray[Start].Flags.Type != MM_PHYSICAL_PAGE_USED)
   {
      DbgPrint("Dereferencing free page\n");
      KeBugCheck(0);
   }

   MmPageArray[Start].ReferenceCount--;
   if (MmPageArray[Start].ReferenceCount == 0)
   {
      /* Last reference gone: move the page back to the free pool.
         NOTE(review): NrSystemPages is decremented regardless of the
         page's consumer, mirroring the unconditional increment in
         MmAllocPage -- confirm this is intentional. */
      MmStats.NrFreePages++;
      MmStats.NrSystemPages--;
      RemoveEntryList(&MmPageArray[Start].ListEntry);
      /* A page being freed must have no rmap entries, mappings, locks,
         or pending swap entry; any of these indicates a leak elsewhere. */
      if (MmPageArray[Start].RmapListHead != NULL)
      {
         DbgPrint("Freeing page with rmap entries.\n");
         KeBugCheck(0);
      }
      if (MmPageArray[Start].MapCount != 0)
      {
         DbgPrint("Freeing mapped page (0x%I64x count %d)\n",
                  PhysicalAddress, MmPageArray[Start].MapCount);
         KeBugCheck(0);
      }
      if (MmPageArray[Start].LockCount > 0)
      {
         DbgPrint("Freeing locked page\n");
         KeBugCheck(0);
      }
      if (MmPageArray[Start].SavedSwapEntry != 0)
      {
         DbgPrint("Freeing page with swap entry.\n");
         KeBugCheck(0);
      }
      if (MmPageArray[Start].Flags.Type != MM_PHYSICAL_PAGE_USED)
      {
         DbgPrint("Freeing page with flags %x\n",
                  MmPageArray[Start].Flags.Type);
         KeBugCheck(0);
      }
      MmPageArray[Start].Flags.Type = MM_PHYSICAL_PAGE_FREE;
      InsertTailList(&FreeUnzeroedPageListHead,
                     &MmPageArray[Start].ListEntry);
      UnzeroedPageCount++;
      /* Wake the zero page thread once a batch of unzeroed pages has
         built up and it is not already signalled. */
      if (UnzeroedPageCount > 8 && 0 == KeReadStateEvent(&ZeroPageThreadEvent))
      {
         KeSetEvent(&ZeroPageThreadEvent, IO_NO_INCREMENT, FALSE);
      }
   }
   KeReleaseSpinLock(&PageListLock, oldIrql);
}
771
772 ULONG
773 MmGetLockCountPage(PHYSICAL_ADDRESS PhysicalAddress)
774 {
775 ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
776 KIRQL oldIrql;
777 ULONG LockCount;
778
779 DPRINT("MmGetLockCountPage(PhysicalAddress %x)\n", PhysicalAddress);
780
781 if (PhysicalAddress.u.LowPart == 0)
782 {
783 KeBugCheck(0);
784 }
785
786 KeAcquireSpinLock(&PageListLock, &oldIrql);
787
788 if (MmPageArray[Start].Flags.Type != MM_PHYSICAL_PAGE_USED)
789 {
790 DbgPrint("Getting lock count for free page\n");
791 KeBugCheck(0);
792 }
793
794 LockCount = MmPageArray[Start].LockCount;
795 KeReleaseSpinLock(&PageListLock, oldIrql);
796
797 return(LockCount);
798 }
799
800 VOID
801 MmLockPage(PHYSICAL_ADDRESS PhysicalAddress)
802 {
803 ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
804 KIRQL oldIrql;
805
806 DPRINT("MmLockPage(PhysicalAddress %x)\n", PhysicalAddress);
807
808 if (PhysicalAddress.u.LowPart == 0)
809 {
810 KeBugCheck(0);
811 }
812
813 KeAcquireSpinLock(&PageListLock, &oldIrql);
814
815 if (MmPageArray[Start].Flags.Type != MM_PHYSICAL_PAGE_USED)
816 {
817 DbgPrint("Locking free page\n");
818 KeBugCheck(0);
819 }
820
821 MmPageArray[Start].LockCount++;
822 KeReleaseSpinLock(&PageListLock, oldIrql);
823 }
824
825 VOID
826 MmUnlockPage(PHYSICAL_ADDRESS PhysicalAddress)
827 {
828 ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
829 KIRQL oldIrql;
830
831 DPRINT("MmUnlockPage(PhysicalAddress %llx)\n", PhysicalAddress);
832
833 if (PhysicalAddress.u.LowPart == 0)
834 {
835 KeBugCheck(0);
836 }
837
838 KeAcquireSpinLock(&PageListLock, &oldIrql);
839
840 if (MmPageArray[Start].Flags.Type != MM_PHYSICAL_PAGE_USED)
841 {
842 DbgPrint("Unlocking free page\n");
843 KeBugCheck(0);
844 }
845
846 MmPageArray[Start].LockCount--;
847 KeReleaseSpinLock(&PageListLock, oldIrql);
848 }
849
/*
 * Allocate one physical page for the given consumer, preferring pages
 * that are already zeroed. Returns the page's physical address, or 0 if
 * no free pages remain. The page is zeroed before return if it came from
 * the unzeroed list.
 */
PHYSICAL_ADDRESS
MmAllocPage(ULONG Consumer, SWAPENTRY SavedSwapEntry)
{
   PHYSICAL_ADDRESS PageOffset;
   PLIST_ENTRY ListEntry;
   PPHYSICAL_PAGE PageDescriptor;
   KIRQL oldIrql;
   BOOLEAN NeedClear = FALSE;  /* page taken from the unzeroed list? */

   DPRINT("MmAllocPage()\n");

   KeAcquireSpinLock(&PageListLock, &oldIrql);
   if (IsListEmpty(&FreeZeroedPageListHead))
   {
      if (IsListEmpty(&FreeUnzeroedPageListHead))
      {
         /* Both free lists empty: physical memory is exhausted. */
         DPRINT1("MmAllocPage(): Out of memory\n");
         KeReleaseSpinLock(&PageListLock, oldIrql);
         return((PHYSICAL_ADDRESS)0LL);
      }
      ListEntry = RemoveTailList(&FreeUnzeroedPageListHead);
      UnzeroedPageCount--;

      PageDescriptor = CONTAINING_RECORD(ListEntry, PHYSICAL_PAGE, ListEntry);

      NeedClear = TRUE;
   }
   else
   {
      ListEntry = RemoveTailList(&FreeZeroedPageListHead);

      PageDescriptor = CONTAINING_RECORD(ListEntry, PHYSICAL_PAGE, ListEntry);
   }

   /* Sanity: a page coming off a free list must be free and unmapped. */
   if (PageDescriptor->Flags.Type != MM_PHYSICAL_PAGE_FREE)
   {
      DbgPrint("Got non-free page from freelist\n");
      KeBugCheck(0);
   }
   if (PageDescriptor->MapCount != 0)
   {
      DbgPrint("Got mapped page from freelist\n");
      KeBugCheck(0);
   }
   /* Initialize the descriptor for its new owner. */
   PageDescriptor->Flags.Type = MM_PHYSICAL_PAGE_USED;
   PageDescriptor->Flags.Consumer = Consumer;
   PageDescriptor->ReferenceCount = 1;
   PageDescriptor->LockCount = 0;
   PageDescriptor->MapCount = 0;
   PageDescriptor->SavedSwapEntry = SavedSwapEntry;
   InsertTailList(&UsedPageListHeads[Consumer], ListEntry);

   /* NOTE(review): NrSystemPages is incremented for every consumer, not
      just system ones; MmDereferencePage decrements it the same way --
      confirm this asymmetry with NrUserPages is intentional. */
   MmStats.NrSystemPages++;
   MmStats.NrFreePages--;

   KeReleaseSpinLock(&PageListLock, oldIrql);

   /* Convert the descriptor's index in MmPageArray to a physical
      address. */
   PageOffset.QuadPart = (ULONG)((ULONG)PageDescriptor - (ULONG)MmPageArray);
   PageOffset.QuadPart =
      (PageOffset.QuadPart / sizeof(PHYSICAL_PAGE)) * PAGE_SIZE;
   if (NeedClear)
   {
      /* Zero outside the lock; the page is already marked used so nobody
         else can claim it. */
      MiZeroPage(PageOffset);
   }
   if (PageDescriptor->MapCount != 0)
   {
      DbgPrint("Returning mapped page.\n");
      KeBugCheck(0);
   }
   return(PageOffset);
}
921
922
/*
 * Body of the zero page system thread. Sleeps on ZeroPageThreadEvent;
 * when woken it drains the unzeroed free list, zeroing each page through
 * a temporary kernel mapping and moving it to the zeroed free list.
 */
NTSTATUS STDCALL
MmZeroPageThreadMain(PVOID Ignored)
{
   NTSTATUS Status;
   KIRQL oldIrql;
   PLIST_ENTRY ListEntry;
   PPHYSICAL_PAGE PageDescriptor;
   PHYSICAL_ADDRESS PhysPage;
   /* Kernel virtual address reused for mapping each page in turn;
      allocated on first use and kept for the life of the thread. */
   static PVOID Address = NULL;
   ULONG Count;

   while(1)
   {
      Status = KeWaitForSingleObject(&ZeroPageThreadEvent,
                                     0,
                                     KernelMode,
                                     FALSE,
                                     NULL);
      if (!NT_SUCCESS(Status))
      {
         DbgPrint("ZeroPageThread: Wait failed\n");
         KeBugCheck(0);
         return(STATUS_UNSUCCESSFUL);
      }

      Count = 0;
      KeAcquireSpinLock(&PageListLock, &oldIrql);
      while (!IsListEmpty(&FreeUnzeroedPageListHead))
      {
         ListEntry = RemoveTailList(&FreeUnzeroedPageListHead);
         UnzeroedPageCount--;
         PageDescriptor = CONTAINING_RECORD(ListEntry, PHYSICAL_PAGE, ListEntry);
         /* We set the page to used, because MmCreateVirtualMapping failed with unused pages */
         PageDescriptor->Flags.Type = MM_PHYSICAL_PAGE_USED;
         /* Drop the lock while zeroing: the page is marked used, so no
            other allocator can hand it out in the meantime. */
         KeReleaseSpinLock(&PageListLock, oldIrql);
         Count++;
         /* Convert the descriptor's index back to a physical address. */
         PhysPage.QuadPart = (ULONG)((ULONG)PageDescriptor - (ULONG)MmPageArray);
         PhysPage.QuadPart = (PhysPage.QuadPart / sizeof(PHYSICAL_PAGE)) * PAGE_SIZE;
         if (Address == NULL)
         {
            /* First page: allocate the scratch virtual address, mapped
               to this page. */
            Address = ExAllocatePageWithPhysPage(PhysPage);
         }
         else
         {
            /* Reuse the scratch address for every later page. */
            Status = MmCreateVirtualMapping(NULL,
                                            Address,
                                            PAGE_READWRITE | PAGE_SYSTEM,
                                            PhysPage,
                                            FALSE);
            if (!NT_SUCCESS(Status))
            {
               DbgPrint("Unable to create virtual mapping\n");
               KeBugCheck(0);
            }
         }
         memset(Address, 0, PAGE_SIZE);
         MmDeleteVirtualMapping(NULL, (PVOID)Address, FALSE, NULL, NULL);
         KeAcquireSpinLock(&PageListLock, &oldIrql);
         if (PageDescriptor->MapCount != 0)
         {
            DbgPrint("Mapped page on freelist.\n");
            KeBugCheck(0);
         }
         /* Back to free, now on the zeroed list. */
         PageDescriptor->Flags.Type = MM_PHYSICAL_PAGE_FREE;
         InsertHeadList(&FreeZeroedPageListHead, ListEntry);
      }
      DPRINT("Zeroed %d pages.\n", Count);
      /* Reset while still holding the lock so a concurrent
         MmDereferencePage cannot signal between the emptiness check and
         the reset (lost-wakeup race). */
      KeResetEvent(&ZeroPageThreadEvent);
      KeReleaseSpinLock(&PageListLock, oldIrql);
   }
}
994
995 NTSTATUS MmInitZeroPageThread(VOID)
996 {
997 KPRIORITY Priority;
998 NTSTATUS Status;
999
1000 Status = PsCreateSystemThread(&ZeroPageThreadHandle,
1001 THREAD_ALL_ACCESS,
1002 NULL,
1003 NULL,
1004 &ZeroPageThreadId,
1005 MmZeroPageThreadMain,
1006 NULL);
1007 if (!NT_SUCCESS(Status))
1008 {
1009 return(Status);
1010 }
1011
1012 Priority = 1;
1013 NtSetInformationThread(ZeroPageThreadHandle,
1014 ThreadPriority,
1015 &Priority,
1016 sizeof(Priority));
1017
1018 return(STATUS_SUCCESS);
1019 }
1020
1021 /* EOF */