/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/mm/freelist.c
 * PURPOSE:         Handle the list of free physical pages
 * PROGRAMMER:      David Welch (welch@cwcom.net)
 * UPDATE HISTORY:
 *                  27/05/98: Created
 *                  18/08/98: Added a fix from Robert Bergkvist
 */

/* INCLUDES ****************************************************************/

#include <ddk/ntddk.h>
#include <internal/mm.h>
#include <internal/ntoskrnl.h>

#define NDEBUG
#include <internal/debug.h>

/* TYPES *******************************************************************/

#define MM_PHYSICAL_PAGE_FREE    (0x1)
#define MM_PHYSICAL_PAGE_USED    (0x2)
#define MM_PHYSICAL_PAGE_BIOS    (0x3)

#define MM_PTYPE(x)              ((x) & 0x3)

typedef struct _PHYSICAL_PAGE
{
   ULONG Flags;
   LIST_ENTRY ListEntry;
   ULONG ReferenceCount;
   SWAPENTRY SavedSwapEntry;
   ULONG LockCount;
   ULONG MapCount;
   struct _MM_RMAP_ENTRY* RmapListHead;
} PHYSICAL_PAGE, *PPHYSICAL_PAGE;

/* GLOBALS ****************************************************************/

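/*
 * MmPageArray is an array of page descriptors with one PHYSICAL_PAGE entry
 * per physical page frame; the descriptor for the page at physical address
 * Addr is MmPageArray[Addr / PAGESIZE]. Each initialized descriptor is
 * linked, via its ListEntry, onto one of the lists below according to its
 * current state (in use by a consumer, free and zeroed, free and unzeroed,
 * or reserved for the BIOS), and PageListLock protects the lists.
 */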
static PPHYSICAL_PAGE MmPageArray;

static KSPIN_LOCK PageListLock;
static LIST_ENTRY UsedPageListHeads[MC_MAXIMUM];
static LIST_ENTRY FreeZeroedPageListHead;
static LIST_ENTRY FreeUnzeroedPageListHead;
static LIST_ENTRY BiosPageListHead;

/* FUNCTIONS *************************************************************/

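/*
 * FUNCTION: Move a used page onto the used-page list of a new consumer
 * ARGUMENTS:
 *          PhysicalAddress = Physical address of the page
 *          NewConsumer = MC_* constant identifying the new owner
 */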
VOID
MmTransferOwnershipPage(PHYSICAL_ADDRESS PhysicalAddress, ULONG NewConsumer)
{
   ULONG Start = PhysicalAddress.u.LowPart / PAGESIZE;
   KIRQL oldIrql;

   KeAcquireSpinLock(&PageListLock, &oldIrql);
   RemoveEntryList(&MmPageArray[Start].ListEntry);
   InsertTailList(&UsedPageListHeads[NewConsumer],
                  &MmPageArray[Start].ListEntry);
   KeReleaseSpinLock(&PageListLock, oldIrql);
}

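/*
 * FUNCTION: Return the physical address of the page at the head of the
 * MC_USER used-page list (the approximate LRU order), or zero if the
 * list is empty
 */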
PHYSICAL_ADDRESS
MmGetLRUFirstUserPage(VOID)
{
   PLIST_ENTRY NextListEntry;
   PHYSICAL_ADDRESS Next;
   PHYSICAL_PAGE* PageDescriptor;
   KIRQL oldIrql;

   KeAcquireSpinLock(&PageListLock, &oldIrql);
   NextListEntry = UsedPageListHeads[MC_USER].Flink;
   if (NextListEntry == &UsedPageListHeads[MC_USER])
   {
      KeReleaseSpinLock(&PageListLock, oldIrql);
      return((LARGE_INTEGER)0LL);
   }
   PageDescriptor = CONTAINING_RECORD(NextListEntry, PHYSICAL_PAGE, ListEntry);
   Next.QuadPart = (ULONG)((ULONG)PageDescriptor - (ULONG)MmPageArray);
   Next.QuadPart = (Next.QuadPart / sizeof(PHYSICAL_PAGE)) * PAGESIZE;
   KeReleaseSpinLock(&PageListLock, oldIrql);
   return(Next);
}

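/*
 * FUNCTION: Return the physical address of the page following
 * PreviousPhysicalAddress on the MC_USER used-page list, or zero if the
 * end of the list has been reached
 */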
PHYSICAL_ADDRESS
MmGetLRUNextUserPage(PHYSICAL_ADDRESS PreviousPhysicalAddress)
{
   ULONG Start = PreviousPhysicalAddress.u.LowPart / PAGESIZE;
   PLIST_ENTRY NextListEntry;
   PHYSICAL_ADDRESS Next;
   PHYSICAL_PAGE* PageDescriptor;
   KIRQL oldIrql;

   KeAcquireSpinLock(&PageListLock, &oldIrql);
   if (MM_PTYPE(MmPageArray[Start].Flags) != MM_PHYSICAL_PAGE_USED)
   {
      /* The previous page is no longer in use; restart from the list head */
      NextListEntry = UsedPageListHeads[MC_USER].Flink;
   }
   else
   {
      NextListEntry = MmPageArray[Start].ListEntry.Flink;
   }
   if (NextListEntry == &UsedPageListHeads[MC_USER])
   {
      KeReleaseSpinLock(&PageListLock, oldIrql);
      return((LARGE_INTEGER)0LL);
   }
   PageDescriptor = CONTAINING_RECORD(NextListEntry, PHYSICAL_PAGE, ListEntry);
   Next.QuadPart = (ULONG)((ULONG)PageDescriptor - (ULONG)MmPageArray);
   Next.QuadPart = (Next.QuadPart / sizeof(PHYSICAL_PAGE)) * PAGESIZE;
   KeReleaseSpinLock(&PageListLock, oldIrql);
   return(Next);
}

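/*
 * FUNCTION: Allocate a physically contiguous, aligned run of pages from
 * the free list and charge it to the non-paged pool consumer
 * ARGUMENTS:
 *          NumberOfBytes = Size of the region to allocate
 *          HighestAcceptableAddress = Highest physical address the caller
 *                                     can accept
 *          Alignment = Required alignment of the region in bytes
 * RETURNS: The physical base address of the region, or zero on failure
 */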
PHYSICAL_ADDRESS
MmGetContinuousPages(ULONG NumberOfBytes,
                     PHYSICAL_ADDRESS HighestAcceptableAddress,
                     ULONG Alignment)
{
   ULONG NrPages;
   ULONG i;
   ULONG start;
   ULONG length;
   KIRQL oldIrql;

   NrPages = PAGE_ROUND_UP(NumberOfBytes) / PAGESIZE;

   KeAcquireSpinLock(&PageListLock, &oldIrql);

   start = -1;
   length = 0;
   for (i = 0; i < (HighestAcceptableAddress.QuadPart / PAGESIZE); )
   {
      if (MM_PTYPE(MmPageArray[i].Flags) == MM_PHYSICAL_PAGE_FREE)
      {
         if (start == -1)
         {
            start = i;
            length = 1;
         }
         else
         {
            length++;
         }
         i++;
         if (length == NrPages)
         {
            break;
         }
      }
      else
      {
         start = -1;
         /*
          * Fast forward to the base of the next aligned region
          */
         i = ROUND_UP((i + 1), (Alignment / PAGESIZE));
      }
   }
   if (start == -1 || length != NrPages)
   {
      KeReleaseSpinLock(&PageListLock, oldIrql);
      return((LARGE_INTEGER)(LONGLONG)0);
   }
   for (i = start; i < (start + length); i++)
   {
      RemoveEntryList(&MmPageArray[i].ListEntry);
      MmPageArray[i].Flags = MM_PHYSICAL_PAGE_USED;
      MmPageArray[i].ReferenceCount = 1;
      MmPageArray[i].LockCount = 0;
      MmPageArray[i].MapCount = 0;
      MmPageArray[i].SavedSwapEntry = 0;
      InsertTailList(&UsedPageListHeads[MC_NPPOOL],
                     &MmPageArray[i].ListEntry);
   }
   KeReleaseSpinLock(&PageListLock, oldIrql);
   return((LARGE_INTEGER)((LONGLONG)start * PAGESIZE));
}

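/*
 * FUNCTION: Add the pages covered by a BIOS address range descriptor to
 * the free (unzeroed) page list; only pages whose descriptors have not
 * yet been initialized are added
 */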
VOID
MiParseRangeToFreeList(PADDRESS_RANGE Range)
{
   ULONG i, first, last;

   /* FIXME: Not 64-bit ready */

   DPRINT("Range going to free list (Base 0x%X, Length 0x%X, Type 0x%X)\n",
          Range->BaseAddrLow,
          Range->LengthLow,
          Range->Type);

   first = (Range->BaseAddrLow + PAGESIZE - 1) / PAGESIZE;
   last = first + ((Range->LengthLow + PAGESIZE - 1) / PAGESIZE);
   for (i = first; i < last; i++)
   {
      if (MmPageArray[i].Flags == 0)
      {
         MmPageArray[i].Flags = MM_PHYSICAL_PAGE_FREE;
         MmPageArray[i].ReferenceCount = 0;
         InsertTailList(&FreeUnzeroedPageListHead,
                        &MmPageArray[i].ListEntry);
      }
   }
}

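/*
 * FUNCTION: Mark the pages covered by a BIOS address range descriptor as
 * reserved, removing them from the free list if necessary and putting
 * them on the BIOS page list
 */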
VOID
MiParseRangeToBiosList(PADDRESS_RANGE Range)
{
   ULONG i, first, last;

   /* FIXME: Not 64-bit ready */

   DPRINT("Range going to bios list (Base 0x%X, Length 0x%X, Type 0x%X)\n",
          Range->BaseAddrLow,
          Range->LengthLow,
          Range->Type);

   first = (Range->BaseAddrLow + PAGESIZE - 1) / PAGESIZE;
   last = first + ((Range->LengthLow + PAGESIZE - 1) / PAGESIZE);
   for (i = first; i < last; i++)
   {
      /* Remove the page from the free list if it is there */
      if (MmPageArray[i].Flags == MM_PHYSICAL_PAGE_FREE)
      {
         RemoveEntryList(&MmPageArray[i].ListEntry);
      }

      if (MmPageArray[i].Flags != MM_PHYSICAL_PAGE_BIOS)
      {
         MmPageArray[i].Flags = MM_PHYSICAL_PAGE_BIOS;
         MmPageArray[i].ReferenceCount = 1;
         InsertTailList(&BiosPageListHead,
                        &MmPageArray[i].ListEntry);
      }
   }
}

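/*
 * FUNCTION: Walk the BIOS memory map and hand each range below the
 * detected memory size to either the free list (type 1, usable RAM) or
 * the BIOS/reserved list (any other type)
 */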
VOID
MiParseBIOSMemoryMap(ULONG MemorySizeInPages,
                     PADDRESS_RANGE BIOSMemoryMap,
                     ULONG AddressRangeCount)
{
   PADDRESS_RANGE p;
   ULONG i;

   p = BIOSMemoryMap;
   for (i = 0; i < AddressRangeCount; i++)
   {
      if (((p->BaseAddrLow + PAGESIZE - 1) / PAGESIZE) < MemorySizeInPages)
      {
         if (p->Type == 1)
         {
            MiParseRangeToFreeList(p);
         }
         else
         {
            MiParseRangeToBiosList(p);
         }
      }
      p += 1;
   }
}

PVOID
MmInitializePageList(PVOID FirstPhysKernelAddress,
                     PVOID LastPhysKernelAddress,
                     ULONG MemorySizeInPages,
                     ULONG LastKernelAddress,
                     PADDRESS_RANGE BIOSMemoryMap,
                     ULONG AddressRangeCount)
/*
 * FUNCTION: Initializes the page list with all pages free
 * except those known to be reserved and those used by the kernel
 * ARGUMENTS:
 *          FirstPhysKernelAddress = First physical address used by the kernel
 *          LastPhysKernelAddress = Last physical address used by the kernel
 *          MemorySizeInPages = Number of physical pages in the system
 *          LastKernelAddress = Last kernel virtual address in use; the page
 *                              descriptor array is placed directly after it
 *          BIOSMemoryMap = BIOS address range descriptors (may be NULL)
 *          AddressRangeCount = Number of entries in BIOSMemoryMap
 * RETURNS: The first free address after the page descriptor array
 */
{
   ULONG i;
   ULONG Reserved;
   NTSTATUS Status;

   DPRINT("MmInitializePageList(FirstPhysKernelAddress %x, "
          "LastPhysKernelAddress %x, "
          "MemorySizeInPages %x, LastKernelAddress %x)\n",
          FirstPhysKernelAddress,
          LastPhysKernelAddress,
          MemorySizeInPages,
          LastKernelAddress);

   for (i = 0; i < MC_MAXIMUM; i++)
   {
      InitializeListHead(&UsedPageListHeads[i]);
   }
   KeInitializeSpinLock(&PageListLock);
   InitializeListHead(&FreeUnzeroedPageListHead);
   InitializeListHead(&FreeZeroedPageListHead);
   InitializeListHead(&BiosPageListHead);

   LastKernelAddress = PAGE_ROUND_UP(LastKernelAddress);

   Reserved =
     PAGE_ROUND_UP((MemorySizeInPages * sizeof(PHYSICAL_PAGE))) / PAGESIZE;
   MmPageArray = (PHYSICAL_PAGE *)LastKernelAddress;

   DPRINT("Reserved %d\n", Reserved);

   LastKernelAddress = PAGE_ROUND_UP(LastKernelAddress);
   LastKernelAddress = ((ULONG)LastKernelAddress + (Reserved * PAGESIZE));
   LastPhysKernelAddress = (PVOID)PAGE_ROUND_UP(LastPhysKernelAddress);
   LastPhysKernelAddress = LastPhysKernelAddress + (Reserved * PAGESIZE);

   MmStats.NrTotalPages = 0;
   MmStats.NrSystemPages = 0;
   MmStats.NrUserPages = 0;
   MmStats.NrReservedPages = 0;
   MmStats.NrFreePages = 0;
   MmStats.NrLockedPages = 0;

   /* Make sure the pages backing the descriptor array are mapped and zeroed */
   for (i = 0; i < Reserved; i++)
   {
      PVOID Address = (PVOID)((ULONG)MmPageArray + (i * PAGESIZE));
      if (!MmIsPagePresent(NULL, Address))
      {
         ULONG PhysicalAddress;
         PhysicalAddress = (ULONG)LastPhysKernelAddress -
           (Reserved * PAGESIZE) + (i * PAGESIZE);
         Status =
           MmCreateVirtualMappingUnsafe(NULL,
                                        Address,
                                        PAGE_READWRITE,
                                        (PHYSICAL_ADDRESS)(LONGLONG)PhysicalAddress,
                                        FALSE);
         if (!NT_SUCCESS(Status))
         {
            DbgPrint("Unable to create virtual mapping\n");
            KeBugCheck(0);
         }
      }
      memset(Address, 0, PAGESIZE);
   }

   /*
    * Page zero is reserved
    */
   MmPageArray[0].Flags = MM_PHYSICAL_PAGE_BIOS;
   MmPageArray[0].ReferenceCount = 0;
   InsertTailList(&BiosPageListHead,
                  &MmPageArray[0].ListEntry);

   /*
    * Page one is reserved for the initial KPCR
    */
   MmPageArray[1].Flags = MM_PHYSICAL_PAGE_BIOS;
   MmPageArray[1].ReferenceCount = 0;
   InsertTailList(&BiosPageListHead,
                  &MmPageArray[1].ListEntry);

   i = 2;
   if ((ULONG)FirstPhysKernelAddress < 0xa0000)
   {
      MmStats.NrFreePages += (((ULONG)FirstPhysKernelAddress/PAGESIZE) - 1);
      for (; i<((ULONG)FirstPhysKernelAddress/PAGESIZE); i++)
      {
         MmPageArray[i].Flags = MM_PHYSICAL_PAGE_FREE;
         MmPageArray[i].ReferenceCount = 0;
         InsertTailList(&FreeUnzeroedPageListHead,
                        &MmPageArray[i].ListEntry);
      }
      MmStats.NrSystemPages +=
        ((((ULONG)LastPhysKernelAddress) / PAGESIZE) - i);
      for (; i<((ULONG)LastPhysKernelAddress / PAGESIZE); i++)
      {
         MmPageArray[i].Flags = MM_PHYSICAL_PAGE_USED;
         MmPageArray[i].ReferenceCount = 1;
         InsertTailList(&UsedPageListHeads[MC_NPPOOL],
                        &MmPageArray[i].ListEntry);
      }
      MmStats.NrFreePages += ((0xa0000/PAGESIZE) - i);
      for (; i<(0xa0000/PAGESIZE); i++)
      {
         MmPageArray[i].Flags = MM_PHYSICAL_PAGE_FREE;
         MmPageArray[i].ReferenceCount = 0;
         InsertTailList(&FreeUnzeroedPageListHead,
                        &MmPageArray[i].ListEntry);
      }
      MmStats.NrReservedPages += ((0x100000/PAGESIZE) - i);
      for (; i<(0x100000 / PAGESIZE); i++)
      {
         MmPageArray[i].Flags = MM_PHYSICAL_PAGE_BIOS;
         MmPageArray[i].ReferenceCount = 1;
         InsertTailList(&BiosPageListHead,
                        &MmPageArray[i].ListEntry);
      }
   }
   else
   {
      MmStats.NrFreePages += ((0xa0000 / PAGESIZE) - 1);
      for (; i<(0xa0000 / PAGESIZE); i++)
      {
         MmPageArray[i].Flags = MM_PHYSICAL_PAGE_FREE;
         MmPageArray[i].ReferenceCount = 0;
         InsertTailList(&FreeUnzeroedPageListHead,
                        &MmPageArray[i].ListEntry);
      }
      MmStats.NrReservedPages += (0x60000 / PAGESIZE);
      for (; i<(0x100000 / PAGESIZE); i++)
      {
         MmPageArray[i].Flags = MM_PHYSICAL_PAGE_BIOS;
         MmPageArray[i].ReferenceCount = 1;
         InsertTailList(&BiosPageListHead,
                        &MmPageArray[i].ListEntry);
      }
      MmStats.NrFreePages += (((ULONG)FirstPhysKernelAddress/PAGESIZE) - i);
      for (; i<((ULONG)FirstPhysKernelAddress/PAGESIZE); i++)
      {
         MmPageArray[i].Flags = MM_PHYSICAL_PAGE_FREE;
         MmPageArray[i].ReferenceCount = 0;
         InsertTailList(&FreeUnzeroedPageListHead,
                        &MmPageArray[i].ListEntry);
      }
      MmStats.NrSystemPages +=
        (((ULONG)LastPhysKernelAddress/PAGESIZE) - i);
      for (; i<((ULONG)LastPhysKernelAddress/PAGESIZE); i++)
      {
         MmPageArray[i].Flags = MM_PHYSICAL_PAGE_USED;
         MmPageArray[i].ReferenceCount = 1;
         InsertTailList(&UsedPageListHeads[MC_NPPOOL],
                        &MmPageArray[i].ListEntry);
      }
   }

   MmStats.NrFreePages += (MemorySizeInPages - i);
   for (; i<MemorySizeInPages; i++)
   {
      MmPageArray[i].Flags = MM_PHYSICAL_PAGE_FREE;
      MmPageArray[i].ReferenceCount = 0;
      InsertTailList(&FreeUnzeroedPageListHead,
                     &MmPageArray[i].ListEntry);
   }

   if ((BIOSMemoryMap != NULL) && (AddressRangeCount > 0))
   {
      MiParseBIOSMemoryMap(
         MemorySizeInPages,
         BIOSMemoryMap,
         AddressRangeCount);
   }

   MmStats.NrTotalPages = MmStats.NrFreePages + MmStats.NrSystemPages +
     MmStats.NrReservedPages + MmStats.NrUserPages;
   MmInitializeBalancer(MmStats.NrFreePages);
   return((PVOID)LastKernelAddress);
}

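/*
 * The routines below are simple accessors for the fields of a page's
 * PHYSICAL_PAGE descriptor, selected by physical address.
 */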
VOID
MmSetFlagsPage(PHYSICAL_ADDRESS PhysicalAddress, ULONG Flags)
{
   ULONG Start = PhysicalAddress.u.LowPart / PAGESIZE;
   KIRQL oldIrql;

   KeAcquireSpinLock(&PageListLock, &oldIrql);
   MmPageArray[Start].Flags = Flags;
   KeReleaseSpinLock(&PageListLock, oldIrql);
}

VOID
MmSetRmapListHeadPage(PHYSICAL_ADDRESS PhysicalAddress,
                      struct _MM_RMAP_ENTRY* ListHead)
{
   ULONG Start = PhysicalAddress.u.LowPart / PAGESIZE;

   MmPageArray[Start].RmapListHead = ListHead;
}

struct _MM_RMAP_ENTRY*
MmGetRmapListHeadPage(PHYSICAL_ADDRESS PhysicalAddress)
{
   ULONG Start = PhysicalAddress.u.LowPart / PAGESIZE;

   return(MmPageArray[Start].RmapListHead);
}

VOID
MmMarkPageMapped(PHYSICAL_ADDRESS PhysicalAddress)
{
   ULONG Start = PhysicalAddress.u.LowPart / PAGESIZE;
   KIRQL oldIrql;

   KeAcquireSpinLock(&PageListLock, &oldIrql);
   MmPageArray[Start].MapCount++;
   KeReleaseSpinLock(&PageListLock, oldIrql);
}

VOID
MmMarkPageUnmapped(PHYSICAL_ADDRESS PhysicalAddress)
{
   ULONG Start = PhysicalAddress.u.LowPart / PAGESIZE;
   KIRQL oldIrql;

   KeAcquireSpinLock(&PageListLock, &oldIrql);
   MmPageArray[Start].MapCount--;
   KeReleaseSpinLock(&PageListLock, oldIrql);
}

ULONG
MmGetFlagsPage(PHYSICAL_ADDRESS PhysicalAddress)
{
   ULONG Start = PhysicalAddress.u.LowPart / PAGESIZE;
   KIRQL oldIrql;
   ULONG Flags;

   KeAcquireSpinLock(&PageListLock, &oldIrql);
   Flags = MmPageArray[Start].Flags;
   KeReleaseSpinLock(&PageListLock, oldIrql);

   return(Flags);
}

VOID
MmSetSavedSwapEntryPage(PHYSICAL_ADDRESS PhysicalAddress,
                        SWAPENTRY SavedSwapEntry)
{
   ULONG Start = PhysicalAddress.u.LowPart / PAGESIZE;
   KIRQL oldIrql;

   KeAcquireSpinLock(&PageListLock, &oldIrql);
   MmPageArray[Start].SavedSwapEntry = SavedSwapEntry;
   KeReleaseSpinLock(&PageListLock, oldIrql);
}

SWAPENTRY
MmGetSavedSwapEntryPage(PHYSICAL_ADDRESS PhysicalAddress)
{
   ULONG Start = PhysicalAddress.u.LowPart / PAGESIZE;
   SWAPENTRY SavedSwapEntry;
   KIRQL oldIrql;

   KeAcquireSpinLock(&PageListLock, &oldIrql);
   SavedSwapEntry = MmPageArray[Start].SavedSwapEntry;
   KeReleaseSpinLock(&PageListLock, oldIrql);

   return(SavedSwapEntry);
}

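/*
 * FUNCTION: Increment the reference count of a used page; bugchecks if
 * the page is not currently in use
 */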
VOID
MmReferencePage(PHYSICAL_ADDRESS PhysicalAddress)
{
   ULONG Start = PhysicalAddress.u.LowPart / PAGESIZE;
   KIRQL oldIrql;

   DPRINT("MmReferencePage(PhysicalAddress %x)\n", PhysicalAddress);

   if (PhysicalAddress.u.LowPart == 0)
   {
      KeBugCheck(0);
   }

   KeAcquireSpinLock(&PageListLock, &oldIrql);

   if (MM_PTYPE(MmPageArray[Start].Flags) != MM_PHYSICAL_PAGE_USED)
   {
      DbgPrint("Referencing non-used page\n");
      KeBugCheck(0);
   }

   MmPageArray[Start].ReferenceCount++;
   KeReleaseSpinLock(&PageListLock, oldIrql);
}

ULONG
MmGetReferenceCountPage(PHYSICAL_ADDRESS PhysicalAddress)
{
   ULONG Start = PhysicalAddress.u.LowPart / PAGESIZE;
   KIRQL oldIrql;
   ULONG RCount;

   DPRINT("MmGetReferenceCountPage(PhysicalAddress %x)\n", PhysicalAddress);

   if (PhysicalAddress.u.LowPart == 0)
   {
      KeBugCheck(0);
   }

   KeAcquireSpinLock(&PageListLock, &oldIrql);

   if (MM_PTYPE(MmPageArray[Start].Flags) != MM_PHYSICAL_PAGE_USED)
   {
      DbgPrint("Getting reference count for free page\n");
      KeBugCheck(0);
   }

   RCount = MmPageArray[Start].ReferenceCount;

   KeReleaseSpinLock(&PageListLock, oldIrql);
   return(RCount);
}

BOOLEAN
MmIsUsablePage(PHYSICAL_ADDRESS PhysicalAddress)
{
   ULONG Start = PhysicalAddress.u.LowPart / PAGESIZE;

   DPRINT("MmIsUsablePage(PhysicalAddress %x)\n", PhysicalAddress);

   if (PhysicalAddress.u.LowPart == 0)
   {
      KeBugCheck(0);
   }

   if (MM_PTYPE(MmPageArray[Start].Flags) != MM_PHYSICAL_PAGE_USED &&
       MM_PTYPE(MmPageArray[Start].Flags) != MM_PHYSICAL_PAGE_BIOS)
   {
      return(FALSE);
   }

   return(TRUE);
}

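/*
 * FUNCTION: Decrement the reference count of a used page and, when the
 * count reaches zero, sanity-check the descriptor and return the page to
 * the free (unzeroed) list
 */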
VOID
MmDereferencePage(PHYSICAL_ADDRESS PhysicalAddress)
{
   ULONG Start = PhysicalAddress.u.LowPart / PAGESIZE;
   KIRQL oldIrql;

   DPRINT("MmDereferencePage(PhysicalAddress %x)\n", PhysicalAddress);

   if (PhysicalAddress.u.LowPart == 0)
   {
      KeBugCheck(0);
   }

   KeAcquireSpinLock(&PageListLock, &oldIrql);

   if (MM_PTYPE(MmPageArray[Start].Flags) != MM_PHYSICAL_PAGE_USED)
   {
      DbgPrint("Dereferencing free page\n");
      KeBugCheck(0);
   }

   MmPageArray[Start].ReferenceCount--;
   if (MmPageArray[Start].ReferenceCount == 0)
   {
      MmStats.NrFreePages++;
      MmStats.NrSystemPages--;
      RemoveEntryList(&MmPageArray[Start].ListEntry);
      if (MmPageArray[Start].RmapListHead != NULL)
      {
         DbgPrint("Freeing page with rmap entries.\n");
         KeBugCheck(0);
      }
      if (MmPageArray[Start].MapCount != 0)
      {
         DbgPrint("Freeing mapped page (0x%x count %d)\n",
                  PhysicalAddress, MmPageArray[Start].MapCount);
         KeBugCheck(0);
      }
      if (MmPageArray[Start].LockCount > 0)
      {
         DbgPrint("Freeing locked page\n");
         KeBugCheck(0);
      }
      if (MmPageArray[Start].SavedSwapEntry != 0)
      {
         DbgPrint("Freeing page with swap entry.\n");
         KeBugCheck(0);
      }
      if (MmPageArray[Start].Flags != MM_PHYSICAL_PAGE_USED)
      {
         DbgPrint("Freeing page with flags %x\n",
                  MmPageArray[Start].Flags);
         KeBugCheck(0);
      }
      MmPageArray[Start].Flags = MM_PHYSICAL_PAGE_FREE;
      InsertTailList(&FreeUnzeroedPageListHead,
                     &MmPageArray[Start].ListEntry);
   }
   KeReleaseSpinLock(&PageListLock, oldIrql);
}

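/*
 * The lock count tracks how many times a used page has been pinned (for
 * example for an in-progress I/O transfer); the routines below query and
 * adjust it. All of them bugcheck if the page is not currently in use.
 */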
ULONG
MmGetLockCountPage(PHYSICAL_ADDRESS PhysicalAddress)
{
   ULONG Start = PhysicalAddress.u.LowPart / PAGESIZE;
   KIRQL oldIrql;
   ULONG LockCount;

   DPRINT("MmGetLockCountPage(PhysicalAddress %x)\n", PhysicalAddress);

   if (PhysicalAddress.u.LowPart == 0)
   {
      KeBugCheck(0);
   }

   KeAcquireSpinLock(&PageListLock, &oldIrql);

   if (MM_PTYPE(MmPageArray[Start].Flags) != MM_PHYSICAL_PAGE_USED)
   {
      DbgPrint("Getting lock count for free page\n");
      KeBugCheck(0);
   }

   LockCount = MmPageArray[Start].LockCount;
   KeReleaseSpinLock(&PageListLock, oldIrql);

   return(LockCount);
}

VOID
MmLockPage(PHYSICAL_ADDRESS PhysicalAddress)
{
   ULONG Start = PhysicalAddress.u.LowPart / PAGESIZE;
   KIRQL oldIrql;

   DPRINT("MmLockPage(PhysicalAddress %x)\n", PhysicalAddress);

   if (PhysicalAddress.u.LowPart == 0)
   {
      KeBugCheck(0);
   }

   KeAcquireSpinLock(&PageListLock, &oldIrql);

   if (MM_PTYPE(MmPageArray[Start].Flags) != MM_PHYSICAL_PAGE_USED)
   {
      DbgPrint("Locking free page\n");
      KeBugCheck(0);
   }

   MmPageArray[Start].LockCount++;
   KeReleaseSpinLock(&PageListLock, oldIrql);
}

VOID
MmUnlockPage(PHYSICAL_ADDRESS PhysicalAddress)
{
   ULONG Start = PhysicalAddress.u.LowPart / PAGESIZE;
   KIRQL oldIrql;

   DPRINT("MmUnlockPage(PhysicalAddress %llx)\n", PhysicalAddress);

   if (PhysicalAddress.u.LowPart == 0)
   {
      KeBugCheck(0);
   }

   KeAcquireSpinLock(&PageListLock, &oldIrql);

   if (MM_PTYPE(MmPageArray[Start].Flags) != MM_PHYSICAL_PAGE_USED)
   {
      DbgPrint("Unlocking free page\n");
      KeBugCheck(0);
   }

   MmPageArray[Start].LockCount--;
   KeReleaseSpinLock(&PageListLock, oldIrql);
}

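/*
 * FUNCTION: Allocate a single physical page for the given consumer,
 * preferring a page from the zeroed list and zeroing an unzeroed page
 * otherwise
 * ARGUMENTS:
 *          Consumer = MC_* constant that will own the page
 *          SavedSwapEntry = Swap entry to record in the page descriptor
 * RETURNS: The physical address of the page, or zero if no free pages
 *          are left
 *
 * A typical caller (illustrative only) passes MC_USER or MC_NPPOOL and a
 * zero swap entry, then maps the returned physical address into an
 * address space with MmCreateVirtualMapping.
 */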
PHYSICAL_ADDRESS
MmAllocPage(ULONG Consumer, SWAPENTRY SavedSwapEntry)
{
   PHYSICAL_ADDRESS PageOffset;
   PLIST_ENTRY ListEntry;
   PPHYSICAL_PAGE PageDescriptor;
   KIRQL oldIrql;
   BOOLEAN NeedClear = FALSE;

   DPRINT("MmAllocPage()\n");

   KeAcquireSpinLock(&PageListLock, &oldIrql);
   if (IsListEmpty(&FreeZeroedPageListHead))
   {
      if (IsListEmpty(&FreeUnzeroedPageListHead))
      {
         DPRINT1("MmAllocPage(): Out of memory\n");
         KeReleaseSpinLock(&PageListLock, oldIrql);
         return((PHYSICAL_ADDRESS)0LL);
      }
      ListEntry = RemoveTailList(&FreeUnzeroedPageListHead);

      PageDescriptor = CONTAINING_RECORD(ListEntry, PHYSICAL_PAGE, ListEntry);
      KeReleaseSpinLock(&PageListLock, oldIrql);

      NeedClear = TRUE;
   }
   else
   {
      ListEntry = RemoveTailList(&FreeZeroedPageListHead);
      KeReleaseSpinLock(&PageListLock, oldIrql);

      PageDescriptor = CONTAINING_RECORD(ListEntry, PHYSICAL_PAGE, ListEntry);
   }

   if (PageDescriptor->Flags != MM_PHYSICAL_PAGE_FREE)
   {
      DbgPrint("Got non-free page from freelist\n");
      KeBugCheck(0);
   }
   PageDescriptor->Flags = MM_PHYSICAL_PAGE_USED;
   PageDescriptor->ReferenceCount = 1;
   PageDescriptor->LockCount = 0;
   PageDescriptor->MapCount = 0;
   PageDescriptor->SavedSwapEntry = SavedSwapEntry;
   ExInterlockedInsertTailList(&UsedPageListHeads[Consumer], ListEntry,
                               &PageListLock);

   MmStats.NrSystemPages++;
   MmStats.NrFreePages--;

   PageOffset.QuadPart = (ULONG)((ULONG)PageDescriptor - (ULONG)MmPageArray);
   PageOffset.QuadPart =
     (PageOffset.QuadPart / sizeof(PHYSICAL_PAGE)) * PAGESIZE;
   if (NeedClear)
   {
      MiZeroPage(PageOffset);
   }
   return(PageOffset);
}