/*
 * COPYRIGHT: See COPYING in the top level directory
 * PROJECT: ReactOS kernel
 * FILE: ntoskrnl/mm/freelist.c
 * PURPOSE: Handle the list of free physical pages
 * PROGRAMMER: David Welch (welch@cwcom.net)
 * UPDATE HISTORY:
 * 27/05/98: Created
 * 18/08/98: Added a fix from Robert Bergkvist
 */

/* INCLUDES ****************************************************************/

#include <ddk/ntddk.h>
#include <internal/mm.h>
#include <internal/ntoskrnl.h>

#define NDEBUG
#include <internal/debug.h>

/* TYPES *******************************************************************/

#define MM_PHYSICAL_PAGE_FREE (0x1)
#define MM_PHYSICAL_PAGE_USED (0x2)
#define MM_PHYSICAL_PAGE_BIOS (0x3)

#define MM_PTYPE(x) ((x) & 0x3)

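/*
 * Each physical page in the system is described by one of these entries.
 * MmPageArray below is indexed by page frame number, so descriptor N
 * covers the physical range [N * PAGE_SIZE, (N + 1) * PAGE_SIZE).  The
 * ListEntry links the page into exactly one of the free, used or bios
 * lists at any given time.
 */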
typedef struct _PHYSICAL_PAGE
{
  ULONG Flags;
  LIST_ENTRY ListEntry;
  ULONG ReferenceCount;
  SWAPENTRY SavedSwapEntry;
  ULONG LockCount;
  ULONG MapCount;
  struct _MM_RMAP_ENTRY* RmapListHead;
} PHYSICAL_PAGE, *PPHYSICAL_PAGE;

/* GLOBALS ****************************************************************/

static PPHYSICAL_PAGE MmPageArray;

static KSPIN_LOCK PageListLock;
static LIST_ENTRY UsedPageListHeads[MC_MAXIMUM];
static LIST_ENTRY FreeZeroedPageListHead;
static LIST_ENTRY FreeUnzeroedPageListHead;
static LIST_ENTRY BiosPageListHead;

/* FUNCTIONS *************************************************************/

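/*
 * FUNCTION: Moves the page at PhysicalAddress from its current consumer's
 * used list onto the used list for NewConsumer.
 * NOTE: The page is zeroed after the transfer; presumably callers rely on
 * receiving a clean page from the new consumer's point of view.
 */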
VOID
MmTransferOwnershipPage(PHYSICAL_ADDRESS PhysicalAddress, ULONG NewConsumer)
{
  ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
  KIRQL oldIrql;

  KeAcquireSpinLock(&PageListLock, &oldIrql);
  RemoveEntryList(&MmPageArray[Start].ListEntry);
  InsertTailList(&UsedPageListHeads[NewConsumer],
                 &MmPageArray[Start].ListEntry);
  KeReleaseSpinLock(&PageListLock, oldIrql);
  MiZeroPage(PhysicalAddress);
}

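/*
 * FUNCTION: Returns the physical address of the first page on the MC_USER
 * used list, or zero if the list is empty.  Together with
 * MmGetLRUNextUserPage this lets callers walk all user pages in roughly
 * least-recently-allocated order.
 */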
PHYSICAL_ADDRESS
MmGetLRUFirstUserPage(VOID)
{
  PLIST_ENTRY NextListEntry;
  PHYSICAL_ADDRESS Next;
  PHYSICAL_PAGE* PageDescriptor;
  KIRQL oldIrql;

  KeAcquireSpinLock(&PageListLock, &oldIrql);
  NextListEntry = UsedPageListHeads[MC_USER].Flink;
  if (NextListEntry == &UsedPageListHeads[MC_USER])
  {
    KeReleaseSpinLock(&PageListLock, oldIrql);
    return((LARGE_INTEGER)0LL);
  }
  PageDescriptor = CONTAINING_RECORD(NextListEntry, PHYSICAL_PAGE, ListEntry);
  Next.QuadPart = (ULONG)((ULONG)PageDescriptor - (ULONG)MmPageArray);
  Next.QuadPart = (Next.QuadPart / sizeof(PHYSICAL_PAGE)) * PAGE_SIZE;
  KeReleaseSpinLock(&PageListLock, oldIrql);
  return(Next);
}

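/*
 * FUNCTION: Returns the physical address of the MC_USER page following
 * PreviousPhysicalAddress on the used list, or zero when the end of the
 * list is reached.  If the previous page is no longer marked used the
 * walk restarts from the head of the list.
 */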
PHYSICAL_ADDRESS
MmGetLRUNextUserPage(PHYSICAL_ADDRESS PreviousPhysicalAddress)
{
  ULONG Start = PreviousPhysicalAddress.u.LowPart / PAGE_SIZE;
  PLIST_ENTRY NextListEntry;
  PHYSICAL_ADDRESS Next;
  PHYSICAL_PAGE* PageDescriptor;
  KIRQL oldIrql;

  KeAcquireSpinLock(&PageListLock, &oldIrql);
  if (!(MmPageArray[Start].Flags & MM_PHYSICAL_PAGE_USED))
  {
    NextListEntry = UsedPageListHeads[MC_USER].Flink;
  }
  else
  {
    NextListEntry = MmPageArray[Start].ListEntry.Flink;
  }
  if (NextListEntry == &UsedPageListHeads[MC_USER])
  {
    KeReleaseSpinLock(&PageListLock, oldIrql);
    return((LARGE_INTEGER)0LL);
  }
  PageDescriptor = CONTAINING_RECORD(NextListEntry, PHYSICAL_PAGE, ListEntry);
  Next.QuadPart = (ULONG)((ULONG)PageDescriptor - (ULONG)MmPageArray);
  Next.QuadPart = (Next.QuadPart / sizeof(PHYSICAL_PAGE)) * PAGE_SIZE;
  KeReleaseSpinLock(&PageListLock, oldIrql);
  return(Next);
}

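/*
 * FUNCTION: Allocates a physically contiguous, aligned run of free pages
 * for the non-paged pool consumer.
 * ARGUMENTS:
 *       NumberOfBytes = Size of the run, rounded up to whole pages
 *       HighestAcceptableAddress = Upper bound for the physical range
 *       Alignment = Required alignment of the base address, in bytes
 *                   (assumed to be a multiple of PAGE_SIZE)
 * RETURNS: The physical base address of the run, or zero on failure.
 * NOTE: A caller wanting, say, a 64KB-aligned 16KB buffer below 16MB
 * might do something like the following (illustrative sketch only):
 *       PHYSICAL_ADDRESS Highest;
 *       Highest.QuadPart = 0x1000000;
 *       Base = MmGetContinuousPages(4 * PAGE_SIZE, Highest, 0x10000);
 */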
PHYSICAL_ADDRESS
MmGetContinuousPages(ULONG NumberOfBytes,
                     PHYSICAL_ADDRESS HighestAcceptableAddress,
                     ULONG Alignment)
{
  ULONG NrPages;
  ULONG i;
  LONG start;
  ULONG length;
  KIRQL oldIrql;

  NrPages = PAGE_ROUND_UP(NumberOfBytes) / PAGE_SIZE;

  KeAcquireSpinLock(&PageListLock, &oldIrql);

  start = -1;
  length = 0;
  for (i = 0; i < (HighestAcceptableAddress.QuadPart / PAGE_SIZE); )
  {
    if (MM_PTYPE(MmPageArray[i].Flags) == MM_PHYSICAL_PAGE_FREE)
    {
      if (start == -1)
      {
        start = i;
        length = 1;
      }
      else
      {
        length++;
      }
      i++;
      if (length == NrPages)
      {
        break;
      }
    }
    else
    {
      start = -1;
      /*
       * Fast forward to the base of the next aligned region
       */
      i = ROUND_UP((i + 1), (Alignment / PAGE_SIZE));
    }
  }
  if (start == -1 || length != NrPages)
  {
    KeReleaseSpinLock(&PageListLock, oldIrql);
    return((LARGE_INTEGER)(LONGLONG)0);
  }
  for (i = start; i < (start + length); i++)
  {
    RemoveEntryList(&MmPageArray[i].ListEntry);
    MmPageArray[i].Flags = MM_PHYSICAL_PAGE_USED;
    MmPageArray[i].ReferenceCount = 1;
    MmPageArray[i].LockCount = 0;
    MmPageArray[i].MapCount = 0;
    MmPageArray[i].SavedSwapEntry = 0;
    InsertTailList(&UsedPageListHeads[MC_NPPOOL],
                   &MmPageArray[i].ListEntry);
  }
  KeReleaseSpinLock(&PageListLock, oldIrql);
  return((LARGE_INTEGER)((LONGLONG)start * PAGE_SIZE));
}

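/*
 * FUNCTION: Marks every page covered by a usable BIOS address range as
 * free and puts it on the unzeroed free list.  Pages that already have a
 * type (kernel, bios, ...) are left alone.
 */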
VOID
MiParseRangeToFreeList(PADDRESS_RANGE Range)
{
  ULONG i, first, last;

  /* FIXME: Not 64-bit ready */

  DPRINT("Range going to free list (Base 0x%X, Length 0x%X, Type 0x%X)\n",
         Range->BaseAddrLow,
         Range->LengthLow,
         Range->Type);

  first = (Range->BaseAddrLow + PAGE_SIZE - 1) / PAGE_SIZE;
  last = first + ((Range->LengthLow + PAGE_SIZE - 1) / PAGE_SIZE);
  for (i = first; i < last; i++)
  {
    if (MmPageArray[i].Flags == 0)
    {
      MmPageArray[i].Flags = MM_PHYSICAL_PAGE_FREE;
      MmPageArray[i].ReferenceCount = 0;
      InsertTailList(&FreeUnzeroedPageListHead,
                     &MmPageArray[i].ListEntry);
    }
  }
}

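/*
 * FUNCTION: Marks every page covered by a reserved BIOS address range as
 * a bios page, pulling it off the free list first if it happened to be
 * there.
 */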
VOID
MiParseRangeToBiosList(PADDRESS_RANGE Range)
{
  ULONG i, first, last;

  /* FIXME: Not 64-bit ready */

  DPRINT("Range going to bios list (Base 0x%X, Length 0x%X, Type 0x%X)\n",
         Range->BaseAddrLow,
         Range->LengthLow,
         Range->Type);

  first = (Range->BaseAddrLow + PAGE_SIZE - 1) / PAGE_SIZE;
  last = first + ((Range->LengthLow + PAGE_SIZE - 1) / PAGE_SIZE);
  for (i = first; i < last; i++)
  {
    /* Remove the page from the free list if it is there */
    if (MmPageArray[i].Flags == MM_PHYSICAL_PAGE_FREE)
    {
      RemoveEntryList(&MmPageArray[i].ListEntry);
    }

    if (MmPageArray[i].Flags != MM_PHYSICAL_PAGE_BIOS)
    {
      MmPageArray[i].Flags = MM_PHYSICAL_PAGE_BIOS;
      MmPageArray[i].ReferenceCount = 1;
      InsertTailList(&BiosPageListHead,
                     &MmPageArray[i].ListEntry);
    }
  }
}

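/*
 * FUNCTION: Walks the address ranges reported by the BIOS (an E820-style
 * memory map) and sorts each range that lies below MemorySizeInPages into
 * either the free list or the bios list.  Type 1 ranges are usable RAM;
 * everything else is treated as reserved.
 */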
VOID
MiParseBIOSMemoryMap(ULONG MemorySizeInPages,
                     PADDRESS_RANGE BIOSMemoryMap,
                     ULONG AddressRangeCount)
{
  PADDRESS_RANGE p;
  ULONG i;

  p = BIOSMemoryMap;
  for (i = 0; i < AddressRangeCount; i++)
  {
    if (((p->BaseAddrLow + PAGE_SIZE - 1) / PAGE_SIZE) < MemorySizeInPages)
    {
      if (p->Type == 1)
      {
        MiParseRangeToFreeList(p);
      }
      else
      {
        MiParseRangeToBiosList(p);
      }
    }
    p += 1;
  }
}

PVOID
MmInitializePageList(PVOID FirstPhysKernelAddress,
                     PVOID LastPhysKernelAddress,
                     ULONG MemorySizeInPages,
                     ULONG LastKernelAddress,
                     PADDRESS_RANGE BIOSMemoryMap,
                     ULONG AddressRangeCount)
/*
 * FUNCTION: Initializes the page list with all pages free
 * except those known to be reserved and those used by the kernel
 * ARGUMENTS:
 *       FirstPhysKernelAddress = First physical address used by the kernel
 *       LastPhysKernelAddress = Last physical address used by the kernel
 *       MemorySizeInPages = Number of physical pages in the system
 *       LastKernelAddress = Last kernel virtual address; the page
 *                           descriptor array is placed here
 *       BIOSMemoryMap = Memory map reported by the BIOS (may be NULL)
 *       AddressRangeCount = Number of entries in BIOSMemoryMap
 * RETURNS: The new last kernel address, past the page descriptor array
 */
{
  ULONG i;
  ULONG Reserved;
  NTSTATUS Status;

  DPRINT("MmInitializePageList(FirstPhysKernelAddress %x, "
         "LastPhysKernelAddress %x, "
         "MemorySizeInPages %x, LastKernelAddress %x)\n",
         FirstPhysKernelAddress,
         LastPhysKernelAddress,
         MemorySizeInPages,
         LastKernelAddress);

  for (i = 0; i < MC_MAXIMUM; i++)
  {
    InitializeListHead(&UsedPageListHeads[i]);
  }
  KeInitializeSpinLock(&PageListLock);
  InitializeListHead(&FreeUnzeroedPageListHead);
  InitializeListHead(&FreeZeroedPageListHead);
  InitializeListHead(&BiosPageListHead);

  LastKernelAddress = PAGE_ROUND_UP(LastKernelAddress);

  Reserved =
    PAGE_ROUND_UP((MemorySizeInPages * sizeof(PHYSICAL_PAGE))) / PAGE_SIZE;
  MmPageArray = (PHYSICAL_PAGE *)LastKernelAddress;

  DPRINT("Reserved %d\n", Reserved);

  LastKernelAddress = PAGE_ROUND_UP(LastKernelAddress);
  LastKernelAddress = ((ULONG)LastKernelAddress + (Reserved * PAGE_SIZE));
  LastPhysKernelAddress = (PVOID)PAGE_ROUND_UP(LastPhysKernelAddress);
  LastPhysKernelAddress = LastPhysKernelAddress + (Reserved * PAGE_SIZE);

  MmStats.NrTotalPages = 0;
  MmStats.NrSystemPages = 0;
  MmStats.NrUserPages = 0;
  MmStats.NrReservedPages = 0;
  MmStats.NrFreePages = 0;
  MmStats.NrLockedPages = 0;

  for (i = 0; i < Reserved; i++)
  {
    PVOID Address = (PVOID)(ULONG)MmPageArray + (i * PAGE_SIZE);
    if (!MmIsPagePresent(NULL, Address))
    {
      ULONG PhysicalAddress;
      PhysicalAddress = (ULONG)LastPhysKernelAddress -
        (Reserved * PAGE_SIZE) + (i * PAGE_SIZE);
      Status =
        MmCreateVirtualMappingUnsafe(NULL,
                                     Address,
                                     PAGE_READWRITE,
                                     (PHYSICAL_ADDRESS)(LONGLONG)PhysicalAddress,
                                     FALSE);
      if (!NT_SUCCESS(Status))
      {
        DbgPrint("Unable to create virtual mapping\n");
        KeBugCheck(0);
      }
    }
    memset((PVOID)MmPageArray + (i * PAGE_SIZE), 0, PAGE_SIZE);
  }

  /*
   * Page zero is reserved
   */
  MmPageArray[0].Flags = MM_PHYSICAL_PAGE_BIOS;
  MmPageArray[0].ReferenceCount = 0;
  InsertTailList(&BiosPageListHead,
                 &MmPageArray[0].ListEntry);

  /*
   * Page one is reserved for the initial KPCR
   */
  MmPageArray[1].Flags = MM_PHYSICAL_PAGE_BIOS;
  MmPageArray[1].ReferenceCount = 0;
  InsertTailList(&BiosPageListHead,
                 &MmPageArray[1].ListEntry);

  i = 2;
  if ((ULONG)FirstPhysKernelAddress < 0xa0000)
  {
    MmStats.NrFreePages += (((ULONG)FirstPhysKernelAddress/PAGE_SIZE) - i);
    for (; i<((ULONG)FirstPhysKernelAddress/PAGE_SIZE); i++)
    {
      MmPageArray[i].Flags = MM_PHYSICAL_PAGE_FREE;
      MmPageArray[i].ReferenceCount = 0;
      InsertTailList(&FreeUnzeroedPageListHead,
                     &MmPageArray[i].ListEntry);
    }
    MmStats.NrSystemPages +=
      ((((ULONG)LastPhysKernelAddress) / PAGE_SIZE) - i);
    for (; i<((ULONG)LastPhysKernelAddress / PAGE_SIZE); i++)
    {
      MmPageArray[i].Flags = MM_PHYSICAL_PAGE_USED;
      MmPageArray[i].ReferenceCount = 1;
      InsertTailList(&UsedPageListHeads[MC_NPPOOL],
                     &MmPageArray[i].ListEntry);
    }
    MmStats.NrFreePages += ((0xa0000/PAGE_SIZE) - i);
    for (; i<(0xa0000/PAGE_SIZE); i++)
    {
      MmPageArray[i].Flags = MM_PHYSICAL_PAGE_FREE;
      MmPageArray[i].ReferenceCount = 0;
      InsertTailList(&FreeUnzeroedPageListHead,
                     &MmPageArray[i].ListEntry);
    }
    MmStats.NrReservedPages += ((0x100000/PAGE_SIZE) - i);
    for (; i<(0x100000 / PAGE_SIZE); i++)
    {
      MmPageArray[i].Flags = MM_PHYSICAL_PAGE_BIOS;
      MmPageArray[i].ReferenceCount = 1;
      InsertTailList(&BiosPageListHead,
                     &MmPageArray[i].ListEntry);
    }
  }
  else
  {
    MmStats.NrFreePages += ((0xa0000 / PAGE_SIZE) - i);
    for (; i<(0xa0000 / PAGE_SIZE); i++)
    {
      MmPageArray[i].Flags = MM_PHYSICAL_PAGE_FREE;
      MmPageArray[i].ReferenceCount = 0;
      InsertTailList(&FreeUnzeroedPageListHead,
                     &MmPageArray[i].ListEntry);
    }
    MmStats.NrReservedPages += (0x60000 / PAGE_SIZE);
    for (; i<(0x100000 / PAGE_SIZE); i++)
    {
      MmPageArray[i].Flags = MM_PHYSICAL_PAGE_BIOS;
      MmPageArray[i].ReferenceCount = 1;
      InsertTailList(&BiosPageListHead,
                     &MmPageArray[i].ListEntry);
    }
    MmStats.NrFreePages += (((ULONG)FirstPhysKernelAddress/PAGE_SIZE) - i);
    for (; i<((ULONG)FirstPhysKernelAddress/PAGE_SIZE); i++)
    {
      MmPageArray[i].Flags = MM_PHYSICAL_PAGE_FREE;
      MmPageArray[i].ReferenceCount = 0;
      InsertTailList(&FreeUnzeroedPageListHead,
                     &MmPageArray[i].ListEntry);
    }
    MmStats.NrSystemPages +=
      (((ULONG)LastPhysKernelAddress/PAGE_SIZE) - i);
    for (; i<((ULONG)LastPhysKernelAddress/PAGE_SIZE); i++)
    {
      MmPageArray[i].Flags = MM_PHYSICAL_PAGE_USED;
      MmPageArray[i].ReferenceCount = 1;
      InsertTailList(&UsedPageListHeads[MC_NPPOOL],
                     &MmPageArray[i].ListEntry);
    }
  }

  MmStats.NrFreePages += (MemorySizeInPages - i);
  for (; i<MemorySizeInPages; i++)
  {
    MmPageArray[i].Flags = MM_PHYSICAL_PAGE_FREE;
    MmPageArray[i].ReferenceCount = 0;
    InsertTailList(&FreeUnzeroedPageListHead,
                   &MmPageArray[i].ListEntry);
  }

  if ((BIOSMemoryMap != NULL) && (AddressRangeCount > 0))
  {
    MiParseBIOSMemoryMap(
      MemorySizeInPages,
      BIOSMemoryMap,
      AddressRangeCount);
  }

  MmStats.NrTotalPages = MmStats.NrFreePages + MmStats.NrSystemPages +
    MmStats.NrReservedPages + MmStats.NrUserPages;
  MmInitializeBalancer(MmStats.NrFreePages);
  return((PVOID)LastKernelAddress);
}

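/*
 * FUNCTION: Sets the flags word of the page frame descriptor for
 * PhysicalAddress.  The remaining Mm*Page accessors below follow the same
 * pattern: the physical address is converted to a page frame number and
 * used to index MmPageArray, normally under PageListLock.
 */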
VOID
MmSetFlagsPage(PHYSICAL_ADDRESS PhysicalAddress, ULONG Flags)
{
  ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
  KIRQL oldIrql;

  KeAcquireSpinLock(&PageListLock, &oldIrql);
  MmPageArray[Start].Flags = Flags;
  KeReleaseSpinLock(&PageListLock, oldIrql);
}

VOID
MmSetRmapListHeadPage(PHYSICAL_ADDRESS PhysicalAddress,
                      struct _MM_RMAP_ENTRY* ListHead)
{
  ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;

  MmPageArray[Start].RmapListHead = ListHead;
}

struct _MM_RMAP_ENTRY*
MmGetRmapListHeadPage(PHYSICAL_ADDRESS PhysicalAddress)
{
  ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;

  return(MmPageArray[Start].RmapListHead);
}

VOID
MmMarkPageMapped(PHYSICAL_ADDRESS PhysicalAddress)
{
  ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
  KIRQL oldIrql;

  KeAcquireSpinLock(&PageListLock, &oldIrql);
  MmPageArray[Start].MapCount++;
  KeReleaseSpinLock(&PageListLock, oldIrql);
}

VOID
MmMarkPageUnmapped(PHYSICAL_ADDRESS PhysicalAddress)
{
  ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
  KIRQL oldIrql;

  KeAcquireSpinLock(&PageListLock, &oldIrql);
  MmPageArray[Start].MapCount--;
  KeReleaseSpinLock(&PageListLock, oldIrql);
}

ULONG
MmGetFlagsPage(PHYSICAL_ADDRESS PhysicalAddress)
{
  ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
  KIRQL oldIrql;
  ULONG Flags;

  KeAcquireSpinLock(&PageListLock, &oldIrql);
  Flags = MmPageArray[Start].Flags;
  KeReleaseSpinLock(&PageListLock, oldIrql);

  return(Flags);
}

VOID
MmSetSavedSwapEntryPage(PHYSICAL_ADDRESS PhysicalAddress,
                        SWAPENTRY SavedSwapEntry)
{
  ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
  KIRQL oldIrql;

  KeAcquireSpinLock(&PageListLock, &oldIrql);
  MmPageArray[Start].SavedSwapEntry = SavedSwapEntry;
  KeReleaseSpinLock(&PageListLock, oldIrql);
}

SWAPENTRY
MmGetSavedSwapEntryPage(PHYSICAL_ADDRESS PhysicalAddress)
{
  ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
  SWAPENTRY SavedSwapEntry;
  KIRQL oldIrql;

  KeAcquireSpinLock(&PageListLock, &oldIrql);
  SavedSwapEntry = MmPageArray[Start].SavedSwapEntry;
  KeReleaseSpinLock(&PageListLock, oldIrql);

  return(SavedSwapEntry);
}

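/*
 * FUNCTION: Takes an additional reference on a used page.  Referencing
 * the zero page or a page that is not marked in-use is treated as a
 * fatal error.
 */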
VOID
MmReferencePage(PHYSICAL_ADDRESS PhysicalAddress)
{
  ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
  KIRQL oldIrql;

  DPRINT("MmReferencePage(PhysicalAddress %x)\n", PhysicalAddress);

  if (PhysicalAddress.u.LowPart == 0)
  {
    KeBugCheck(0);
  }

  KeAcquireSpinLock(&PageListLock, &oldIrql);

  if (MM_PTYPE(MmPageArray[Start].Flags) != MM_PHYSICAL_PAGE_USED)
  {
    DbgPrint("Referencing non-used page\n");
    KeBugCheck(0);
  }

  MmPageArray[Start].ReferenceCount++;
  KeReleaseSpinLock(&PageListLock, oldIrql);
}

ULONG
MmGetReferenceCountPage(PHYSICAL_ADDRESS PhysicalAddress)
{
  ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
  KIRQL oldIrql;
  ULONG RCount;

  DPRINT("MmGetReferenceCountPage(PhysicalAddress %x)\n", PhysicalAddress);

  if (PhysicalAddress.u.LowPart == 0)
  {
    KeBugCheck(0);
  }

  KeAcquireSpinLock(&PageListLock, &oldIrql);

  if (MM_PTYPE(MmPageArray[Start].Flags) != MM_PHYSICAL_PAGE_USED)
  {
    DbgPrint("Getting reference count for free page\n");
    KeBugCheck(0);
  }

  RCount = MmPageArray[Start].ReferenceCount;

  KeReleaseSpinLock(&PageListLock, oldIrql);
  return(RCount);
}

BOOLEAN
MmIsUsablePage(PHYSICAL_ADDRESS PhysicalAddress)
{
  ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;

  DPRINT("MmIsUsablePage(PhysicalAddress %x)\n", PhysicalAddress);

  if (PhysicalAddress.u.LowPart == 0)
  {
    KeBugCheck(0);
  }

  if (MM_PTYPE(MmPageArray[Start].Flags) != MM_PHYSICAL_PAGE_USED &&
      MM_PTYPE(MmPageArray[Start].Flags) != MM_PHYSICAL_PAGE_BIOS)
  {
    return(FALSE);
  }

  return(TRUE);
}

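/*
 * FUNCTION: Drops one reference on a used page.  When the count reaches
 * zero the page is checked for outstanding rmap entries, mappings, locks
 * and swap entries (any of which is a fatal error) and then returned to
 * the unzeroed free list.
 */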
VOID
MmDereferencePage(PHYSICAL_ADDRESS PhysicalAddress)
{
  ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
  KIRQL oldIrql;

  DPRINT("MmDereferencePage(PhysicalAddress %I64x)\n", PhysicalAddress);

  if (PhysicalAddress.u.LowPart == 0)
  {
    KeBugCheck(0);
  }

  KeAcquireSpinLock(&PageListLock, &oldIrql);

  if (MM_PTYPE(MmPageArray[Start].Flags) != MM_PHYSICAL_PAGE_USED)
  {
    DbgPrint("Dereferencing free page\n");
    KeBugCheck(0);
  }

  MmPageArray[Start].ReferenceCount--;
  if (MmPageArray[Start].ReferenceCount == 0)
  {
    MmStats.NrFreePages++;
    MmStats.NrSystemPages--;
    RemoveEntryList(&MmPageArray[Start].ListEntry);
    if (MmPageArray[Start].RmapListHead != NULL)
    {
      DbgPrint("Freeing page with rmap entries.\n");
      KeBugCheck(0);
    }
    if (MmPageArray[Start].MapCount != 0)
    {
      DbgPrint("Freeing mapped page (0x%I64x count %d)\n",
               PhysicalAddress, MmPageArray[Start].MapCount);
      KeBugCheck(0);
    }
    if (MmPageArray[Start].LockCount > 0)
    {
      DbgPrint("Freeing locked page\n");
      KeBugCheck(0);
    }
    if (MmPageArray[Start].SavedSwapEntry != 0)
    {
      DbgPrint("Freeing page with swap entry.\n");
      KeBugCheck(0);
    }
    if (MmPageArray[Start].Flags != MM_PHYSICAL_PAGE_USED)
    {
      DbgPrint("Freeing page with flags %x\n",
               MmPageArray[Start].Flags);
      KeBugCheck(0);
    }
    MmPageArray[Start].Flags = MM_PHYSICAL_PAGE_FREE;
    InsertTailList(&FreeUnzeroedPageListHead,
                   &MmPageArray[Start].ListEntry);
  }
  KeReleaseSpinLock(&PageListLock, oldIrql);
}

ULONG
MmGetLockCountPage(PHYSICAL_ADDRESS PhysicalAddress)
{
  ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
  KIRQL oldIrql;
  ULONG LockCount;

  DPRINT("MmGetLockCountPage(PhysicalAddress %x)\n", PhysicalAddress);

  if (PhysicalAddress.u.LowPart == 0)
  {
    KeBugCheck(0);
  }

  KeAcquireSpinLock(&PageListLock, &oldIrql);

  if (MM_PTYPE(MmPageArray[Start].Flags) != MM_PHYSICAL_PAGE_USED)
  {
    DbgPrint("Getting lock count for free page\n");
    KeBugCheck(0);
  }

  LockCount = MmPageArray[Start].LockCount;
  KeReleaseSpinLock(&PageListLock, oldIrql);

  return(LockCount);
}

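/*
 * FUNCTION: MmLockPage and MmUnlockPage adjust the lock count of a used
 * page.  A non-zero lock count keeps MmDereferencePage from returning the
 * page to the free list (it bugchecks instead), so locked pages are
 * expected to be unlocked before their last reference is dropped.
 */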
VOID
MmLockPage(PHYSICAL_ADDRESS PhysicalAddress)
{
  ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
  KIRQL oldIrql;

  DPRINT("MmLockPage(PhysicalAddress %x)\n", PhysicalAddress);

  if (PhysicalAddress.u.LowPart == 0)
  {
    KeBugCheck(0);
  }

  KeAcquireSpinLock(&PageListLock, &oldIrql);

  if (MM_PTYPE(MmPageArray[Start].Flags) != MM_PHYSICAL_PAGE_USED)
  {
    DbgPrint("Locking free page\n");
    KeBugCheck(0);
  }

  MmPageArray[Start].LockCount++;
  KeReleaseSpinLock(&PageListLock, oldIrql);
}

VOID
MmUnlockPage(PHYSICAL_ADDRESS PhysicalAddress)
{
  ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
  KIRQL oldIrql;

  DPRINT("MmUnlockPage(PhysicalAddress %llx)\n", PhysicalAddress);

  if (PhysicalAddress.u.LowPart == 0)
  {
    KeBugCheck(0);
  }

  KeAcquireSpinLock(&PageListLock, &oldIrql);

  if (MM_PTYPE(MmPageArray[Start].Flags) != MM_PHYSICAL_PAGE_USED)
  {
    DbgPrint("Unlocking free page\n");
    KeBugCheck(0);
  }

  MmPageArray[Start].LockCount--;
  KeReleaseSpinLock(&PageListLock, oldIrql);
}

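/*
 * FUNCTION: Allocates a single physical page for the given consumer
 * (MC_USER, MC_NPPOOL, ...), preferring the zeroed free list and falling
 * back to the unzeroed list, in which case the page is zeroed here.
 * Returns zero if both free lists are empty.
 * NOTE: A typical caller pairs this with MmDereferencePage, roughly as
 * follows (illustrative sketch only):
 *       Page = MmAllocPage(MC_USER, 0);
 *       if (Page.QuadPart == 0LL)
 *         {
 *            ... release or page out some memory and retry ...
 *         }
 *       ... map and use the page ...
 *       MmDereferencePage(Page);
 */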
PHYSICAL_ADDRESS
MmAllocPage(ULONG Consumer, SWAPENTRY SavedSwapEntry)
{
  PHYSICAL_ADDRESS PageOffset;
  PLIST_ENTRY ListEntry;
  PPHYSICAL_PAGE PageDescriptor;
  KIRQL oldIrql;
  BOOLEAN NeedClear = FALSE;

  DPRINT("MmAllocPage()\n");

  KeAcquireSpinLock(&PageListLock, &oldIrql);
  if (IsListEmpty(&FreeZeroedPageListHead))
  {
    if (IsListEmpty(&FreeUnzeroedPageListHead))
    {
      DPRINT1("MmAllocPage(): Out of memory\n");
      KeReleaseSpinLock(&PageListLock, oldIrql);
      return((PHYSICAL_ADDRESS)0LL);
    }
    ListEntry = RemoveTailList(&FreeUnzeroedPageListHead);

    PageDescriptor = CONTAINING_RECORD(ListEntry, PHYSICAL_PAGE, ListEntry);
    KeReleaseSpinLock(&PageListLock, oldIrql);

    NeedClear = TRUE;
  }
  else
  {
    ListEntry = RemoveTailList(&FreeZeroedPageListHead);
    KeReleaseSpinLock(&PageListLock, oldIrql);

    PageDescriptor = CONTAINING_RECORD(ListEntry, PHYSICAL_PAGE, ListEntry);
  }

  if (PageDescriptor->Flags != MM_PHYSICAL_PAGE_FREE)
  {
    DbgPrint("Got non-free page from freelist\n");
    KeBugCheck(0);
  }
  PageDescriptor->Flags = MM_PHYSICAL_PAGE_USED;
  PageDescriptor->ReferenceCount = 1;
  PageDescriptor->LockCount = 0;
  PageDescriptor->MapCount = 0;
  PageDescriptor->SavedSwapEntry = SavedSwapEntry;
  ExInterlockedInsertTailList(&UsedPageListHeads[Consumer], ListEntry,
                              &PageListLock);

  MmStats.NrSystemPages++;
  MmStats.NrFreePages--;

  PageOffset.QuadPart = (ULONG)((ULONG)PageDescriptor - (ULONG)MmPageArray);
  PageOffset.QuadPart =
    (PageOffset.QuadPart / sizeof(PHYSICAL_PAGE)) * PAGE_SIZE;
  if (NeedClear)
  {
    MiZeroPage(PageOffset);
  }
  return(PageOffset);
}