/*
 * COPYRIGHT:   See COPYING in the top level directory
 * PROJECT:     ReactOS kernel
 * FILE:        ntoskrnl/mm/freelist.c
 * PURPOSE:     Handle the list of free physical pages
 * PROGRAMMER:  David Welch (welch@cwcom.net)
 * UPDATE HISTORY:
 *              27/05/98: Created
 *              18/08/98: Added a fix from Robert Bergkvist
 */

/* INCLUDES ****************************************************************/

#include <ddk/ntddk.h>
#include <internal/mm.h>
#include <internal/ntoskrnl.h>

#define NDEBUG
#include <internal/debug.h>

/* TYPES *******************************************************************/

#define MM_PHYSICAL_PAGE_FREE    (0x1)
#define MM_PHYSICAL_PAGE_USED    (0x2)
#define MM_PHYSICAL_PAGE_BIOS    (0x3)

#define MM_PTYPE(x)              ((x) & 0x3)

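/*
 * Descriptor for one physical page. The low two bits of Flags hold the
 * page type (free, used or bios/reserved), which MM_PTYPE extracts, e.g.
 * MM_PTYPE(MmPageArray[i].Flags) == MM_PHYSICAL_PAGE_FREE. ListEntry links
 * the descriptor into one of the page lists below; ReferenceCount, LockCount
 * and MapCount track users of the page; SavedSwapEntry remembers a swap
 * location associated with the page and RmapListHead anchors its
 * reverse-mapping list.
 */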
typedef struct _PHYSICAL_PAGE
{
   ULONG Flags;
   LIST_ENTRY ListEntry;
   ULONG ReferenceCount;
   SWAPENTRY SavedSwapEntry;
   ULONG LockCount;
   ULONG MapCount;
   struct _MM_RMAP_ENTRY* RmapListHead;
} PHYSICAL_PAGE, *PPHYSICAL_PAGE;

/* GLOBALS ****************************************************************/

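/*
 * MmPageArray holds one PHYSICAL_PAGE descriptor per page of physical
 * memory and is indexed by physical page number (address / PAGESIZE).
 * PageListLock protects the array and the lists below: per-consumer lists
 * of in-use pages, two free lists (already zeroed and not yet zeroed) and
 * the list of pages reserved for the bios/low memory area.
 */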
static PPHYSICAL_PAGE MmPageArray;

static KSPIN_LOCK PageListLock;
static LIST_ENTRY UsedPageListHeads[MC_MAXIMUM];
static LIST_ENTRY FreeZeroedPageListHead;
static LIST_ENTRY FreeUnzeroedPageListHead;
static LIST_ENTRY BiosPageListHead;

/* FUNCTIONS *************************************************************/

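/*
 * FUNCTION: Moves the descriptor for a used physical page onto the
 * in-use list of a different consumer
 * ARGUMENTS:
 *        PhysicalAddress = Address of the page to move
 *        NewConsumer = Index (MC_*) of the list to move it to
 */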
VOID
MmTransferOwnershipPage(PVOID PhysicalAddress, ULONG NewConsumer)
{
   ULONG Start = (ULONG)PhysicalAddress / PAGESIZE;
   KIRQL oldIrql;

   KeAcquireSpinLock(&PageListLock, &oldIrql);
   RemoveEntryList(&MmPageArray[Start].ListEntry);
   InsertTailList(&UsedPageListHeads[NewConsumer],
                  &MmPageArray[Start].ListEntry);
   KeReleaseSpinLock(&PageListLock, oldIrql);
}

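/*
 * FUNCTION: Returns the physical address of the first page on the
 * MC_USER in-use list, or NULL if there are no user pages. The address
 * is recovered from the descriptor's offset within MmPageArray.
 */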
PVOID
MmGetLRUFirstUserPage(VOID)
{
   PLIST_ENTRY NextListEntry;
   ULONG Next;
   PHYSICAL_PAGE* PageDescriptor;
   KIRQL oldIrql;

   KeAcquireSpinLock(&PageListLock, &oldIrql);
   NextListEntry = UsedPageListHeads[MC_USER].Flink;
   if (NextListEntry == &UsedPageListHeads[MC_USER])
   {
      KeReleaseSpinLock(&PageListLock, oldIrql);
      return(NULL);
   }
   PageDescriptor = CONTAINING_RECORD(NextListEntry, PHYSICAL_PAGE, ListEntry);
   Next = (ULONG)((ULONG)PageDescriptor - (ULONG)MmPageArray);
   Next = (Next / sizeof(PHYSICAL_PAGE)) * PAGESIZE;
   KeReleaseSpinLock(&PageListLock, oldIrql);
   return((PVOID)Next);
}

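/*
 * FUNCTION: Returns the physical address of the user page following
 * PreviousPhysicalAddress on the MC_USER list, or NULL at the end of
 * the list. If the previous page is no longer in use the walk restarts
 * from the head of the list.
 */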
PVOID
MmGetLRUNextUserPage(PVOID PreviousPhysicalAddress)
{
   ULONG Start = (ULONG)PreviousPhysicalAddress / PAGESIZE;
   PLIST_ENTRY NextListEntry;
   ULONG Next;
   PHYSICAL_PAGE* PageDescriptor;
   KIRQL oldIrql;

   KeAcquireSpinLock(&PageListLock, &oldIrql);
   if (MM_PTYPE(MmPageArray[Start].Flags) != MM_PHYSICAL_PAGE_USED)
   {
      /* The previous page is no longer in use; restart from the head. */
      NextListEntry = UsedPageListHeads[MC_USER].Flink;
   }
   else
   {
      NextListEntry = MmPageArray[Start].ListEntry.Flink;
   }
   if (NextListEntry == &UsedPageListHeads[MC_USER])
   {
      KeReleaseSpinLock(&PageListLock, oldIrql);
      return(NULL);
   }
   PageDescriptor = CONTAINING_RECORD(NextListEntry, PHYSICAL_PAGE, ListEntry);
   Next = (ULONG)((ULONG)PageDescriptor - (ULONG)MmPageArray);
   Next = (Next / sizeof(PHYSICAL_PAGE)) * PAGESIZE;
   KeReleaseSpinLock(&PageListLock, oldIrql);
   return((PVOID)Next);
}

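/*
 * FUNCTION: Allocates a physically contiguous, aligned run of pages
 * below HighestAcceptableAddress by scanning the page array for a run
 * of NrPages free pages; the pages found are marked used and placed on
 * the non-paged pool list
 * ARGUMENTS:
 *        NumberOfBytes = Size of the region to allocate
 *        HighestAcceptableAddress = Highest physical address the caller
 *                                   can accept
 *        Alignment = Required alignment of the region (in bytes)
 * RETURNS: The physical address of the region, or NULL on failure
 */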
PVOID
MmGetContinuousPages(ULONG NumberOfBytes,
                     PHYSICAL_ADDRESS HighestAcceptableAddress,
                     ULONG Alignment)
{
   ULONG NrPages;
   ULONG i;
   ULONG start;
   ULONG length;
   KIRQL oldIrql;

   NrPages = PAGE_ROUND_UP(NumberOfBytes) / PAGESIZE;

   KeAcquireSpinLock(&PageListLock, &oldIrql);

   start = -1;
   length = 0;
   for (i = 0; i < (HighestAcceptableAddress.QuadPart / PAGESIZE); )
   {
      if (MM_PTYPE(MmPageArray[i].Flags) == MM_PHYSICAL_PAGE_FREE)
      {
         if (start == -1)
         {
            start = i;
            length = 1;
         }
         else
         {
            length++;
         }
         i++;
         if (length == NrPages)
         {
            break;
         }
      }
      else
      {
         start = -1;
         /*
          * Fast forward to the base of the next aligned region
          */
         i = ROUND_UP((i + 1), (Alignment / PAGESIZE));
      }
   }
   if (start == -1 || length != NrPages)
   {
      KeReleaseSpinLock(&PageListLock, oldIrql);
      return(NULL);
   }
   for (i = start; i < (start + length); i++)
   {
      RemoveEntryList(&MmPageArray[i].ListEntry);
      MmPageArray[i].Flags = MM_PHYSICAL_PAGE_USED;
      MmPageArray[i].ReferenceCount = 1;
      MmPageArray[i].LockCount = 0;
      MmPageArray[i].MapCount = 0;
      MmPageArray[i].SavedSwapEntry = 0;
      InsertTailList(&UsedPageListHeads[MC_NPPOOL],
                     &MmPageArray[i].ListEntry);
   }
   KeReleaseSpinLock(&PageListLock, oldIrql);
   return((PVOID)(start * PAGESIZE));
}

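/*
 * FUNCTION: Adds the pages covered by a BIOS memory map entry to the
 * unzeroed free list, skipping descriptors that have already been
 * assigned a type
 */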
VOID MiParseRangeToFreeList(
   PADDRESS_RANGE Range)
{
   ULONG i, first, last;

   /* FIXME: Not 64-bit ready */

   DPRINT("Range going to free list (Base 0x%X, Length 0x%X, Type 0x%X)\n",
          Range->BaseAddrLow,
          Range->LengthLow,
          Range->Type);

   first = (Range->BaseAddrLow + PAGESIZE - 1) / PAGESIZE;
   last = first + ((Range->LengthLow + PAGESIZE - 1) / PAGESIZE);
   for (i = first; i < last; i++)
   {
      if (MmPageArray[i].Flags == 0)
      {
         MmPageArray[i].Flags = MM_PHYSICAL_PAGE_FREE;
         MmPageArray[i].ReferenceCount = 0;
         InsertTailList(&FreeUnzeroedPageListHead,
                        &MmPageArray[i].ListEntry);
      }
   }
}

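/*
 * FUNCTION: Marks the pages covered by a BIOS memory map entry as
 * reserved, removing them from the free list if necessary and moving
 * them to the bios page list
 */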
VOID MiParseRangeToBiosList(
   PADDRESS_RANGE Range)
{
   ULONG i, first, last;

   /* FIXME: Not 64-bit ready */

   DPRINT("Range going to bios list (Base 0x%X, Length 0x%X, Type 0x%X)\n",
          Range->BaseAddrLow,
          Range->LengthLow,
          Range->Type);

   first = (Range->BaseAddrLow + PAGESIZE - 1) / PAGESIZE;
   last = first + ((Range->LengthLow + PAGESIZE - 1) / PAGESIZE);
   for (i = first; i < last; i++)
   {
      /* Remove the page from the free list if it is there */
      if (MmPageArray[i].Flags == MM_PHYSICAL_PAGE_FREE)
      {
         RemoveEntryList(&MmPageArray[i].ListEntry);
      }

      if (MmPageArray[i].Flags != MM_PHYSICAL_PAGE_BIOS)
      {
         MmPageArray[i].Flags = MM_PHYSICAL_PAGE_BIOS;
         MmPageArray[i].ReferenceCount = 1;
         InsertTailList(&BiosPageListHead,
                        &MmPageArray[i].ListEntry);
      }
   }
}

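/*
 * FUNCTION: Walks the BIOS-provided address ranges and routes each one
 * that lies within the known physical memory either to the free list
 * (type 1, usable RAM) or to the bios list (any other type)
 */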
VOID MiParseBIOSMemoryMap(
   ULONG MemorySizeInPages,
   PADDRESS_RANGE BIOSMemoryMap,
   ULONG AddressRangeCount)
{
   PADDRESS_RANGE p;
   ULONG i;

   p = BIOSMemoryMap;
   for (i = 0; i < AddressRangeCount; i++)
   {
      if (((p->BaseAddrLow + PAGESIZE - 1) / PAGESIZE) < MemorySizeInPages)
      {
         if (p->Type == 1)
         {
            MiParseRangeToFreeList(p);
         }
         else
         {
            MiParseRangeToBiosList(p);
         }
      }
      p += 1;
   }
}

PVOID
MmInitializePageList(PVOID FirstPhysKernelAddress,
                     PVOID LastPhysKernelAddress,
                     ULONG MemorySizeInPages,
                     ULONG LastKernelAddress,
                     PADDRESS_RANGE BIOSMemoryMap,
                     ULONG AddressRangeCount)
/*
 * FUNCTION: Initializes the page list with all pages free
 * except those known to be reserved and those used by the kernel
 * ARGUMENTS:
 *        FirstPhysKernelAddress = First physical address used by the kernel
 *        LastPhysKernelAddress = Last physical address used by the kernel
 *        MemorySizeInPages = Number of pages of physical memory
 *        LastKernelAddress = Last virtual address used by the kernel
 *        BIOSMemoryMap = Optional BIOS memory map
 *        AddressRangeCount = Number of entries in the BIOS memory map
 * RETURNS: The first free virtual address after the page descriptor
 *          array, which is placed immediately after the kernel
 */
{
   ULONG i;
   ULONG Reserved;
   NTSTATUS Status;

   DPRINT("MmInitializePageList(FirstPhysKernelAddress %x, "
          "LastPhysKernelAddress %x, "
          "MemorySizeInPages %x, LastKernelAddress %x)\n",
          FirstPhysKernelAddress,
          LastPhysKernelAddress,
          MemorySizeInPages,
          LastKernelAddress);

   for (i = 0; i < MC_MAXIMUM; i++)
   {
      InitializeListHead(&UsedPageListHeads[i]);
   }
   KeInitializeSpinLock(&PageListLock);
   InitializeListHead(&FreeUnzeroedPageListHead);
   InitializeListHead(&FreeZeroedPageListHead);
   InitializeListHead(&BiosPageListHead);

   LastKernelAddress = PAGE_ROUND_UP(LastKernelAddress);

   /* Reserve enough pages after the kernel for the page descriptor array. */
   Reserved =
      PAGE_ROUND_UP((MemorySizeInPages * sizeof(PHYSICAL_PAGE))) / PAGESIZE;
   MmPageArray = (PHYSICAL_PAGE *)LastKernelAddress;

   DPRINT("Reserved %d\n", Reserved);

   LastKernelAddress = PAGE_ROUND_UP(LastKernelAddress);
   LastKernelAddress = ((ULONG)LastKernelAddress + (Reserved * PAGESIZE));
   LastPhysKernelAddress = (PVOID)PAGE_ROUND_UP(LastPhysKernelAddress);
   LastPhysKernelAddress = (PVOID)((ULONG)LastPhysKernelAddress +
                                   (Reserved * PAGESIZE));

   MmStats.NrTotalPages = 0;
   MmStats.NrSystemPages = 0;
   MmStats.NrUserPages = 0;
   MmStats.NrReservedPages = 0;
   MmStats.NrFreePages = 0;
   MmStats.NrLockedPages = 0;

   /* Map and zero the pages backing the page descriptor array. */
   for (i = 0; i < Reserved; i++)
   {
      if (!MmIsPagePresent(NULL,
                           (PVOID)((ULONG)MmPageArray + (i * PAGESIZE))))
      {
         Status =
            MmCreateVirtualMappingUnsafe(NULL,
                                         (PVOID)((ULONG)MmPageArray +
                                                 (i * PAGESIZE)),
                                         PAGE_READWRITE,
                                         (ULONG)LastPhysKernelAddress
                                            - (Reserved * PAGESIZE)
                                            + (i * PAGESIZE),
                                         FALSE);
         if (!NT_SUCCESS(Status))
         {
            DbgPrint("Unable to create virtual mapping\n");
            KeBugCheck(0);
         }
      }
      memset((PVOID)((ULONG)MmPageArray + (i * PAGESIZE)), 0, PAGESIZE);
   }

   /*
    * Page zero is reserved
    */
   MmPageArray[0].Flags = MM_PHYSICAL_PAGE_BIOS;
   MmPageArray[0].ReferenceCount = 0;
   InsertTailList(&BiosPageListHead,
                  &MmPageArray[0].ListEntry);

   /*
    * Page one is reserved for the initial KPCR
    */
   MmPageArray[1].Flags = MM_PHYSICAL_PAGE_BIOS;
   MmPageArray[1].ReferenceCount = 0;
   InsertTailList(&BiosPageListHead,
                  &MmPageArray[1].ListEntry);

   /*
    * Mark the kernel's pages as used, the region from 0xa0000 to 0x100000
    * (VGA and BIOS area) as reserved, and everything else as free,
    * depending on whether the kernel was loaded below or above 0xa0000.
    */
   i = 2;
   if ((ULONG)FirstPhysKernelAddress < 0xa0000)
   {
      MmStats.NrFreePages += (((ULONG)FirstPhysKernelAddress/PAGESIZE) - 1);
      for (; i<((ULONG)FirstPhysKernelAddress/PAGESIZE); i++)
      {
         MmPageArray[i].Flags = MM_PHYSICAL_PAGE_FREE;
         MmPageArray[i].ReferenceCount = 0;
         InsertTailList(&FreeUnzeroedPageListHead,
                        &MmPageArray[i].ListEntry);
      }
      MmStats.NrSystemPages +=
         ((((ULONG)LastPhysKernelAddress) / PAGESIZE) - i);
      for (; i<((ULONG)LastPhysKernelAddress / PAGESIZE); i++)
      {
         MmPageArray[i].Flags = MM_PHYSICAL_PAGE_USED;
         MmPageArray[i].ReferenceCount = 1;
         InsertTailList(&UsedPageListHeads[MC_NPPOOL],
                        &MmPageArray[i].ListEntry);
      }
      MmStats.NrFreePages += ((0xa0000/PAGESIZE) - i);
      for (; i<(0xa0000/PAGESIZE); i++)
      {
         MmPageArray[i].Flags = MM_PHYSICAL_PAGE_FREE;
         MmPageArray[i].ReferenceCount = 0;
         InsertTailList(&FreeUnzeroedPageListHead,
                        &MmPageArray[i].ListEntry);
      }
      MmStats.NrReservedPages += ((0x100000/PAGESIZE) - i);
      for (; i<(0x100000 / PAGESIZE); i++)
      {
         MmPageArray[i].Flags = MM_PHYSICAL_PAGE_BIOS;
         MmPageArray[i].ReferenceCount = 1;
         InsertTailList(&BiosPageListHead,
                        &MmPageArray[i].ListEntry);
      }
   }
   else
   {
      MmStats.NrFreePages += ((0xa0000 / PAGESIZE) - 1);
      for (; i<(0xa0000 / PAGESIZE); i++)
      {
         MmPageArray[i].Flags = MM_PHYSICAL_PAGE_FREE;
         MmPageArray[i].ReferenceCount = 0;
         InsertTailList(&FreeUnzeroedPageListHead,
                        &MmPageArray[i].ListEntry);
      }
      MmStats.NrReservedPages += (0x60000 / PAGESIZE);
      for (; i<(0x100000 / PAGESIZE); i++)
      {
         MmPageArray[i].Flags = MM_PHYSICAL_PAGE_BIOS;
         MmPageArray[i].ReferenceCount = 1;
         InsertTailList(&BiosPageListHead,
                        &MmPageArray[i].ListEntry);
      }
      MmStats.NrFreePages += (((ULONG)FirstPhysKernelAddress/PAGESIZE) - i);
      for (; i<((ULONG)FirstPhysKernelAddress/PAGESIZE); i++)
      {
         MmPageArray[i].Flags = MM_PHYSICAL_PAGE_FREE;
         MmPageArray[i].ReferenceCount = 0;
         InsertTailList(&FreeUnzeroedPageListHead,
                        &MmPageArray[i].ListEntry);
      }
      MmStats.NrSystemPages +=
         (((ULONG)LastPhysKernelAddress/PAGESIZE) - i);
      for (; i<((ULONG)LastPhysKernelAddress/PAGESIZE); i++)
      {
         MmPageArray[i].Flags = MM_PHYSICAL_PAGE_USED;
         MmPageArray[i].ReferenceCount = 1;
         InsertTailList(&UsedPageListHeads[MC_NPPOOL],
                        &MmPageArray[i].ListEntry);
      }
   }

   MmStats.NrFreePages += (MemorySizeInPages - i);
   for (; i<MemorySizeInPages; i++)
   {
      MmPageArray[i].Flags = MM_PHYSICAL_PAGE_FREE;
      MmPageArray[i].ReferenceCount = 0;
      InsertTailList(&FreeUnzeroedPageListHead,
                     &MmPageArray[i].ListEntry);
   }

   if ((BIOSMemoryMap != NULL) && (AddressRangeCount > 0))
   {
      MiParseBIOSMemoryMap(
         MemorySizeInPages,
         BIOSMemoryMap,
         AddressRangeCount);
   }

   MmStats.NrTotalPages = MmStats.NrFreePages + MmStats.NrSystemPages +
      MmStats.NrReservedPages + MmStats.NrUserPages;
   MmInitializeBalancer(MmStats.NrFreePages);
   return((PVOID)LastKernelAddress);
}

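/*
 * The following routines operate on the descriptor for a single physical
 * page, indexed by PhysicalAddress / PAGESIZE. Most of them take
 * PageListLock while touching the descriptor.
 */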
VOID MmSetFlagsPage(PVOID PhysicalAddress,
                    ULONG Flags)
{
   ULONG Start = (ULONG)PhysicalAddress / PAGESIZE;
   KIRQL oldIrql;

   KeAcquireSpinLock(&PageListLock, &oldIrql);
   MmPageArray[Start].Flags = Flags;
   KeReleaseSpinLock(&PageListLock, oldIrql);
}

VOID
MmSetRmapListHeadPage(PVOID PhysicalAddress, struct _MM_RMAP_ENTRY* ListHead)
{
   ULONG Start = (ULONG)PhysicalAddress / PAGESIZE;

   MmPageArray[Start].RmapListHead = ListHead;
}

struct _MM_RMAP_ENTRY*
MmGetRmapListHeadPage(PVOID PhysicalAddress)
{
   ULONG Start = (ULONG)PhysicalAddress / PAGESIZE;

   return(MmPageArray[Start].RmapListHead);
}

VOID
MmMarkPageMapped(PVOID PhysicalAddress)
{
   ULONG Start = (ULONG)PhysicalAddress / PAGESIZE;
   KIRQL oldIrql;

   KeAcquireSpinLock(&PageListLock, &oldIrql);
   MmPageArray[Start].MapCount++;
   KeReleaseSpinLock(&PageListLock, oldIrql);
}

VOID
MmMarkPageUnmapped(PVOID PhysicalAddress)
{
   ULONG Start = (ULONG)PhysicalAddress / PAGESIZE;
   KIRQL oldIrql;

   KeAcquireSpinLock(&PageListLock, &oldIrql);
   MmPageArray[Start].MapCount--;
   KeReleaseSpinLock(&PageListLock, oldIrql);
}

ULONG MmGetFlagsPage(PVOID PhysicalAddress)
{
   ULONG Start = (ULONG)PhysicalAddress / PAGESIZE;
   KIRQL oldIrql;
   ULONG Flags;

   KeAcquireSpinLock(&PageListLock, &oldIrql);
   Flags = MmPageArray[Start].Flags;
   KeReleaseSpinLock(&PageListLock, oldIrql);

   return(Flags);
}


VOID MmSetSavedSwapEntryPage(PVOID PhysicalAddress,
                             SWAPENTRY SavedSwapEntry)
{
   ULONG Start = (ULONG)PhysicalAddress / PAGESIZE;
   KIRQL oldIrql;

   KeAcquireSpinLock(&PageListLock, &oldIrql);
   MmPageArray[Start].SavedSwapEntry = SavedSwapEntry;
   KeReleaseSpinLock(&PageListLock, oldIrql);
}

SWAPENTRY
MmGetSavedSwapEntryPage(PVOID PhysicalAddress)
{
   ULONG Start = (ULONG)PhysicalAddress / PAGESIZE;
   SWAPENTRY SavedSwapEntry;
   KIRQL oldIrql;

   KeAcquireSpinLock(&PageListLock, &oldIrql);
   SavedSwapEntry = MmPageArray[Start].SavedSwapEntry;
   KeReleaseSpinLock(&PageListLock, oldIrql);

   return(SavedSwapEntry);
}

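/*
 * FUNCTION: Increments the reference count of a used physical page;
 * bugchecks if the page is not currently in use
 */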
VOID MmReferencePage(PVOID PhysicalAddress)
{
   ULONG Start = (ULONG)PhysicalAddress / PAGESIZE;
   KIRQL oldIrql;

   DPRINT("MmReferencePage(PhysicalAddress %x)\n", PhysicalAddress);

   if (((ULONG)PhysicalAddress) == 0)
   {
      KeBugCheck(0);
   }

   KeAcquireSpinLock(&PageListLock, &oldIrql);

   if (MM_PTYPE(MmPageArray[Start].Flags) != MM_PHYSICAL_PAGE_USED)
   {
      DbgPrint("Referencing non-used page\n");
      KeBugCheck(0);
   }

   MmPageArray[Start].ReferenceCount++;
   KeReleaseSpinLock(&PageListLock, oldIrql);
}

ULONG
MmGetReferenceCountPage(PVOID PhysicalAddress)
{
   ULONG Start = (ULONG)PhysicalAddress / PAGESIZE;
   KIRQL oldIrql;
   ULONG RCount;

   DPRINT("MmGetReferenceCountPage(PhysicalAddress %x)\n", PhysicalAddress);

   if (((ULONG)PhysicalAddress) == 0)
   {
      KeBugCheck(0);
   }

   KeAcquireSpinLock(&PageListLock, &oldIrql);

   if (MM_PTYPE(MmPageArray[Start].Flags) != MM_PHYSICAL_PAGE_USED)
   {
      DbgPrint("Getting reference count for free page\n");
      KeBugCheck(0);
   }

   RCount = MmPageArray[Start].ReferenceCount;

   KeReleaseSpinLock(&PageListLock, oldIrql);
   return(RCount);
}

BOOLEAN
MmIsUsablePage(PVOID PhysicalAddress)
{
   ULONG Start = (ULONG)PhysicalAddress / PAGESIZE;

   DPRINT("MmIsUsablePage(PhysicalAddress %x)\n", PhysicalAddress);

   if (((ULONG)PhysicalAddress) == 0)
   {
      KeBugCheck(0);
   }

   if (MM_PTYPE(MmPageArray[Start].Flags) != MM_PHYSICAL_PAGE_USED &&
       MM_PTYPE(MmPageArray[Start].Flags) != MM_PHYSICAL_PAGE_BIOS)
   {
      return(FALSE);
   }

   return(TRUE);
}

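/*
 * FUNCTION: Decrements the reference count of a used physical page and,
 * when it drops to zero, performs sanity checks (no rmap entries, not
 * mapped, not locked, no swap entry) and returns the page to the
 * unzeroed free list
 */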
VOID MmDereferencePage(PVOID PhysicalAddress)
{
   ULONG Start = (ULONG)PhysicalAddress / PAGESIZE;
   KIRQL oldIrql;

   DPRINT("MmDereferencePage(PhysicalAddress %x)\n", PhysicalAddress);

   if (((ULONG)PhysicalAddress) == 0)
   {
      KeBugCheck(0);
   }

   KeAcquireSpinLock(&PageListLock, &oldIrql);

   if (MM_PTYPE(MmPageArray[Start].Flags) != MM_PHYSICAL_PAGE_USED)
   {
      DbgPrint("Dereferencing free page\n");
      KeBugCheck(0);
   }

   MmPageArray[Start].ReferenceCount--;
   if (MmPageArray[Start].ReferenceCount == 0)
   {
      MmStats.NrFreePages++;
      MmStats.NrSystemPages--;
      RemoveEntryList(&MmPageArray[Start].ListEntry);
      if (MmPageArray[Start].RmapListHead != NULL)
      {
         DbgPrint("Freeing page with rmap entries.\n");
         KeBugCheck(0);
      }
      if (MmPageArray[Start].MapCount != 0)
      {
         DbgPrint("Freeing mapped page (0x%x count %d)\n",
                  PhysicalAddress, MmPageArray[Start].MapCount);
         KeBugCheck(0);
      }
      if (MmPageArray[Start].LockCount > 0)
      {
         DbgPrint("Freeing locked page\n");
         KeBugCheck(0);
      }
      if (MmPageArray[Start].SavedSwapEntry != 0)
      {
         DbgPrint("Freeing page with swap entry.\n");
         KeBugCheck(0);
      }
      if (MmPageArray[Start].Flags != MM_PHYSICAL_PAGE_USED)
      {
         DbgPrint("Freeing page with flags %x\n",
                  MmPageArray[Start].Flags);
         KeBugCheck(0);
      }
      MmPageArray[Start].Flags = MM_PHYSICAL_PAGE_FREE;
      InsertTailList(&FreeUnzeroedPageListHead,
                     &MmPageArray[Start].ListEntry);
   }
   KeReleaseSpinLock(&PageListLock, oldIrql);
}

ULONG MmGetLockCountPage(PVOID PhysicalAddress)
{
   ULONG Start = (ULONG)PhysicalAddress / PAGESIZE;
   KIRQL oldIrql;
   ULONG LockCount;

   DPRINT("MmGetLockCountPage(PhysicalAddress %x)\n", PhysicalAddress);

   if (((ULONG)PhysicalAddress) == 0)
   {
      KeBugCheck(0);
   }

   KeAcquireSpinLock(&PageListLock, &oldIrql);

   if (MM_PTYPE(MmPageArray[Start].Flags) != MM_PHYSICAL_PAGE_USED)
   {
      DbgPrint("Getting lock count for free page\n");
      KeBugCheck(0);
   }

   LockCount = MmPageArray[Start].LockCount;
   KeReleaseSpinLock(&PageListLock, oldIrql);

   return(LockCount);
}

VOID MmLockPage(PVOID PhysicalAddress)
{
   ULONG Start = (ULONG)PhysicalAddress / PAGESIZE;
   KIRQL oldIrql;

   DPRINT("MmLockPage(PhysicalAddress %x)\n", PhysicalAddress);

   if (((ULONG)PhysicalAddress) == 0)
   {
      KeBugCheck(0);
   }

   KeAcquireSpinLock(&PageListLock, &oldIrql);

   if (MM_PTYPE(MmPageArray[Start].Flags) != MM_PHYSICAL_PAGE_USED)
   {
      DbgPrint("Locking free page\n");
      KeBugCheck(0);
   }

   MmPageArray[Start].LockCount++;
   KeReleaseSpinLock(&PageListLock, oldIrql);
}

VOID MmUnlockPage(PVOID PhysicalAddress)
{
   ULONG Start = (ULONG)PhysicalAddress / PAGESIZE;
   KIRQL oldIrql;

   DPRINT("MmUnlockPage(PhysicalAddress %x)\n", PhysicalAddress);

   if (((ULONG)PhysicalAddress) == 0)
   {
      KeBugCheck(0);
   }

   KeAcquireSpinLock(&PageListLock, &oldIrql);

   if (MM_PTYPE(MmPageArray[Start].Flags) != MM_PHYSICAL_PAGE_USED)
   {
      DbgPrint("Unlocking free page\n");
      KeBugCheck(0);
   }

   MmPageArray[Start].LockCount--;
   KeReleaseSpinLock(&PageListLock, oldIrql);
}

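/*
 * FUNCTION: Allocates a single physical page for the given consumer,
 * preferring the zeroed free list and falling back to the unzeroed one
 * (in which case the page is zeroed here); the new page starts with a
 * reference count of one
 * ARGUMENTS:
 *        Consumer = Index (MC_*) of the in-use list to place the page on
 *        SavedSwapEntry = Swap entry to associate with the page, or 0
 * RETURNS: The physical address of the allocated page, or NULL if no
 *          free pages are left
 *
 * A typical calling pattern (illustrative sketch only, not taken from an
 * actual caller):
 *
 *    PVOID Page = MmAllocPage(MC_NPPOOL, 0);
 *    if (Page == NULL)
 *    {
 *       // handle out-of-memory
 *    }
 *    ...
 *    MmDereferencePage(Page);
 */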
PVOID
MmAllocPage(ULONG Consumer, SWAPENTRY SavedSwapEntry)
{
   ULONG offset;
   PLIST_ENTRY ListEntry;
   PPHYSICAL_PAGE PageDescriptor;
   KIRQL oldIrql;
   BOOLEAN NeedClear = FALSE;

   if (SavedSwapEntry == 0x17)
   {
      KeBugCheck(0);
   }

   DPRINT("MmAllocPage()\n");

   KeAcquireSpinLock(&PageListLock, &oldIrql);
   if (IsListEmpty(&FreeZeroedPageListHead))
   {
      if (IsListEmpty(&FreeUnzeroedPageListHead))
      {
         DPRINT1("MmAllocPage(): Out of memory\n");
         KeReleaseSpinLock(&PageListLock, oldIrql);
         return(NULL);
      }
      ListEntry = RemoveTailList(&FreeUnzeroedPageListHead);

      PageDescriptor = CONTAINING_RECORD(ListEntry, PHYSICAL_PAGE, ListEntry);
      KeReleaseSpinLock(&PageListLock, oldIrql);

      NeedClear = TRUE;
   }
   else
   {
      ListEntry = RemoveTailList(&FreeZeroedPageListHead);
      KeReleaseSpinLock(&PageListLock, oldIrql);

      PageDescriptor = CONTAINING_RECORD(ListEntry, PHYSICAL_PAGE, ListEntry);
   }

   if (PageDescriptor->Flags != MM_PHYSICAL_PAGE_FREE)
   {
      DbgPrint("Got non-free page from freelist\n");
      KeBugCheck(0);
   }
   PageDescriptor->Flags = MM_PHYSICAL_PAGE_USED;
   PageDescriptor->ReferenceCount = 1;
   PageDescriptor->LockCount = 0;
   PageDescriptor->MapCount = 0;
   PageDescriptor->SavedSwapEntry = SavedSwapEntry;
   ExInterlockedInsertTailList(&UsedPageListHeads[Consumer], ListEntry,
                               &PageListLock);

   MmStats.NrSystemPages++;
   MmStats.NrFreePages--;

   offset = (ULONG)((ULONG)PageDescriptor - (ULONG)MmPageArray);
   offset = (offset / sizeof(PHYSICAL_PAGE)) * PAGESIZE;
   if (NeedClear)
   {
      MiZeroPage(offset);
   }
   DPRINT("MmAllocPage() = %x\n", offset);
   return((PVOID)offset);
}