Sync to trunk head (r40091)
[reactos.git] / reactos / ntoskrnl / mm / freelist.c
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/mm/freelist.c
5 * PURPOSE: Handle the list of free physical pages
6 *
7 * PROGRAMMERS: David Welch (welch@cwcom.net)
8 * Robert Bergkvist
9 */
10
11 /* INCLUDES ****************************************************************/
12
13 #include <ntoskrnl.h>
14 #define NDEBUG
15 #include <debug.h>
16
17 #if defined (ALLOC_PRAGMA)
18 #pragma alloc_text(INIT, MmInitializePageList)
19 #endif
20
21
22 /* TYPES *******************************************************************/
23
24 #define MM_PHYSICAL_PAGE_FREE (0x1)
25 #define MM_PHYSICAL_PAGE_USED (0x2)
26 #define MM_PHYSICAL_PAGE_BIOS (0x3)
27
28 /* GLOBALS ****************************************************************/
29
30 PPHYSICAL_PAGE MmPageArray;
31 ULONG MmPageArraySize;
32
33 /* List of pages allocated to the MC_USER Consumer */
34 static LIST_ENTRY UserPageListHead;
35 /* List of pages zeroed by the ZPW (MmZeroPageThreadMain) */
36 static LIST_ENTRY FreeZeroedPageListHead;
/* List of free pages that have not been zeroed yet; filled by
 * MmDereferencePage and MmInitializePageList, drained by the ZPW */
static LIST_ENTRY FreeUnzeroedPageListHead;
40
41 static KEVENT ZeroPageThreadEvent;
42 static BOOLEAN ZeroPageThreadShouldTerminate = FALSE;
43
44 static ULONG UnzeroedPageCount = 0;
45
46 /* FUNCTIONS *************************************************************/
47
48 PFN_TYPE
49 NTAPI
50 MmGetLRUFirstUserPage(VOID)
51 {
52 PLIST_ENTRY NextListEntry;
53 PHYSICAL_PAGE* PageDescriptor;
54 KIRQL oldIrql;
55
56 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
57 NextListEntry = UserPageListHead.Flink;
58 if (NextListEntry == &UserPageListHead)
59 {
60 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
61 return 0;
62 }
63 PageDescriptor = CONTAINING_RECORD(NextListEntry, PHYSICAL_PAGE, ListEntry);
64 ASSERT_PFN(PageDescriptor);
65 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
66 return PageDescriptor - MmPageArray;
67 }
68
69 VOID
70 NTAPI
71 MmInsertLRULastUserPage(PFN_TYPE Pfn)
72 {
73 KIRQL oldIrql;
74 PPHYSICAL_PAGE Page;
75
76 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
77 Page = MiGetPfnEntry(Pfn);
78 ASSERT(Page->Flags.Type == MM_PHYSICAL_PAGE_USED);
79 ASSERT(Page->Flags.Consumer == MC_USER);
80 InsertTailList(&UserPageListHead, &Page->ListEntry);
81 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
82 }
83
84 PFN_TYPE
85 NTAPI
86 MmGetLRUNextUserPage(PFN_TYPE PreviousPfn)
87 {
88 PLIST_ENTRY NextListEntry;
89 PHYSICAL_PAGE* PageDescriptor;
90 KIRQL oldIrql;
91 PPHYSICAL_PAGE Page;
92
93 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
94 Page = MiGetPfnEntry(PreviousPfn);
95 ASSERT(Page->Flags.Type == MM_PHYSICAL_PAGE_USED);
96 ASSERT(Page->Flags.Consumer == MC_USER);
97 NextListEntry = Page->ListEntry.Flink;
98 if (NextListEntry == &UserPageListHead)
99 {
100 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
101 return 0;
102 }
103 PageDescriptor = CONTAINING_RECORD(NextListEntry, PHYSICAL_PAGE, ListEntry);
104 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
105 return PageDescriptor - MmPageArray;
106 }
107
/* Unlink a user page from the MC_USER LRU list.
 * NOTE(review): unlike the other LRU helpers in this file, this routine
 * does NOT acquire the PFN lock. Presumably callers already hold it (or
 * rely on external serialization) — TODO confirm against call sites. */
VOID
NTAPI
MmRemoveLRUUserPage(PFN_TYPE Page)
{
    RemoveEntryList(&MiGetPfnEntry(Page)->ListEntry);
}
114
/*
 * Find and allocate a physically contiguous run of pages for MC_NPPOOL.
 *
 * NumberOfBytes            - size of the run (rounded up to whole pages)
 * LowestAcceptableAddress  - lower physical bound for the second pass
 * HighestAcceptableAddress - upper physical bound for the run
 * BoundaryAddressMultiple  - if non-zero, the run must not cross a
 *                            multiple of this physical address
 *
 * Returns the first PFN of the run, or 0 on failure.
 * The whole scan-and-commit happens under the PFN lock; the pages are
 * zeroed after the lock is dropped.
 */
PFN_TYPE
NTAPI
MmGetContinuousPages(ULONG NumberOfBytes,
                     PHYSICAL_ADDRESS LowestAcceptableAddress,
                     PHYSICAL_ADDRESS HighestAcceptableAddress,
                     PHYSICAL_ADDRESS BoundaryAddressMultiple)
{
    ULONG NrPages;
    ULONG i, j;
    ULONG start;     /* first PFN of the current candidate run, or -1 */
    ULONG last;      /* highest PFN we may consider */
    ULONG length;    /* length of the current candidate run, in pages */
    ULONG boundary;  /* boundary multiple expressed in pages (0 = none) */
    KIRQL oldIrql;

    NrPages = PAGE_ROUND_UP(NumberOfBytes) / PAGE_SIZE;

    oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    last = min(HighestAcceptableAddress.LowPart / PAGE_SIZE, MmPageArraySize - 1);
    boundary = BoundaryAddressMultiple.LowPart / PAGE_SIZE;

    for (j = 0; j < 2; j++)
    {
        start = -1;
        length = 0;
        /* First try to allocate the pages above the 1MB mark (0x100000).
         * This may fail because there are not enough continuous pages or
         * we cannot allocate pages above 1MB because the caller has
         * specified an upper limit. The second try uses the caller's
         * lower limit instead.
         */
        for (i = j == 0 ? 0x100000 / PAGE_SIZE : LowestAcceptableAddress.LowPart / PAGE_SIZE; i <= last; )
        {
            if (MiGetPfnEntry(i)->Flags.Type == MM_PHYSICAL_PAGE_FREE)
            {
                if (start == (ULONG)-1)
                {
                    /* Begin a new candidate run */
                    start = i;
                    length = 1;
                }
                else
                {
                    length++;
                    if (boundary)
                    {
                        /* The run would cross a boundary multiple:
                         * restart it at the current page */
                        if (start / boundary != i / boundary)
                        {
                            start = i;
                            length = 1;
                        }
                    }
                }
                if (length == NrPages)
                {
                    break;
                }
            }
            else
            {
                /* A non-free page breaks the run */
                start = (ULONG)-1;
            }
            i++;
        }

        if (start != (ULONG)-1 && length == NrPages)
        {
            /* Commit the run: unlink each page from its free list and
             * mark it used by the non-paged pool, still under the lock */
            for (i = start; i < (start + length); i++)
            {
                PPHYSICAL_PAGE Page;
                Page = MiGetPfnEntry(i);
                RemoveEntryList(&Page->ListEntry);
                if (MmPageArray[i].Flags.Zero == 0)
                {
                    UnzeroedPageCount--;
                }
                MmStats.NrFreePages--;
                MmStats.NrSystemPages++;
                Page->Flags.Type = MM_PHYSICAL_PAGE_USED;
                Page->Flags.Consumer = MC_NPPOOL;
                Page->ReferenceCount = 1;
                Page->LockCount = 0;
                Page->MapCount = 0;
                Page->SavedSwapEntry = 0;
            }
            KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
            /* Zero any page that was not pre-zeroed; clear the Zero flag
             * on the ones that were (the page is about to be dirtied) */
            for (i = start; i < (start + length); i++)
            {
                if (MiGetPfnEntry(i)->Flags.Zero == 0)
                {
                    MiZeroPage(i);
                }
                else
                {
                    MiGetPfnEntry(i)->Flags.Zero = 0;
                }
            }
            return start;
        }
    }
    KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
    return 0;
}
217
218 PFN_TYPE
219 NTAPI
220 MmAllocEarlyPage(VOID)
221 {
222 PFN_TYPE Pfn;
223
224 /* Use one of our highest usable pages */
225 Pfn = MiFreeDescriptor->BasePage + MiFreeDescriptor->PageCount - 1;
226 MiFreeDescriptor->PageCount--;
227
228 /* Return it */
229 return Pfn;
230 }
231
/*
 * Build the PFN database (MmPageArray) and the initial free-page lists
 * from the loader's memory descriptor list. Runs once at boot: maps the
 * database pages, classifies every physical page (BIOS / free / used),
 * accounts for the database's own backing pages, and finally primes the
 * zero-page-thread event and the balancer.
 */
VOID
NTAPI
MmInitializePageList(VOID)
{
    ULONG i;
    ULONG Reserved;
    NTSTATUS Status;
    PFN_TYPE Pfn = 0;
    PHYSICAL_PAGE UsedPage;
    PLIST_ENTRY NextEntry;
    PMEMORY_ALLOCATION_DESCRIPTOR Md;

    /* Initialize the page lists */
    InitializeListHead(&UserPageListHead);
    InitializeListHead(&FreeUnzeroedPageListHead);
    InitializeListHead(&FreeZeroedPageListHead);

    /* Set the size and start of the PFN Database */
    MmPageArray = (PHYSICAL_PAGE *)MmPfnDatabase;
    MmPageArraySize = MmHighestPhysicalPage;
    /* Number of virtual pages needed to hold one PHYSICAL_PAGE per PFN */
    Reserved = PAGE_ROUND_UP((MmPageArraySize * sizeof(PHYSICAL_PAGE))) / PAGE_SIZE;

    /* Loop every page required to hold the PFN database */
    for (i = 0; i < Reserved; i++)
    {
        PVOID Address = (char*)MmPageArray + (i * PAGE_SIZE);

        /* Check if FreeLDR has already allocated it for us */
        if (!MmIsPagePresent(NULL, Address))
        {
            /* Use one of our highest usable pages */
            Pfn = MmAllocEarlyPage();

            /* Set the PFN */
            Status = MmCreateVirtualMappingForKernel(Address,
                                                     PAGE_READWRITE,
                                                     &Pfn,
                                                     1);
            if (!NT_SUCCESS(Status))
            {
                DPRINT1("Unable to create virtual mapping\n");
                KeBugCheck(MEMORY_MANAGEMENT);
            }
        }
        else
        {
            /* Setting the page protection is necessary to set the global bit */
            MmSetPageProtect(NULL, Address, PAGE_READWRITE);
        }
    }

    /* Clear the PFN database (+1 because PFNs run 0..MmPageArraySize inclusive) */
    RtlZeroMemory(MmPageArray, (MmPageArraySize + 1) * sizeof(PHYSICAL_PAGE));

    /* This is what a used page looks like */
    RtlZeroMemory(&UsedPage, sizeof(UsedPage));
    UsedPage.Flags.Type = MM_PHYSICAL_PAGE_USED;
    UsedPage.Flags.Consumer = MC_NPPOOL;
    UsedPage.ReferenceCount = 2;
    UsedPage.MapCount = 1;

    /* Loop the memory descriptors */
    for (NextEntry = KeLoaderBlock->MemoryDescriptorListHead.Flink;
         NextEntry != &KeLoaderBlock->MemoryDescriptorListHead;
         NextEntry = NextEntry->Flink)
    {
        /* Get the descriptor */
        Md = CONTAINING_RECORD(NextEntry,
                               MEMORY_ALLOCATION_DESCRIPTOR,
                               ListEntry);

        /* Skip bad memory */
        if ((Md->MemoryType == LoaderFirmwarePermanent) ||
            (Md->MemoryType == LoaderBBTMemory) ||
            (Md->MemoryType == LoaderSpecialMemory) ||
            (Md->MemoryType == LoaderBad))
        {
            /* Loop every page part of the block but valid in the database */
            for (i = 0; i < Md->PageCount; i++)
            {
                /* Skip memory we ignore completely */
                if ((Md->BasePage + i) > MmPageArraySize) break;

                /* These are pages reserved by the BIOS/ROMs */
                MmPageArray[Md->BasePage + i].Flags.Type = MM_PHYSICAL_PAGE_BIOS;
                MmPageArray[Md->BasePage + i].Flags.Consumer = MC_NPPOOL;
                MmStats.NrSystemPages++;
            }
        }
        else if ((Md->MemoryType == LoaderFree) ||
                 (Md->MemoryType == LoaderLoadedProgram) ||
                 (Md->MemoryType == LoaderFirmwareTemporary) ||
                 (Md->MemoryType == LoaderOsloaderStack))
        {
            /* Loop every page part of the block */
            for (i = 0; i < Md->PageCount; i++)
            {
                /* Mark it as a free (not yet zeroed) page */
                MmPageArray[Md->BasePage + i].Flags.Type = MM_PHYSICAL_PAGE_FREE;
                InsertTailList(&FreeUnzeroedPageListHead,
                               &MmPageArray[Md->BasePage + i].ListEntry);
                UnzeroedPageCount++;
                MmStats.NrFreePages++;
            }
        }
        else
        {
            /* Loop every page part of the block */
            for (i = 0; i < Md->PageCount; i++)
            {
                /* Everything else is used memory */
                MmPageArray[Md->BasePage + i] = UsedPage;
                MmStats.NrSystemPages++;
            }
        }
    }

    /* Finally handle the pages describing the PFN database themselves:
     * everything MmAllocEarlyPage carved off the original free descriptor */
    for (i = (MiFreeDescriptor->BasePage + MiFreeDescriptor->PageCount);
         i < (MiFreeDescriptorOrg.BasePage + MiFreeDescriptorOrg.PageCount);
         i++)
    {
        /* Ensure this page was not added previously */
        ASSERT(MmPageArray[i].Flags.Type == 0);

        /* Mark it as used kernel memory */
        MmPageArray[i] = UsedPage;
        MmStats.NrSystemPages++;
    }

    /* Start signaled so the ZPW runs a first pass over the unzeroed list */
    KeInitializeEvent(&ZeroPageThreadEvent, NotificationEvent, TRUE);

    DPRINT("Pages: %x %x\n", MmStats.NrFreePages, MmStats.NrSystemPages);
    MmStats.NrTotalPages = MmStats.NrFreePages + MmStats.NrSystemPages + MmStats.NrUserPages;
    MmInitializeBalancer(MmStats.NrFreePages, MmStats.NrSystemPages);
}
368
369 VOID
370 NTAPI
371 MmSetFlagsPage(PFN_TYPE Pfn, ULONG Flags)
372 {
373 KIRQL oldIrql;
374
375 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
376 MiGetPfnEntry(Pfn)->AllFlags = Flags;
377 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
378 }
379
380 VOID
381 NTAPI
382 MmSetRmapListHeadPage(PFN_TYPE Pfn, struct _MM_RMAP_ENTRY* ListHead)
383 {
384 KIRQL oldIrql;
385
386 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
387 MiGetPfnEntry(Pfn)->RmapListHead = ListHead;
388 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
389 }
390
391 struct _MM_RMAP_ENTRY*
392 NTAPI
393 MmGetRmapListHeadPage(PFN_TYPE Pfn)
394 {
395 KIRQL oldIrql;
396 struct _MM_RMAP_ENTRY* ListHead;
397
398 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
399 ListHead = MiGetPfnEntry(Pfn)->RmapListHead;
400 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
401
402 return(ListHead);
403 }
404
405 VOID
406 NTAPI
407 MmMarkPageMapped(PFN_TYPE Pfn)
408 {
409 KIRQL oldIrql;
410 PPHYSICAL_PAGE Page;
411
412 if (Pfn <= MmPageArraySize)
413 {
414 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
415 Page = MiGetPfnEntry(Pfn);
416 if (Page->Flags.Type == MM_PHYSICAL_PAGE_FREE)
417 {
418 DPRINT1("Mapping non-used page\n");
419 KeBugCheck(MEMORY_MANAGEMENT);
420 }
421 Page->MapCount++;
422 Page->ReferenceCount++;
423 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
424 }
425 }
426
427 VOID
428 NTAPI
429 MmMarkPageUnmapped(PFN_TYPE Pfn)
430 {
431 KIRQL oldIrql;
432 PPHYSICAL_PAGE Page;
433
434 if (Pfn <= MmPageArraySize)
435 {
436 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
437 Page = MiGetPfnEntry(Pfn);
438 if (Page->Flags.Type == MM_PHYSICAL_PAGE_FREE)
439 {
440 DPRINT1("Unmapping non-used page\n");
441 KeBugCheck(MEMORY_MANAGEMENT);
442 }
443 if (Page->MapCount == 0)
444 {
445 DPRINT1("Unmapping not mapped page\n");
446 KeBugCheck(MEMORY_MANAGEMENT);
447 }
448 Page->MapCount--;
449 Page->ReferenceCount--;
450 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
451 }
452 }
453
454 ULONG
455 NTAPI
456 MmGetFlagsPage(PFN_TYPE Pfn)
457 {
458 KIRQL oldIrql;
459 ULONG Flags;
460
461 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
462 Flags = MiGetPfnEntry(Pfn)->AllFlags;
463 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
464
465 return(Flags);
466 }
467
468
469 VOID
470 NTAPI
471 MmSetSavedSwapEntryPage(PFN_TYPE Pfn, SWAPENTRY SavedSwapEntry)
472 {
473 KIRQL oldIrql;
474
475 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
476 MiGetPfnEntry(Pfn)->SavedSwapEntry = SavedSwapEntry;
477 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
478 }
479
480 SWAPENTRY
481 NTAPI
482 MmGetSavedSwapEntryPage(PFN_TYPE Pfn)
483 {
484 SWAPENTRY SavedSwapEntry;
485 KIRQL oldIrql;
486
487 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
488 SavedSwapEntry = MiGetPfnEntry(Pfn)->SavedSwapEntry;
489 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
490
491 return(SavedSwapEntry);
492 }
493
494 VOID
495 NTAPI
496 MmReferencePageUnsafe(PFN_TYPE Pfn)
497 {
498 KIRQL oldIrql;
499 PPHYSICAL_PAGE Page;
500
501 DPRINT("MmReferencePageUnsafe(PysicalAddress %x)\n", Pfn << PAGE_SHIFT);
502
503 if (Pfn == 0 || Pfn > MmPageArraySize)
504 {
505 return;
506 }
507
508 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
509
510 Page = MiGetPfnEntry(Pfn);
511 if (Page->Flags.Type != MM_PHYSICAL_PAGE_USED)
512 {
513 DPRINT1("Referencing non-used page\n");
514 KeBugCheck(MEMORY_MANAGEMENT);
515 }
516
517 Page->ReferenceCount++;
518 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
519 }
520
521 VOID
522 NTAPI
523 MmReferencePage(PFN_TYPE Pfn)
524 {
525 DPRINT("MmReferencePage(PysicalAddress %x)\n", Pfn << PAGE_SHIFT);
526
527 MmReferencePageUnsafe(Pfn);
528 }
529
530 ULONG
531 NTAPI
532 MmGetReferenceCountPage(PFN_TYPE Pfn)
533 {
534 KIRQL oldIrql;
535 ULONG RCount;
536 PPHYSICAL_PAGE Page;
537
538 DPRINT("MmGetReferenceCountPage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
539
540 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
541 Page = MiGetPfnEntry(Pfn);
542 if (Page->Flags.Type != MM_PHYSICAL_PAGE_USED)
543 {
544 DPRINT1("Getting reference count for free page\n");
545 KeBugCheck(MEMORY_MANAGEMENT);
546 }
547
548 RCount = Page->ReferenceCount;
549
550 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
551 return(RCount);
552 }
553
554 BOOLEAN
555 NTAPI
556 MmIsPageInUse(PFN_TYPE Pfn)
557 {
558
559 DPRINT("MmIsPageInUse(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
560
561 return (MiGetPfnEntry(Pfn)->Flags.Type == MM_PHYSICAL_PAGE_USED);
562 }
563
564 VOID
565 NTAPI
566 MmDereferencePage(PFN_TYPE Pfn)
567 {
568 KIRQL oldIrql;
569 PPHYSICAL_PAGE Page;
570
571 DPRINT("MmDereferencePage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
572
573 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
574
575 Page = MiGetPfnEntry(Pfn);
576
577 if (Page->Flags.Type != MM_PHYSICAL_PAGE_USED)
578 {
579 DPRINT1("Dereferencing free page\n");
580 KeBugCheck(MEMORY_MANAGEMENT);
581 }
582 if (Page->ReferenceCount == 0)
583 {
584 DPRINT1("Derefrencing page with reference count 0\n");
585 KeBugCheck(MEMORY_MANAGEMENT);
586 }
587
588 Page->ReferenceCount--;
589 if (Page->ReferenceCount == 0)
590 {
591 MmStats.NrFreePages++;
592 MmStats.NrSystemPages--;
593 if (Page->Flags.Consumer == MC_USER) RemoveEntryList(&Page->ListEntry);
594 if (Page->RmapListHead != NULL)
595 {
596 DPRINT1("Freeing page with rmap entries.\n");
597 KeBugCheck(MEMORY_MANAGEMENT);
598 }
599 if (Page->MapCount != 0)
600 {
601 DPRINT1("Freeing mapped page (0x%x count %d)\n",
602 Pfn << PAGE_SHIFT, Page->MapCount);
603 KeBugCheck(MEMORY_MANAGEMENT);
604 }
605 if (Page->LockCount > 0)
606 {
607 DPRINT1("Freeing locked page\n");
608 KeBugCheck(MEMORY_MANAGEMENT);
609 }
610 if (Page->SavedSwapEntry != 0)
611 {
612 DPRINT1("Freeing page with swap entry.\n");
613 KeBugCheck(MEMORY_MANAGEMENT);
614 }
615 if (Page->Flags.Type != MM_PHYSICAL_PAGE_USED)
616 {
617 DPRINT1("Freeing page with flags %x\n",
618 Page->Flags.Type);
619 KeBugCheck(MEMORY_MANAGEMENT);
620 }
621 Page->Flags.Type = MM_PHYSICAL_PAGE_FREE;
622 Page->Flags.Consumer = MC_MAXIMUM;
623 InsertTailList(&FreeUnzeroedPageListHead,
624 &Page->ListEntry);
625 UnzeroedPageCount++;
626 if (UnzeroedPageCount > 8 && 0 == KeReadStateEvent(&ZeroPageThreadEvent))
627 {
628 KeSetEvent(&ZeroPageThreadEvent, IO_NO_INCREMENT, FALSE);
629 }
630 }
631 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
632 }
633
634 ULONG
635 NTAPI
636 MmGetLockCountPage(PFN_TYPE Pfn)
637 {
638 KIRQL oldIrql;
639 ULONG LockCount;
640 PPHYSICAL_PAGE Page;
641
642 DPRINT("MmGetLockCountPage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
643
644 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
645
646 Page = MiGetPfnEntry(Pfn);
647 if (Page->Flags.Type != MM_PHYSICAL_PAGE_USED)
648 {
649 DPRINT1("Getting lock count for free page\n");
650 KeBugCheck(MEMORY_MANAGEMENT);
651 }
652
653 LockCount = Page->LockCount;
654 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
655
656 return(LockCount);
657 }
658
659 VOID
660 NTAPI
661 MmLockPageUnsafe(PFN_TYPE Pfn)
662 {
663 KIRQL oldIrql;
664 PPHYSICAL_PAGE Page;
665
666 DPRINT("MmLockPageUnsafe(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
667
668 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
669
670 Page = MiGetPfnEntry(Pfn);
671 if (Page->Flags.Type != MM_PHYSICAL_PAGE_USED)
672 {
673 DPRINT1("Locking free page\n");
674 KeBugCheck(MEMORY_MANAGEMENT);
675 }
676
677 Page->LockCount++;
678 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
679 }
680
681 VOID
682 NTAPI
683 MmLockPage(PFN_TYPE Pfn)
684 {
685 DPRINT("MmLockPage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
686
687 MmLockPageUnsafe(Pfn);
688 }
689
690 VOID
691 NTAPI
692 MmUnlockPage(PFN_TYPE Pfn)
693 {
694 KIRQL oldIrql;
695 PPHYSICAL_PAGE Page;
696
697 DPRINT("MmUnlockPage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
698
699 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
700
701 Page = MiGetPfnEntry(Pfn);
702 if (Page->Flags.Type != MM_PHYSICAL_PAGE_USED)
703 {
704 DPRINT1("Unlocking free page\n");
705 KeBugCheck(MEMORY_MANAGEMENT);
706 }
707
708 Page->LockCount--;
709 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
710 }
711
/*
 * Allocate one physical page for the given consumer. Prefers the zeroed
 * free list; falls back to the unzeroed list (zeroing the page after the
 * lock is dropped). Before the PFN database exists (NrTotalPages == 0)
 * it hands out an early boot page instead. Returns the PFN, or 0 when
 * out of memory.
 */
PFN_TYPE
NTAPI
MmAllocPage(ULONG Consumer, SWAPENTRY SavedSwapEntry)
{
    PFN_TYPE PfnOffset;
    PLIST_ENTRY ListEntry;
    PPHYSICAL_PAGE PageDescriptor;
    KIRQL oldIrql;
    BOOLEAN NeedClear = FALSE;

    DPRINT("MmAllocPage()\n");

    oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    if (IsListEmpty(&FreeZeroedPageListHead))
    {
        if (IsListEmpty(&FreeUnzeroedPageListHead))
        {
            /* Check if this allocation is for the PFN DB itself */
            if (MmStats.NrTotalPages == 0)
            {
                /* Allocate an early page -- we'll account for it later */
                KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
                PfnOffset = MmAllocEarlyPage();
                MiZeroPage(PfnOffset);
                return PfnOffset;
            }

            DPRINT1("MmAllocPage(): Out of memory\n");
            KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
            return 0;
        }
        /* Take an unzeroed page; it must be cleared before returning */
        ListEntry = RemoveTailList(&FreeUnzeroedPageListHead);
        UnzeroedPageCount--;

        PageDescriptor = CONTAINING_RECORD(ListEntry, PHYSICAL_PAGE, ListEntry);

        NeedClear = TRUE;
    }
    else
    {
        /* A pre-zeroed page is ready to use as-is */
        ListEntry = RemoveTailList(&FreeZeroedPageListHead);

        PageDescriptor = CONTAINING_RECORD(ListEntry, PHYSICAL_PAGE, ListEntry);
    }

    /* Sanity-check the descriptor we pulled off the free list */
    if (PageDescriptor->Flags.Type != MM_PHYSICAL_PAGE_FREE)
    {
        DPRINT1("Got non-free page from freelist\n");
        KeBugCheck(MEMORY_MANAGEMENT);
    }
    if (PageDescriptor->MapCount != 0)
    {
        DPRINT1("Got mapped page from freelist\n");
        KeBugCheck(MEMORY_MANAGEMENT);
    }
    if (PageDescriptor->ReferenceCount != 0)
    {
        DPRINT1("%d\n", PageDescriptor->ReferenceCount);
        KeBugCheck(MEMORY_MANAGEMENT);
    }
    PageDescriptor->Flags.Type = MM_PHYSICAL_PAGE_USED;
    PageDescriptor->Flags.Consumer = Consumer;
    PageDescriptor->ReferenceCount = 1;
    PageDescriptor->LockCount = 0;
    PageDescriptor->MapCount = 0;
    PageDescriptor->SavedSwapEntry = SavedSwapEntry;

    MmStats.NrSystemPages++;
    MmStats.NrFreePages--;

    KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);

    PfnOffset = PageDescriptor - MmPageArray;
    if (NeedClear)
    {
        MiZeroPage(PfnOffset);
    }
    /* NOTE(review): this MapCount re-check happens after the PFN lock was
     * released, so it reads the descriptor unsynchronized — presumably a
     * belt-and-braces debug check; confirm it is intentional. */
    if (PageDescriptor->MapCount != 0)
    {
        DPRINT1("Returning mapped page.\n");
        KeBugCheck(MEMORY_MANAGEMENT);
    }
    return PfnOffset;
}
796
797 LONG
798 NTAPI
799 MmAllocPagesSpecifyRange(ULONG Consumer,
800 PHYSICAL_ADDRESS LowestAddress,
801 PHYSICAL_ADDRESS HighestAddress,
802 ULONG NumberOfPages,
803 PPFN_TYPE Pages)
804 {
805 PPHYSICAL_PAGE PageDescriptor;
806 KIRQL oldIrql;
807 PFN_TYPE LowestPage, HighestPage;
808 PFN_TYPE pfn;
809 ULONG NumberOfPagesFound = 0;
810 ULONG i;
811
812 DPRINT("MmAllocPagesSpecifyRange()\n"
813 " LowestAddress = 0x%08x%08x\n"
814 " HighestAddress = 0x%08x%08x\n"
815 " NumberOfPages = %d\n",
816 LowestAddress.u.HighPart, LowestAddress.u.LowPart,
817 HighestAddress.u.HighPart, HighestAddress.u.LowPart,
818 NumberOfPages);
819
820 if (NumberOfPages == 0)
821 return 0;
822
823 LowestPage = LowestAddress.LowPart / PAGE_SIZE;
824 HighestPage = HighestAddress.LowPart / PAGE_SIZE;
825 if ((HighestAddress.u.LowPart % PAGE_SIZE) != 0)
826 HighestPage++;
827
828 if (LowestPage >= MmPageArraySize)
829 {
830 DPRINT1("MmAllocPagesSpecifyRange(): Out of memory\n");
831 return -1;
832 }
833 if (HighestPage > MmPageArraySize)
834 HighestPage = MmPageArraySize;
835
836 oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
837 if (LowestPage == 0 && HighestPage == MmPageArraySize)
838 {
839 PLIST_ENTRY ListEntry;
840 while (NumberOfPagesFound < NumberOfPages)
841 {
842 if (!IsListEmpty(&FreeZeroedPageListHead))
843 {
844 ListEntry = RemoveTailList(&FreeZeroedPageListHead);
845 }
846 else if (!IsListEmpty(&FreeUnzeroedPageListHead))
847 {
848 ListEntry = RemoveTailList(&FreeUnzeroedPageListHead);
849 UnzeroedPageCount--;
850 }
851 else
852 {
853 if (NumberOfPagesFound == 0)
854 {
855 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
856 DPRINT1("MmAllocPagesSpecifyRange(): Out of memory\n");
857 return -1;
858 }
859 else
860 {
861 break;
862 }
863 }
864 PageDescriptor = CONTAINING_RECORD(ListEntry, PHYSICAL_PAGE, ListEntry);
865
866 ASSERT(PageDescriptor->Flags.Type == MM_PHYSICAL_PAGE_FREE);
867 ASSERT(PageDescriptor->MapCount == 0);
868 ASSERT(PageDescriptor->ReferenceCount == 0);
869
870 /* Allocate the page */
871 PageDescriptor->Flags.Type = MM_PHYSICAL_PAGE_USED;
872 PageDescriptor->Flags.Consumer = Consumer;
873 PageDescriptor->ReferenceCount = 1;
874 PageDescriptor->LockCount = 0;
875 PageDescriptor->MapCount = 0;
876 PageDescriptor->SavedSwapEntry = 0; /* FIXME: Do we need swap entries? */
877
878 MmStats.NrSystemPages++;
879 MmStats.NrFreePages--;
880
881 /* Remember the page */
882 pfn = PageDescriptor - MmPageArray;
883 Pages[NumberOfPagesFound++] = pfn;
884 if(Consumer == MC_USER) MmInsertLRULastUserPage(pfn);
885 }
886 }
887 else
888 {
889 INT LookForZeroedPages;
890 for (LookForZeroedPages = 1; LookForZeroedPages >= 0; LookForZeroedPages--)
891 {
892 for (pfn = LowestPage; pfn < HighestPage; pfn++)
893 {
894 PageDescriptor = MmPageArray + pfn;
895
896 if (PageDescriptor->Flags.Type != MM_PHYSICAL_PAGE_FREE)
897 continue;
898 if (PageDescriptor->Flags.Zero != LookForZeroedPages)
899 continue;
900
901 ASSERT(PageDescriptor->MapCount == 0);
902 ASSERT(PageDescriptor->ReferenceCount == 0);
903
904 /* Allocate the page */
905 PageDescriptor->Flags.Type = MM_PHYSICAL_PAGE_USED;
906 PageDescriptor->Flags.Consumer = Consumer;
907 PageDescriptor->ReferenceCount = 1;
908 PageDescriptor->LockCount = 0;
909 PageDescriptor->MapCount = 0;
910 PageDescriptor->SavedSwapEntry = 0; /* FIXME: Do we need swap entries? */
911
912 if (!PageDescriptor->Flags.Zero)
913 UnzeroedPageCount--;
914 MmStats.NrSystemPages++;
915 MmStats.NrFreePages--;
916
917 /* Remember the page */
918 Pages[NumberOfPagesFound++] = pfn;
919 if (NumberOfPagesFound == NumberOfPages)
920 break;
921 }
922 if (NumberOfPagesFound == NumberOfPages)
923 break;
924 }
925 }
926 KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
927
928 /* Zero unzero-ed pages */
929 for (i = 0; i < NumberOfPagesFound; i++)
930 {
931 pfn = Pages[i];
932 if (MiGetPfnEntry(pfn)->Flags.Zero == 0)
933 {
934 MiZeroPage(pfn);
935 }
936 else
937 {
938 MiGetPfnEntry(pfn)->Flags.Zero = 0;
939 }
940 }
941
942 return NumberOfPagesFound;
943 }
944
945 static
946 NTSTATUS
947 MiZeroPageInternal(PFN_TYPE Page)
948 {
949 PVOID TempAddress;
950
951 TempAddress = MiMapPageToZeroInHyperSpace(Page);
952 if (TempAddress == NULL)
953 {
954 return(STATUS_NO_MEMORY);
955 }
956 memset(TempAddress, 0, PAGE_SIZE);
957 return(STATUS_SUCCESS);
958 }
959
/*
 * Zero-page worker thread. Sleeps on ZeroPageThreadEvent; when woken it
 * drains FreeUnzeroedPageListHead, zeroing each page (with the PFN lock
 * dropped around the actual memset) and moving it to the zeroed list.
 * Returns STATUS_SUCCESS when ZeroPageThreadShouldTerminate is set.
 */
NTSTATUS
NTAPI
MmZeroPageThreadMain(PVOID Ignored)
{
    NTSTATUS Status;
    KIRQL oldIrql;
    PLIST_ENTRY ListEntry;
    PPHYSICAL_PAGE PageDescriptor;
    PFN_TYPE Pfn;
    ULONG Count;

    /* Free initial kernel memory */
    //MiFreeInitMemory();

    /* Set our priority to 0 */
    KeGetCurrentThread()->BasePriority = 0;
    KeSetPriorityThread(KeGetCurrentThread(), 0);

    while(1)
    {
        Status = KeWaitForSingleObject(&ZeroPageThreadEvent,
                                       0,
                                       KernelMode,
                                       FALSE,
                                       NULL);
        if (!NT_SUCCESS(Status))
        {
            DPRINT1("ZeroPageThread: Wait failed\n");
            KeBugCheck(MEMORY_MANAGEMENT);
        }

        if (ZeroPageThreadShouldTerminate)
        {
            DPRINT1("ZeroPageThread: Terminating\n");
            return STATUS_SUCCESS;
        }
        Count = 0;
        oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
        while (!IsListEmpty(&FreeUnzeroedPageListHead))
        {
            ListEntry = RemoveTailList(&FreeUnzeroedPageListHead);
            UnzeroedPageCount--;
            PageDescriptor = CONTAINING_RECORD(ListEntry, PHYSICAL_PAGE, ListEntry);
            /* We set the page to used, because MmCreateVirtualMapping failed with unused pages */
            PageDescriptor->Flags.Type = MM_PHYSICAL_PAGE_USED;
            /* Drop the lock while zeroing; the page is marked USED so
             * nobody else will hand it out meanwhile */
            KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
            Pfn = PageDescriptor - MmPageArray;
            Status = MiZeroPageInternal(Pfn);

            oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
            if (PageDescriptor->MapCount != 0)
            {
                DPRINT1("Mapped page on freelist.\n");
                KeBugCheck(MEMORY_MANAGEMENT);
            }
            /* Back to FREE; pick the list depending on whether the
             * zeroing actually succeeded */
            PageDescriptor->Flags.Zero = 1;
            PageDescriptor->Flags.Type = MM_PHYSICAL_PAGE_FREE;
            if (NT_SUCCESS(Status))
            {
                InsertHeadList(&FreeZeroedPageListHead, ListEntry);
                Count++;
            }
            else
            {
                InsertHeadList(&FreeUnzeroedPageListHead, ListEntry);
                UnzeroedPageCount++;
            }

        }
        DPRINT("Zeroed %d pages.\n", Count);
        /* NOTE(review): the event is reset while the PFN lock is still
         * held — confirm this ordering vs. the KeSetEvent in
         * MmDereferencePage is intentional. */
        KeResetEvent(&ZeroPageThreadEvent);
        KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
    }

    /* Unreachable: the loop only exits via the return above */
    return STATUS_SUCCESS;
}
1036
1037 /* EOF */