* PROJECT: ReactOS kernel
* FILE: ntoskrnl/mm/freelist.c
* PURPOSE: Handle the list of free physical pages
- * PROGRAMMER: David Welch (welch@mcmail.com)
+ * PROGRAMMER: David Welch (welch@cwcom.net)
* UPDATE HISTORY:
* 27/05/98: Created
* 18/08/98: Added a fix from Robert Bergkvist
*/
-/*
- * NOTE: The list of free pages is implemented as an unsorted double linked
- * list. This should make added or removing pages fast when you don't care
- * about the physical address. Because the entirety of physical memory is
- * mapped from 0xd0000000 upwards it is easy to do a mapping between
- * physical and linear address.
- */
-
/* INCLUDES ****************************************************************/
-#include <internal/stddef.h>
-#include <internal/hal/page.h>
+#include <ddk/ntddk.h>
#include <internal/mm.h>
#include <internal/ntoskrnl.h>
-#include <internal/bitops.h>
-#include <ddk/ntddk.h>
#define NDEBUG
#include <internal/debug.h>
/* TYPES *******************************************************************/
-typedef struct _free_page
-/*
- * PURPOSE: At the start of every region of free physical pages
- */
+#define MM_PHYSICAL_PAGE_FREE (0x1)
+#define MM_PHYSICAL_PAGE_USED (0x2)
+#define MM_PHYSICAL_PAGE_BIOS (0x3)
+
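+/*
+ * One PHYSICAL_PAGE descriptor exists for every physical page frame; the
+ * descriptor for page N is MmPageArray[N], so the physical address of a
+ * page can always be recovered from its descriptor's index in the array.
+ */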
+typedef struct _PHYSICAL_PAGE
{
- struct _free_page* next;
- struct _free_page* previous;
- unsigned int nr_pages;
-} free_page_hdr;
+ union
+ {
+ struct
+ {
+ ULONG Type:2;
+ ULONG Consumer:3;
+ }Flags;
+ ULONG AllFlags;
+ };
+
+ LIST_ENTRY ListEntry;
+ ULONG ReferenceCount;
+ SWAPENTRY SavedSwapEntry;
+ ULONG LockCount;
+ ULONG MapCount;
+ struct _MM_RMAP_ENTRY* RmapListHead;
+} PHYSICAL_PAGE, *PPHYSICAL_PAGE;
/* GLOBALS ****************************************************************/
-/*
- * PURPOSE: Points to the first page in the free list
- */
-free_page_hdr* free_page_list_head=NULL;
+static PPHYSICAL_PAGE MmPageArray;
+static ULONG MmPageArraySize;
+
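+/*
+ * PageListLock protects the page descriptor lists below: one in-use list
+ * per consumer, free lists for zeroed and unzeroed pages, and a list of
+ * pages reserved for the BIOS.
+ */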
+static KSPIN_LOCK PageListLock;
+static LIST_ENTRY UsedPageListHeads[MC_MAXIMUM];
+static LIST_ENTRY FreeZeroedPageListHead;
+static LIST_ENTRY FreeUnzeroedPageListHead;
+static LIST_ENTRY BiosPageListHead;
+
+static HANDLE ZeroPageThreadHandle;
+static CLIENT_ID ZeroPageThreadId;
+static KEVENT ZeroPageThreadEvent;
+
+static ULONG UnzeroedPageCount = 0;
/* FUNCTIONS *************************************************************/
-void free_page(unsigned int physical_base, unsigned int nr)
+VOID
+MmTransferOwnershipPage(PHYSICAL_ADDRESS PhysicalAddress, ULONG NewConsumer)
+{
+ ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
+ KIRQL oldIrql;
+
+ KeAcquireSpinLock(&PageListLock, &oldIrql);
+ if (MmPageArray[Start].MapCount != 0)
+ {
+ DbgPrint("Transfering mapped page.\n");
+ KEBUGCHECK(0);
+ }
+ RemoveEntryList(&MmPageArray[Start].ListEntry);
+ InsertTailList(&UsedPageListHeads[NewConsumer],
+ &MmPageArray[Start].ListEntry);
+ MmPageArray[Start].Flags.Consumer = NewConsumer;
+ KeReleaseSpinLock(&PageListLock, oldIrql);
+ MiZeroPage(PhysicalAddress);
+}
+
+PHYSICAL_ADDRESS
+MmGetLRUFirstUserPage(VOID)
+{
+ PLIST_ENTRY NextListEntry;
+ PHYSICAL_ADDRESS Next;
+ PHYSICAL_PAGE* PageDescriptor;
+ KIRQL oldIrql;
+
+ KeAcquireSpinLock(&PageListLock, &oldIrql);
+ NextListEntry = UsedPageListHeads[MC_USER].Flink;
+ if (NextListEntry == &UsedPageListHeads[MC_USER])
+ {
+ KeReleaseSpinLock(&PageListLock, oldIrql);
+ return((LARGE_INTEGER)0LL);
+ }
+ PageDescriptor = CONTAINING_RECORD(NextListEntry, PHYSICAL_PAGE, ListEntry);
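+ /* Recover the physical address from the descriptor's index in MmPageArray. */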
+ Next.QuadPart = (ULONG)((ULONG)PageDescriptor - (ULONG)MmPageArray);
+ Next.QuadPart = (Next.QuadPart / sizeof(PHYSICAL_PAGE)) * PAGE_SIZE;
+ KeReleaseSpinLock(&PageListLock, oldIrql);
+ return(Next);
+}
+
+VOID
+MmSetLRULastPage(PHYSICAL_ADDRESS PhysicalAddress)
+{
+ ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
+ KIRQL oldIrql;
+
+ KeAcquireSpinLock(&PageListLock, &oldIrql);
+ if (MmPageArray[Start].Flags.Type == MM_PHYSICAL_PAGE_USED &&
+ MmPageArray[Start].Flags.Consumer == MC_USER)
+ {
+ RemoveEntryList(&MmPageArray[Start].ListEntry);
+ InsertTailList(&UsedPageListHeads[MC_USER],
+ &MmPageArray[Start].ListEntry);
+ }
+ KeReleaseSpinLock(&PageListLock, oldIrql);
+}
+
+PHYSICAL_ADDRESS
+MmGetLRUNextUserPage(PHYSICAL_ADDRESS PreviousPhysicalAddress)
+{
+ ULONG Start = PreviousPhysicalAddress.u.LowPart / PAGE_SIZE;
+ PLIST_ENTRY NextListEntry;
+ PHYSICAL_ADDRESS Next;
+ PHYSICAL_PAGE* PageDescriptor;
+ KIRQL oldIrql;
+
+ KeAcquireSpinLock(&PageListLock, &oldIrql);
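+ /*
+  * If the previous page is no longer an in-use user page, restart from the
+  * head of the user page list; otherwise continue from its successor.
+  */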
+ if (MmPageArray[Start].Flags.Type != MM_PHYSICAL_PAGE_USED ||
+ MmPageArray[Start].Flags.Consumer != MC_USER)
+ {
+ NextListEntry = UsedPageListHeads[MC_USER].Flink;
+ }
+ else
+ {
+ NextListEntry = MmPageArray[Start].ListEntry.Flink;
+ }
+ if (NextListEntry == &UsedPageListHeads[MC_USER])
+ {
+ KeReleaseSpinLock(&PageListLock, oldIrql);
+ return((LARGE_INTEGER)0LL);
+ }
+ PageDescriptor = CONTAINING_RECORD(NextListEntry, PHYSICAL_PAGE, ListEntry);
+ Next.QuadPart = (ULONG)((ULONG)PageDescriptor - (ULONG)MmPageArray);
+ Next.QuadPart = (Next.QuadPart / sizeof(PHYSICAL_PAGE)) * PAGE_SIZE;
+ KeReleaseSpinLock(&PageListLock, oldIrql);
+ return(Next);
+}
+
+PHYSICAL_ADDRESS
+MmGetContinuousPages(ULONG NumberOfBytes,
+ PHYSICAL_ADDRESS HighestAcceptableAddress,
+ ULONG Alignment)
+{
+ ULONG NrPages;
+ ULONG i;
+ LONG start;
+ ULONG length;
+ KIRQL oldIrql;
+
+ NrPages = PAGE_ROUND_UP(NumberOfBytes) / PAGE_SIZE;
+
+ KeAcquireSpinLock(&PageListLock, &oldIrql);
+
+ start = -1;
+ length = 0;
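+ /*
+  * Scan for NrPages consecutive free pages below HighestAcceptableAddress.
+  * When a non-free page breaks the run, skip ahead to the next
+  * Alignment-sized boundary and start over.
+  */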
+ for (i = 0; i < (HighestAcceptableAddress.QuadPart / PAGE_SIZE); )
+ {
+ if (MmPageArray[i].Flags.Type == MM_PHYSICAL_PAGE_FREE)
+ {
+ if (start == -1)
+ {
+ start = i;
+ length = 1;
+ }
+ else
+ {
+ length++;
+ }
+ i++;
+ if (length == NrPages)
+ {
+ break;
+ }
+ }
+ else
+ {
+ start = -1;
+ /*
+ * Fast forward to the base of the next aligned region
+ */
+ i = ROUND_UP((i + 1), (Alignment / PAGE_SIZE));
+ }
+ }
+ if (start == -1 || length != NrPages)
+ {
+ KeReleaseSpinLock(&PageListLock, oldIrql);
+ return((LARGE_INTEGER)(LONGLONG)0);
+ }
+ for (i = start; i < (start + length); i++)
+ {
+ RemoveEntryList(&MmPageArray[i].ListEntry);
+ MmPageArray[i].Flags.Type = MM_PHYSICAL_PAGE_USED;
+ MmPageArray[i].Flags.Consumer = MC_NPPOOL;
+ MmPageArray[i].ReferenceCount = 1;
+ MmPageArray[i].LockCount = 0;
+ MmPageArray[i].MapCount = 0;
+ MmPageArray[i].SavedSwapEntry = 0;
+ InsertTailList(&UsedPageListHeads[MC_NPPOOL],
+ &MmPageArray[i].ListEntry);
+ }
+ KeReleaseSpinLock(&PageListLock, oldIrql);
+ return((LARGE_INTEGER)((LONGLONG)start * PAGE_SIZE));
+}
+
+VOID INIT_FUNCTION
+MiParseRangeToFreeList(PADDRESS_RANGE Range)
+{
+ ULONG i, first, last;
+
+ /* FIXME: Not 64-bit ready */
+
+ DPRINT("Range going to free list (Base 0x%X, Length 0x%X, Type 0x%X)\n",
+ Range->BaseAddrLow,
+ Range->LengthLow,
+ Range->Type);
+
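+ /* Put every page of the range that has not been classified yet on the unzeroed free list. */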
+ first = (Range->BaseAddrLow + PAGE_SIZE - 1) / PAGE_SIZE;
+ last = first + ((Range->LengthLow + PAGE_SIZE - 1) / PAGE_SIZE);
+ for (i = first; i < last && i < MmPageArraySize; i++)
+ {
+ if (MmPageArray[i].Flags.Type == 0)
+ {
+ MmPageArray[i].Flags.Type = MM_PHYSICAL_PAGE_FREE;
+ MmPageArray[i].ReferenceCount = 0;
+ InsertTailList(&FreeUnzeroedPageListHead,
+ &MmPageArray[i].ListEntry);
+ UnzeroedPageCount++;
+ }
+ }
+}
+
+VOID INIT_FUNCTION
+MiParseRangeToBiosList(PADDRESS_RANGE Range)
+{
+ ULONG i, first, last;
+
+ /* FIXME: Not 64-bit ready */
+
+ DPRINT("Range going to bios list (Base 0x%X, Length 0x%X, Type 0x%X)\n",
+ Range->BaseAddrLow,
+ Range->LengthLow,
+ Range->Type);
+
+ first = (Range->BaseAddrLow + PAGE_SIZE - 1) / PAGE_SIZE;
+ last = first + ((Range->LengthLow + PAGE_SIZE - 1) / PAGE_SIZE);
+ for (i = first; i < last && i < MmPageArraySize; i++)
+ {
+ /* Remove the page from the free list if it is there */
+ if (MmPageArray[i].Flags.Type == MM_PHYSICAL_PAGE_FREE)
+ {
+ RemoveEntryList(&MmPageArray[i].ListEntry);
+ }
+
+ if (MmPageArray[i].Flags.Type != MM_PHYSICAL_PAGE_BIOS)
+ {
+ MmPageArray[i].Flags.Type = MM_PHYSICAL_PAGE_BIOS;
+ MmPageArray[i].Flags.Consumer = MC_NPPOOL;
+ MmPageArray[i].ReferenceCount = 1;
+ InsertTailList(&BiosPageListHead,
+ &MmPageArray[i].ListEntry);
+ }
+ }
+}
+
+VOID INIT_FUNCTION
+MiParseBIOSMemoryMap(PADDRESS_RANGE BIOSMemoryMap,
+ ULONG AddressRangeCount)
+{
+ PADDRESS_RANGE p;
+ ULONG i;
+
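+ /* Type 1 ranges are usable RAM; every other type is treated as reserved. */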
+ p = BIOSMemoryMap;
+ for (i = 0; i < AddressRangeCount; i++, p++)
+ {
+ if (p->Type == 1)
+ {
+ MiParseRangeToFreeList(p);
+ }
+ else
+ {
+ MiParseRangeToBiosList(p);
+ }
+ }
+}
+
+PVOID INIT_FUNCTION
+MmInitializePageList(PVOID FirstPhysKernelAddress,
+ PVOID LastPhysKernelAddress,
+ ULONG MemorySizeInPages,
+ ULONG LastKernelAddress,
+ PADDRESS_RANGE BIOSMemoryMap,
+ ULONG AddressRangeCount)
/*
- * FUNCTION: Add a physically continuous range of pages to the free list
+ * FUNCTION: Initializes the page list with all pages free
+ * except those known to be reserved and those used by the kernel
* ARGUMENTS:
- * physical_base = The first physical address to free
- * nr = the size of the region (in pages) to free
- * NOTES: This function attempts to keep the list partially unfragmented
+ * FirstPhysKernelAddress = First physical address used by the kernel
+ * LastPhysKernelAddress = Last physical address used by the kernel
+ * MemorySizeInPages = Number of physical pages to manage
+ * LastKernelAddress = Last address used by the kernel; the page descriptor
+ * array is placed directly after it
+ * BIOSMemoryMap = BIOS memory map (may be NULL)
+ * AddressRangeCount = Number of entries in the BIOS memory map
+ * RETURNS: The new last kernel address, past the page descriptor array
*/
{
- unsigned int eflags;
- free_page_hdr* hdr=NULL;
+ ULONG i;
+ ULONG Reserved;
+ NTSTATUS Status;
+
+ DPRINT("MmInitializePageList(FirstPhysKernelAddress %x, "
+ "LastPhysKernelAddress %x, "
+ "MemorySizeInPages %x, LastKernelAddress %x)\n",
+ FirstPhysKernelAddress,
+ LastPhysKernelAddress,
+ MemorySizeInPages,
+ LastKernelAddress);
+
+ for (i = 0; i < MC_MAXIMUM; i++)
+ {
+ InitializeListHead(&UsedPageListHeads[i]);
+ }
+ KeInitializeSpinLock(&PageListLock);
+ InitializeListHead(&FreeUnzeroedPageListHead);
+ InitializeListHead(&FreeZeroedPageListHead);
+ InitializeListHead(&BiosPageListHead);
- DPRINT("Freeing %x to %x\n",physical_base,physical_base
- + (nr*PAGESIZE));
+ LastKernelAddress = PAGE_ROUND_UP(LastKernelAddress);
+ MmPageArraySize = MemorySizeInPages;
+ Reserved =
+ PAGE_ROUND_UP((MmPageArraySize * sizeof(PHYSICAL_PAGE))) / PAGE_SIZE;
+ MmPageArray = (PHYSICAL_PAGE *)LastKernelAddress;
+
+ DPRINT("Reserved %d\n", Reserved);
+
+ LastKernelAddress = PAGE_ROUND_UP(LastKernelAddress);
+ LastKernelAddress = ((ULONG)LastKernelAddress + (Reserved * PAGE_SIZE));
+ LastPhysKernelAddress = (PVOID)PAGE_ROUND_UP(LastPhysKernelAddress);
+ LastPhysKernelAddress = LastPhysKernelAddress + (Reserved * PAGE_SIZE);
+
+ MmStats.NrTotalPages = 0;
+ MmStats.NrSystemPages = 0;
+ MmStats.NrUserPages = 0;
+ MmStats.NrReservedPages = 0;
+ MmStats.NrFreePages = 0;
+ MmStats.NrLockedPages = 0;
+
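+ /*
+  * The page descriptor array is placed directly after the kernel image.
+  * Map any of its pages that are not already present, backing them with the
+  * physical pages directly after the kernel, and clear the whole array.
+  */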
+ for (i = 0; i < Reserved; i++)
+ {
+ PVOID Address = (PVOID)(ULONG)MmPageArray + (i * PAGE_SIZE);
+ if (!MmIsPagePresent(NULL, Address))
+ {
+ ULONG PhysicalAddress;
+ PhysicalAddress = (ULONG)LastPhysKernelAddress -
+ (Reserved * PAGE_SIZE) + (i * PAGE_SIZE);
+ Status =
+ MmCreateVirtualMappingUnsafe(NULL,
+ Address,
+ PAGE_READWRITE,
+ (PHYSICAL_ADDRESS)(LONGLONG)PhysicalAddress,
+ FALSE);
+ if (!NT_SUCCESS(Status))
+ {
+ DbgPrint("Unable to create virtual mapping\n");
+ KEBUGCHECK(0);
+ }
+ }
+ memset((PVOID)MmPageArray + (i * PAGE_SIZE), 0, PAGE_SIZE);
+ }
+
+
/*
- * This must be atomic
+ * Page zero is reserved
*/
- __asm__("pushf\n\tpop %0\n\tcli\n\t"
- : "=d" (eflags));
-
+ MmPageArray[0].Flags.Type = MM_PHYSICAL_PAGE_BIOS;
+ MmPageArray[0].Flags.Consumer = MC_NPPOOL;
+ MmPageArray[0].ReferenceCount = 0;
+ InsertTailList(&BiosPageListHead,
+ &MmPageArray[0].ListEntry);
+
/*
- *
+ * Page one is reserved for the initial KPCR
*/
- hdr = (free_page_hdr *)physical_to_linear(physical_base);
+ MmPageArray[1].Flags.Type = MM_PHYSICAL_PAGE_BIOS;
+ MmPageArray[1].Flags.Consumer = MC_NPPOOL;
+ MmPageArray[1].ReferenceCount = 0;
+ InsertTailList(&BiosPageListHead,
+ &MmPageArray[1].ListEntry);
+
+ i = 2;
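+ /*
+  * Classify conventional memory, the BIOS/adapter area (0xa0000 - 0xfffff)
+  * and the pages occupied by the kernel; the two branches only differ in
+  * whether the kernel image was loaded below the 640KB (0xa0000) boundary.
+  */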
+ if ((ULONG)FirstPhysKernelAddress < 0xa0000)
+ {
+ MmStats.NrFreePages += (((ULONG)FirstPhysKernelAddress/PAGE_SIZE) - 2);
+ for (; i<((ULONG)FirstPhysKernelAddress/PAGE_SIZE); i++)
+ {
+ MmPageArray[i].Flags.Type = MM_PHYSICAL_PAGE_FREE;
+ MmPageArray[i].ReferenceCount = 0;
+ InsertTailList(&FreeUnzeroedPageListHead,
+ &MmPageArray[i].ListEntry);
+ UnzeroedPageCount++;
+ }
+ MmStats.NrSystemPages +=
+ ((((ULONG)LastPhysKernelAddress) / PAGE_SIZE) - i);
+ for (; i<((ULONG)LastPhysKernelAddress / PAGE_SIZE); i++)
+ {
+ MmPageArray[i].Flags.Type = MM_PHYSICAL_PAGE_USED;
+ MmPageArray[i].Flags.Consumer = MC_NPPOOL;
+ MmPageArray[i].ReferenceCount = 1;
+ MmPageArray[i].MapCount = 1;
+ InsertTailList(&UsedPageListHeads[MC_NPPOOL],
+ &MmPageArray[i].ListEntry);
+ }
+ MmStats.NrFreePages += ((0xa0000/PAGE_SIZE) - i);
+ for (; i<(0xa0000/PAGE_SIZE); i++)
+ {
+ MmPageArray[i].Flags.Type = MM_PHYSICAL_PAGE_FREE;
+ MmPageArray[i].ReferenceCount = 0;
+ InsertTailList(&FreeUnzeroedPageListHead,
+ &MmPageArray[i].ListEntry);
+ UnzeroedPageCount++;
+ }
+ MmStats.NrReservedPages += ((0x100000/PAGE_SIZE) - i);
+ for (; i<(0x100000 / PAGE_SIZE); i++)
+ {
+ MmPageArray[i].Flags.Type = MM_PHYSICAL_PAGE_BIOS;
+ MmPageArray[i].Flags.Consumer = MC_NPPOOL;
+ MmPageArray[i].ReferenceCount = 1;
+ InsertTailList(&BiosPageListHead,
+ &MmPageArray[i].ListEntry);
+ }
+ }
+ else
+ {
+ MmStats.NrFreePages += ((0xa0000 / PAGE_SIZE) - 2);
+ for (; i<(0xa0000 / PAGE_SIZE); i++)
+ {
+ MmPageArray[i].Flags.Type = MM_PHYSICAL_PAGE_FREE;
+ MmPageArray[i].ReferenceCount = 0;
+ InsertTailList(&FreeUnzeroedPageListHead,
+ &MmPageArray[i].ListEntry);
+ UnzeroedPageCount++;
+ }
+ MmStats.NrReservedPages += (0x60000 / PAGE_SIZE);
+ for (; i<(0x100000 / PAGE_SIZE); i++)
+ {
+ MmPageArray[i].Flags.Type = MM_PHYSICAL_PAGE_BIOS;
+ MmPageArray[i].Flags.Consumer = MC_NPPOOL;
+ MmPageArray[i].ReferenceCount = 1;
+ InsertTailList(&BiosPageListHead,
+ &MmPageArray[i].ListEntry);
+ }
+ MmStats.NrFreePages += (((ULONG)FirstPhysKernelAddress/PAGE_SIZE) - i);
+ for (; i<((ULONG)FirstPhysKernelAddress/PAGE_SIZE); i++)
+ {
+ MmPageArray[i].Flags.Type = MM_PHYSICAL_PAGE_FREE;
+ MmPageArray[i].ReferenceCount = 0;
+ InsertTailList(&FreeUnzeroedPageListHead,
+ &MmPageArray[i].ListEntry);
+ UnzeroedPageCount++;
+ }
+ MmStats.NrSystemPages +=
+ (((ULONG)LastPhysKernelAddress/PAGE_SIZE) - i);
+ for (; i<((ULONG)LastPhysKernelAddress/PAGE_SIZE); i++)
+ {
+ MmPageArray[i].Flags.Type = MM_PHYSICAL_PAGE_USED;
+ MmPageArray[i].Flags.Consumer = MC_NPPOOL;
+ MmPageArray[i].ReferenceCount = 1;
+ MmPageArray[i].MapCount = 1;
+ InsertTailList(&UsedPageListHeads[MC_NPPOOL],
+ &MmPageArray[i].ListEntry);
+ }
+ }
+
+ MmStats.NrFreePages += (MemorySizeInPages - i);
+ for (; i<MemorySizeInPages; i++)
+ {
+ MmPageArray[i].Flags.Type = MM_PHYSICAL_PAGE_FREE;
+ MmPageArray[i].ReferenceCount = 0;
+ InsertTailList(&FreeUnzeroedPageListHead,
+ &MmPageArray[i].ListEntry);
+ UnzeroedPageCount++;
+ }
+
+ if ((BIOSMemoryMap != NULL) && (AddressRangeCount > 0))
+ {
+ MiParseBIOSMemoryMap(
+ BIOSMemoryMap,
+ AddressRangeCount);
+ }
+
+ KeInitializeEvent(&ZeroPageThreadEvent, NotificationEvent, TRUE);
+
+
+ MmStats.NrTotalPages = MmStats.NrFreePages + MmStats.NrSystemPages +
+ MmStats.NrReservedPages + MmStats.NrUserPages;
+ MmInitializeBalancer(MmStats.NrFreePages, MmStats.NrSystemPages + MmStats.NrReservedPages);
+ return((PVOID)LastKernelAddress);
+}
+
+VOID
+MmSetFlagsPage(PHYSICAL_ADDRESS PhysicalAddress, ULONG Flags)
+{
+ ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
+ KIRQL oldIrql;
+
+ KeAcquireSpinLock(&PageListLock, &oldIrql);
+ MmPageArray[Start].AllFlags = Flags;
+ KeReleaseSpinLock(&PageListLock, oldIrql);
+}
+
+VOID
+MmSetRmapListHeadPage(PHYSICAL_ADDRESS PhysicalAddress,
+ struct _MM_RMAP_ENTRY* ListHead)
+{
+ ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
+
+ MmPageArray[Start].RmapListHead = ListHead;
+}
+
+struct _MM_RMAP_ENTRY*
+MmGetRmapListHeadPage(PHYSICAL_ADDRESS PhysicalAddress)
+{
+ ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
+
+ return(MmPageArray[Start].RmapListHead);
+}
+
+VOID
+MmMarkPageMapped(PHYSICAL_ADDRESS PhysicalAddress)
+{
+ ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
+ KIRQL oldIrql;
+
+ if (Start < MmPageArraySize)
+ {
+ KeAcquireSpinLock(&PageListLock, &oldIrql);
+ if (MmPageArray[Start].Flags.Type == MM_PHYSICAL_PAGE_FREE)
+ {
+ DbgPrint("Mapping non-used page\n");
+ KEBUGCHECK(0);
+ }
+ MmPageArray[Start].MapCount++;
+ KeReleaseSpinLock(&PageListLock, oldIrql);
+ }
+}
+
+VOID
+MmMarkPageUnmapped(PHYSICAL_ADDRESS PhysicalAddress)
+{
+ ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
+ KIRQL oldIrql;
+
+ if (Start < MmPageArraySize)
+ {
+ KeAcquireSpinLock(&PageListLock, &oldIrql);
+ if (MmPageArray[Start].Flags.Type == MM_PHYSICAL_PAGE_FREE)
+ {
+ DbgPrint("Unmapping non-used page\n");
+ KEBUGCHECK(0);
+ }
+ if (MmPageArray[Start].MapCount == 0)
+ {
+ DbgPrint("Unmapping not mapped page\n");
+ KEBUGCHECK(0);
+ }
+ MmPageArray[Start].MapCount--;
+ KeReleaseSpinLock(&PageListLock, oldIrql);
+ }
+}
+
+ULONG
+MmGetFlagsPage(PHYSICAL_ADDRESS PhysicalAddress)
+{
+ ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
+ KIRQL oldIrql;
+ ULONG Flags;
+
+ KeAcquireSpinLock(&PageListLock, &oldIrql);
+ Flags = MmPageArray[Start].AllFlags;
+ KeReleaseSpinLock(&PageListLock, oldIrql);
+
+ return(Flags);
+}
+
+
+VOID
+MmSetSavedSwapEntryPage(PHYSICAL_ADDRESS PhysicalAddress,
+ SWAPENTRY SavedSwapEntry)
+{
+ ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
+ KIRQL oldIrql;
+
+ KeAcquireSpinLock(&PageListLock, &oldIrql);
+ MmPageArray[Start].SavedSwapEntry = SavedSwapEntry;
+ KeReleaseSpinLock(&PageListLock, oldIrql);
+}
+
+SWAPENTRY
+MmGetSavedSwapEntryPage(PHYSICAL_ADDRESS PhysicalAddress)
+{
+ ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
+ SWAPENTRY SavedSwapEntry;
+ KIRQL oldIrql;
+
+ KeAcquireSpinLock(&PageListLock, &oldIrql);
+ SavedSwapEntry = MmPageArray[Start].SavedSwapEntry;
+ KeReleaseSpinLock(&PageListLock, oldIrql);
+
+ return(SavedSwapEntry);
+}
+
+VOID
+MmReferencePage(PHYSICAL_ADDRESS PhysicalAddress)
+{
+ ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
+ KIRQL oldIrql;
+
+ DPRINT("MmReferencePage(PhysicalAddress %x)\n", PhysicalAddress);
+
+ if (PhysicalAddress.u.LowPart == 0)
+ {
+ KEBUGCHECK(0);
+ }
- DPRINT("free_page_hdr %x\n",hdr);
- DPRINT("free_page_list_head %x\n",free_page_list_head);
+ KeAcquireSpinLock(&PageListLock, &oldIrql);
- if (free_page_list_head!=NULL)
+ if (MmPageArray[Start].Flags.Type != MM_PHYSICAL_PAGE_USED)
{
- free_page_list_head->previous=hdr;
+ DbgPrint("Referencing non-used page\n");
+ KEBUGCHECK(0);
}
- hdr->next=free_page_list_head;
- hdr->previous=NULL;
- hdr->nr_pages = nr;
- free_page_list_head=hdr;
- __asm__("push %0\n\tpopf\n\t"
- :
- : "d" (eflags));
+ MmPageArray[Start].ReferenceCount++;
+ KeReleaseSpinLock(&PageListLock, oldIrql);
}
-unsigned int get_dma_page(unsigned int max_address)
-/*
- * FUNCTION: Gets a page with a restricted max physical address (i.e.
- * suitable for dma)
- * ARGUMENTS:
- * max_address = The maximum address usable by the caller
- * RETURNS:
- * The physical address of the page if it succeeds
- * NULL if it fails.
- * NOTES: This is very inefficent because the list isn't sorted. On the
- * other hand sorting the list would be quite expensive especially if dma
- * is only used infrequently. Perhaps a special cache of dma pages should
- * be maintained?
- */
+ULONG
+MmGetReferenceCountPage(PHYSICAL_ADDRESS PhysicalAddress)
{
- free_page_hdr* current=NULL;
+ ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
+ KIRQL oldIrql;
+ ULONG RCount;
- if (free_page_list_head==NULL)
+ DPRINT("MmGetReferenceCountPage(PhysicalAddress %x)\n", PhysicalAddress);
+
+ if (PhysicalAddress.u.LowPart == 0)
{
- printk("CRITICAL: Unable to allocate page\n");
- KeBugCheck(KBUG_OUT_OF_MEMORY);
+ KEBUGCHECK(0);
}
- /*
- * Walk the free page list looking for suitable memory
- */
- current = free_page_list_head;
- while (current!=NULL)
+ KeAcquireSpinLock(&PageListLock, &oldIrql);
+
+ if (MmPageArray[Start].Flags.Type != MM_PHYSICAL_PAGE_USED)
{
- if ( ((int)current) < max_address)
- {
- /*
- * We take the first page from the region
- */
- free_page_hdr* nhdr = (free_page_hdr *)(((int)current)+PAGESIZE);
- if (current->previous!=NULL)
- {
- current->previous->next=nhdr;
- }
- if (current->next!=NULL)
- {
- current->next->previous=nhdr;
- }
- nhdr->next=current->next;
- nhdr->previous=current->previous;
- nhdr->nr_pages=current->nr_pages-1;
- if (free_page_list_head==current)
- {
- free_page_list_head=nhdr;
- }
-
- return ((int)current);
- }
-
- current=current->next;
+ DbgPrint("Getting reference count for free page\n");
+ KEBUGCHECK(0);
}
- return(NULL);
+
+ RCount = MmPageArray[Start].ReferenceCount;
+
+ KeReleaseSpinLock(&PageListLock, oldIrql);
+ return(RCount);
}
-unsigned int get_free_page(void)
-/*
- * FUNCTION: Allocates a page
- * RETURNS: The physical address of the page allocated
- */
+BOOLEAN
+MmIsUsablePage(PHYSICAL_ADDRESS PhysicalAddress)
{
- unsigned int addr;
+ ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
- /*
- * This must be atomic wrt everything
- */
- unsigned int eflags;
- __asm__("pushf\n\tpop %0\n\tcli\n\t"
- : "=d" (eflags));
- CHECKPOINT;
- /*
- * If we are totally out of memory then panic
- */
- if (free_page_list_head==NULL)
+ DPRINT("MmGetReferenceCountPage(PhysicalAddress %x)\n", PhysicalAddress);
+
+ if (PhysicalAddress.u.LowPart == 0)
{
- printk("CRITICAL: Unable to allocate page\n");
- KeBugCheck(KBUG_OUT_OF_MEMORY);
+ KEBUGCHECK(0);
}
- CHECKPOINT;
- addr = 0;
- CHECKPOINT;
- if (free_page_list_head->nr_pages>1)
+
+ if (MmPageArray[Start].Flags.Type != MM_PHYSICAL_PAGE_USED &&
+ MmPageArray[Start].Flags.Type != MM_PHYSICAL_PAGE_BIOS)
{
- free_page_list_head->nr_pages--;
- addr = ((unsigned int)free_page_list_head) +
- (free_page_list_head->nr_pages * PAGESIZE);
+ return(FALSE);
}
- else
+
+ return(TRUE);
+}
+
+VOID
+MmDereferencePage(PHYSICAL_ADDRESS PhysicalAddress)
+{
+ ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
+ KIRQL oldIrql;
+
+ DPRINT("MmDereferencePage(PhysicalAddress %I64x)\n", PhysicalAddress);
+
+ if (PhysicalAddress.u.LowPart == 0)
{
- addr = (unsigned int)free_page_list_head;
- free_page_list_head = free_page_list_head -> next;
+ KEBUGCHECK(0);
}
- CHECKPOINT;
- __asm__("push %0\n\tpopf\n\t"
- :
- : "d" (eflags));
- addr = addr - (IDMAP_BASE);
- DPRINT("allocated %x\n",addr);
- CHECKPOINT;
- return(addr);
+ KeAcquireSpinLock(&PageListLock, &oldIrql);
+
+
+ if (MmPageArray[Start].Flags.Type != MM_PHYSICAL_PAGE_USED)
+ {
+ DbgPrint("Dereferencing free page\n");
+ KEBUGCHECK(0);
+ }
+
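+ /*
+  * Dropping the last reference returns the page to the unzeroed free list
+  * and wakes the zero page thread once enough unzeroed pages have accumulated.
+  */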
+ MmPageArray[Start].ReferenceCount--;
+ if (MmPageArray[Start].ReferenceCount == 0)
+ {
+ MmStats.NrFreePages++;
+ MmStats.NrSystemPages--;
+ RemoveEntryList(&MmPageArray[Start].ListEntry);
+ if (MmPageArray[Start].RmapListHead != NULL)
+ {
+ DbgPrint("Freeing page with rmap entries.\n");
+ KEBUGCHECK(0);
+ }
+ if (MmPageArray[Start].MapCount != 0)
+ {
+ DbgPrint("Freeing mapped page (0x%I64x count %d)\n",
+ PhysicalAddress, MmPageArray[Start].MapCount);
+ KEBUGCHECK(0);
+ }
+ if (MmPageArray[Start].LockCount > 0)
+ {
+ DbgPrint("Freeing locked page\n");
+ KEBUGCHECK(0);
+ }
+ if (MmPageArray[Start].SavedSwapEntry != 0)
+ {
+ DbgPrint("Freeing page with swap entry.\n");
+ KEBUGCHECK(0);
+ }
+ if (MmPageArray[Start].Flags.Type != MM_PHYSICAL_PAGE_USED)
+ {
+ DbgPrint("Freeing page with flags %x\n",
+ MmPageArray[Start].Flags.Type);
+ KEBUGCHECK(0);
+ }
+ MmPageArray[Start].Flags.Type = MM_PHYSICAL_PAGE_FREE;
+ InsertTailList(&FreeUnzeroedPageListHead,
+ &MmPageArray[Start].ListEntry);
+ UnzeroedPageCount++;
+ if (UnzeroedPageCount > 8 && 0 == KeReadStateEvent(&ZeroPageThreadEvent))
+ {
+ KeSetEvent(&ZeroPageThreadEvent, IO_NO_INCREMENT, FALSE);
+ }
+ }
+ KeReleaseSpinLock(&PageListLock, oldIrql);
}
+ULONG
+MmGetLockCountPage(PHYSICAL_ADDRESS PhysicalAddress)
+{
+ ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
+ KIRQL oldIrql;
+ ULONG LockCount;
+
+ DPRINT("MmGetLockCountPage(PhysicalAddress %x)\n", PhysicalAddress);
+
+ if (PhysicalAddress.u.LowPart == 0)
+ {
+ KEBUGCHECK(0);
+ }
+
+ KeAcquireSpinLock(&PageListLock, &oldIrql);
+
+ if (MmPageArray[Start].Flags.Type != MM_PHYSICAL_PAGE_USED)
+ {
+ DbgPrint("Getting lock count for free page\n");
+ KEBUGCHECK(0);
+ }
+
+ LockCount = MmPageArray[Start].LockCount;
+ KeReleaseSpinLock(&PageListLock, oldIrql);
+ return(LockCount);
+}
+VOID
+MmLockPage(PHYSICAL_ADDRESS PhysicalAddress)
+{
+ ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
+ KIRQL oldIrql;
+
+ DPRINT("MmLockPage(PhysicalAddress %x)\n", PhysicalAddress);
+
+ if (PhysicalAddress.u.LowPart == 0)
+ {
+ KEBUGCHECK(0);
+ }
+
+ KeAcquireSpinLock(&PageListLock, &oldIrql);
+
+ if (MmPageArray[Start].Flags.Type != MM_PHYSICAL_PAGE_USED)
+ {
+ DbgPrint("Locking free page\n");
+ KEBUGCHECK(0);
+ }
+
+ MmPageArray[Start].LockCount++;
+ KeReleaseSpinLock(&PageListLock, oldIrql);
+}
+
+VOID
+MmUnlockPage(PHYSICAL_ADDRESS PhysicalAddress)
+{
+ ULONG Start = PhysicalAddress.u.LowPart / PAGE_SIZE;
+ KIRQL oldIrql;
+
+ DPRINT("MmUnlockPage(PhysicalAddress %llx)\n", PhysicalAddress);
+
+ if (PhysicalAddress.u.LowPart == 0)
+ {
+ KEBUGCHECK(0);
+ }
+
+ KeAcquireSpinLock(&PageListLock, &oldIrql);
+
+ if (MmPageArray[Start].Flags.Type != MM_PHYSICAL_PAGE_USED)
+ {
+ DbgPrint("Unlocking free page\n");
+ KEBUGCHECK(0);
+ }
+
+ MmPageArray[Start].LockCount--;
+ KeReleaseSpinLock(&PageListLock, oldIrql);
+}
+
+PHYSICAL_ADDRESS
+MmAllocPage(ULONG Consumer, SWAPENTRY SavedSwapEntry)
+{
+ PHYSICAL_ADDRESS PageOffset;
+ PLIST_ENTRY ListEntry;
+ PPHYSICAL_PAGE PageDescriptor;
+ KIRQL oldIrql;
+ BOOLEAN NeedClear = FALSE;
+
+ DPRINT("MmAllocPage()\n");
+
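+ /*
+  * Prefer a page from the zeroed free list; otherwise take an unzeroed one
+  * and zero it after the allocation bookkeeping is done.
+  */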
+ KeAcquireSpinLock(&PageListLock, &oldIrql);
+ if (IsListEmpty(&FreeZeroedPageListHead))
+ {
+ if (IsListEmpty(&FreeUnzeroedPageListHead))
+ {
+ DPRINT1("MmAllocPage(): Out of memory\n");
+ KeReleaseSpinLock(&PageListLock, oldIrql);
+ return((PHYSICAL_ADDRESS)0LL);
+ }
+ ListEntry = RemoveTailList(&FreeUnzeroedPageListHead);
+ UnzeroedPageCount--;
+
+ PageDescriptor = CONTAINING_RECORD(ListEntry, PHYSICAL_PAGE, ListEntry);
+
+ NeedClear = TRUE;
+ }
+ else
+ {
+ ListEntry = RemoveTailList(&FreeZeroedPageListHead);
+
+ PageDescriptor = CONTAINING_RECORD(ListEntry, PHYSICAL_PAGE, ListEntry);
+ }
+
+ if (PageDescriptor->Flags.Type != MM_PHYSICAL_PAGE_FREE)
+ {
+ DbgPrint("Got non-free page from freelist\n");
+ KEBUGCHECK(0);
+ }
+ if (PageDescriptor->MapCount != 0)
+ {
+ DbgPrint("Got mapped page from freelist\n");
+ KEBUGCHECK(0);
+ }
+ PageDescriptor->Flags.Type = MM_PHYSICAL_PAGE_USED;
+ PageDescriptor->Flags.Consumer = Consumer;
+ PageDescriptor->ReferenceCount = 1;
+ PageDescriptor->LockCount = 0;
+ PageDescriptor->MapCount = 0;
+ PageDescriptor->SavedSwapEntry = SavedSwapEntry;
+ InsertTailList(&UsedPageListHeads[Consumer], ListEntry);
+
+ MmStats.NrSystemPages++;
+ MmStats.NrFreePages--;
+
+ KeReleaseSpinLock(&PageListLock, oldIrql);
+
+ PageOffset.QuadPart = (ULONG)((ULONG)PageDescriptor - (ULONG)MmPageArray);
+ PageOffset.QuadPart =
+ (PageOffset.QuadPart / sizeof(PHYSICAL_PAGE)) * PAGE_SIZE;
+ if (NeedClear)
+ {
+ MiZeroPage(PageOffset);
+ }
+ if (PageDescriptor->MapCount != 0)
+ {
+ DbgPrint("Returning mapped page.\n");
+ KEBUGCHECK(0);
+ }
+ return(PageOffset);
+}
+
+NTSTATUS STDCALL
+MmZeroPageThreadMain(PVOID Ignored)
+{
+ NTSTATUS Status;
+ KIRQL oldIrql;
+ PLIST_ENTRY ListEntry;
+ PPHYSICAL_PAGE PageDescriptor;
+ PHYSICAL_ADDRESS PhysPage;
+ static PVOID Address = NULL;
+ ULONG Count;
+
+ while(1)
+ {
+ Status = KeWaitForSingleObject(&ZeroPageThreadEvent,
+ 0,
+ KernelMode,
+ FALSE,
+ NULL);
+ if (!NT_SUCCESS(Status))
+ {
+ DbgPrint("ZeroPageThread: Wait failed\n");
+ KEBUGCHECK(0);
+ return(STATUS_UNSUCCESSFUL);
+ }
+
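+ /*
+  * Drain the unzeroed free list: temporarily map each page, clear it and
+  * move it to the zeroed free list. The spinlock is released while zeroing.
+  */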
+ Count = 0;
+ KeAcquireSpinLock(&PageListLock, &oldIrql);
+ while (!IsListEmpty(&FreeUnzeroedPageListHead))
+ {
+ ListEntry = RemoveTailList(&FreeUnzeroedPageListHead);
+ UnzeroedPageCount--;
+ PageDescriptor = CONTAINING_RECORD(ListEntry, PHYSICAL_PAGE, ListEntry);
+ /* Mark the page as used: MmCreateVirtualMapping fails for pages that are not marked used. */
+ PageDescriptor->Flags.Type = MM_PHYSICAL_PAGE_USED;
+ KeReleaseSpinLock(&PageListLock, oldIrql);
+ Count++;
+ PhysPage.QuadPart = (ULONG)((ULONG)PageDescriptor - (ULONG)MmPageArray);
+ PhysPage.QuadPart = (PhysPage.QuadPart / sizeof(PHYSICAL_PAGE)) * PAGE_SIZE;
+ if (Address == NULL)
+ {
+ Address = ExAllocatePageWithPhysPage(PhysPage);
+ }
+ else
+ {
+ Status = MmCreateVirtualMapping(NULL,
+ Address,
+ PAGE_READWRITE | PAGE_SYSTEM,
+ PhysPage,
+ FALSE);
+ if (!NT_SUCCESS(Status))
+ {
+ DbgPrint("Unable to create virtual mapping\n");
+ KEBUGCHECK(0);
+ }
+ }
+ memset(Address, 0, PAGE_SIZE);
+ MmDeleteVirtualMapping(NULL, (PVOID)Address, FALSE, NULL, NULL);
+ KeAcquireSpinLock(&PageListLock, &oldIrql);
+ if (PageDescriptor->MapCount != 0)
+ {
+ DbgPrint("Mapped page on freelist.\n");
+ KEBUGCHECK(0);
+ }
+ PageDescriptor->Flags.Type = MM_PHYSICAL_PAGE_FREE;
+ InsertHeadList(&FreeZeroedPageListHead, ListEntry);
+ }
+ DPRINT("Zeroed %d pages.\n", Count);
+ KeResetEvent(&ZeroPageThreadEvent);
+ KeReleaseSpinLock(&PageListLock, oldIrql);
+ }
+}
+
+NTSTATUS INIT_FUNCTION
+MmInitZeroPageThread(VOID)
+{
+ KPRIORITY Priority;
+ NTSTATUS Status;
+
+ Status = PsCreateSystemThread(&ZeroPageThreadHandle,
+ THREAD_ALL_ACCESS,
+ NULL,
+ NULL,
+ &ZeroPageThreadId,
+ (PKSTART_ROUTINE) MmZeroPageThreadMain,
+ NULL);
+ if (!NT_SUCCESS(Status))
+ {
+ return(Status);
+ }
+
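+ /* Run the zero page thread at a very low priority so it only uses otherwise idle CPU time. */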
+ Priority = 1;
+ NtSetInformationThread(ZeroPageThreadHandle,
+ ThreadPriority,
+ &Priority,
+ sizeof(Priority));
+
+ return(STATUS_SUCCESS);
+}
+
+/* EOF */