/* GLOBALS ****************************************************************/
-//
-//
// ReactOS to NT Physical Page Descriptor Entry Legacy Mapping Definitions
-//
-// REACTOS NT
-//
-#define RmapListHead AweReferenceCount
/* Legacy aliases: ReactOS "physical page" descriptors are NT MMPFN entries */
#define PHYSICAL_PAGE MMPFN
#define PPHYSICAL_PAGE PMMPFN
/* Resident (non-paged) available page counters */
PFN_NUMBER MmResidentAvailablePages;
PFN_NUMBER MmResidentAvailableAtInit;
-SIZE_T MmTotalCommitLimit;
/* Commit-charge accounting counters */
SIZE_T MmTotalCommittedPages;
SIZE_T MmSharedCommit;
SIZE_T MmDriverCommit;
SIZE_T MmProcessCommit;
SIZE_T MmPagedPoolCommit;
-SIZE_T MmPeakCommitment;
+SIZE_T MmPeakCommitment;
/* NOTE(review): lowercase 't' in "Mmtotal" looks like a typo for
   MmTotalCommitLimitMaximum — confirm against all referencing code
   before renaming (cannot be changed in a comment-only pass) */
SIZE_T MmtotalCommitLimitMaximum;
-KEVENT ZeroPageThreadEvent;
-static BOOLEAN ZeroPageThreadShouldTerminate = FALSE;
/* One bit per PFN; set bits mark pages owned by the legacy ReactOS
   "user page" (LRU) tracking layer */
static RTL_BITMAP MiUserPfnBitMap;
/* FUNCTIONS *************************************************************/
/* Allocates and zero-initializes the user-PFN bitmap, sized to cover
   every physical page up to MmHighestPhysicalPage (rounded up to a
   whole number of 32-bit bitmap words). */
MiInitializeUserPfnBitmap(VOID)
{
    PVOID Bitmap;
-
+
    /* Allocate enough buffer for the PFN bitmap and align it on 32-bits */
    /* NOTE(review): the ExAllocatePoolWithTag call below appears truncated
       in this hunk (pool tag argument, closing parenthesis, and any NULL
       check are not visible) — verify against the full source */
    Bitmap = ExAllocatePoolWithTag(NonPagedPool,
                                   (((MmHighestPhysicalPage + 1) + 31) / 32) * 4,
    /* Initialize it and clear all the bits to begin with */
    RtlInitializeBitMap(&MiUserPfnBitMap,
                        Bitmap,
-                        MmHighestPhysicalPage + 1);
+                        (ULONG)MmHighestPhysicalPage + 1);
    RtlClearAllBits(&MiUserPfnBitMap);
}
{
ULONG Position;
KIRQL OldIrql;
-
+
/* Find the first user page */
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
Position = RtlFindSetBits(&MiUserPfnBitMap, 1, 0);
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
if (Position == 0xFFFFFFFF) return 0;
-
+
/* Return it */
+ ASSERT(Position != 0);
+ ASSERT_IS_ROS_PFN(MiGetPfnEntry(Position));
return Position;
}
KIRQL OldIrql;
/* Set the page as a user page */
+ ASSERT(Pfn != 0);
+ ASSERT_IS_ROS_PFN(MiGetPfnEntry(Pfn));
+ ASSERT(!RtlCheckBit(&MiUserPfnBitMap, (ULONG)Pfn));
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
- RtlSetBit(&MiUserPfnBitMap, Pfn);
+ RtlSetBit(&MiUserPfnBitMap, (ULONG)Pfn);
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
}
{
ULONG Position;
KIRQL OldIrql;
-
+
/* Find the next user page */
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
- Position = RtlFindSetBits(&MiUserPfnBitMap, 1, PreviousPfn + 1);
+ Position = RtlFindSetBits(&MiUserPfnBitMap, 1, (ULONG)PreviousPfn + 1);
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
if (Position == 0xFFFFFFFF) return 0;
-
+
/* Return it */
+ ASSERT(Position != 0);
+ ASSERT_IS_ROS_PFN(MiGetPfnEntry(Position));
return Position;
}
/* Removes a page from the legacy user-page (LRU) bitmap. The new (+)
   version asserts the PFN is nonzero, is a ReactOS-tracked PFN, and is
   currently marked, and takes the PFN queued spinlock around the bit
   clear — the old (-) version cleared the bit with no lock held. */
NTAPI
MmRemoveLRUUserPage(PFN_NUMBER Page)
{
+    KIRQL OldIrql;
+
    /* Unset the page as a user page */
-    RtlClearBit(&MiUserPfnBitMap, Page);
+    ASSERT(Page != 0);
+    ASSERT_IS_ROS_PFN(MiGetPfnEntry(Page));
+    ASSERT(RtlCheckBit(&MiUserPfnBitMap, (ULONG)Page));
+    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
+    RtlClearBit(&MiUserPfnBitMap, (ULONG)Page);
+    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
}
BOOLEAN
KIRQL OldIrql;
PPHYSICAL_PAGE Pfn1;
INT LookForZeroedPages;
- ASSERT (KeGetCurrentIrql() <= APC_LEVEL);
-
+ ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
+ DPRINT1("ARM3-DEBUG: Being called with %I64x %I64x %I64x %lx %d %d\n", LowAddress, HighAddress, SkipBytes, TotalBytes, CacheAttribute, MdlFlags);
+
//
// Convert the low address into a PFN
//
LowPage = (PFN_NUMBER)(LowAddress.QuadPart >> PAGE_SHIFT);
-
+
//
// Convert, and normalize, the high address into a PFN
//
- HighPage = (PFN_NUMBER)(HighAddress.QuadPart >> PAGE_SHIFT);
+ HighPage = (PFN_NUMBER)(HighAddress.QuadPart >> PAGE_SHIFT);
if (HighPage > MmHighestPhysicalPage) HighPage = MmHighestPhysicalPage;
-
+
//
// Validate skipbytes and convert them into pages
//
if (BYTE_OFFSET(SkipBytes.LowPart)) return NULL;
SkipPages = (PFN_NUMBER)(SkipBytes.QuadPart >> PAGE_SHIFT);
-
+
+ /* This isn't supported at all */
+ if (SkipPages) DPRINT1("WARNING: Caller requesting SkipBytes, MDL might be mismatched\n");
+
//
// Now compute the number of pages the MDL will cover
//
//
Mdl = MmCreateMdl(NULL, NULL, PageCount << PAGE_SHIFT);
if (Mdl) break;
-
+
//
// This function is not required to return the amount of pages requested
// In fact, it can return as little as 1 page, and callers are supposed
//
PageCount -= (PageCount >> 4);
} while (PageCount);
-
+
//
// Wow, not even a single page was around!
//
if (!Mdl) return NULL;
-
+
//
// This is where the page array starts....
//
MdlPage = (PPFN_NUMBER)(Mdl + 1);
-
+
//
// Lock the PFN database
//
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
-
+
//
// Are we looking for any pages, without discriminating?
//
//
while (PagesFound < PageCount)
{
- //
- // Do we have zeroed pages?
- //
- if (MmZeroedPageListHead.Total)
- {
- //
- // Grab a zero page
- //
- Pfn1 = MiRemoveHeadList(&MmZeroedPageListHead);
- }
- else if (MmFreePageListHead.Total)
+ /* Grab a page */
+ MI_SET_USAGE(MI_USAGE_MDL);
+ MI_SET_PROCESS2("Kernel");
+ Page = MiRemoveAnyPage(0);
+ if (Page == 0)
{
- //
- // Nope, grab an unzeroed page
- //
- Pfn1 = MiRemoveHeadList(&MmFreePageListHead);
- }
- else
- {
- //
- // This is not good... hopefully we have at least SOME pages
- //
+ /* This is not good... hopefully we have at least SOME pages */
ASSERT(PagesFound);
break;
}
-
+
+ /* Grab the page entry for it */
+ Pfn1 = MiGetPfnEntry(Page);
+
//
// Make sure it's really free
//
ASSERT(Pfn1->u3.e2.ReferenceCount == 0);
-
- //
- // Allocate it and mark it
- //
+
+ /* Now setup the page and mark it */
+ Pfn1->u3.e2.ReferenceCount = 1;
+ Pfn1->u2.ShareCount = 1;
+ MI_SET_PFN_DELETED(Pfn1);
+ Pfn1->u4.PteFrame = 0x1FFEDCB;
Pfn1->u3.e1.StartOfAllocation = 1;
Pfn1->u3.e1.EndOfAllocation = 1;
- Pfn1->u3.e2.ReferenceCount = 1;
-
- //
- // Decrease available pages
- //
- MmAvailablePages--;
-
+ Pfn1->u4.VerifierAllocation = 0;
+
//
// Save it into the MDL
//
//
Pfn1 = MiGetPfnEntry(Page);
ASSERT(Pfn1);
-
+
//
// Make sure it's free and if this is our first pass, zeroed
//
if (MiIsPfnInUse(Pfn1)) continue;
if ((Pfn1->u3.e1.PageLocation == ZeroedPageList) != LookForZeroedPages) continue;
-
+
+ /* Remove the page from the free or zero list */
+ ASSERT(Pfn1->u3.e1.ReadInProgress == 0);
+ MI_SET_USAGE(MI_USAGE_MDL);
+ MI_SET_PROCESS2("Kernel");
+ MiUnlinkFreeOrZeroedPage(Pfn1);
+
//
// Sanity checks
//
ASSERT(Pfn1->u3.e2.ReferenceCount == 0);
-
+
//
// Now setup the page and mark it
//
Pfn1->u3.e2.ReferenceCount = 1;
+ Pfn1->u2.ShareCount = 1;
+ MI_SET_PFN_DELETED(Pfn1);
+ Pfn1->u4.PteFrame = 0x1FFEDCB;
Pfn1->u3.e1.StartOfAllocation = 1;
Pfn1->u3.e1.EndOfAllocation = 1;
-
- //
- // Decrease available pages
- //
- MmAvailablePages--;
-
+ Pfn1->u4.VerifierAllocation = 0;
+
//
// Save this page into the MDL
//
*MdlPage++ = Page;
if (++PagesFound == PageCount) break;
}
-
+
//
// If the first pass was enough, don't keep going, otherwise, go again
//
if (PagesFound == PageCount) break;
}
}
-
+
//
// Now release the PFN count
//
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
-
+
//
// We might've found less pages, but not more ;-)
//
    // If we didn't find any pages at all, fail
//
DPRINT1("NO MDL PAGES!\n");
- ExFreePool(Mdl);
+ ExFreePoolWithTag(Mdl, TAG_MDL);
return NULL;
}
-
+
//
// Write out how many pages we found
//
Mdl->ByteCount = (ULONG)(PagesFound << PAGE_SHIFT);
-
+
//
// Terminate the MDL array if there's certain missing pages
//
- if (PagesFound != PageCount) *MdlPage = -1;
-
+ if (PagesFound != PageCount) *MdlPage = LIST_HEAD;
+
//
// Now go back and loop over all the MDL pages
//
// Check if we've reached the end
//
Page = *MdlPage++;
- if (Page == (PFN_NUMBER)-1) break;
-
+ if (Page == LIST_HEAD) break;
+
//
// Get the PFN entry for the page and check if we should zero it out
//
Pfn1 = MiGetPfnEntry(Page);
ASSERT(Pfn1);
- if (Pfn1->u3.e1.PageLocation != ZeroedPageList) MiZeroPage(Page);
+ if (Pfn1->u3.e1.PageLocation != ZeroedPageList) MiZeroPhysicalPage(Page);
Pfn1->u3.e1.PageLocation = ActiveAndValid;
}
-
+
//
- // We're done, mark the pages as locked (should we lock them, though???)
+ // We're done, mark the pages as locked
//
Mdl->Process = NULL;
- Mdl->MdlFlags |= MDL_PAGES_LOCKED;
+ Mdl->MdlFlags |= MDL_PAGES_LOCKED;
return Mdl;
}
VOID
NTAPI
-MmDumpPfnDatabase(VOID)
+MmSetRmapListHeadPage(PFN_NUMBER Pfn, PMM_RMAP_ENTRY ListHead)
{
- ULONG i;
- PPHYSICAL_PAGE Pfn1;
- PCHAR State = "????", Type = "Unknown";
- KIRQL OldIrql;
- ULONG Totals[5] = {0}, FreePages = 0;
-
- KeRaiseIrql(HIGH_LEVEL, &OldIrql);
-
- //
- // Loop the PFN database
- //
- for (i = 0; i <= MmHighestPhysicalPage; i++)
+ KIRQL oldIrql;
+ PMMPFN Pfn1;
+
+ oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
+ Pfn1 = MiGetPfnEntry(Pfn);
+ ASSERT(Pfn1);
+ ASSERT_IS_ROS_PFN(Pfn1);
+
+ if (ListHead)
{
- Pfn1 = MiGetPfnEntry(i);
- if (!Pfn1) continue;
-
- //
- // Get the type
- //
- if (MiIsPfnInUse(Pfn1))
- {
- State = "Used";
- }
- else
- {
- State = "Free";
- Type = "Free";
- FreePages++;
- break;
- }
-
- //
- // Pretty-print the page
- //
- DbgPrint("0x%08p:\t%04s\t%20s\t(%02d) [%08p])\n",
- i << PAGE_SHIFT,
- State,
- Type,
- Pfn1->u3.e2.ReferenceCount,
- Pfn1->RmapListHead);
+ /* Should not be trying to insert an RMAP for a non-active page */
+ ASSERT(MiIsPfnInUse(Pfn1) == TRUE);
+
+ /* Set the list head address */
+ MI_GET_ROS_DATA(Pfn1)->RmapListHead = ListHead;
}
-
- DbgPrint("Nonpaged Pool: %d pages\t[%d KB]\n", Totals[MC_NPPOOL], (Totals[MC_NPPOOL] << PAGE_SHIFT) / 1024);
- DbgPrint("Paged Pool: %d pages\t[%d KB]\n", Totals[MC_PPOOL], (Totals[MC_PPOOL] << PAGE_SHIFT) / 1024);
- DbgPrint("File System Cache: %d pages\t[%d KB]\n", Totals[MC_CACHE], (Totals[MC_CACHE] << PAGE_SHIFT) / 1024);
- DbgPrint("Process Working Set: %d pages\t[%d KB]\n", Totals[MC_USER], (Totals[MC_USER] << PAGE_SHIFT) / 1024);
- DbgPrint("System: %d pages\t[%d KB]\n", Totals[MC_SYSTEM], (Totals[MC_SYSTEM] << PAGE_SHIFT) / 1024);
- DbgPrint("Free: %d pages\t[%d KB]\n", FreePages, (FreePages << PAGE_SHIFT) / 1024);
-
- KeLowerIrql(OldIrql);
-}
+ else
+ {
+ /* ReactOS semantics dictate the page is STILL active right now */
+ ASSERT(MiIsPfnInUse(Pfn1) == TRUE);
-VOID
-NTAPI
-MmSetRmapListHeadPage(PFN_NUMBER Pfn, struct _MM_RMAP_ENTRY* ListHead)
-{
- KIRQL oldIrql;
-
- oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
- MiGetPfnEntry(Pfn)->RmapListHead = (LONG)ListHead;
- KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
+ /* In this case, the RMAP is actually being removed, so clear field */
+ MI_GET_ROS_DATA(Pfn1)->RmapListHead = NULL;
+
+ /* ReactOS semantics will now release the page, which will make it free and enter a colored list */
+ }
+
+ KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
}
/* Returns the head of the reverse-mapping (RMAP) list for the given PFN,
   under the PFN database lock. The new (+) version reads the list head
   from the per-page ReactOS side data (MI_GET_ROS_DATA) instead of the
   raw RmapListHead field, and asserts the PFN entry exists, is a
   ReactOS-tracked PFN, and is active (a non-active page must not have
   an RMAP). */
-struct _MM_RMAP_ENTRY*
+PMM_RMAP_ENTRY
NTAPI
MmGetRmapListHeadPage(PFN_NUMBER Pfn)
{
-    KIRQL oldIrql;
-    struct _MM_RMAP_ENTRY* ListHead;
-
-    oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
-    ListHead = (struct _MM_RMAP_ENTRY*)MiGetPfnEntry(Pfn)->RmapListHead;
-    KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
-
-    return(ListHead);
+    KIRQL oldIrql;
+    PMM_RMAP_ENTRY ListHead;
+    PMMPFN Pfn1;
+
+    /* Lock PFN database */
+    oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
+
+    /* Get the entry */
+    Pfn1 = MiGetPfnEntry(Pfn);
+    ASSERT(Pfn1);
+    ASSERT_IS_ROS_PFN(Pfn1);
+
+    /* Get the list head */
+    ListHead = MI_GET_ROS_DATA(Pfn1)->RmapListHead;
+
+    /* Should not have an RMAP for a non-active page */
+    ASSERT(MiIsPfnInUse(Pfn1) == TRUE);
+
+    /* Release PFN database and return rmap list head */
+    KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
+    return ListHead;
}
/* Stores the saved swap entry for a PFN under the PFN database lock.
   The new (+) version writes it into the ReactOS side data
   (MI_GET_ROS_DATA(...)->SwapEntry) instead of overloading u1.WsIndex,
   and asserts the PFN entry exists and is ReactOS-tracked.
   NOTE(review): an NTAPI specifier between VOID and the function name
   appears to have been elided by this hunk — confirm in full source. */
VOID
MmSetSavedSwapEntryPage(PFN_NUMBER Pfn, SWAPENTRY SwapEntry)
{
    KIRQL oldIrql;
+    PPHYSICAL_PAGE Page;
+
+    Page = MiGetPfnEntry(Pfn);
+    ASSERT(Page);
+    ASSERT_IS_ROS_PFN(Page);
    oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
-    MiGetPfnEntry(Pfn)->u1.WsIndex = SwapEntry;
+    MI_GET_ROS_DATA(Page)->SwapEntry = SwapEntry;
    KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
}
{
SWAPENTRY SwapEntry;
KIRQL oldIrql;
+ PPHYSICAL_PAGE Page;
+
+ Page = MiGetPfnEntry(Pfn);
+ ASSERT(Page);
+ ASSERT_IS_ROS_PFN(Page);
oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
- SwapEntry = MiGetPfnEntry(Pfn)->u1.WsIndex;
+ SwapEntry = MI_GET_ROS_DATA(Page)->SwapEntry;
KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
return(SwapEntry);
DPRINT("MmReferencePage(PysicalAddress %x)\n", Pfn << PAGE_SHIFT);
- if (Pfn == 0 || Pfn > MmHighestPhysicalPage)
- {
- return;
- }
+ ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
+ ASSERT(Pfn != 0);
+ ASSERT(Pfn <= MmHighestPhysicalPage);
Page = MiGetPfnEntry(Pfn);
ASSERT(Page);
+ ASSERT_IS_ROS_PFN(Page);
+ ASSERT(Page->u3.e2.ReferenceCount != 0);
Page->u3.e2.ReferenceCount++;
}
oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
Page = MiGetPfnEntry(Pfn);
ASSERT(Page);
+ ASSERT_IS_ROS_PFN(Page);
RCount = Page->u3.e2.ReferenceCount;
return MiIsPfnInUse(MiGetPfnEntry(Pfn));
}
-VOID
-NTAPI
-MiSetConsumer(IN PFN_NUMBER Pfn,
- IN ULONG Type)
-{
- MiGetPfnEntry(Pfn)->u3.e1.PageLocation = ActiveAndValid;
-}
-
/* Drops one reference on a legacy (ReactOS-tracked) PFN. When the count
   reaches zero, the new (+) version frees the per-page ReactOS side data
   ('RsPf' pool allocation), clears the AweAllocation marker so the page
   is no longer treated as a ROS PFN, and hands the page to
   MiInsertPageInFreeList; the old (-) version pushed it onto the local
   MmFreePageListHead and woke the (now-removed) zero-page thread.
   NOTE(review): callers are presumably responsible for holding the PFN
   lock here — no lock is taken in this function; confirm. */
VOID
NTAPI
MmDereferencePage(PFN_NUMBER Pfn)
{
    PPHYSICAL_PAGE Page;
-
    DPRINT("MmDereferencePage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
    Page = MiGetPfnEntry(Pfn);
    ASSERT(Page);
+    ASSERT_IS_ROS_PFN(Page);
+    ASSERT(Page->u3.e2.ReferenceCount != 0);
    Page->u3.e2.ReferenceCount--;
    if (Page->u3.e2.ReferenceCount == 0)
    {
-        MmAvailablePages++;
-        Page->u3.e1.PageLocation = FreePageList;
-        MiInsertInListTail(&MmFreePageListHead, Page);
-        if (MmFreePageListHead.Total > 8 && 0 == KeReadStateEvent(&ZeroPageThreadEvent))
-        {
-            KeSetEvent(&ZeroPageThreadEvent, IO_NO_INCREMENT, FALSE);
-        }
+        /* Mark the page temporarily as valid, we're going to make it free soon */
+        Page->u3.e1.PageLocation = ActiveAndValid;
+
+        /* It's not a ROS PFN anymore */
+        Page->u4.AweAllocation = FALSE;
+        ExFreePoolWithTag(MI_GET_ROS_DATA(Page), 'RsPf');
+        Page->RosMmData = 0;
+
+        /* Bring it back into the free list */
+        DPRINT("Legacy free: %lx\n", Pfn);
+        MiInsertPageInFreeList(Pfn);
    }
}
MmAllocPage(ULONG Type)
{
PFN_NUMBER PfnOffset;
- PPHYSICAL_PAGE PageDescriptor;
- BOOLEAN NeedClear = FALSE;
-
- DPRINT("MmAllocPage()\n");
-
- if (MmZeroedPageListHead.Total == 0)
- {
- if (MmFreePageListHead.Total == 0)
- {
- /* Check if this allocation is for the PFN DB itself */
- if (MmNumberOfPhysicalPages == 0)
- {
- ASSERT(FALSE);
- }
-
- DPRINT1("MmAllocPage(): Out of memory\n");
- return 0;
- }
- PageDescriptor = MiRemoveHeadList(&MmFreePageListHead);
-
- NeedClear = TRUE;
- }
- else
- {
- PageDescriptor = MiRemoveHeadList(&MmZeroedPageListHead);
- }
+ PMMPFN Pfn1;
- PageDescriptor->u3.e2.ReferenceCount = 1;
+ PfnOffset = MiRemoveZeroPage(MI_GET_NEXT_COLOR());
- MmAvailablePages--;
-
- PfnOffset = MiGetPfnEntryIndex(PageDescriptor);
- if ((NeedClear) && (Type != MC_SYSTEM))
+ if (!PfnOffset)
{
- MiZeroPage(PfnOffset);
+ DPRINT1("MmAllocPage(): Out of memory\n");
+ return 0;
}
-
- PageDescriptor->u3.e1.PageLocation = ActiveAndValid;
- return PfnOffset;
-}
-
-NTSTATUS
-NTAPI
-MiZeroPage(PFN_NUMBER Page)
-{
- KIRQL Irql;
- PVOID TempAddress;
-
- Irql = KeRaiseIrqlToDpcLevel();
- TempAddress = MiMapPageToZeroInHyperSpace(Page);
- if (TempAddress == NULL)
- {
- return(STATUS_NO_MEMORY);
- }
- memset(TempAddress, 0, PAGE_SIZE);
- MiUnmapPagesInZeroSpace(TempAddress, 1);
- KeLowerIrql(Irql);
- return(STATUS_SUCCESS);
-}
-
-NTSTATUS
-NTAPI
-MmZeroPageThreadMain(PVOID Ignored)
-{
- NTSTATUS Status;
- KIRQL oldIrql;
- PPHYSICAL_PAGE PageDescriptor;
- PFN_NUMBER Pfn;
- ULONG Count;
- /* Free initial kernel memory */
- //MiFreeInitMemory();
+ DPRINT("Legacy allocate: %lx\n", PfnOffset);
+ Pfn1 = MiGetPfnEntry(PfnOffset);
+ Pfn1->u3.e2.ReferenceCount = 1;
+ Pfn1->u3.e1.PageLocation = ActiveAndValid;
- /* Set our priority to 0 */
- KeGetCurrentThread()->BasePriority = 0;
- KeSetPriorityThread(KeGetCurrentThread(), 0);
+ /* This marks the PFN as a ReactOS PFN */
+ Pfn1->u4.AweAllocation = TRUE;
- while(1)
- {
- Status = KeWaitForSingleObject(&ZeroPageThreadEvent,
- 0,
- KernelMode,
- FALSE,
- NULL);
-
- if (ZeroPageThreadShouldTerminate)
- {
- DPRINT1("ZeroPageThread: Terminating\n");
- return STATUS_SUCCESS;
- }
- Count = 0;
- oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
- while (MmFreePageListHead.Total)
- {
- PageDescriptor = MiRemoveHeadList(&MmFreePageListHead);
- /* We set the page to used, because MmCreateVirtualMapping failed with unused pages */
- KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
- Pfn = MiGetPfnEntryIndex(PageDescriptor);
- Status = MiZeroPage(Pfn);
-
- oldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
- if (NT_SUCCESS(Status))
- {
- MiInsertZeroListAtBack(Pfn);
- Count++;
- }
- else
- {
- MiInsertInListTail(&MmFreePageListHead, PageDescriptor);
- PageDescriptor->u3.e1.PageLocation = FreePageList;
- }
-
- }
- DPRINT("Zeroed %d pages.\n", Count);
- KeResetEvent(&ZeroPageThreadEvent);
- KeReleaseQueuedSpinLock(LockQueuePfnLock, oldIrql);
- }
+ /* Allocate the extra ReactOS Data and zero it out */
+ Pfn1->RosMmData = (LONG)ExAllocatePoolWithTag(NonPagedPool, sizeof(MMROSPFN), 'RsPf');
+ ASSERT(MI_GET_ROS_DATA(Pfn1) != NULL);
+ ASSERT_IS_ROS_PFN(Pfn1);
+ MI_GET_ROS_DATA(Pfn1)->SwapEntry = 0;
+ MI_GET_ROS_DATA(Pfn1)->RmapListHead = NULL;
- return STATUS_SUCCESS;
+ return PfnOffset;
}
/* EOF */