#define NDEBUG
#include <debug.h>
#line 15 "ARM³::INIT"
#define MODULE_INVOLVED_IN_ARM3
#include "miarm.h"
//
/* Smallest initial nonpaged pool the system will configure: 256 KB */
SIZE_T MmMinimumNonPagedPoolSize = 256 * 1024;
/* Extra initial nonpaged pool granted for each MB of physical RAM: 32 KB */
ULONG MmMinAdditionNonPagedPoolPerMb = 32 * 1024;
/* Default cap on the nonpaged pool: 1 MB */
SIZE_T MmDefaultMaximumNonPagedPool = 1024 * 1024;
/* Extra maximum nonpaged pool granted for each MB of physical RAM: 400 KB */
ULONG MmMaxAdditionNonPagedPoolPerMb = 400 * 1024;
//
// Right now we call this the "ARM³ Nonpaged Pool" and it begins somewhere after
// the PFN database (which starts at 0xB0000000).
//
// The expansion nonpaged pool, on the other hand, can grow much bigger (400MB
// for a 1GB system). On ARM³ however, it is currently capped at 128MB.
//
// The address where the initial nonpaged pool starts is aptly named
// a System PTE, it is always valid, until the System PTE is torn down.
//
// System PTEs are actually composed of two "spaces", the system space proper,
// and the nonpaged pool expansion space. The latter, as we've already seen,
// begins at MmNonPagedPoolExpansionStart. Based on the number of System PTEs
// that the system will support, the remaining address space below this address
// is used to hold the system space PTEs. This address, in turn, is held in the
// drivers, followed by a 4MB area containing the session's working set. This is
// then followed by a 20MB mapped view area and finally by the session's paged
// pool, by default 16MB.
//
// On a normal system, this results in session space occupying the region from
// 0xBD000000 to 0xC0000000
//
*/
/* Floor on the number of free pages the memory manager tries to keep
 * available — presumably consulted by the working-set/balancer paths,
 * which are not visible in this chunk; TODO confirm against callers. */
PFN_NUMBER MmMinimumFreePages = 26;
/*
* This number indicates how many pages we consider to be a low limit of having
* "plenty" of free memory.
*
MxGetNextPage(IN PFN_NUMBER PageCount)
{
PFN_NUMBER Pfn;
-
+
/* Make sure we have enough pages */
if (PageCount > MxFreeDescriptor->PageCount)
{
MxOldFreeDescriptor.PageCount,
PageCount);
}
-
+
/* Use our lowest usable free pages */
Pfn = MxFreeDescriptor->BasePage;
MxFreeDescriptor->BasePage += PageCount;
MiComputeColorInformation(VOID)
{
ULONG L2Associativity;
-
+
/* Check if no setting was provided already */
if (!MmSecondaryColors)
{
/* Get L2 cache information */
L2Associativity = KeGetPcr()->SecondLevelCacheAssociativity;
-
+
/* The number of colors is the number of cache bytes by set/way */
MmSecondaryColors = KeGetPcr()->SecondLevelCacheSize;
if (L2Associativity) MmSecondaryColors /= L2Associativity;
}
-
+
/* Now convert cache bytes into pages */
MmSecondaryColors >>= PAGE_SHIFT;
if (!MmSecondaryColors)
/* Set the maximum */
MmSecondaryColors = MI_MAX_SECONDARY_COLORS;
}
-
+
/* Make sure there aren't too little colors */
if (MmSecondaryColors < MI_MIN_SECONDARY_COLORS)
{
/* Set the default */
MmSecondaryColors = MI_SECONDARY_COLORS;
}
-
+
/* Finally make sure the colors are a power of two */
if (MmSecondaryColors & (MmSecondaryColors - 1))
{
MmSecondaryColors = MI_SECONDARY_COLORS;
}
}
-
+
/* Compute the mask and store it */
MmSecondaryColorMask = MmSecondaryColors - 1;
- KeGetCurrentPrcb()->SecondaryColorMask = MmSecondaryColorMask;
+ KeGetCurrentPrcb()->SecondaryColorMask = MmSecondaryColorMask;
}
VOID
ULONG i;
PMMPTE PointerPte, LastPte;
MMPTE TempPte = ValidKernelPte;
-
+
/* The color table starts after the ARM3 PFN database */
MmFreePagesByColor[0] = (PMMCOLOR_TABLES)&MmPfnDatabase[MmHighestPhysicalPage + 1];
-
+
/* Loop the PTEs. We have two color tables for each secondary color */
PointerPte = MiAddressToPte(&MmFreePagesByColor[0][0]);
LastPte = MiAddressToPte((ULONG_PTR)MmFreePagesByColor[0] +
/* Zero out the page */
RtlZeroMemory(MiPteToAddress(PointerPte), PAGE_SIZE);
}
-
+
/* Next */
PointerPte++;
}
-
+
/* Now set the address of the next list, right after this one */
MmFreePagesByColor[1] = &MmFreePagesByColor[0][MmSecondaryColors];
-
+
/* Now loop the lists to set them up */
for (i = 0; i < MmSecondaryColors; i++)
{
PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
PMMPTE PointerPte, LastPte;
MMPTE TempPte = ValidKernelPte;
-
+
/* Get current page data, since we won't be using MxGetNextPage as it would corrupt our state */
FreePage = MxFreeDescriptor->BasePage;
FreePageCount = MxFreeDescriptor->PageCount;
PagesLeft = 0;
-
+
/* Loop the memory descriptors */
NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
NextEntry = MdBlock->ListEntry.Flink;
continue;
}
-
+
/* Next, check if this is our special free descriptor we've found */
if (MdBlock == MxFreeDescriptor)
{
BasePage = MdBlock->BasePage;
PageCount = MdBlock->PageCount;
}
-
+
/* Get the PTEs for this range */
PointerPte = MiAddressToPte(&MmPfnDatabase[BasePage]);
LastPte = MiAddressToPte(((ULONG_PTR)&MmPfnDatabase[BasePage + PageCount]) - 1);
DPRINT("MD Type: %lx Base: %lx Count: %lx\n", MdBlock->MemoryType, BasePage, PageCount);
-
+
/* Loop them */
while (PointerPte <= LastPte)
{
/* Use the next free page */
TempPte.u.Hard.PageFrameNumber = FreePage;
ASSERT(FreePageCount != 0);
-
+
/* Consume free pages */
FreePage++;
FreePageCount--;
MxOldFreeDescriptor.PageCount,
1);
}
-
+
/* Write out this PTE */
PagesLeft++;
MI_WRITE_VALID_PTE(PointerPte, TempPte);
-
+
/* Zero this page */
RtlZeroMemory(MiPteToAddress(PointerPte), PAGE_SIZE);
}
-
+
/* Next! */
PointerPte++;
}
/* Do the next address range */
NextEntry = MdBlock->ListEntry.Flink;
}
-
+
/* Now update the free descriptors to consume the pages we used up during the PFN allocation loop */
MxFreeDescriptor->BasePage = FreePage;
MxFreeDescriptor->PageCount = FreePageCount;
PFN_NUMBER PageFrameIndex, StartupPdIndex, PtePageIndex;
PMMPFN Pfn1, Pfn2;
ULONG_PTR BaseAddress = 0;
-
+
/* PFN of the startup page directory */
StartupPdIndex = PFN_FROM_PTE(MiAddressToPde(PDE_BASE));
-
+
/* Start with the first PDE and scan them all */
PointerPde = MiAddressToPde(NULL);
Count = PD_COUNT * PDE_COUNT;
{
/* Get the PFN from it */
PageFrameIndex = PFN_FROM_PTE(PointerPde);
-
+
/* Do we want a PFN entry for this page? */
if (MiIsRegularMemory(LoaderBlock, PageFrameIndex))
{
/* No PFN entry */
Pfn1 = NULL;
}
-
+
/* Now get the PTE and scan the pages */
PointerPte = MiAddressToPte(BaseAddress);
for (j = 0; j < PTE_COUNT; j++)
/* Increase the shared count of the PFN entry for the PDE */
ASSERT(Pfn1 != NULL);
Pfn1->u2.ShareCount++;
-
+
/* Now check if the PTE is valid memory too */
PtePageIndex = PFN_FROM_PTE(PointerPte);
if (MiIsRegularMemory(LoaderBlock, PtePageIndex))
}
}
}
-
+
/* Next PTE */
PointerPte++;
BaseAddress += PAGE_SIZE;
/* Next PDE mapped address */
BaseAddress += PDE_MAPPED_VA;
}
-
+
/* Next PTE */
PointerPde++;
}
{
PMMPFN Pfn1;
PMMPDE PointerPde;
-
+
/* Grab the lowest page and check if it has no real references */
Pfn1 = MiGetPfnEntry(MmLowestPhysicalPage);
if (!(MmLowestPhysicalPage) && !(Pfn1->u3.e2.ReferenceCount))
Pfn1->u3.e2.ReferenceCount = 0xFFF0;
Pfn1->u3.e1.PageLocation = ActiveAndValid;
Pfn1->u3.e1.CacheAttribute = MiNonCached;
- }
+ }
}
VOID
PMMPTE PointerPte;
PMMPDE PointerPde;
KIRQL OldIrql;
-
+
/* Now loop through the descriptors */
NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
while (NextEntry != &LoaderBlock->MemoryDescriptorListHead)
/* In which case we'll trim the descriptor to go as high as we can */
PageCount = MmHighestPhysicalPage + 1 - PageFrameIndex;
MdBlock->PageCount = PageCount;
-
+
/* But if there's nothing left to trim, we got too high, so quit */
if (!PageCount) break;
}
{
/* Check for bad RAM */
case LoaderBad:
-
+
DPRINT1("You either have specified /BURNMEMORY or damaged RAM modules.\n");
break;
/* Get the last page of this descriptor. Note we loop backwards */
PageFrameIndex += PageCount - 1;
Pfn1 = MiGetPfnEntry(PageFrameIndex);
-
+
/* Lock the PFN Database */
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
while (PageCount--)
Pfn1--;
PageFrameIndex--;
}
-
+
/* Release PFN database */
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
-
+
/* Done with this block */
break;
#if MI_TRACE_PFNS
Pfn1->PfnUsage = MI_USAGE_BOOT_DRIVER;
#endif
-
+
/* Check for RAM disk page */
if (MdBlock->MemoryType == LoaderXIPRom)
{
Pfn1->u3.e1.PrototypePte = 1;
}
}
-
+
/* Advance page structures */
Pfn1++;
PageFrameIndex++;
{
PMMPTE PointerPte, LastPte;
PMMPFN Pfn1;
-
+
/* Loop the PFN database page */
PointerPte = MiAddressToPte(MiGetPfnEntry(MmLowestPhysicalPage));
LastPte = MiAddressToPte(MiGetPfnEntry(MmHighestPhysicalPage));
Pfn1->PfnUsage = MI_USAGE_PFN_DATABASE;
#endif
}
-
+
/* Next */
PointerPte++;
}
{
/* Scan memory and start setting up PFN entries */
MiBuildPfnDatabaseFromPages(LoaderBlock);
-
+
/* Add the zero page */
MiBuildPfnDatabaseZeroPage();
-
+
/* Scan the loader block and build the rest of the PFN database */
MiBuildPfnDatabaseFromLoaderBlock(LoaderBlock);
-
- /* Finally add the pages for the PFN database itself */
+
+ /* Finally add the pages for the PFN database itself */
MiBuildPfnDatabaseSelf();
}
MiAdjustWorkingSetManagerParameters(IN BOOLEAN Client)
{
/* This function needs to do more work, for now, we tune page minimums */
-
+
/* Check for a system with around 64MB RAM or more */
if (MmNumberOfPhysicalPages >= (63 * _1MB) / PAGE_SIZE)
{
/* Setup the ACL inside it */
Status = RtlCreateAcl(Dacl, DaclLength, ACL_REVISION);
if (!NT_SUCCESS(Status)) goto CleanUp;
-
+
/* Add query rights for everyone */
Status = RtlAddAccessAllowedAce(Dacl,
ACL_REVISION,
SYNCHRONIZE | EVENT_QUERY_STATE | READ_CONTROL,
SeWorldSid);
if (!NT_SUCCESS(Status)) goto CleanUp;
-
+
/* Full rights for the admin */
Status = RtlAddAccessAllowedAce(Dacl,
ACL_REVISION,
EVENT_ALL_ACCESS,
SeAliasAdminsSid);
if (!NT_SUCCESS(Status)) goto CleanUp;
-
+
/* As well as full rights for the system */
Status = RtlAddAccessAllowedAce(Dacl,
ACL_REVISION,
EVENT_ALL_ACCESS,
SeLocalSystemSid);
if (!NT_SUCCESS(Status)) goto CleanUp;
-
+
/* Set this DACL inside the SD */
Status = RtlSetDaclSecurityDescriptor(&SecurityDescriptor,
TRUE,
Dacl,
FALSE);
if (!NT_SUCCESS(Status)) goto CleanUp;
-
+
/* Setup the event attributes, making sure it's a permanent one */
InitializeObjectAttributes(&ObjectAttributes,
Name,
KernelMode,
(PVOID*)Event,
NULL);
- ZwClose (EventHandle);
+ ZwClose (EventHandle);
}
/* Return status */
switch (Pfn1->u3.e1.PageLocation)
{
case ActiveAndValid:
-
+
Consumer = "Active and Valid";
ActivePages++;
break;
-
+
case ZeroedPageList:
Consumer = "Zero Page List";
FreePages++;
break;//continue;
-
+
case FreePageList:
-
+
Consumer = "Free Page List";
FreePages++;
break;//continue;
-
+
default:
-
+
Consumer = "Other (ASSERT!)";
OtherPages++;
break;
}
-
+
#if MI_TRACE_PFNS
/* Add into bucket */
UsageBucket[Pfn1->PfnUsage]++;
"is disabled");
#endif
}
-
+
DbgPrint("Active: %5d pages\t[%6d KB]\n", ActivePages, (ActivePages << PAGE_SHIFT) / 1024);
DbgPrint("Free: %5d pages\t[%6d KB]\n", FreePages, (FreePages << PAGE_SHIFT) / 1024);
DbgPrint("-----------------------------------------\n");
PLIST_ENTRY NextEntry;
PFN_NUMBER PageCount = 0;
PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
-
+
//
// Now loop through the descriptors
//
//
PageCount += MdBlock->PageCount;
}
-
+
//
// Try the next descriptor
//
NextEntry = MdBlock->ListEntry.Flink;
}
-
+
//
// Return the total
//
PFN_NUMBER NextPage = -1, PageCount = 0;
PPHYSICAL_MEMORY_DESCRIPTOR Buffer, NewBuffer;
PMEMORY_ALLOCATION_DESCRIPTOR MdBlock;
-
+
//
// Scan the memory descriptors
//
InitialRuns++;
NextEntry = NextEntry->Flink;
}
-
+
//
// Allocate the maximum we'll ever need
//
// For now that's how many runs we have
//
Buffer->NumberOfRuns = InitialRuns;
-
+
//
// Now loop through the descriptors again
//
// Add this to our running total
//
PageCount += MdBlock->PageCount;
-
+
//
// Check if the next page is described by the next descriptor
- //
+ //
if (MdBlock->BasePage == NextPage)
{
//
Buffer->Run[Run].BasePage = MdBlock->BasePage;
Buffer->Run[Run].PageCount = MdBlock->PageCount;
NextPage = Buffer->Run[Run].BasePage + Buffer->Run[Run].PageCount;
-
+
//
// And in this case, increase the number of runs
//
Run++;
}
}
-
+
//
// Try the next descriptor
//
NextEntry = MdBlock->ListEntry.Flink;
}
-
+
//
// We should not have been able to go past our initial estimate
//
Buffer->Run,
sizeof(PHYSICAL_MEMORY_RUN) * Run);
ExFreePool(Buffer);
-
+
//
// Now use the new buffer
//
Buffer = NewBuffer;
}
}
-
+
//
// Write the final numbers, and return it
//
PointerPte = MiAddressToPte(PDE_BASE);
ASSERT(PD_COUNT == 1);
MmSystemPageDirectory[0] = PFN_FROM_PTE(PointerPte);
-
+
//
// Allocate a system PTE which will hold a copy of the page directory
//
//
// Let's be really sure this doesn't overflow into nonpaged system VA
//
- ASSERT((MmSizeOfPagedPoolInBytes + (ULONG_PTR)MmPagedPoolStart) <=
+ ASSERT((MmSizeOfPagedPoolInBytes + (ULONG_PTR)MmPagedPoolStart) <=
(ULONG_PTR)MmNonPagedSystemStart);
//
// So now get the PDE for paged pool and zero it out
//
PointerPde = MiAddressToPde(MmPagedPoolStart);
-
+
#if (_MI_PAGING_LEVELS >= 3)
/* On these systems, there's no double-mapping, so instead, the PPE and PXEs
* are setup to span the entire paged pool area, so there's no need for the
MiHighPagedPoolThreshold = (60 * _1MB) >> PAGE_SHIFT;
MiHighPagedPoolThreshold = min(MiHighPagedPoolThreshold, (Size * 2) / 5);
ASSERT(MiLowPagedPoolThreshold < MiHighPagedPoolThreshold);
-
+
/* Setup the global session space */
MiInitializeSystemSpaceMap(NULL);
}
"LoaderReserve ",
"LoaderXIPRom "
};
-
+
DPRINT1("Base\t\tLength\t\tType\n");
for (NextEntry = KeLoaderBlock->MemoryDescriptorListHead.Flink;
NextEntry != &KeLoaderBlock->MemoryDescriptorListHead;
PVOID Bitmap;
PPHYSICAL_MEMORY_RUN Run;
PFN_NUMBER PageCount;
-
+
/* Dump memory descriptors */
if (MiDbgEnableMdDump) MiDbgDumpMemoryDescriptors();
-
+
//
// Instantiate memory that we don't consider RAM/usable
// We use the same exclusions that Windows does, in order to try to be
MiHighPagedPoolEvent = &MiTempEvent;
MiLowNonPagedPoolEvent = &MiTempEvent;
MiHighNonPagedPoolEvent = &MiTempEvent;
-
+
//
// Define the basic user vs. kernel address space separation
//
MmSystemRangeStart = (PVOID)KSEG0_BASE;
MmUserProbeAddress = (ULONG_PTR)MmSystemRangeStart - 0x10000;
MmHighestUserAddress = (PVOID)(MmUserProbeAddress - 1);
-
+
/* Highest PTE and PDE based on the addresses above */
MiHighestUserPte = MiAddressToPte(MmHighestUserAddress);
MiHighestUserPde = MiAddressToPde(MmHighestUserAddress);
MmBootImageSize *= PAGE_SIZE;
MmBootImageSize = (MmBootImageSize + PDE_MAPPED_VA - 1) & ~(PDE_MAPPED_VA - 1);
ASSERT((MmBootImageSize % PDE_MAPPED_VA) == 0);
-
+
//
// Set the size of session view, pool, and image
//
MmSessionViewSize = MI_SESSION_VIEW_SIZE;
MmSessionPoolSize = MI_SESSION_POOL_SIZE;
MmSessionImageSize = MI_SESSION_IMAGE_SIZE;
-
+
//
// Set the size of system view
//
MmSystemViewSize = MI_SYSTEM_VIEW_SIZE;
-
+
//
// This is where it all ends
//
MiSessionImageEnd = (PVOID)PTE_BASE;
-
+
//
// This is where we will load Win32k.sys and the video driver
//
MiSessionImageStart = (PVOID)((ULONG_PTR)MiSessionImageEnd -
MmSessionImageSize);
-
+
//
// So the view starts right below the session working set (itself below
// the image area)
//
MiSessionViewStart = (PVOID)((ULONG_PTR)MiSessionImageEnd -
MmSessionImageSize -
- MI_SESSION_WORKING_SET_SIZE -
+ MI_SESSION_WORKING_SET_SIZE -
MmSessionViewSize);
-
+
//
// Session pool follows
//
MiSessionPoolEnd = MiSessionViewStart;
MiSessionPoolStart = (PVOID)((ULONG_PTR)MiSessionPoolEnd -
MmSessionPoolSize);
-
+
//
// And it all begins here
//
MmSessionBase = MiSessionPoolStart;
-
+
//
// Sanity check that our math is correct
//
ASSERT((ULONG_PTR)MmSessionBase + MmSessionSize == PTE_BASE);
-
+
//
// Session space ends wherever image session space ends
//
MiSessionSpaceEnd = MiSessionImageEnd;
-
+
//
// System view space ends at session space, so now that we know where
// this is, we can compute the base address of system view space itself.
MiSessionImagePteEnd = MiAddressToPte(MiSessionImageEnd);
MiSessionBasePte = MiAddressToPte(MmSessionBase);
MiSessionLastPte = MiAddressToPte(MiSessionSpaceEnd);
-
+
/* Initialize the user mode image list */
InitializeListHead(&MmLoadedUserImageList);
-
+
/* Initialize the paged pool mutex */
KeInitializeGuardedMutex(&MmPagedPoolMutex);
-
+
/* Initialize the Loader Lock */
- KeInitializeMutant(&MmSystemLoadLock, FALSE);
+ KeInitializeMutant(&MmSystemLoadLock, FALSE);
/* Set the zero page event */
KeInitializeEvent(&MmZeroingPageEvent, SynchronizationEvent, FALSE);
MmZeroingPageThreadActive = FALSE;
-
+
//
// Count physical pages on the system
//
PageCount = MiPagesInLoaderBlock(LoaderBlock, IncludeType);
-
+
//
// Check if this is a machine with less than 19MB of RAM
//
MmNumberOfSystemPtes <<= 1;
}
}
-
+
DPRINT("System PTE count has been tuned to %d (%d bytes)\n",
MmNumberOfSystemPtes, MmNumberOfSystemPtes * PAGE_SIZE);
-
+
/* Initialize the working set lock */
ExInitializePushLock((PULONG_PTR)&MmSystemCacheWs.WorkingSetMutex);
-
+
/* Set commit limit */
MmTotalCommitLimit = 2 * _1GB;
MmTotalCommitLimitMaximum = MmTotalCommitLimit;
-
+
/* Has the allocation fragment been setup? */
if (!MmAllocationFragment)
{
/* Convert from 1KB fragments to pages */
MmAllocationFragment *= _1KB;
MmAllocationFragment = ROUND_TO_PAGES(MmAllocationFragment);
-
+
/* Don't let it past the maximum */
MmAllocationFragment = min(MmAllocationFragment,
MI_MAX_ALLOCATION_FRAGMENT);
-
+
/* Don't let it too small either */
MmAllocationFragment = max(MmAllocationFragment,
MI_MIN_ALLOCATION_FRAGMENT);
}
-
- /* Initialize the platform-specific parts */
+
+ /* Initialize the platform-specific parts */
MiInitMachineDependent(LoaderBlock);
//
//
MmPhysicalMemoryBlock = MmInitializeMemoryLimits(LoaderBlock,
IncludeType);
-
+
//
// Allocate enough buffer for the PFN bitmap
// Align it up to a 32-bit boundary
MmHighestPhysicalPage,
0x101);
}
-
+
//
// Initialize it and clear all the bits to begin with
//
Bitmap,
MmHighestPhysicalPage + 1);
RtlClearAllBits(&MiPfnBitMap);
-
+
//
// Loop physical memory runs
//
RtlSetBits(&MiPfnBitMap, Run->BasePage, Run->PageCount);
}
}
-
+
/* Look for large page cache entries that need caching */
MiSyncCachedRanges();
MmSystemSize = MmMediumSystem;
MmSystemCacheWsMinimum += 400;
}
-
+
/* Check for less than 24MB */
if (MmNumberOfPhysicalPages < ((24 * _1MB) / PAGE_SIZE))
{
}
}
}
-
+
/* Check for more than 33 MB */
if (MmNumberOfPhysicalPages > ((33 * _1MB) / PAGE_SIZE))
{
/* Add another 500 pages to the cache */
MmSystemCacheWsMinimum += 500;
}
-
+
/* Now setup the shared user data fields */
ASSERT(SharedUserData->NumberOfPhysicalPages == 0);
SharedUserData->NumberOfPhysicalPages = MmNumberOfPhysicalPages;
DPRINT1("System cache working set too big\n");
return FALSE;
}
-
+
/* Initialize the system cache */
//MiInitializeSystemCache(MmSystemCacheWsMinimum, MmAvailablePages);
-
+
/* Update the commit limit */
MmTotalCommitLimit = MmAvailablePages;
if (MmTotalCommitLimit > 1024) MmTotalCommitLimit -= 1024;
MmTotalCommitLimitMaximum = MmTotalCommitLimit;
-
+
/* Size up paged pool and build the shadow system page directory */
MiBuildPagedPool();
-
+
/* Debugger physical memory support is now ready to be used */
MmDebugPte = MiAddressToPte(MiDebugMapping);
/* Initialize the loaded module list */
MiInitializeLoadedModuleList(LoaderBlock);
}
-
+
//
// Always return success for now
//