#define NDEBUG
#include <debug.h>
#line 15 "ARM³::MDLSUP"
#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"
/* GLOBALS ********************************************************************/

BOOLEAN MmTrackPtes;
BOOLEAN MmTrackLockedPages;
SIZE_T MmSystemLockPagesCount;
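/* MmTrackPtes and MmTrackLockedPages gate optional PTE and locked-page
   tracking for debugging; MmSystemLockPagesCount counts pages currently
   locked on behalf of the system and is adjusted with interlocked
   operations further down in this file. */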
IN SIZE_T Length)
{
SIZE_T Size;

//
// Check if we don't have an MDL built
//
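/* Sizing sketch (an assumption, since the actual computation is elided
   here): Size covers the fixed MDL header plus one PFN_NUMBER slot per
   spanned page, as MmSizeOfMdl would compute it:

       Size = sizeof(MDL) +
              ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Length) *
              sizeof(PFN_NUMBER);
*/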
Mdl = ExAllocatePoolWithTag(NonPagedPool, Size, TAG_MDL);
if (!Mdl) return NULL;
}

//
// Initialize it
//
PFN_NUMBER Pfn, PageCount;
PVOID Base;
PMMPTE PointerPte;

//
// Sanity checks
//
ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED |
                         MDL_MAPPED_TO_SYSTEM_VA |
                         MDL_SOURCE_IS_NONPAGED_POOL |
                         MDL_PARTIAL)) == 0);

//
// We know the MDL isn't associated with a process now
//
Mdl->Process = NULL;

//
// Get page and VA information
//
MdlPages = (PPFN_NUMBER)(Mdl + 1);
Base = Mdl->StartVa;

//
// Set the system address and now get the page count
//
Mdl->ByteCount);
ASSERT(PageCount != 0);
EndPage = MdlPages + PageCount;

//
// Loop the PTEs
//
Pfn = PFN_FROM_PTE(PointerPte++);
*MdlPages++ = Pfn;
} while (MdlPages < EndPage);

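/* Because nonpaged pool is always resident behind valid PTEs, the loop
   above can copy each PFN straight out of its PTE: no probing, locking,
   or reference counting is needed for this kind of MDL. */
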
//
// Set the nonpaged pool flag
//
Mdl->MdlFlags |= MDL_SOURCE_IS_NONPAGED_POOL;

//
// Check if this is an I/O mapping
//
IN ULONG Flags)
{
MI_PFN_CACHE_ATTRIBUTE CacheAttribute;

//
// Check for invalid cache type
//
//
CacheAttribute = MiPlatformCacheAttributes[FALSE][CacheType];
}

//
// Only these flags are allowed
//
//
return NULL;
}

//
// Call the internal routine
//
PMMPFN Pfn1;
KIRQL OldIrql;
DPRINT("Freeing MDL: %p\n", Mdl);
-
+
//
// Sanity checks
//
ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
ASSERT((Mdl->MdlFlags & MDL_IO_SPACE) == 0);
ASSERT(((ULONG_PTR)Mdl->StartVa & (PAGE_SIZE - 1)) == 0);

//
// Get address and page information
//
ASSERT(Pfn1);
ASSERT(Pfn1->u2.ShareCount == 1);
ASSERT(MI_IS_PFN_DELETED(Pfn1) == TRUE);
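/* 0x1FFEDCB is assumed here to be the sentinel that MiAllocatePagesForMdl
   stamps into u4.PteFrame so that this free path can recognize its own
   pages; any other value means a corrupted PFN entry or an invalid free. */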
if (Pfn1->u4.PteFrame != 0x1FFEDCB)
{
/* Corrupted PFN entry or invalid free */
KeBugCheckEx(MEMORY_MANAGEMENT, 0x1236, (ULONG_PTR)Mdl, (ULONG_PTR)Pages, *Pages);
}

//
// Clear it
//
Pfn1->u3.e1.StartOfAllocation = 0;
Pfn1->u3.e1.EndOfAllocation = 0;
Pfn1->u2.ShareCount = 0;

//
// Dereference it
//
/* We'll be nuking the whole page */
MiDecrementReferenceCount(Pfn1, *Pages);
}

//
// Clear this page and move on
//
MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
PMMPTE PointerPte;
MMPTE TempPte;

//
// Sanity check
//
ASSERT(Mdl->ByteCount != 0);

//
// Get the base
//
Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);

//
// Handle kernel case first
//
MdlPages = (PPFN_NUMBER)(Mdl + 1);
PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);
LastPage = MdlPages + PageCount;

//
// Sanity checks
//
MDL_SOURCE_IS_NONPAGED_POOL |
MDL_PARTIAL_HAS_BEEN_MAPPED)) == 0);
ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED | MDL_PARTIAL)) != 0);

//
// Get the correct cache type
//
IsIoMapping = (Mdl->MdlFlags & MDL_IO_SPACE) != 0;
CacheAttribute = MiPlatformCacheAttributes[IsIoMapping][CacheType];

//
// Reserve the PTEs
//
// If it can fail, return NULL
//
if (Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL) return NULL;

//
// Should we bugcheck?
//
if (!BugCheckOnFailure) return NULL;

//
// Yes, crash the system
//
KeBugCheckEx(NO_MORE_SYSTEM_PTES, 0, PageCount, 0, 0);
}

//
// Get the mapped address
//
Base = (PVOID)((ULONG_PTR)MiPteToAddress(PointerPte) + Mdl->ByteOffset);

//
// Get the template
//
switch (CacheAttribute)
{
case MiNonCached:

//
// Disable caching
//
MI_PAGE_DISABLE_CACHE(&TempPte);
MI_PAGE_WRITE_THROUGH(&TempPte);
break;

case MiWriteCombined:

//
// Enable write combining
//
MI_PAGE_DISABLE_CACHE(&TempPte);
MI_PAGE_WRITE_COMBINED(&TempPte);
break;

default:
//
// Nothing to do
//
break;
}

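/* The switch above translates the requested attribute into PTE cache bits:
   MiNonCached disables caching and forces write-through, MiWriteCombined
   disables caching and sets the write-combined bit (assumed PAT-backed on
   x86), and MiCached needs no change since cacheable is the PTE default. */
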
//
// Loop all PTEs
//
// We're done here
//
if (*MdlPages == LIST_HEAD) break;

//
// Write the PTE
//
TempPte.u.Hard.PageFrameNumber = *MdlPages;
MI_WRITE_VALID_PTE(PointerPte++, TempPte);
} while (++MdlPages < LastPage);

//
// Mark it as mapped
//
ASSERT((Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) == 0);
Mdl->MappedSystemVa = Base;
Mdl->MdlFlags |= MDL_MAPPED_TO_SYSTEM_VA;

//
// Check if it was partial
//
//
Mdl->MdlFlags |= MDL_PARTIAL_HAS_BEEN_MAPPED;
}

//
// Return the mapped address
//
return Base;
}

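/* Usage sketch (illustrative, not part of this file): drivers normally
   reach the mapping routine above through MmGetSystemAddressForMdlSafe,
   which returns the existing mapping if the MDL already has one:

       PVOID Va = MmGetSystemAddressForMdlSafe(Mdl, NormalPagePriority);
       if (!Va) return STATUS_INSUFFICIENT_RESOURCES;
*/
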
UNIMPLEMENTED;
return NULL;
}
PFN_NUMBER PageCount;
PPFN_NUMBER MdlPages;
PMMPTE PointerPte;

//
// Sanity check
//
ASSERT(Mdl->ByteCount != 0);

//
// Check if this is a kernel request
//
//
Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);

//
// Sanity checks
//
ASSERT((Mdl->MdlFlags & MDL_PARENT_MAPPED_SYSTEM_VA) == 0);
ASSERT(PageCount != 0);
ASSERT(Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA);

//
// Get the PTE
//
ASSERT(PointerPte >= MmSystemPtesStart[SystemPteSpace]);
ASSERT(PointerPte <= MmSystemPtesEnd[SystemPteSpace]);
ASSERT(PointerPte->u.Hard.Valid == 1);

//
// Check if the caller wants us to free advanced pages
//
//
MdlPages = (PPFN_NUMBER)(Mdl + 1);
MdlPages += PageCount;

//
// Do the math
//
PointerPte -= *MdlPages;
ASSERT(PointerPte >= MmSystemPtesStart[SystemPteSpace]);
ASSERT(PointerPte <= MmSystemPtesEnd[SystemPteSpace]);

//
// Get the new base address
//
BaseAddress = (PVOID)((ULONG_PTR)BaseAddress -
((*MdlPages) << PAGE_SHIFT));
}

//
// Remove flags
//
Mdl->MdlFlags &= ~(MDL_MAPPED_TO_SYSTEM_VA |
                   MDL_PARTIAL_HAS_BEEN_MAPPED |
                   MDL_FREE_EXTRA_PTES);

//
// Release the system PTEs
//
USHORT OldRefCount, RefCount;
PMMPFN Pfn1;
DPRINT("Probing MDL: %p\n", Mdl);
-
+
//
// Sanity checks
//
MDL_SOURCE_IS_NONPAGED_POOL |
MDL_PARTIAL |
MDL_IO_SPACE)) == 0);

//
// Get page and base information
//
MdlPages = (PPFN_NUMBER)(Mdl + 1);
Base = (PVOID)Mdl->StartVa;

//
// Get the addresses and how many pages we span (and need to lock)
//
LastAddress = (PVOID)((ULONG_PTR)Address + Mdl->ByteCount);
LockPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Address, Mdl->ByteCount);
ASSERT(LockPages != 0);

/* Block invalid access */
if ((AccessMode != KernelMode) &&
((LastAddress > (PVOID)MM_USER_PROBE_ADDRESS) || (Address >= LastAddress)))
*MdlPages = LIST_HEAD;
ExRaiseStatus(STATUS_ACCESS_VIOLATION);
}

//
// Get the process
//
//
CurrentProcess = NULL;
}

//
// Save the number of pages we'll have to lock, and the start address
//
TotalPages = LockPages;
StartAddress = Address;

/* Large pages not supported */
ASSERT(!MI_IS_PHYSICAL_ADDRESS(Address));

//
// Now probe them
//
// Assume failure
//
*MdlPages = LIST_HEAD;

//
// Read
//
*(volatile CHAR*)Address;

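/* The volatile read above is the actual probe: it forces a real memory
   access so that an invalid or paged-out address faults right now, inside
   the enclosing SEH block, rather than later with locks held. */
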
//
// Check if this is write access (only probe for user-mode)
//
//
ProbeForWriteChar(Address);
}

//
// Next address...
//
Address = PAGE_ALIGN((ULONG_PTR)Address + PAGE_SIZE);

//
// Next page...
//
LockPages--;
MdlPages++;
} while (Address < LastAddress);

//
// Reset back to the original page
//
ProbeStatus = _SEH2_GetExceptionCode();
}
_SEH2_END;

//
// So how did that go?
//
Mdl->Process = NULL;
ExRaiseStatus(ProbeStatus);
}

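/* Usage sketch (illustrative, not part of this file): since probe failures
   are raised rather than returned, callers must wrap this routine in SEH:

       _SEH2_TRY
       {
           MmProbeAndLockPages(Mdl, UserMode, IoReadAccess);
       }
       _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
       {
           Status = _SEH2_GetExceptionCode();
       }
       _SEH2_END;
*/
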
//
// Get the PTE and PDE
//
DPRINT1("PAE/x64 Not Implemented\n");
ASSERT(FALSE);
#endif

//
// Sanity check
//
ASSERT(MdlPages == (PPFN_NUMBER)(Mdl + 1));

//
// Check what kind of operation this is
//
//
Mdl->MdlFlags &= ~(MDL_WRITE_OPERATION);
}

//
// Mark the MDL as locked *now*
//
Mdl->MdlFlags |= MDL_PAGES_LOCKED;

//
// Check if this came from kernel mode
//
//
ASSERT(CurrentProcess == NULL);
Mdl->Process = NULL;

//
// In kernel mode, we don't need to check for write access
//
Operation = IoReadAccess;

//
// Use the PFN lock
//
//
ASSERT(TotalPages != 0);
ASSERT(CurrentProcess == PsGetCurrentProcess());

//
// Track locked pages
//
InterlockedExchangeAddSizeT(&CurrentProcess->NumberOfLockedPages,
TotalPages);

//
// Save the process
//
Mdl->Process = CurrentProcess;

/* Lock the process working set */
MiLockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
UsePfnLock = FALSE;
OldIrql = MM_NOIRQL;
}

//
// Get the last PTE
//
LastPte = MiAddressToPte((PVOID)((ULONG_PTR)LastAddress - 1));

//
// Loop the pages
//
/* Release process working set */
MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
}

//
// Access the page
//
DPRINT1("Access fault failed\n");
goto Cleanup;
}

//
// What lock should we use?
//
MiLockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
}
}

//
// Check if this was a write or modify
//
/* Release process working set */
MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
}

//
// Access the page
//
DPRINT1("Access fault failed\n");
goto Cleanup;
}

//
// Re-acquire the lock
//
/* Lock the process working set */
MiLockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
}

//
// Start over
//
continue;
}
}

//
// Fail, since we won't allow this
//
goto CleanupWithLock;
}
}

//
// Grab the PFN
//
{
/* Either this is for kernel-mode, or the working set is held */
ASSERT((CurrentProcess == NULL) || (UsePfnLock == FALSE));

/* No Physical VADs supported yet */
if (CurrentProcess) ASSERT(CurrentProcess->PhysicalVadRoot == NULL);

/* This address should already exist and be fully valid */
ASSERT(Pfn1->u3.e2.ReferenceCount != 0);
if (MI_IS_ROS_PFN(Pfn1))
{
/* On ARM3 pages, we should see a valid share count */
ASSERT((Pfn1->u2.ShareCount != 0) && (Pfn1->u3.e1.PageLocation == ActiveAndValid));

/* We don't support mapping a prototype page yet */
ASSERT((Pfn1->u3.e1.PrototypePte == 0) && (Pfn1->OriginalPte.u.Soft.Prototype == 0));
}
OldRefCount);
ASSERT(RefCount != 0);
} while (OldRefCount != RefCount);

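/* The loop above takes a lock-free reference on the PFN: it re-reads the
   reference count and retries the interlocked compare-exchange (elided
   here) until the increment lands without racing another processor. */
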
/* Was this the first lock attempt? */
if (OldRefCount != 1)
{
//
Mdl->MdlFlags |= MDL_IO_SPACE;
}

//
// Write the page and move on
//
*MdlPages++ = PageFrameIndex;
PointerPte++;

/* Check if we're on a PDE boundary */
if (!((ULONG_PTR)PointerPte & (PD_SIZE - 1))) PointerPde++;
} while (PointerPte <= LastPte);

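/* The PD_SIZE alignment test above detects when the incremented PTE
   pointer has crossed onto a new page table, at which point the matching
   PDE pointer must advance as well. */
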
//
// What kind of lock were we using?
//
/* Release process working set */
MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
}

//
// Sanity check
//
ASSERT((Mdl->MdlFlags & MDL_DESCRIBES_AWE) == 0);
return;

CleanupWithLock:
//
// This is the failure path
//
ASSERT(!NT_SUCCESS(Status));

//
// What kind of lock were we using?
//
//
ASSERT(Mdl->MdlFlags & MDL_PAGES_LOCKED);
MmUnlockPages(Mdl);

//
// Raise the error
//
USHORT RefCount, OldRefCount;
PMMPFN Pfn1;
DPRINT("Unlocking MDL: %p\n", Mdl);
-
+
//
// Sanity checks
//
ASSERT((Mdl->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL) == 0);
ASSERT((Mdl->MdlFlags & MDL_PARTIAL) == 0);
ASSERT(Mdl->ByteCount != 0);

//
// Get the associated process and capture the volatile flags
//
Process = Mdl->Process;
Flags = Mdl->MdlFlags;

//
// Automagically undo any calls to MmGetSystemAddressForMdl for this MDL
//
//
MmUnmapLockedPages(Mdl->MappedSystemVa, Mdl);
}

//
// Get the page count
//
Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);
ASSERT(PageCount != 0);

//
// We don't support AWE
//
if (Flags & MDL_DESCRIBES_AWE) ASSERT(FALSE);

//
// Check if the buffer is mapped I/O space
//
if (Flags & MDL_IO_SPACE)
{
//
// Acquire PFN lock
//
OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

//
// Loop every page
//
// Last page, break out
//
if (*MdlPages == LIST_HEAD) break;

//
// Check if this page is in the PFN database
//
ASSERT(Pfn1->u3.e2.ReferenceCount == 1);
ASSERT(Pfn1->u3.e1.PageLocation != ActiveAndValid);
ASSERT(Pfn1->u2.ShareCount == 0);

/* Not supported yet */
ASSERT(((Pfn1->u3.e1.PrototypePte == 0) &&
(Pfn1->OriginalPte.u.Soft.Prototype == 0)));
/* One less page */
InterlockedExchangeAddSizeT(&MmSystemLockPagesCount, -1);

/* Do the last dereference, we're done here */
MiDecrementReferenceCount(Pfn1, *MdlPages);
}
}
}
} while (++MdlPages < LastPage);

//
// Release the lock
//
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

//
// Check if we have a process
//
InterlockedExchangeAddSizeT(&Process->NumberOfLockedPages,
-PageCount);
}

//
// We're done
//
Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
return;
}

//
// Check if we have a process
//
InterlockedExchangeAddSizeT(&Process->NumberOfLockedPages,
-PageCount);
}

//
// Loop every page
//
Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
return;
}

//
// Otherwise, stop here
//
LastPage = MdlPages;
break;
}

/* Save the PFN entry instead for the secondary loop */
*MdlPages = (PFN_NUMBER)MiGetPfnEntry(*MdlPages);
ASSERT((*MdlPages) != 0);
} while (++MdlPages < LastPage);

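/* Design note: the first pass above runs without the PFN lock, converting
   each PFN number in the MDL array into its PMMPFN entry pointer; the
   second pass below then takes the PFN lock once and does all the unlocks
   and dereferences inside a single critical section. */
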
//
// Reset pointer
//
MdlPages = (PPFN_NUMBER)(Mdl + 1);

//
// Now grab the PFN lock for the actual unlock and dereference
//
ASSERT(Pfn1->u3.e2.ReferenceCount == 1);
ASSERT(Pfn1->u3.e1.PageLocation != ActiveAndValid);
ASSERT(Pfn1->u2.ShareCount == 0);

/* Not supported yet */
ASSERT(((Pfn1->u3.e1.PrototypePte == 0) &&
(Pfn1->OriginalPte.u.Soft.Prototype == 0)));
/* One less page */
InterlockedExchangeAddSizeT(&MmSystemLockPagesCount, -1);

/* Do the last dereference, we're done here */
MiDecrementReferenceCount(Pfn1, MiGetPfnEntryIndex(Pfn1));
}
}
}
} while (++MdlPages < LastPage);

//
// Release the lock
//
KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

//
// We're done
//