Use ULONG_PTR for physical addresses and PVOID for virtual addresses.
Added several consistency checks in the memory manager.
svn path=/trunk/; revision=2946
-/* $Id: section.c,v 1.13 2001/02/17 17:42:46 ekohl Exp $
+/* $Id: section.c,v 1.14 2002/05/13 18:10:38 chorns Exp $
*
* COPYRIGHT: See COPYING in the top level directory
* PROJECT: ReactOS kernel
{
BaseAddress = lpBaseAddress;
}
-
+
+ ViewSize = (ULONG) dwNumberOfBytesToMap;
+
Status = ZwMapViewOfSection(hFileMappingObject,
NtCurrentProcess(),
&BaseAddress,
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-/* $Id: view.c,v 1.38 2002/05/05 14:57:42 chorns Exp $
+/* $Id: view.c,v 1.39 2002/05/13 18:10:38 chorns Exp $
*
* COPYRIGHT: See COPYING in the top level directory
* PROJECT: ReactOS kernel
ExReleaseFastMutex(&ViewLock);
for (i = 0; i < (Bcb->CacheSegmentSize / PAGESIZE); i++)
{
- PVOID Page;
+ ULONG_PTR Page;
Status = MmRequestPageMemoryConsumer(MC_CACHE, TRUE, &Page);
if (!NT_SUCCESS(Status))
Status = MmCreateVirtualMapping(NULL,
current->BaseAddress + (i * PAGESIZE),
PAGE_READWRITE,
- (ULONG)Page,
+ Page,
TRUE);
if (!NT_SUCCESS(Status))
{
CacheSeg));
}
-STATIC VOID
-CcFreeCachePage(PVOID Context, MEMORY_AREA* MemoryArea, PVOID Address,
- ULONG PhysAddr, SWAPENTRY SwapEntry, BOOLEAN Dirty)
+VOID
+CcFreeCachePage(IN BOOLEAN Before,
+ IN PVOID Context,
+ IN PMEMORY_AREA MemoryArea,
+ IN PVOID Address,
+ IN ULONG_PTR PhysicalAddress,
+ IN SWAPENTRY SwapEntry,
+ IN BOOLEAN Dirty)
{
- assert(SwapEntry == 0);
- if (PhysAddr != 0)
+ if (!Before)
{
- MmReleasePageMemoryConsumer(MC_CACHE, (PVOID)PhysAddr);
+ assert(SwapEntry == 0);
+ if (PhysicalAddress != 0)
+ {
+ MmReleasePageMemoryConsumer(MC_CACHE, PhysicalAddress);
+ }
}
}
if (ParentKey->SizeOfSubKeys <= ParentKey->NumberOfSubKeys)
{
- PKEY_OBJECT *tmpSubKeys = ExAllocatePool(PagedPool,
+ PKEY_OBJECT *tmpSubKeys = ExAllocatePool(NonPagedPool,
(ParentKey->NumberOfSubKeys + 1) * sizeof(DWORD));
if (ParentKey->NumberOfSubKeys > 0)
-/* $Id: work.c,v 1.11 2001/08/26 17:26:23 ekohl Exp $
+/* $Id: work.c,v 1.12 2002/05/13 18:10:39 chorns Exp $
*
* COPYRIGHT: See COPYING in the top level directory
* PROJECT: ReactOS kernel
static VOID ExInitializeWorkQueue(PWORK_QUEUE WorkQueue,
KPRIORITY Priority)
{
+ NTSTATUS Status;
ULONG i;
- PETHREAD Thread;
-
+
InitializeListHead(&WorkQueue->Head);
KeInitializeSpinLock(&WorkQueue->Lock);
KeInitializeSemaphore(&WorkQueue->Sem,
NULL,
ExWorkerThreadEntryPoint,
WorkQueue);
- ObReferenceObjectByHandle(WorkQueue->Thread[i],
- THREAD_ALL_ACCESS,
- PsThreadType,
- KernelMode,
- (PVOID*)&Thread,
- NULL);
- KeSetPriorityThread(&Thread->Tcb,
- Priority);
- ObDereferenceObject(Thread);
+ Status = PiSetPriorityThread(WorkQueue->Thread[i], Priority);
+
+ if (!NT_SUCCESS(Status))
+ {
+ DPRINT1("PiSetPriorityThread() failed with status 0x%.08x\n", Status);
+ KeBugCheck(0);
+ }
}
}
#define InternalNotificationTimer (InternalBaseType + 10)
#define InternalSynchronizationTimer (InternalBaseType + 11)
#define InternalQueueType (InternalBaseType + 12)
-
-
+#define InternalTypeMaximum InternalQueueType
VOID
KiDumpTrapFrame(PKTRAP_FRAME Tf, ULONG ExceptionNr, ULONG cr2);
+VOID
+KiValidateDispatcherObject(IN PDISPATCHER_HEADER Object);
+
+#ifdef DBG
+#define VALIDATE_DISPATCHER_OBJECT(Object) KiValidateDispatcherObject(Object);
+#else /* !DBG */
+#define VALIDATE_DISPATCHER_OBJECT(Object)
+#endif /* DBG */
+
#endif /* not __ASM__ */
#define MAXIMUM_PROCESSORS 32
#ifndef __INCLUDE_INTERNAL_MM_H
#define __INCLUDE_INTERNAL_MM_H
+#include <roscfg.h>
#include <internal/ntoskrnl.h>
#include <internal/arch/mm.h>
struct _MM_RMAP_ENTRY;
struct _MM_PAGEOP;
-typedef ULONG SWAPENTRY;
+typedef ULONG SWAPENTRY, *PSWAPENTRY;
+
#define MEMORY_AREA_INVALID (0)
#define MEMORY_AREA_SECTION_VIEW_COMMIT (1)
typedef struct
{
- ULONG Entry[NR_SECTION_PAGE_ENTRIES];
+ ULONG_PTR Entry[NR_SECTION_PAGE_ENTRIES];
} SECTION_PAGE_TABLE, *PSECTION_PAGE_TABLE;
typedef struct
BOOLEAN Executable;
} SECTION_OBJECT, *PSECTION_OBJECT;
-typedef struct
+
+typedef struct _MEMORY_AREA
{
+#ifdef DBG
+ ULONG Magic;
+#endif /* DBG */
ULONG Type;
PVOID BaseAddress;
ULONG Length;
ULONG Attributes;
LIST_ENTRY Entry;
ULONG LockCount;
+ ULONG ReferenceCount;
struct _EPROCESS* Process;
union
{
struct
- {
+ {
SECTION_OBJECT* Section;
ULONG ViewOffset;
LIST_ENTRY ViewListEntry;
} Data;
} MEMORY_AREA, *PMEMORY_AREA;
+
+#define MM_PAGEOP_PAGEIN (1)
+#define MM_PAGEOP_PAGEOUT (2)
+#define MM_PAGEOP_PAGESYNCH (3)
+#define MM_PAGEOP_ACCESSFAULT (4)
+#define MM_PAGEOP_MINIMUM MM_PAGEOP_PAGEIN
+#define MM_PAGEOP_MAXIMUM MM_PAGEOP_ACCESSFAULT
+
+typedef struct _MM_PAGEOP
+{
+#ifdef DBG
+ /* Magic ID */
+ ULONG Magic;
+#endif /* DBG */
+ /* Type of operation. */
+ ULONG OpType;
+ /* Number of threads interested in this operation. */
+ ULONG ReferenceCount;
+ /* Event that will be set when the operation is completed. */
+ KEVENT CompletionEvent;
+ /* Status of the operation once it is completed. */
+ NTSTATUS Status;
+ /* TRUE if the operation was abandoned. */
+ BOOLEAN Abandoned;
+ /* The memory area to be affected by the operation. */
+ PMEMORY_AREA MArea;
+ ULONG Hash;
+ struct _MM_PAGEOP* Next;
+ struct _ETHREAD* Thread;
+ /*
+ * These fields are used to identify the operation if it is against a
+ * virtual memory area.
+ */
+ ULONG Pid;
+ PVOID Address;
+ /*
+ * These fields are used to identify the operation if it is against a
+ * section mapping.
+ */
+ PMM_SECTION_SEGMENT Segment;
+ ULONG Offset;
+} MM_PAGEOP, *PMM_PAGEOP;
+
+
+#define PAGE_STATE_VALID (0)
+#define PAGE_STATE_PROTOTYPE (1)
+#define PAGE_STATE_DEMAND_ZERO (2)
+#define PAGE_STATE_TRANSITION (3)
+
+#define MM_PTYPE(x) (x)
+
+#define PAGE_LIST_FREE_ZEROED (0)
+#define PAGE_LIST_FREE_UNZEROED (1)
+#define PAGE_LIST_BIOS (2)
+#define PAGE_LIST_STANDBY (3)
+#define PAGE_LIST_MODIFIED (4)
+#define PAGE_LIST_MODIFIED_NO_WRITE (5)
+#define PAGE_LIST_MPW (6)
+
+/* PHYSICAL_PAGE.Flags */
+#define MM_PHYSICAL_PAGE_FREE (0x1)
+#define MM_PHYSICAL_PAGE_USED (0x2)
+#define MM_PHYSICAL_PAGE_BIOS (0x3)
+#define MM_PHYSICAL_PAGE_STANDBY (0x4)
+#define MM_PHYSICAL_PAGE_MODIFIED (0x5)
+#define MM_PHYSICAL_PAGE_MPW (0x6)
+
+typedef VOID (*PRMAP_DELETE_CALLBACK)(IN PVOID Context,
+ IN PEPROCESS Process,
+ IN PVOID Address);
+
+/* FIXME: Unionize this structure */
+typedef struct _PHYSICAL_PAGE
+{
+ ULONG Flags;
+ LIST_ENTRY ListEntry;
+ ULONG ReferenceCount;
+ SWAPENTRY SavedSwapEntry;
+ ULONG LockCount;
+ ULONG MapCount;
+ struct _MM_RMAP_ENTRY* RmapListHead;
+ PRMAP_DELETE_CALLBACK RmapDelete;
+ PVOID RmapDeleteContext;
+ PMM_PAGEOP PageOp;
+} PHYSICAL_PAGE, *PPHYSICAL_PAGE;
+
+
+extern PPHYSICAL_PAGE MmPageArray;
+
+#define MiPageFromDescriptor(pp)((((ULONG_PTR)(pp) - (ULONG_PTR) MmPageArray) / sizeof(PHYSICAL_PAGE)) * PAGESIZE)
+
typedef struct _MADDRESS_SPACE
{
+#ifdef DBG
+ ULONG Magic;
+#endif /* DBG */
LIST_ENTRY MAreaListHead;
KMUTEX Lock;
+ ULONG ReferenceCount;
ULONG LowestAddress;
struct _EPROCESS* Process;
PUSHORT PageTableRefCountTable;
ULONG PageTableRefCountTableSize;
} MADDRESS_SPACE, *PMADDRESS_SPACE;
+
+#define MmIsCopyOnWriteMemoryArea(MemoryArea) \
+( \
+ ((MemoryArea)->Data.SectionData.Segment->WriteCopy \
+ || (MemoryArea)->Data.SectionData.WriteCopyView) \
+ && ((MemoryArea)->Attributes == PAGE_READWRITE \
+ || (MemoryArea)->Attributes == PAGE_EXECUTE_READWRITE) \
+)
+
+
+extern ULONG MiMaximumModifiedPageListSize;
+extern ULONG MiModifiedPageListSize;
+extern ULONG MiMaximumStandbyPageListSize;
+extern ULONG MiStandbyPageListSize;
+
/* FUNCTIONS */
-VOID MmLockAddressSpace(PMADDRESS_SPACE AddressSpace);
-VOID MmUnlockAddressSpace(PMADDRESS_SPACE AddressSpace);
-VOID MmInitializeKernelAddressSpace(VOID);
-PMADDRESS_SPACE MmGetCurrentAddressSpace(VOID);
-PMADDRESS_SPACE MmGetKernelAddressSpace(VOID);
-NTSTATUS MmInitializeAddressSpace(struct _EPROCESS* Process,
- PMADDRESS_SPACE AddressSpace);
-NTSTATUS MmDestroyAddressSpace(PMADDRESS_SPACE AddressSpace);
-PVOID STDCALL MmAllocateSection (IN ULONG Length);
-NTSTATUS MmCreateMemoryArea(struct _EPROCESS* Process,
- PMADDRESS_SPACE AddressSpace,
- ULONG Type,
- PVOID* BaseAddress,
- ULONG Length,
- ULONG Attributes,
- MEMORY_AREA** Result,
- BOOL FixedAddress);
-MEMORY_AREA* MmOpenMemoryAreaByAddress(PMADDRESS_SPACE AddressSpace,
- PVOID Address);
-NTSTATUS MmInitMemoryAreas(VOID);
-VOID ExInitNonPagedPool(ULONG BaseAddress);
-NTSTATUS MmFreeMemoryArea(PMADDRESS_SPACE AddressSpace,
- PVOID BaseAddress,
- ULONG Length,
- VOID (*FreePage)(PVOID Context, MEMORY_AREA* MemoryArea,
- PVOID Address, ULONG PhysAddr, SWAPENTRY SwapEntry,
- BOOLEAN Dirty),
- PVOID FreePageContext);
+#ifdef DBG
+
+VOID
+DbgMmDumpProtection(IN ULONG Value);
+
+VOID
+MiDumpPTE(IN ULONG Value);
+
+VOID
+MiDumpProcessPTE(IN PEPROCESS Process,
+ IN PVOID Address);
+
+#endif /* DBG */
+
+VOID
+MiAcquirePageListLock(IN ULONG PageList,
+ OUT PLIST_ENTRY * ListHead);
+
+VOID
+MiReleasePageListLock();
+
+VOID
+MiReclaimPage(IN ULONG_PTR PhysicalAddress,
+ IN BOOLEAN Dirty);
+
+VOID
+MmInitMpwThreads();
+
+VOID
+MiShutdownMpwThreads();
+
+VOID
+MiSignalModifiedPageWriter();
+
+VOID
+MiSignalMappedPageWriter();
+
+VOID
+MmInitializeBalanceSetManager();
+
+ULONG
+MiGetLockCountPage(IN ULONG_PTR PhysicalAddress);
+
+VOID
+MiDisableAllRmaps(IN ULONG_PTR PhysicalAddress,
+ IN PBOOLEAN Modified);
+
+VOID
+MiEnableAllRmaps(IN ULONG_PTR PhysicalAddress,
+ IN BOOLEAN Modified);
+
+VOID
+MiGetDirtyAllRmaps(IN ULONG_PTR PhysicalAddress,
+ IN PBOOLEAN Dirty);
+
+VOID
+MiGetPageStateAllRmaps(IN ULONG_PTR PhysicalAddress,
+ IN ULONG PageState,
+ OUT PBOOLEAN Result);
+
+VOID
+MiClearPageStateAllRmaps(IN ULONG_PTR PhysicalAddress,
+ IN ULONG PageState);
+
+VOID
+MiSetPageStateAllRmaps(IN ULONG_PTR PhysicalAddress,
+ IN ULONG PageState);
+
+VOID
+MiSetDirtyAllRmaps(IN ULONG_PTR PhysicalAddress,
+ IN BOOLEAN Dirty);
+
+BOOLEAN
+MiPageState(IN PEPROCESS Process,
+ IN PVOID Address,
+ IN ULONG PageState);
+
+VOID
+MiClearPageState(IN PEPROCESS Process,
+ IN PVOID Address,
+ IN ULONG PageState);
+
+VOID
+MiSetPageState(IN PEPROCESS Process,
+ IN PVOID Address,
+ IN ULONG PageState);
+
+VOID
+MmInitializeKernelAddressSpace(VOID);
+
+PMADDRESS_SPACE
+MmGetCurrentAddressSpace();
+
+PMADDRESS_SPACE
+MmGetKernelAddressSpace();
+
+NTSTATUS
+MmInitializeAddressSpace(IN PEPROCESS Process,
+ IN PMADDRESS_SPACE AddressSpace);
+
+NTSTATUS
+MmDestroyAddressSpace(IN PMADDRESS_SPACE AddressSpace);
+
+VOID
+MmReferenceAddressSpace(IN PMADDRESS_SPACE AddressSpace);
+
+VOID
+MmDereferenceAddressSpace(IN PMADDRESS_SPACE AddressSpace);
+
+VOID
+MmApplyMemoryAreaProtection(IN PMEMORY_AREA MemoryArea);
+
+NTSTATUS
+MmFlushSection(IN PSECTION_OBJECT SectionObject,
+ IN PLARGE_INTEGER FileOffset OPTIONAL,
+ IN ULONG Length,
+ OUT PIO_STATUS_BLOCK IoStatus OPTIONAL);
+
+PVOID STDCALL
+MmAllocateSection(IN ULONG Length);
+
+NTSTATUS
+MmCreateMemoryArea(IN PEPROCESS Process,
+ IN PMADDRESS_SPACE AddressSpace,
+ IN ULONG Type,
+ IN OUT PVOID* BaseAddress,
+ IN ULONG Length,
+ IN ULONG Attributes,
+ OUT PMEMORY_AREA* Result,
+ IN BOOLEAN FixedAddress);
+
+PMEMORY_AREA
+MmOpenMemoryAreaByAddress(IN PMADDRESS_SPACE AddressSpace,
+ IN PVOID Address);
+
+VOID
+MmCloseMemoryArea(IN PMEMORY_AREA MemoryArea);
+
+NTSTATUS
+MmInitMemoryAreas();
+
+VOID
+ExInitNonPagedPool(IN PVOID BaseAddress);
+
+typedef VOID (*PFREE_MEMORY_AREA_PAGE_CALLBACK)(IN BOOLEAN Before,
+ IN PVOID Context,
+ IN PMEMORY_AREA MemoryArea,
+ IN PVOID Address,
+ IN ULONG_PTR PhysicalAddress,
+ IN SWAPENTRY SwapEntry,
+ IN BOOLEAN Dirty);
+
+NTSTATUS
+MmFreeMemoryArea(IN PMADDRESS_SPACE AddressSpace,
+ IN PVOID BaseAddress,
+ IN ULONG Length,
+ IN PFREE_MEMORY_AREA_PAGE_CALLBACK FreePage,
+ IN PVOID FreePageContext);
+
VOID MmDumpMemoryAreas(PLIST_ENTRY ListHead);
NTSTATUS MmLockMemoryArea(MEMORY_AREA* MemoryArea);
NTSTATUS MmUnlockMemoryArea(MEMORY_AREA* MemoryArea);
PADDRESS_RANGE BIOSMemoryMap,
ULONG AddressRangeCount);
-PVOID
-MmAllocPage(ULONG Consumer, SWAPENTRY SavedSwapEntry);
-VOID MmDereferencePage(PVOID PhysicalAddress);
-VOID MmReferencePage(PVOID PhysicalAddress);
+ULONG_PTR
+MmAllocPage(IN ULONG Consumer,
+ IN SWAPENTRY SavedSwapEntry);
+
+VOID MmDereferencePage(IN ULONG_PTR PhysicalAddress);
+VOID MmReferencePage(IN ULONG_PTR PhysicalAddress);
VOID MmDeletePageTable(struct _EPROCESS* Process,
PVOID Address);
NTSTATUS MmCopyMmInfo(struct _EPROCESS* Src,
NTSTATUS Mmi386ReleaseMmInfo(struct _EPROCESS* Process);
VOID
MmDeleteVirtualMapping(struct _EPROCESS* Process,
- PVOID Address,
- BOOL FreePage,
- BOOL* WasDirty,
- ULONG* PhysicalPage);
+ PVOID Address,
+ BOOLEAN FreePage,
+ PBOOLEAN WasDirty,
+ PULONG PhysicalPage);
#define MM_PAGE_CLEAN (0)
#define MM_PAGE_DIRTY (1)
-VOID MmBuildMdlFromPages(PMDL Mdl, PULONG Pages);
-PVOID MmGetMdlPageAddress(PMDL Mdl, PVOID Offset);
-VOID MiShutdownMemoryManager(VOID);
-ULONG MmGetPhysicalAddressForProcess(struct _EPROCESS* Process,
- PVOID Address);
+VOID
+MmBuildMdlFromPages(IN PMDL Mdl,
+ IN PULONG_PTR Pages);
+
+PVOID
+MmGetMdlPageAddress(IN PMDL Mdl,
+ IN PVOID Offset);
+
+VOID
+MiShutdownMemoryManager(VOID);
+
+ULONG
+MmGetPhysicalAddressForProcess(IN struct _EPROCESS* Process,
+ IN PVOID Address);
+
NTSTATUS STDCALL
MmUnmapViewOfSection(struct _EPROCESS* Process, PVOID BaseAddress);
-VOID MmInitPagingFile(VOID);
+
+VOID
+MmInitPagingFile(VOID);
/* FIXME: it should be in ddk/mmfuncs.h */
NTSTATUS
BOOLEAN Locked);
NTSTATUS
MmNotPresentFaultSectionView(PMADDRESS_SPACE AddressSpace,
- MEMORY_AREA* MemoryArea,
+ MEMORY_AREA* MemoryArea,
PVOID Address,
BOOLEAN Locked);
NTSTATUS MmWaitForPage(PVOID Page);
VOID MmSetWaitPage(PVOID Page);
BOOLEAN MmIsPageDirty(struct _EPROCESS* Process, PVOID Address);
BOOLEAN MmIsPageTablePresent(PVOID PAddress);
+
NTSTATUS
-MmPageOutVirtualMemory(PMADDRESS_SPACE AddressSpace,
+MmFlushVirtualMemory(PMADDRESS_SPACE AddressSpace,
PMEMORY_AREA MemoryArea,
PVOID Address,
struct _MM_PAGEOP* PageOp);
NTSTATUS
-MmPageOutSectionView(PMADDRESS_SPACE AddressSpace,
+MmFlushSectionView(PMADDRESS_SPACE AddressSpace,
PMEMORY_AREA MemoryArea,
PVOID Address,
struct _MM_PAGEOP* PageOp);
PVOID Address,
ULONG Length);
-VOID ExUnmapPage(PVOID Addr);
-PVOID ExAllocatePage(VOID);
+VOID
+ExUnmapPage(IN PVOID Addr);
+
+PVOID
+ExAllocatePage(VOID);
+
+VOID
+MmInitPagingFile(VOID);
+
+BOOLEAN
+MmReserveSwapPages(IN ULONG Nr);
+
+VOID
+MmDereserveSwapPages(IN ULONG Nr);
+
+SWAPENTRY
+MmAllocSwapPage(VOID);
+
+VOID
+MmFreeSwapPage(IN SWAPENTRY Entry);
+
+VOID
+MiValidateSwapEntry(IN SWAPENTRY Entry);
+
+VOID
+MiValidatePageOp(IN PMM_PAGEOP PageOp);
+
+VOID
+MiValidatePhysicalAddress(IN ULONG_PTR PhysicalAddress);
+
+VOID
+MiValidateAddressSpace(IN PMADDRESS_SPACE AddressSpace);
-VOID MmInitPagingFile(VOID);
-BOOLEAN MmReserveSwapPages(ULONG Nr);
-VOID MmDereserveSwapPages(ULONG Nr);
-SWAPENTRY MmAllocSwapPage(VOID);
-VOID MmFreeSwapPage(SWAPENTRY Entry);
+VOID
+MiValidateMemoryArea(IN PMEMORY_AREA MemoryArea);
+
+VOID
+MiValidateRmapList(struct _MM_RMAP_ENTRY* RmapList);
VOID MmInit1(ULONG FirstKernelPhysAddress,
ULONG LastKernelPhysAddress,
extern MM_STATS MmStats;
+extern BOOLEAN MiInitialized;
+
+#ifdef DBG
+#define VALIDATE_PHYSICAL_ADDRESS(PhysicalAddress) MiValidatePhysicalAddress((ULONG_PTR)PhysicalAddress)
+#define VALIDATE_SWAP_ENTRY(Entry) MiValidateSwapEntry(Entry)
+#define VALIDATE_PAGEOP(PageOp) MiValidatePageOp(PageOp)
+#define VALIDATE_ADDRESS_SPACE(AddressSpace) MiValidateAddressSpace(AddressSpace)
+#define VALIDATE_MEMORY_AREA(MemoryArea) MiValidateMemoryArea(MemoryArea)
+#define VALIDATE_RMAP_LIST(RmapList) MiValidateRmapList(RmapList)
+#else /* !DBG */
+#define VALIDATE_PHYSICAL_ADDRESS(PhysicalAddress)
+#define VALIDATE_SWAP_ENTRY(Entry)
+#define VALIDATE_PAGEOP(PageOp)
+#define VALIDATE_ADDRESS_SPACE(AddressSpace)
+#define VALIDATE_MEMORY_AREA(MemoryArea)
+#define VALIDATE_RMAP_LIST(RmapList)
+#endif /* DBG */
+
+#ifdef DBG
+
+VOID
+MiLockAddressSpace(IN PMADDRESS_SPACE AddressSpace,
+ IN LPSTR FileName,
+ IN ULONG LineNumber);
+
+VOID
+MiUnlockAddressSpace(IN PMADDRESS_SPACE AddressSpace,
+ IN LPSTR FileName,
+ IN ULONG LineNumber);
+
+/* Use macros for easier debugging */
+#define MmLockAddressSpace(AddressSpace) MiLockAddressSpace(AddressSpace, __FILE__, __LINE__)
+#define MmUnlockAddressSpace(AddressSpace) MiUnlockAddressSpace(AddressSpace, __FILE__, __LINE__)
+
+#else /* !DBG */
+
+VOID
+MiLockAddressSpace(IN PMADDRESS_SPACE AddressSpace);
+
+VOID
+MiUnlockAddressSpace(IN PMADDRESS_SPACE AddressSpace);
+
+#define MmLockAddressSpace MiLockAddressSpace
+#define MmUnlockAddressSpace MiUnlockAddressSpace
+
+#endif /* !DBG */
+
+
+#ifdef DBG
+
+VOID
+MiReferenceMemoryArea(IN PMEMORY_AREA MemoryArea,
+ IN LPSTR FileName,
+ IN ULONG LineNumber);
+
+VOID
+MiDereferenceMemoryArea(IN PMEMORY_AREA MemoryArea,
+ IN LPSTR FileName,
+ IN ULONG LineNumber);
+
+/* Use macros for easier debugging */
+#define MmReferenceMemoryArea(MemoryArea) MiReferenceMemoryArea(MemoryArea, __FILE__, __LINE__)
+#define MmDereferenceMemoryArea(MemoryArea) MiDereferenceMemoryArea(MemoryArea, __FILE__, __LINE__)
+
+#else /* !DBG */
+
+VOID
+MiReferenceMemoryArea(IN PMEMORY_AREA MemoryArea);
+
+VOID
+MiDereferenceMemoryArea(IN PMEMORY_AREA MemoryArea);
+
+#define MmReferenceMemoryArea MiReferenceMemoryArea
+#define MmDereferenceMemoryArea MiDereferenceMemoryArea
+
+#endif /* !DBG */
+
+
NTSTATUS
MmWritePageSectionView(PMADDRESS_SPACE AddressSpace,
- PMEMORY_AREA MArea,
- PVOID Address);
+ PMEMORY_AREA MArea,
+ PVOID Address);
+
NTSTATUS
MmWritePageVirtualMemory(PMADDRESS_SPACE AddressSpace,
- PMEMORY_AREA MArea,
- PVOID Address);
+ PMEMORY_AREA MArea,
+ PVOID Address);
+
PVOID
MmGetDirtyPagesFromWorkingSet(struct _EPROCESS* Process);
+
NTSTATUS
MmWriteToSwapPage(SWAPENTRY SwapEntry, PMDL Mdl);
+
NTSTATUS
MmReadFromSwapPage(SWAPENTRY SwapEntry, PMDL Mdl);
-VOID
-MmSetFlagsPage(PVOID PhysicalAddress, ULONG Flags);
+
+VOID
+MmSetFlagsPage(IN ULONG_PTR PhysicalAddress,
+ IN ULONG Flags);
+
ULONG
-MmGetFlagsPage(PVOID PhysicalAddress);
-VOID MmSetSavedSwapEntryPage(PVOID PhysicalAddress,
- SWAPENTRY SavedSwapEntry);
-SWAPENTRY MmGetSavedSwapEntryPage(PVOID PhysicalAddress);
-VOID MmSetCleanPage(struct _EPROCESS* Process, PVOID Address);
-VOID MmLockPage(PVOID PhysicalPage);
-VOID MmUnlockPage(PVOID PhysicalPage);
-
-NTSTATUS MmSafeCopyFromUser(PVOID Dest, PVOID Src, ULONG Count);
-NTSTATUS MmSafeCopyToUser(PVOID Dest, PVOID Src, ULONG Count);
-NTSTATUS
+MmGetFlagsPage(IN ULONG_PTR PhysicalAddress);
+
+VOID
+MmSetSavedSwapEntryPage(IN ULONG_PTR PhysicalAddress,
+ SWAPENTRY SavedSwapEntry);
+
+SWAPENTRY
+MmGetSavedSwapEntryPage(IN ULONG_PTR PhysicalAddress);
+
+VOID
+MmSetSavedPageOp(IN ULONG_PTR PhysicalAddress,
+ IN PMM_PAGEOP PageOp);
+
+PMM_PAGEOP
+MmGetSavedPageOp(IN ULONG_PTR PhysicalAddress);
+
+VOID
+MmSetCleanPage(IN PEPROCESS Process,
+ IN PVOID Address);
+
+VOID
+MmLockPage(IN ULONG_PTR PhysicalPage);
+
+VOID
+MmUnlockPage(IN ULONG_PTR PhysicalPage);
+
+NTSTATUS
+MmSafeCopyFromUser(PVOID Dest, PVOID Src, ULONG Count);
+
+NTSTATUS
+MmSafeCopyToUser(PVOID Dest, PVOID Src, ULONG Count);
+
+NTSTATUS
MmCreatePhysicalMemorySection(VOID);
+
PVOID
MmGetContinuousPages(ULONG NumberOfBytes,
- PHYSICAL_ADDRESS HighestAcceptableAddress,
- ULONG Alignment);
-
-#define MM_PHYSICAL_PAGE_MPW_PENDING (0x8)
+ PHYSICAL_ADDRESS HighestAcceptableAddress,
+ ULONG Alignment);
NTSTATUS
-MmAccessFaultSectionView(PMADDRESS_SPACE AddressSpace,
- MEMORY_AREA* MemoryArea,
- PVOID Address,
- BOOLEAN Locked);
-ULONG
-MmGetPageProtect(struct _EPROCESS* Process, PVOID Address);
-PVOID
-ExAllocatePageWithPhysPage(ULONG PhysPage);
+MmAccessFaultSectionView(IN PMADDRESS_SPACE AddressSpace,
+ IN MEMORY_AREA* MemoryArea,
+ IN PVOID Address,
+ IN BOOLEAN Locked);
+
ULONG
-MmGetReferenceCountPage(PVOID PhysicalAddress);
-BOOLEAN
-MmIsUsablePage(PVOID PhysicalAddress);
+MmGetPageProtect(IN struct _EPROCESS* Process,
+ IN PVOID Address);
-#define MM_PAGEOP_PAGEIN (1)
-#define MM_PAGEOP_PAGEOUT (2)
-#define MM_PAGEOP_PAGESYNCH (3)
-#define MM_PAGEOP_ACCESSFAULT (4)
+PVOID
+ExAllocatePageWithPhysPage(IN ULONG_PTR Page);
-typedef struct _MM_PAGEOP
-{
- /* Type of operation. */
- ULONG OpType;
- /* Number of threads interested in this operation. */
- ULONG ReferenceCount;
- /* Event that will be set when the operation is completed. */
- KEVENT CompletionEvent;
- /* Status of the operation once it is completed. */
- NTSTATUS Status;
- /* TRUE if the operation was abandoned. */
- BOOLEAN Abandoned;
- /* The memory area to be affected by the operation. */
- PMEMORY_AREA MArea;
- ULONG Hash;
- struct _MM_PAGEOP* Next;
- struct _ETHREAD* Thread;
- /*
- * These fields are used to identify the operation if it is against a
- * virtual memory area.
- */
- ULONG Pid;
- PVOID Address;
- /*
- * These fields are used to identify the operation if it is against a
- * section mapping.
- */
- PMM_SECTION_SEGMENT Segment;
- ULONG Offset;
-} MM_PAGEOP, *PMM_PAGEOP;
+ULONG
+MmGetReferenceCountPage(IN ULONG_PTR PhysicalAddress);
+
+BOOLEAN
+MmIsUsablePage(IN ULONG_PTR PhysicalAddress);
VOID
-MmReleasePageOp(PMM_PAGEOP PageOp);
+MmReleasePageOp(IN PMM_PAGEOP PageOp);
PMM_PAGEOP
-MmGetPageOp(PMEMORY_AREA MArea, ULONG Pid, PVOID Address,
- PMM_SECTION_SEGMENT Segment, ULONG Offset, ULONG OpType);
+MmGetPageOp(IN PMEMORY_AREA MArea,
+ IN ULONG Pid,
+ IN PVOID Address,
+ IN PMM_SECTION_SEGMENT Segment,
+ IN ULONG Offset,
+ IN ULONG OpType);
+
+PMM_PAGEOP
+MmGotPageOp(IN PMEMORY_AREA MArea,
+ IN ULONG Pid,
+ IN PVOID Address,
+ IN PMM_SECTION_SEGMENT Segment,
+ IN ULONG Offset);
VOID
MiDebugDumpNonPagedPool(BOOLEAN NewOnly);
+
VOID
MiDebugDumpNonPagedPoolStats(BOOLEAN NewOnly);
+
VOID
-MmMarkPageMapped(PVOID PhysicalAddress);
+MmMarkPageMapped(IN ULONG_PTR PhysicalAddress);
+
VOID
-MmMarkPageUnmapped(PVOID PhysicalAddress);
+MmMarkPageUnmapped(IN ULONG_PTR PhysicalAddress);
+
VOID
MmFreeSectionSegments(PFILE_OBJECT FileObject);
VOID
MmFreeVirtualMemory(struct _EPROCESS* Process, PMEMORY_AREA MemoryArea);
+
NTSTATUS
MiCopyFromUserPage(ULONG DestPhysPage, PVOID SourceAddress);
+
NTSTATUS
-MiZeroPage(ULONG PhysPage);
+MiZeroPage(IN ULONG_PTR Page);
+
BOOLEAN
MmIsAccessedAndResetAccessPage(struct _EPROCESS* Process, PVOID Address);
-SWAPENTRY
-MmGetSavedSwapEntryPage(PVOID PhysicalAddress);
#define STATUS_MM_RESTART_OPERATION (0xD0000001)
PULONG NrFreed));
VOID
MmInitializeBalancer(ULONG NrAvailablePages);
+
NTSTATUS
-MmReleasePageMemoryConsumer(ULONG Consumer, PVOID Page);
+MmReleasePageMemoryConsumer(IN ULONG Consumer,
+ IN ULONG_PTR Page);
+
+NTSTATUS
+MiFreePageMemoryConsumer(IN ULONG Consumer,
+ IN ULONG_PTR Page);
+
NTSTATUS
-MmRequestPageMemoryConsumer(ULONG Consumer, BOOLEAN CanWait, PVOID* AllocatedPage);
+MmRequestPageMemoryConsumer(IN ULONG Consumer,
+ IN BOOLEAN CanWait,
+ OUT PULONG_PTR pPage);
+
+VOID
+MiSatisfyAllocationRequest();
#define MC_CACHE (0)
#define MC_USER (1)
#define MC_NPPOOL (3)
#define MC_MAXIMUM (4)
+VOID
+MiTransitionAllRmaps(IN ULONG_PTR PhysicalAddress,
+ IN BOOLEAN Reference,
+ OUT PBOOLEAN Modified);
+
+NTSTATUS
+MiAbortTransition(IN ULONG_PTR Address);
+
+NTSTATUS
+MiFinishTransition(IN ULONG_PTR PhysicalAddress,
+ IN BOOLEAN Dirty);
+
VOID
-MmSetRmapListHeadPage(PVOID PhysicalAddress, struct _MM_RMAP_ENTRY* ListHead);
+MmSetRmapListHeadPage(IN ULONG_PTR PhysicalAddress,
+ IN struct _MM_RMAP_ENTRY* ListHead);
+
struct _MM_RMAP_ENTRY*
-MmGetRmapListHeadPage(PVOID PhysicalAddress);
+MmGetRmapListHeadPage(IN ULONG_PTR PhysicalAddress);
+
+VOID
+MmSetRmapCallback(IN ULONG_PTR PhysicalAddress,
+ IN PRMAP_DELETE_CALLBACK RmapDelete,
+ IN PVOID RmapDeleteContext);
+
+VOID
+MmGetRmapCallback(IN ULONG_PTR PhysicalAddress,
+ IN PRMAP_DELETE_CALLBACK *RmapDelete,
+ IN PVOID *RmapDeleteContext);
+
+NTSTATUS
+MmPageOutPhysicalAddress(ULONG_PTR PhysicalAddress);
+
VOID
-MmInsertRmap(PVOID PhysicalAddress, PEPROCESS Process, PVOID Address);
+MmInsertRmap(ULONG_PTR PhysicalAddress, PEPROCESS Process, PVOID Address);
+
VOID
-MmDeleteAllRmaps(PVOID PhysicalAddress, PVOID Context,
- VOID (*DeleteMapping)(PVOID Context, PEPROCESS Process, PVOID Address));
+MmDeleteAllRmaps(ULONG_PTR PhysicalAddress, PVOID Context,
+ VOID (*DeleteMapping)(PVOID Context, PEPROCESS Process,
+ PVOID Address));
+
VOID
-MmDeleteRmap(PVOID PhysicalAddress, PEPROCESS Process, PVOID Address);
+MmDeleteRmap(IN ULONG_PTR PhysicalAddress,
+ IN PEPROCESS Process,
+ IN PVOID Address);
+
VOID
MmInitializeRmapList(VOID);
-PVOID
-MmGetLRUNextUserPage(PVOID PreviousPhysicalAddress);
-PVOID
+
+ULONG_PTR
+MmGetLRUNextUserPage(IN ULONG_PTR PreviousPhysicalAddress);
+
+ULONG_PTR
MmGetLRUFirstUserPage(VOID);
+
+NTSTATUS
+MmPrepareFlushPhysicalAddress(IN ULONG_PTR PhysicalAddress);
+
NTSTATUS
-MmPageOutPhysicalAddress(PVOID PhysicalAddress);
+MmFlushPhysicalAddress(IN ULONG_PTR PhysicalAddress);
+
NTSTATUS
MmTrimUserMemory(ULONG Target, ULONG Priority, PULONG NrFreedPages);
VOID
-MmDisableVirtualMapping(PEPROCESS Process, PVOID Address, BOOL* WasDirty, ULONG* PhysicalAddr);
-VOID MmEnableVirtualMapping(PEPROCESS Process, PVOID Address);
+MmDisableVirtualMapping(IN PEPROCESS Process,
+ IN PVOID Address,
+ OUT PBOOLEAN WasDirty,
+ OUT PULONG_PTR PhysicalAddr);
+
+VOID MmEnableVirtualMapping(IN PEPROCESS Process,
+ IN PVOID Address);
+
VOID
-MmDeletePageFileMapping(PEPROCESS Process, PVOID Address, SWAPENTRY* SwapEntry);
-NTSTATUS
-MmCreatePageFileMapping(PEPROCESS Process,
- PVOID Address,
- SWAPENTRY SwapEntry);
-BOOLEAN MmIsPageSwapEntry(PEPROCESS Process, PVOID Address);
+MmDeletePageFileMapping(IN PEPROCESS Process,
+ IN PVOID Address,
+ OUT PSWAPENTRY SwapEntry);
+
+NTSTATUS
+MmCreatePageFileMapping(IN PEPROCESS Process,
+ IN PVOID Address,
+ IN SWAPENTRY SwapEntry);
+
+BOOLEAN
+MmIsPageSwapEntry(IN PEPROCESS Process,
+ IN PVOID Address);
+
VOID
-MmTransferOwnershipPage(PVOID PhysicalAddress, ULONG NewConsumer);
-VOID MmSetDirtyPage(PEPROCESS Process, PVOID Address);
+MmTransferOwnershipPage(IN ULONG_PTR PhysicalAddress,
+ IN ULONG NewConsumer);
+
+VOID
+MmSetDirtyPage(IN PEPROCESS Process,
+ IN PVOID Address);
+
+NTSTATUS
+MmPageOutSectionView(PMADDRESS_SPACE AddressSpace,
+ MEMORY_AREA* MemoryArea,
+ PVOID Address,
+ PMM_PAGEOP PageOp);
+
+NTSTATUS
+MmPageOutVirtualMemory(PMADDRESS_SPACE AddressSpace,
+ PMEMORY_AREA MemoryArea,
+ PVOID Address,
+ PMM_PAGEOP PageOp);
#endif
#ifndef __ASM__
+#include <roscfg.h>
#include <ddk/ntddk.h>
#include <stdarg.h>
+#ifdef DBG
+#define SET_MAGIC(x, magic)(x)->Magic = (magic);
+#else /* !DBG */
+#define SET_MAGIC(x, magic)
+#endif /* DBG */
+
/*
* Use these to place a function in a specific section of the executable
*/
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-/* $Id: ps.h,v 1.31 2002/02/15 14:44:13 ekohl Exp $
+/* $Id: ps.h,v 1.32 2002/05/13 18:10:39 chorns Exp $
*
* FILE: ntoskrnl/ke/kthread.c
* PURPOSE: Process manager definitions
VOID STDCALL
PsDispatchThread(ULONG NewThreadStatus);
+NTSTATUS
+PiSetPriorityThread(IN HANDLE ThreadHandle,
+ IN KPRIORITY Priority);
+
#endif /* ASSEMBLER */
#endif /* __INCLUDE_INTERNAL_PS_H */
#include <internal/kd.h>
#include <internal/ke.h>
#include <internal/ps.h>
+#include <internal/module.h>
#include <internal/ldr.h>
#define NDEBUG
/* at least NUMREGBYTES*2 are needed for register packets */
#define BUFMAX 1000
-static BOOLEAN GspInitialized; /* boolean flag. TRUE means we've been initialized */
+static BOOLEAN GspInitialized;
+#if 0
static PKINTERRUPT GspInterrupt;
+#endif
static BOOLEAN GspRemoteDebug;
VOID
KdGdbStubInit(ULONG Phase)
{
+#if 0
KAFFINITY Affinity;
NTSTATUS Status;
ULONG MappedIrq;
KIRQL Dirql;
+#endif
if (Phase == 0)
{
GspRunThread = PsGetCurrentThread();
GspDbgThread = PsGetCurrentThread();
GspEnumThread = NULL;
+
+ DbgBreakPointWithStatus (DBG_STATUS_CONTROL_C);
}
else if (Phase == 1)
{
+#if 0
/* Hook an interrupt handler to allow the debugger to break into
the system */
MappedIrq = HalGetInterruptVector (Internal,
KdPortEnableInterrupts();
DbgBreakPointWithStatus (DBG_STATUS_CONTROL_C);
+#endif
}
}
-/* This function will generate a breakpoint exception. It is used at the
- beginning of a program to sync up with a debugger and can be used
- otherwise as a quick means to stop program execution and "break" into
- the debugger. */
VOID
-KdGdbDebugPrint (LPSTR Message)
+KdGdbDebugPrint(LPSTR Message)
{
-/* This can be quite annoying! */
#if 0
+ /* This can be quite annoying! */
if (GspInitialized)
- {
- ULONG Length;
-
- GspOutBuffer[0] = 'O';
- GspOutBuffer[1] = '\0';
- strcat (&GspOutBuffer[0], Message);
- Length = strlen (Message);
- GspOutBuffer[2 + Length] = '\n';
- GspOutBuffer[3 + Length] = '\0';
- GspPutPacketNoWait (&GspOutBuffer[0]);
- }
+ {
+ ULONG Length;
+
+ GspOutBuffer[0] = 'O';
+ GspOutBuffer[1] = '\0';
+ strcat (&GspOutBuffer[0], Message);
+ Length = strlen (Message);
+ GspOutBuffer[2 + Length] = '\n';
+ GspOutBuffer[3 + Length] = '\0';
+ GspPutPacketNoWait (&GspOutBuffer[0]);
+ }
+#else
+ HalDisplayString(Message);
#endif
}
+
+
+extern LIST_ENTRY ModuleListHead;
+
+VOID
+KdGdbListModules()
+{
+ PLIST_ENTRY CurrentEntry;
+ PMODULE_OBJECT Current;
+ ULONG ModuleCount;
+
+ DPRINT1("\n");
+
+ ModuleCount = 0;
+
+ CurrentEntry = ModuleListHead.Flink;
+ while (CurrentEntry != (&ModuleListHead))
+ {
+ Current = CONTAINING_RECORD (CurrentEntry, MODULE_OBJECT, ListEntry);
+
+ DbgPrint ("Module %S Base 0x%.08x Length 0x%.08x\n",
+ Current->BaseName.Buffer, Current->Base, Current->Length);
+
+ ModuleCount++;
+ CurrentEntry = CurrentEntry->Flink;
+ }
+
+ DbgPrint ("%d modules listed\n", ModuleCount);
+}
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-/* $Id: bug.c,v 1.21 2002/05/02 23:45:32 dwelch Exp $
+/* $Id: bug.c,v 1.22 2002/05/13 18:10:39 chorns Exp $
*
* PROJECT: ReactOS kernel
* FILE: ntoskrnl/ke/bug.c
{
PRTL_MESSAGE_RESOURCE_ENTRY Message;
NTSTATUS Status;
-
- /* PJS: disable interrupts first, then do the rest */
- __asm__("cli\n\t");
+ KIRQL OldIrql;
+
+ KeRaiseIrql(HIGH_LEVEL, &OldIrql);
+
DbgPrint("Bug detected (code %x param %x %x %x %x)\n",
BugCheckCode,
BugCheckParameter1,
if (InBugCheck == 1)
{
DbgPrint("Recursive bug check halting now\n");
+
+ if (KdDebuggerEnabled)
+ {
+ DbgBreakPoint();
+ }
+
for (;;)
{
__asm__("hlt\n\t");
}
}
InBugCheck = 1;
+
if (PsGetCurrentProcess() != NULL)
{
DbgPrint("Pid: %x <", PsGetCurrentProcess()->UniqueProcessId);
PsGetCurrentThread(),
PsGetCurrentThread()->Cid.UniqueThread);
}
-// PsDumpThreads();
- KeDumpStackFrames((PULONG)__builtin_frame_address(0));
-
+ KeDumpStackFrames((PULONG)__builtin_frame_address(0));
+
if (KdDebuggerEnabled)
{
- __asm__("sti\n\t");
DbgBreakPoint();
}
*/
if (ExceptionNr == 14)
{
+#ifdef DBG
+ KIRQL SavedIrql = KeGetCurrentIrql();
+#endif /* DBG */
+
__asm__("sti\n\t");
Status = MmPageFault(Tf->Cs&0xffff,
&Tf->Eip,
Tf->ErrorCode);
if (NT_SUCCESS(Status))
{
+ assertmsg((KeGetCurrentIrql() == SavedIrql),
+ ("Page fault handler changed IRQL (Before %d After %d). Forgot to release a spin lock?\n",
+ SavedIrql, KeGetCurrentIrql()));
return(0);
}
ULONG KiPcrInitDone = 0;
static ULONG PcrsAllocated = 0;
-static PVOID PcrPages[MAXIMUM_PROCESSORS];
+static ULONG_PTR PcrPages[MAXIMUM_PROCESSORS];
/* FUNCTIONS *****************************************************************/
KPCR = (PKPCR)(KPCR_BASE + (Offset * PAGESIZE));
MmCreateVirtualMappingForKernel((PVOID)KPCR,
PAGE_READWRITE,
- (ULONG)PcrPages[Offset]);
+ PcrPages[Offset]);
memset(KPCR, 0, PAGESIZE);
KPCR->ProcessorNumber = Offset;
KPCR->Self = KPCR;
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-/* $Id: kthread.c,v 1.23 2002/05/07 22:34:17 hbirr Exp $
+/* $Id: kthread.c,v 1.24 2002/05/13 18:10:39 chorns Exp $
*
* FILE: ntoskrnl/ke/kthread.c
* PURPOSE: Microkernel thread support
/* FUNCTIONS *****************************************************************/
VOID
-KeFreeStackPage(PVOID Context, MEMORY_AREA* MemoryArea, PVOID Address, ULONG PhysAddr,
- SWAPENTRY SwapEntry, BOOLEAN Dirty)
+KeFreeStackPage(IN BOOLEAN Before,
+ IN PVOID Context,
+ IN PMEMORY_AREA MemoryArea,
+ IN PVOID Address,
+ IN ULONG_PTR PhysicalAddress,
+ IN SWAPENTRY SwapEntry,
+ IN BOOLEAN Dirty)
{
- assert(SwapEntry == 0);
- if (PhysAddr != 0)
+ if (!Before)
{
- MmDereferencePage((PVOID)PhysAddr);
+ assert(SwapEntry == 0);
+ if (PhysicalAddress != 0)
+ {
+ MmDereferencePage(PhysicalAddress);
+ }
}
}
MEMORY_AREA_KERNEL_STACK,
&KernelStack,
MM_STACK_SIZE,
- 0,
+ PAGE_EXECUTE_READWRITE,
&StackArea,
FALSE);
MmUnlockAddressSpace(MmGetKernelAddressSpace());
}
for (i = 0; i < (MM_STACK_SIZE / PAGESIZE); i++)
{
- PVOID Page;
+ ULONG_PTR Page;
Status = MmRequestPageMemoryConsumer(MC_NPPOOL, TRUE, &Page);
if (!NT_SUCCESS(Status))
{
}
Status = MmCreateVirtualMapping(NULL,
KernelStack + (i * PAGESIZE),
- PAGE_EXECUTE_READWRITE,
- (ULONG)Page,
+ StackArea->Attributes,
+ Page,
TRUE);
}
Thread->InitialStack = KernelStack + MM_STACK_SIZE;
-/* $Id: timer.c,v 1.48 2002/04/26 13:11:28 ekohl Exp $
+/* $Id: timer.c,v 1.49 2002/05/13 18:10:39 chorns Exp $
*
* COPYRIGHT: See COPYING in the top level directory
* PROJECT: ReactOS kernel
KeSetTimer(&Thread->Timer, *Interval, NULL);
return (KeWaitForSingleObject(&Thread->Timer,
Executive,
- UserMode,
+ WaitMode,
Alertable,
NULL));
}
/* FUNCTIONS *****************************************************************/
+#ifdef DBG
+
+/*
+ * Checked-build consistency check: assert that a dispatcher object's
+ * Type field lies within the known dispatcher-object type range
+ * [InternalBaseType, InternalTypeMaximum].
+ */
+VOID
+KiValidateDispatcherObject(IN PDISPATCHER_HEADER Object)
+{
+ if ((Object->Type < InternalBaseType)
+ || (Object->Type > InternalTypeMaximum))
+ {
+ assertmsg(FALSE, ("Bad dispatcher object type %d\n", Object->Type));
+ }
+}
+
+
+/*
+ * Return the thread that currently owns the given dispatcher object,
+ * or NULL if the object's type does not track an owner.  Of the types
+ * handled here, only mutexes record an owning thread.
+ */
+PKTHREAD
+KiGetDispatcherObjectOwner(IN PDISPATCHER_HEADER Header)
+{
+ switch (Header->Type)
+ {
+ case InternalMutexType:
+ {
+ PKMUTEX Mutex = (PKMUTEX)Header;
+ return Mutex->OwnerThread;
+ }
+
+ default:
+ /* No owner */
+ return NULL;
+ }
+}
+
+
+VOID KiDeadlockDetectionMutualResource(IN PDISPATCHER_HEADER CurrentObject,
+ IN PKTHREAD CurrentThread,
+ IN PKTHREAD OwnerThread)
+/*
+ * PURPOSE: Perform deadlock detection.
+ * At this point, OwnerThread owns CurrentObject and CurrentThread
+ * is waiting on CurrentObject.
+ * Deadlock happens if OwnerThread is waiting on a resource that
+ * CurrentThread owns.
+ * NOTE: Called with the dispatcher database locked
+ */
+{
+ PDISPATCHER_HEADER Object;
+ PKWAIT_BLOCK WaitBlock;
+ PKTHREAD Thread;
+
+ DPRINT("KiDeadlockDetectionMutualResource("
+ "CurrentObject 0x%.08x, CurrentThread 0x%.08x OwnerThread 0x%.08x)\n",
+ CurrentObject, CurrentThread, OwnerThread);
+
+ /* Go through all dispatcher objects that OwnerThread is waiting on */
+ for (WaitBlock = OwnerThread->WaitBlockList;
+ WaitBlock;
+ WaitBlock = WaitBlock->NextWaitBlock)
+ {
+ Object = WaitBlock->Object;
+
+ /* If OwnerThread is waiting on a resource that CurrentThread has
+ * acquired then we have a deadlock: report both edges of the cycle
+ * and bugcheck */
+ Thread = KiGetDispatcherObjectOwner(Object);
+ if ((Thread != NULL) && (Thread == CurrentThread))
+ {
+ DbgPrint("Deadlock detected!\n");
+ DbgPrint("Thread 0x%.08x is waiting on Object 0x%.08x of type %d "
+ " which is owned by Thread 0x%.08x\n",
+ Thread, CurrentObject, CurrentObject->Type, OwnerThread);
+ DbgPrint("Thread 0x%.08x is waiting on Object 0x%.08x of type %d "
+ " which is owned by Thread 0x%.08x\n",
+ OwnerThread, Object, Object->Type, Thread);
+ KeBugCheck(0);
+ }
+ }
+}
+
+
+VOID KiDeadlockDetection(IN PDISPATCHER_HEADER Header)
+/*
+ * PURPOSE: Perform deadlock detection
+ * NOTE: Called with the dispatcher database locked
+ * NOTE(review): the loop below walks the wait-block list of the FIRST
+ * waiter on Header only, and KiGetDispatcherObjectOwner() is invoked on
+ * the loop-invariant Current->Object on every pass.  The "all threads
+ * waiting" comment suggests the intent may have been to iterate
+ * Header->WaitListHead instead -- TODO confirm.
+ */
+{
+ PLIST_ENTRY CurrentEntry;
+ PKWAIT_BLOCK WaitBlock;
+ PKWAIT_BLOCK Current;
+ PKTHREAD OwnerThread;
+ PKTHREAD Thread;
+
+ DPRINT("KiDeadlockDetection(Header %x)\n", Header);
+ /* Nothing to check when no thread is waiting on this object */
+ if (IsListEmpty(&(Header->WaitListHead)))
+ return;
+
+ CurrentEntry = Header->WaitListHead.Flink;
+ Current = CONTAINING_RECORD(CurrentEntry, KWAIT_BLOCK, WaitListEntry);
+
+ /* Go through all threads waiting on this dispatcher object */
+ for (WaitBlock = Current->Thread->WaitBlockList;
+ WaitBlock;
+ WaitBlock = WaitBlock->NextWaitBlock)
+ {
+ Thread = WaitBlock->Thread;
+
+ DPRINT("KiDeadlockDetection: WaitBlock->Thread %x waiting on WaitBlock->Object %x\n",
+ WaitBlock->Thread, WaitBlock->Object);
+
+ /* If another thread is currently owning this dispatcher object,
+ see if we have a deadlock */
+ OwnerThread = KiGetDispatcherObjectOwner(Current->Object);
+ DPRINT("KiDeadlockDetection: OwnerThread %x\n", OwnerThread);
+ if ((OwnerThread != NULL) && (OwnerThread != Thread))
+ {
+ KiDeadlockDetectionMutualResource(Current->Object,
+ Current->Thread,
+ OwnerThread);
+ }
+ }
+}
+
+#endif /* DBG */
+
+
VOID KeInitializeDispatcherHeader(DISPATCHER_HEADER* Header,
ULONG Type,
ULONG Size,
DPRINT1("Thread == NULL!\n");
KeBugCheck(0);
}
+
if (Abandoned != NULL)
*Abandoned = Mutex->Abandoned;
if (Thread != NULL)
KIRQL WaitIrql;
BOOLEAN Abandoned;
+ assert_irql(DISPATCH_LEVEL);
+
+ VALIDATE_DISPATCHER_OBJECT(Object);
+
CurrentThread = KeGetCurrentThread();
WaitIrql = KeGetCurrentIrql();
}
return(STATUS_TIMEOUT);
}
-
- /*
- * Set up for a wait
- */
+
+#ifdef DBG
+
+ if (hdr->Type == InternalMutexType)
+ {
+ PKMUTEX Mutex;
+
+ Mutex = CONTAINING_RECORD(hdr, KMUTEX, Header);
+ assertmsg(Mutex->OwnerThread != CurrentThread,
+ ("Recursive locking of mutex (0x%.08x)\n", Mutex));
+ }
+
+#endif /* DBG */
+
+ /*
+ * Set up for a wait
+ */
CurrentThread->WaitStatus = STATUS_UNSUCCESSFUL;
/* Append wait block to the KTHREAD wait block list */
CurrentThread->WaitBlockList = &CurrentThread->WaitBlock[0];
{
CurrentThread->WaitBlock[0].NextWaitBlock = NULL;
}
+
+#ifdef DBG
+ /*
+ * Do deadlock detection in checked version
+ * NOTE: This must be done after the dispatcher object is put on
+ * the wait block list.
+ */
+ KiDeadlockDetection(Object);
+#endif /* DBG */
+
PsBlockThread(&Status, (UCHAR)Alertable, WaitMode, TRUE, WaitIrql);
} while (Status == STATUS_KERNEL_APC);
DPRINT("Entering KeWaitForMultipleObjects(Count %lu Object[] %p) "
"PsGetCurrentThread() %x\n",Count,Object,PsGetCurrentThread());
+ assert_irql(APC_LEVEL);
+
CountSignaled = 0;
CurrentThread = KeGetCurrentThread();
WaitIrql = KeGetCurrentIrql();
for (i = 0; i < Count; i++)
{
hdr = (DISPATCHER_HEADER *)Object[i];
+
+ VALIDATE_DISPATCHER_OBJECT(hdr);
if (KiIsObjectSignalled(hdr, CurrentThread, &Abandoned))
{
for (i = 0; i < Count; i++)
{
hdr = (DISPATCHER_HEADER *)Object[i];
-
+ assertmsg(hdr != NULL, ("Waiting on uninitialized object\n"));
blk->Object = Object[i];
blk->Thread = CurrentThread;
blk->WaitKey = STATUS_WAIT_0 + i;
{
blk->NextWaitBlock = blk + 1;
}
-
+
InsertTailList(&hdr->WaitListHead, &blk->WaitListEntry);
blk = blk->NextWaitBlock;
-/* $Id: aspace.c,v 1.8 2001/12/31 01:53:45 dwelch Exp $
+/* $Id: aspace.c,v 1.9 2002/05/13 18:10:40 chorns Exp $
*
* COPYRIGHT: See COPYING in the top level directory
* PROJECT: ReactOS kernel
/* INCLUDES *****************************************************************/
+#include <roscfg.h>
#include <ddk/ntddk.h>
#include <internal/mm.h>
#include <internal/ps.h>
#include <internal/debug.h>
+/* Define to track address space locking/unlocking */
+//#define TRACK_ADDRESS_SPACE_LOCK
+
/* GLOBALS ******************************************************************/
STATIC MADDRESS_SPACE KernelAddressSpace;
+#define TAG_ASPC TAG('A', 'S', 'P', 'C')
#define TAG_PTRC TAG('P', 'T', 'R', 'C')
/* FUNCTIONS *****************************************************************/
-VOID
-MmLockAddressSpace(PMADDRESS_SPACE AddressSpace)
+#ifdef DBG
+/*
+ * Checked-build sanity check on an address space: it must be non-NULL,
+ * carry the TAG_ASPC magic (set by MmInitializeAddressSpace via
+ * SET_MAGIC) and have at least one outstanding reference.
+ */
+VOID
+MiValidateAddressSpace(IN PMADDRESS_SPACE AddressSpace)
+{
+ assertmsg(AddressSpace != NULL,
+ ("No address space can exist at 0x%.08x\n", AddressSpace));
+
+ assertmsg(AddressSpace->Magic == TAG_ASPC,
+ ("Bad magic (0x%.08x) for address space (0x%.08x). It should be 0x%.08x\n",
+ AddressSpace->Magic, AddressSpace, TAG_ASPC));
+
+ assertmsg(AddressSpace->ReferenceCount > 0,
+ ("No outstanding references on address space (0x%.08x)\n", AddressSpace));
+}
+#endif /* DBG */
+
+
+#ifdef DBG
+
+/*
+ * DBG build: validate and then lock an address space by acquiring its
+ * Lock mutex.  FileName/LineNumber identify the call site and are used
+ * only by the optional TRACK_ADDRESS_SPACE_LOCK trace output.
+ */
+VOID
+MiLockAddressSpace(IN PMADDRESS_SPACE AddressSpace,
+ IN LPSTR FileName,
+ IN ULONG LineNumber)
+{
+ VALIDATE_ADDRESS_SPACE(AddressSpace);
+
+ /*
+ * Don't bother with locking if we are the first thread
+ * (KeGetCurrentThread() is still NULL during early boot).
+ */
+ if (KeGetCurrentThread() == NULL)
+ {
+ return;
+ }
+
+#ifdef TRACK_ADDRESS_SPACE_LOCK
+ DbgPrint("(0x%.08x)(%s:%d) Locking address space 0x%.08x\n",
+ KeGetCurrentThread(), FileName, LineNumber, AddressSpace);
+#endif /* TRACK_ADDRESS_SPACE_LOCK */
+
+ (VOID)KeWaitForMutexObject(&AddressSpace->Lock,
+ Executive,
+ KernelMode,
+ FALSE,
+ NULL);
+
+#ifdef TRACK_ADDRESS_SPACE_LOCK
+ DbgPrint("(0x%.08x)(%s:%d) Locked address space 0x%.08x\n",
+ KeGetCurrentThread(), FileName, LineNumber, AddressSpace);
+#endif /* TRACK_ADDRESS_SPACE_LOCK */
+}
+
+
+VOID
+MiUnlockAddressSpace(IN PMADDRESS_SPACE AddressSpace,
+ IN LPSTR FileName,
+ IN ULONG LineNumber)
{
+ VALIDATE_ADDRESS_SPACE(AddressSpace);
+
/*
- * Don't bother with locking if we are the first thread.
+ * Don't bother locking if we are the first thread.
*/
if (KeGetCurrentThread() == NULL)
{
return;
}
- (VOID)KeWaitForMutexObject(&AddressSpace->Lock,
- 0,
- KernelMode,
- FALSE,
- NULL);
+ KeReleaseMutex(&AddressSpace->Lock, FALSE);
+
+#ifdef TRACK_ADDRESS_SPACE_LOCK
+ DbgPrint("(0x%.08x)(%s:%d) Unlocked address space 0x%.08x\n",
+ KeGetCurrentThread(), FileName, LineNumber, AddressSpace);
+#endif /* TRACK_ADDRESS_SPACE_LOCK */
}
-VOID
-MmUnlockAddressSpace(PMADDRESS_SPACE AddressSpace)
+#else /* !DBG */
+
+/*
+ * Free build: lock an address space by acquiring its Lock mutex.
+ * No validation or call-site tracking in this configuration.
+ */
+VOID
+MiLockAddressSpace(IN PMADDRESS_SPACE AddressSpace)
+{
+ /*
+ * Don't bother with locking if we are the first thread
+ * (KeGetCurrentThread() is still NULL during early boot).
+ */
+ if (KeGetCurrentThread() == NULL)
+ {
+ return;
+ }
+
+ (VOID)KeWaitForMutexObject(&AddressSpace->Lock,
+ Executive,
+ KernelMode,
+ FALSE,
+ NULL);
+}
+
+
+VOID
+MiUnlockAddressSpace(IN PMADDRESS_SPACE AddressSpace)
{
/*
* Don't bother locking if we are the first thread.
KeReleaseMutex(&AddressSpace->Lock, FALSE);
}
+#endif /* !DBG */
+
+
+/* Initialize the single, global kernel address space. */
VOID
MmInitializeKernelAddressSpace(VOID)
{
 MmInitializeAddressSpace(NULL, &KernelAddressSpace);
}
-PMADDRESS_SPACE MmGetCurrentAddressSpace(VOID)
+
+/* Return the address space of the current process. */
+PMADDRESS_SPACE
+MmGetCurrentAddressSpace(VOID)
{
 return(&PsGetCurrentProcess()->AddressSpace);
}
-PMADDRESS_SPACE MmGetKernelAddressSpace(VOID)
+
+/* Return the single, global kernel address space. */
+PMADDRESS_SPACE
+MmGetKernelAddressSpace(VOID)
{
 return(&KernelAddressSpace);
}
+
NTSTATUS
-MmInitializeAddressSpace(PEPROCESS Process,
- PMADDRESS_SPACE AddressSpace)
+MmInitializeAddressSpace(IN PEPROCESS Process,
+ IN PMADDRESS_SPACE AddressSpace)
{
+ SET_MAGIC(AddressSpace, TAG_ASPC)
+
+ AddressSpace->ReferenceCount = 1;
InitializeListHead(&AddressSpace->MAreaListHead);
KeInitializeMutex(&AddressSpace->Lock, 1);
if (Process != NULL)
return(STATUS_SUCCESS);
}
+
NTSTATUS
-MmDestroyAddressSpace(PMADDRESS_SPACE AddressSpace)
+MmDestroyAddressSpace(IN PMADDRESS_SPACE AddressSpace)
{
+ VALIDATE_ADDRESS_SPACE(AddressSpace);
+
+ AddressSpace->ReferenceCount--;
+
+ assertmsg(AddressSpace->ReferenceCount == 0,
+ ("There are %d outstanding references on address space (0x%.08x)\n",
+ AddressSpace->ReferenceCount, AddressSpace));
+
if (AddressSpace->PageTableRefCountTable != NULL)
{
ExFreePool(AddressSpace->PageTableRefCountTable);
}
return(STATUS_SUCCESS);
}
+
+
+/* Take an additional reference on an address space (interlocked). */
+VOID
+MmReferenceAddressSpace(IN PMADDRESS_SPACE AddressSpace)
+{
+ InterlockedIncrement(&AddressSpace->ReferenceCount);
+}
+
+
+/*
+ * Drop a reference on an address space (interlocked).
+ * NOTE(review): the assert requires the count to remain above zero after
+ * the decrement, so this routine must never release the last reference --
+ * presumably final teardown goes through MmDestroyAddressSpace, which
+ * expects the count to reach exactly 0; TODO confirm.  The read in the
+ * assert is also not atomic with the decrement (checked-build only).
+ */
+VOID
+MmDereferenceAddressSpace(IN PMADDRESS_SPACE AddressSpace)
+{
+ InterlockedDecrement(&AddressSpace->ReferenceCount);
+
+ assertmsg(AddressSpace->ReferenceCount > 0,
+ ("No outstanding references on address space (0x%.08x)\n", AddressSpace));
+}
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-/* $Id: balance.c,v 1.6 2002/02/28 17:44:48 hbirr Exp $
+/* $Id: balance.c,v 1.7 2002/05/13 18:10:40 chorns Exp $
*
* COPYRIGHT: See COPYING in the top directory
* PROJECT: ReactOS kernel
typedef struct _MM_ALLOCATION_REQUEST
{
- PVOID Page;
+ ULONG_PTR Page;
LIST_ENTRY ListEntry;
KEVENT Event;
} MM_ALLOCATION_REQUEST, *PMM_ALLOCATION_REQUEST;
}
NTSTATUS
-MmReleasePageMemoryConsumer(ULONG Consumer, PVOID Page)
+MmReleasePageMemoryConsumer(ULONG Consumer, ULONG_PTR Page)
{
PMM_ALLOCATION_REQUEST Request;
PLIST_ENTRY Entry;
KIRQL oldIrql;
- if (Page == NULL)
+ if (Page == 0)
{
DPRINT1("Tried to release page zero.\n");
KeBugCheck(0);
}
NTSTATUS
-MmRequestPageMemoryConsumer(ULONG Consumer, BOOLEAN CanWait, PVOID* AllocatedPage)
+MmRequestPageMemoryConsumer(ULONG Consumer, BOOLEAN CanWait, PULONG_PTR AllocatedPage)
{
ULONG OldUsed;
ULONG OldAvailable;
- PVOID Page;
+ ULONG_PTR Page;
KIRQL oldIrql;
/*
}
/* Insert an allocation request. */
- Request.Page = NULL;
+ Request.Page = 0;
KeInitializeEvent(&Request.Event, NotificationEvent, FALSE);
InterlockedIncrement(&MiPagesRequired);
{
Page = MmAllocPage(Consumer, 0);
KeReleaseSpinLock(&AllocationListLock, oldIrql);
- if (Page == NULL)
+ if (Page == 0)
{
KeBugCheck(0);
}
NULL);
Page = Request.Page;
- if (Page == NULL)
+ if (Page == 0)
{
KeBugCheck(0);
}
* Actually allocate the page.
*/
Page = MmAllocPage(Consumer, 0);
- if (Page == NULL)
+ if (Page == 0)
{
KeBugCheck(0);
}
-/* $Id: cont.c,v 1.17 2002/01/01 00:21:55 dwelch Exp $
+/* $Id: cont.c,v 1.18 2002/05/13 18:10:40 chorns Exp $
*
* COPYRIGHT: See COPYING in the top level directory
* PROJECT: ReactOS kernel
/* FUNCTIONS *****************************************************************/
-VOID STATIC
-MmFreeContinuousPage(PVOID Context, MEMORY_AREA* MemoryArea, PVOID Address, ULONG PhysAddr,
- SWAPENTRY SwapEntry, BOOLEAN Dirty)
+VOID
+MmFreeContinuousPage(IN BOOLEAN Before,
+ IN PVOID Context,
+ IN PMEMORY_AREA MemoryArea,
+ IN PVOID Address,
+ IN ULONG_PTR PhysicalAddress,
+ IN SWAPENTRY SwapEntry,
+ IN BOOLEAN Dirty)
{
- assert(SwapEntry == 0);
- if (PhysAddr != 0)
+ if (!Before)
{
- MmDereferencePage((PVOID)PhysAddr);
+ assert(SwapEntry == 0);
+ if (PhysicalAddress != 0)
+ {
+ MmDereferencePage(PhysicalAddress);
+ }
}
}
MmLockAddressSpace(MmGetKernelAddressSpace());
Status = MmCreateMemoryArea(NULL,
- MmGetKernelAddressSpace(),
- MEMORY_AREA_CONTINUOUS_MEMORY,
- &BaseAddress,
- NumberOfBytes,
- 0,
- &MArea,
- FALSE);
+ MmGetKernelAddressSpace(),
+ MEMORY_AREA_CONTINUOUS_MEMORY,
+ &BaseAddress,
+ NumberOfBytes,
+ 0,
+ &MArea,
+ FALSE);
MmUnlockAddressSpace(MmGetKernelAddressSpace());
if (!NT_SUCCESS(Status))
VOID STDCALL
MmFreeContiguousMemory(IN PVOID BaseAddress)
{
- MmFreeMemoryArea(MmGetKernelAddressSpace(),
- BaseAddress,
- 0,
- MmFreeContinuousPage,
- NULL);
+ MmFreeMemoryArea(MmGetKernelAddressSpace(),
+ BaseAddress,
+ 0,
+ MmFreeContinuousPage,
+ NULL);
}
* FILE: ntoskrnl/mm/freelist.c
* PURPOSE: Handle the list of free physical pages
* PROGRAMMER: David Welch (welch@cwcom.net)
+ * Casper S. Hornstrup (chorns@users.sourceforge.net)
* UPDATE HISTORY:
* 27/05/98: Created
* 18/08/98: Added a fix from Robert Bergkvist
/* TYPES *******************************************************************/
-#define MM_PHYSICAL_PAGE_FREE (0x1)
-#define MM_PHYSICAL_PAGE_USED (0x2)
-#define MM_PHYSICAL_PAGE_BIOS (0x3)
+/* GLOBALS ****************************************************************/
-#define MM_PTYPE(x) ((x) & 0x3)
+PPHYSICAL_PAGE MmPageArray;
+
+KSPIN_LOCK MiPageListLock;
+LIST_ENTRY MiUsedPageListHeads[MC_MAXIMUM];
+LIST_ENTRY MiFreeZeroedPageListHead;
+LIST_ENTRY MiFreeUnzeroedPageListHead;
+LIST_ENTRY MiBiosPageListHead;
+LIST_ENTRY MiStandbyPageListHead;
+ULONG MiStandbyPageListSize;
+LIST_ENTRY MiModifiedPageListHead;
+ULONG MiModifiedPageListSize;
+LIST_ENTRY MiModifiedNoWritePageListHead;
+ULONG MiModifiedNoWritePageListSize;
+/* LIST_ENTRY BadPageListHead; */
-typedef struct _PHYSICAL_PAGE
-{
- ULONG Flags;
- LIST_ENTRY ListEntry;
- ULONG ReferenceCount;
- SWAPENTRY SavedSwapEntry;
- ULONG LockCount;
- ULONG MapCount;
- struct _MM_RMAP_ENTRY* RmapListHead;
-} PHYSICAL_PAGE, *PPHYSICAL_PAGE;
+/* FUNCTIONS *************************************************************/
-/* GLOBALS ****************************************************************/
+/*
+ * Acquire MiPageListLock and return the head of the requested page list
+ * in *ListHead.  Uses KeAcquireSpinLockAtDpcLevel, so the caller must
+ * already be at DISPATCH_LEVEL or above.  Bugchecks on an unknown
+ * PageList identifier.
+ */
+VOID
+MiAcquirePageListLock(IN ULONG PageList,
+ OUT PLIST_ENTRY * ListHead)
+{
+ KeAcquireSpinLockAtDpcLevel(&MiPageListLock);
+ switch (PageList)
+ {
+ case PAGE_LIST_FREE_ZEROED:
+ *ListHead = &MiFreeZeroedPageListHead;
+ break;
+ case PAGE_LIST_FREE_UNZEROED:
+ *ListHead = &MiFreeUnzeroedPageListHead;
+ break;
+ case PAGE_LIST_BIOS:
+ *ListHead = &MiBiosPageListHead;
+ break;
+ case PAGE_LIST_STANDBY:
+ *ListHead = &MiStandbyPageListHead;
+ break;
+ case PAGE_LIST_MODIFIED:
+ *ListHead = &MiModifiedPageListHead;
+ break;
+ case PAGE_LIST_MODIFIED_NO_WRITE:
+ *ListHead = &MiModifiedNoWritePageListHead;
+ break;
+ default:
+ DPRINT1("Bad page list type 0x%.08x\n", PageList);
+ KeBugCheck(0);
+ break;
+ }
+}
-static PPHYSICAL_PAGE MmPageArray;
-static KSPIN_LOCK PageListLock;
-static LIST_ENTRY UsedPageListHeads[MC_MAXIMUM];
-static LIST_ENTRY FreeZeroedPageListHead;
-static LIST_ENTRY FreeUnzeroedPageListHead;
-static LIST_ENTRY BiosPageListHead;
+/*
+ * Release MiPageListLock acquired by MiAcquirePageListLock.
+ * IRQL remains at DISPATCH_LEVEL (FromDpcLevel variant).
+ */
+VOID
+MiReleasePageListLock(VOID)
+{
+ KeReleaseSpinLockFromDpcLevel(&MiPageListLock);
+}
-/* FUNCTIONS *************************************************************/
VOID
-MmTransferOwnershipPage(PVOID PhysicalAddress, ULONG NewConsumer)
+MmTransferOwnershipPage(IN ULONG_PTR PhysicalAddress,
+ IN ULONG NewConsumer)
{
- ULONG Start = (ULONG)PhysicalAddress / PAGESIZE;
+ ULONG Start = PhysicalAddress / PAGESIZE;
KIRQL oldIrql;
-
- KeAcquireSpinLock(&PageListLock, &oldIrql);
+
+ VALIDATE_PHYSICAL_ADDRESS(PhysicalAddress);
+
+ KeAcquireSpinLock(&MiPageListLock, &oldIrql);
RemoveEntryList(&MmPageArray[Start].ListEntry);
- InsertTailList(&UsedPageListHeads[NewConsumer],
+ MmPageArray[Start].Flags = MM_PHYSICAL_PAGE_USED;
+ InsertTailList(&MiUsedPageListHeads[NewConsumer],
&MmPageArray[Start].ListEntry);
- KeReleaseSpinLock(&PageListLock, oldIrql);
+ KeReleaseSpinLock(&MiPageListLock, oldIrql);
}
-PVOID
+ULONG_PTR
MmGetLRUFirstUserPage(VOID)
{
PLIST_ENTRY NextListEntry;
- ULONG Next;
+ ULONG_PTR Next;
PHYSICAL_PAGE* PageDescriptor;
KIRQL oldIrql;
- KeAcquireSpinLock(&PageListLock, &oldIrql);
- NextListEntry = UsedPageListHeads[MC_USER].Flink;
- if (NextListEntry == &UsedPageListHeads[MC_USER])
+ KeAcquireSpinLock(&MiPageListLock, &oldIrql);
+ NextListEntry = MiUsedPageListHeads[MC_USER].Flink;
+ if (NextListEntry == &MiUsedPageListHeads[MC_USER])
{
- KeReleaseSpinLock(&PageListLock, oldIrql);
- return(NULL);
+ KeReleaseSpinLock(&MiPageListLock, oldIrql);
+ return(0);
}
PageDescriptor = CONTAINING_RECORD(NextListEntry, PHYSICAL_PAGE, ListEntry);
- Next = (ULONG)((ULONG)PageDescriptor - (ULONG)MmPageArray);
- Next = (Next / sizeof(PHYSICAL_PAGE)) * PAGESIZE;
- KeReleaseSpinLock(&PageListLock, oldIrql);
- return((PVOID)Next);
+ //Next = ((ULONG_PTR) PageDescriptor - (ULONG_PTR)MmPageArray);
+ //Next = (Next / sizeof(PHYSICAL_PAGE)) * PAGESIZE;
+ Next = MiPageFromDescriptor(PageDescriptor);
+
+ assertmsg(PageDescriptor->Flags == MM_PHYSICAL_PAGE_USED,
+ ("Page at 0x%.08x on used page list is not used.\n", Next));
+
+ KeReleaseSpinLock(&MiPageListLock, oldIrql);
+ return(Next);
}
-PVOID
-MmGetLRUNextUserPage(PVOID PreviousPhysicalAddress)
+ULONG_PTR
+MmGetLRUNextUserPage(IN ULONG_PTR PreviousPhysicalAddress)
{
- ULONG Start = (ULONG)PreviousPhysicalAddress / PAGESIZE;
+ ULONG Start = PreviousPhysicalAddress / PAGESIZE;
PLIST_ENTRY NextListEntry;
- ULONG Next;
+ ULONG_PTR Next;
PHYSICAL_PAGE* PageDescriptor;
KIRQL oldIrql;
- KeAcquireSpinLock(&PageListLock, &oldIrql);
+ VALIDATE_PHYSICAL_ADDRESS(PreviousPhysicalAddress);
+
+ KeAcquireSpinLock(&MiPageListLock, &oldIrql);
if (!(MmPageArray[Start].Flags & MM_PHYSICAL_PAGE_USED))
{
- NextListEntry = UsedPageListHeads[MC_USER].Flink;
+ NextListEntry = MiUsedPageListHeads[MC_USER].Flink;
}
else
{
NextListEntry = MmPageArray[Start].ListEntry.Flink;
}
- if (NextListEntry == &UsedPageListHeads[MC_USER])
+ if (NextListEntry == &MiUsedPageListHeads[MC_USER])
{
- KeReleaseSpinLock(&PageListLock, oldIrql);
- return(NULL);
+ KeReleaseSpinLock(&MiPageListLock, oldIrql);
+ return(0);
}
PageDescriptor = CONTAINING_RECORD(NextListEntry, PHYSICAL_PAGE, ListEntry);
- Next = (ULONG)((ULONG)PageDescriptor - (ULONG)MmPageArray);
- Next = (Next / sizeof(PHYSICAL_PAGE)) * PAGESIZE;
- KeReleaseSpinLock(&PageListLock, oldIrql);
- return((PVOID)Next);
+ Next = MiPageFromDescriptor(PageDescriptor);
+
+ assertmsg(PageDescriptor->Flags == MM_PHYSICAL_PAGE_USED,
+ ("Page at 0x%.08x on used page list is not used.\n", Next));
+
+ KeReleaseSpinLock(&MiPageListLock, oldIrql);
+ return(Next);
}
PVOID
NrPages = PAGE_ROUND_UP(NumberOfBytes) / PAGESIZE;
- KeAcquireSpinLock(&PageListLock, &oldIrql);
+ KeAcquireSpinLock(&MiPageListLock, &oldIrql);
start = -1;
length = 0;
}
if (start == -1 || length != NrPages)
{
- KeReleaseSpinLock(&PageListLock, oldIrql);
+ KeReleaseSpinLock(&MiPageListLock, oldIrql);
return(NULL);
}
for (i = start; i < (start + length); i++)
MmPageArray[i].LockCount = 0;
MmPageArray[i].MapCount = 0;
MmPageArray[i].SavedSwapEntry = 0;
- InsertTailList(&UsedPageListHeads[MC_NPPOOL],
+ InsertTailList(&MiUsedPageListHeads[MC_NPPOOL],
&MmPageArray[i].ListEntry);
}
- KeReleaseSpinLock(&PageListLock, oldIrql);
+ KeReleaseSpinLock(&MiPageListLock, oldIrql);
return((PVOID)(start * 4096));
}
{
MmPageArray[i].Flags = MM_PHYSICAL_PAGE_FREE;
MmPageArray[i].ReferenceCount = 0;
- InsertTailList(&FreeUnzeroedPageListHead,
+ InsertTailList(&MiFreeUnzeroedPageListHead,
&MmPageArray[i].ListEntry);
}
}
{
MmPageArray[i].Flags = MM_PHYSICAL_PAGE_BIOS;
MmPageArray[i].ReferenceCount = 1;
- InsertTailList(&BiosPageListHead,
+ InsertTailList(&MiBiosPageListHead,
&MmPageArray[i].ListEntry);
}
}
for (i = 0; i < MC_MAXIMUM; i++)
{
- InitializeListHead(&UsedPageListHeads[i]);
+ InitializeListHead(&MiUsedPageListHeads[i]);
}
- KeInitializeSpinLock(&PageListLock);
- InitializeListHead(&FreeUnzeroedPageListHead);
- InitializeListHead(&FreeZeroedPageListHead);
- InitializeListHead(&BiosPageListHead);
+ KeInitializeSpinLock(&MiPageListLock);
+ InitializeListHead(&MiFreeUnzeroedPageListHead);
+ InitializeListHead(&MiFreeZeroedPageListHead);
+ InitializeListHead(&MiBiosPageListHead);
+ InitializeListHead(&MiStandbyPageListHead);
+ MiStandbyPageListSize = 0;
+ InitializeListHead(&MiModifiedPageListHead);
+ MiModifiedPageListSize = 0;
+ InitializeListHead(&MiModifiedNoWritePageListHead);
+ MiModifiedNoWritePageListSize = 0;
LastKernelAddress = PAGE_ROUND_UP(LastKernelAddress);
DPRINT("Reserved %d\n", Reserved);
LastKernelAddress = PAGE_ROUND_UP(LastKernelAddress);
- LastKernelAddress = ((ULONG)LastKernelAddress + (Reserved * PAGESIZE));
+ LastKernelAddress = ((ULONG_PTR) LastKernelAddress + (Reserved * PAGESIZE));
LastPhysKernelAddress = (PVOID)PAGE_ROUND_UP(LastPhysKernelAddress);
LastPhysKernelAddress = LastPhysKernelAddress + (Reserved * PAGESIZE);
-
+
MmStats.NrTotalPages = 0;
MmStats.NrSystemPages = 0;
MmStats.NrUserPages = 0;
for (i = 0; i < Reserved; i++)
{
- if (!MmIsPagePresent(NULL,
- (PVOID)((ULONG)MmPageArray + (i * PAGESIZE))))
+ if (!MmIsPagePresent(NULL, (PVOID) ((ULONG_PTR)MmPageArray + (i * PAGESIZE))))
{
- Status =
+ Status =
MmCreateVirtualMappingUnsafe(NULL,
- (PVOID)((ULONG)MmPageArray +
+ (PVOID)((ULONG_PTR)MmPageArray +
(i * PAGESIZE)),
PAGE_READWRITE,
(ULONG)(LastPhysKernelAddress
*/
MmPageArray[0].Flags = MM_PHYSICAL_PAGE_BIOS;
MmPageArray[0].ReferenceCount = 0;
- InsertTailList(&BiosPageListHead,
+ InsertTailList(&MiBiosPageListHead,
&MmPageArray[0].ListEntry);
/*
*/
MmPageArray[1].Flags = MM_PHYSICAL_PAGE_BIOS;
MmPageArray[1].ReferenceCount = 0;
- InsertTailList(&BiosPageListHead,
+ InsertTailList(&MiBiosPageListHead,
&MmPageArray[1].ListEntry);
i = 2;
- if ((ULONG)FirstPhysKernelAddress < 0xa0000)
+ if ((ULONG_PTR) FirstPhysKernelAddress < 0xa0000)
{
- MmStats.NrFreePages += (((ULONG)FirstPhysKernelAddress/PAGESIZE) - 1);
- for (; i<((ULONG)FirstPhysKernelAddress/PAGESIZE); i++)
+ MmStats.NrFreePages += (((ULONG_PTR) FirstPhysKernelAddress/PAGESIZE) - 1);
+ for (; i<((ULONG_PTR) FirstPhysKernelAddress/PAGESIZE); i++)
{
MmPageArray[i].Flags = MM_PHYSICAL_PAGE_FREE;
MmPageArray[i].ReferenceCount = 0;
- InsertTailList(&FreeUnzeroedPageListHead,
+ InsertTailList(&MiFreeUnzeroedPageListHead,
&MmPageArray[i].ListEntry);
}
MmStats.NrSystemPages +=
- ((((ULONG)LastPhysKernelAddress) / PAGESIZE) - i);
- for (; i<((ULONG)LastPhysKernelAddress / PAGESIZE); i++)
+ ((((ULONG_PTR) LastPhysKernelAddress) / PAGESIZE) - i);
+ for (; i<((ULONG_PTR) LastPhysKernelAddress / PAGESIZE); i++)
{
MmPageArray[i].Flags = MM_PHYSICAL_PAGE_USED;
MmPageArray[i].ReferenceCount = 1;
- InsertTailList(&UsedPageListHeads[MC_NPPOOL],
+ InsertTailList(&MiUsedPageListHeads[MC_NPPOOL],
&MmPageArray[i].ListEntry);
}
MmStats.NrFreePages += ((0xa0000/PAGESIZE) - i);
{
MmPageArray[i].Flags = MM_PHYSICAL_PAGE_FREE;
MmPageArray[i].ReferenceCount = 0;
- InsertTailList(&FreeUnzeroedPageListHead,
+ InsertTailList(&MiFreeUnzeroedPageListHead,
&MmPageArray[i].ListEntry);
}
MmStats.NrReservedPages += ((0x100000/PAGESIZE) - i);
{
MmPageArray[i].Flags = MM_PHYSICAL_PAGE_BIOS;
MmPageArray[i].ReferenceCount = 1;
- InsertTailList(&BiosPageListHead,
+ InsertTailList(&MiBiosPageListHead,
&MmPageArray[i].ListEntry);
}
}
{
MmPageArray[i].Flags = MM_PHYSICAL_PAGE_FREE;
MmPageArray[i].ReferenceCount = 0;
- InsertTailList(&FreeUnzeroedPageListHead,
+ InsertTailList(&MiFreeUnzeroedPageListHead,
&MmPageArray[i].ListEntry);
}
MmStats.NrReservedPages += (0x60000 / PAGESIZE);
{
MmPageArray[i].Flags = MM_PHYSICAL_PAGE_BIOS;
MmPageArray[i].ReferenceCount = 1;
- InsertTailList(&BiosPageListHead,
+ InsertTailList(&MiBiosPageListHead,
&MmPageArray[i].ListEntry);
}
- MmStats.NrFreePages += (((ULONG)FirstPhysKernelAddress/PAGESIZE) - i);
- for (; i<((ULONG)FirstPhysKernelAddress/PAGESIZE); i++)
+ MmStats.NrFreePages += (((ULONG_PTR) FirstPhysKernelAddress/PAGESIZE) - i);
+ for (; i<((ULONG_PTR) FirstPhysKernelAddress/PAGESIZE); i++)
{
MmPageArray[i].Flags = MM_PHYSICAL_PAGE_FREE;
MmPageArray[i].ReferenceCount = 0;
- InsertTailList(&FreeUnzeroedPageListHead,
+ InsertTailList(&MiFreeUnzeroedPageListHead,
&MmPageArray[i].ListEntry);
}
MmStats.NrSystemPages +=
- (((ULONG)LastPhysKernelAddress/PAGESIZE) - i);
- for (; i<((ULONG)LastPhysKernelAddress/PAGESIZE); i++)
+ (((ULONG_PTR) LastPhysKernelAddress/PAGESIZE) - i);
+ for (; i<((ULONG_PTR) LastPhysKernelAddress/PAGESIZE); i++)
{
MmPageArray[i].Flags = MM_PHYSICAL_PAGE_USED;
MmPageArray[i].ReferenceCount = 1;
- InsertTailList(&UsedPageListHeads[MC_NPPOOL],
+ InsertTailList(&MiUsedPageListHeads[MC_NPPOOL],
&MmPageArray[i].ListEntry);
}
}
{
MmPageArray[i].Flags = MM_PHYSICAL_PAGE_FREE;
MmPageArray[i].ReferenceCount = 0;
- InsertTailList(&FreeUnzeroedPageListHead,
+ InsertTailList(&MiFreeUnzeroedPageListHead,
&MmPageArray[i].ListEntry);
}
return((PVOID)LastKernelAddress);
}
-VOID MmSetFlagsPage(PVOID PhysicalAddress,
- ULONG Flags)
+
+VOID
+MmSetFlagsPage(IN ULONG_PTR PhysicalAddress,
+ IN ULONG Flags)
{
- ULONG Start = (ULONG)PhysicalAddress / PAGESIZE;
+ ULONG Start = PhysicalAddress / PAGESIZE;
KIRQL oldIrql;
- KeAcquireSpinLock(&PageListLock, &oldIrql);
+ KeAcquireSpinLock(&MiPageListLock, &oldIrql);
MmPageArray[Start].Flags = Flags;
- KeReleaseSpinLock(&PageListLock, oldIrql);
+ KeReleaseSpinLock(&MiPageListLock, oldIrql);
}
-VOID
-MmSetRmapListHeadPage(PVOID PhysicalAddress, struct _MM_RMAP_ENTRY* ListHead)
+
+VOID
+MmSetRmapListHeadPage(IN ULONG_PTR PhysicalAddress,
+ IN struct _MM_RMAP_ENTRY* ListHead)
{
- ULONG Start = (ULONG)PhysicalAddress / PAGESIZE;
+ ULONG Start = PhysicalAddress / PAGESIZE;
+
+ VALIDATE_PHYSICAL_ADDRESS(PhysicalAddress);
- MmPageArray[Start].RmapListHead = ListHead;
+ VALIDATE_RMAP_LIST(ListHead);
+
+ MmPageArray[Start].RmapListHead = ListHead;
}
/*
 * FUNCTION: Return the head of the reverse-mapping (rmap) list for the
 * physical page at PhysicalAddress.  May be NULL if the page has no
 * virtual mappings recorded.
 */
struct _MM_RMAP_ENTRY*
MmGetRmapListHeadPage(IN ULONG_PTR PhysicalAddress)
{
  ULONG Start = PhysicalAddress / PAGESIZE;

  VALIDATE_PHYSICAL_ADDRESS(PhysicalAddress);

  VALIDATE_RMAP_LIST(MmPageArray[Start].RmapListHead);

  return(MmPageArray[Start].RmapListHead);
}
+
/*
 * FUNCTION: Record a callback (and its context) to be invoked when the
 * rmap entries of the physical page at PhysicalAddress are deleted.
 * NOTE(review): no lock is taken here; assumes callers are serialized
 * externally — confirm.
 */
VOID
MmSetRmapCallback(IN ULONG_PTR PhysicalAddress,
  IN PRMAP_DELETE_CALLBACK RmapDelete,
  IN PVOID RmapDeleteContext)
{
  ULONG Start = PhysicalAddress / PAGESIZE;

  VALIDATE_PHYSICAL_ADDRESS(PhysicalAddress);

  MmPageArray[Start].RmapDelete = RmapDelete;
  MmPageArray[Start].RmapDeleteContext = RmapDeleteContext;
}
+
+
+VOID
+MmGetRmapCallback(IN ULONG_PTR PhysicalAddress,
+ IN PRMAP_DELETE_CALLBACK *RmapDelete,
+ IN PVOID *RmapDeleteContext)
+{
+ ULONG Start = PhysicalAddress / PAGESIZE;
+
+ VALIDATE_PHYSICAL_ADDRESS(PhysicalAddress);
+
+ *RmapDelete = MmPageArray[Start].RmapDelete;
+ *RmapDeleteContext = MmPageArray[Start].RmapDeleteContext;
}
+
VOID
-MmMarkPageUnmapped(PVOID PhysicalAddress)
+MmMarkPageMapped(IN ULONG_PTR PhysicalAddress)
{
- ULONG Start = (ULONG)PhysicalAddress / PAGESIZE;
- KIRQL oldIrql;
-
- KeAcquireSpinLock(&PageListLock, &oldIrql);
- MmPageArray[Start].MapCount--;
- KeReleaseSpinLock(&PageListLock, oldIrql);
+ ULONG Start = PhysicalAddress / PAGESIZE;
+ KIRQL oldIrql;
+
+ VALIDATE_PHYSICAL_ADDRESS(PhysicalAddress);
+
+ KeAcquireSpinLock(&MiPageListLock, &oldIrql);
+ MmPageArray[Start].MapCount++;
+ KeReleaseSpinLock(&MiPageListLock, oldIrql);
}
-ULONG MmGetFlagsPage(PVOID PhysicalAddress)
+VOID
+MmMarkPageUnmapped(IN ULONG_PTR PhysicalAddress)
{
- ULONG Start = (ULONG)PhysicalAddress / PAGESIZE;
- KIRQL oldIrql;
- ULONG Flags;
-
- KeAcquireSpinLock(&PageListLock, &oldIrql);
- Flags = MmPageArray[Start].Flags;
- KeReleaseSpinLock(&PageListLock, oldIrql);
+ ULONG Start = PhysicalAddress / PAGESIZE;
+ KIRQL oldIrql;
+
+ VALIDATE_PHYSICAL_ADDRESS(PhysicalAddress);
- return(Flags);
+ KeAcquireSpinLock(&MiPageListLock, &oldIrql);
+ MmPageArray[Start].MapCount--;
+ KeReleaseSpinLock(&MiPageListLock, oldIrql);
+}
+
+ULONG
+MmGetFlagsPage(IN ULONG_PTR PhysicalAddress)
+{
+ ULONG Start = PhysicalAddress / PAGESIZE;
+ KIRQL oldIrql;
+ ULONG Flags;
+
+ VALIDATE_PHYSICAL_ADDRESS(PhysicalAddress);
+
+ KeAcquireSpinLock(&MiPageListLock, &oldIrql);
+ Flags = MmPageArray[Start].Flags;
+ KeReleaseSpinLock(&MiPageListLock, oldIrql);
+
+ return(Flags);
}
/*
 * FUNCTION: Record the swap (pagefile) entry associated with the physical
 * page at PhysicalAddress, under the page list lock.
 */
VOID
MmSetSavedSwapEntryPage(IN ULONG_PTR PhysicalAddress,
  IN SWAPENTRY SavedSwapEntry)
{
  ULONG Start = PhysicalAddress / PAGESIZE;
  KIRQL oldIrql;

  VALIDATE_PHYSICAL_ADDRESS(PhysicalAddress);

  VALIDATE_SWAP_ENTRY(SavedSwapEntry);

  KeAcquireSpinLock(&MiPageListLock, &oldIrql);
  MmPageArray[Start].SavedSwapEntry = SavedSwapEntry;
  KeReleaseSpinLock(&MiPageListLock, oldIrql);
}
/*
 * FUNCTION: Return the swap (pagefile) entry previously recorded for the
 * physical page at PhysicalAddress, read under the page list lock.
 */
SWAPENTRY
MmGetSavedSwapEntryPage(IN ULONG_PTR PhysicalAddress)
{
  ULONG Start = PhysicalAddress / PAGESIZE;
  SWAPENTRY SavedSwapEntry;
  KIRQL oldIrql;

  VALIDATE_PHYSICAL_ADDRESS(PhysicalAddress);

  KeAcquireSpinLock(&MiPageListLock, &oldIrql);
  SavedSwapEntry = MmPageArray[Start].SavedSwapEntry;
  KeReleaseSpinLock(&MiPageListLock, oldIrql);

  VALIDATE_SWAP_ENTRY(SavedSwapEntry);

  return(SavedSwapEntry);
}
+
+
/*
 * FUNCTION: Associate a pending page operation descriptor with the
 * physical page at PhysicalAddress.
 * NOTE(review): stored without taking MiPageListLock — confirm callers
 * are serialized.
 */
VOID
MmSetSavedPageOp(IN ULONG_PTR PhysicalAddress,
  IN PMM_PAGEOP PageOp)
{
  ULONG Start = PhysicalAddress / PAGESIZE;

  VALIDATE_PHYSICAL_ADDRESS(PhysicalAddress);

  VALIDATE_PAGEOP(PageOp);

  MmPageArray[Start].PageOp = PageOp;
}
-VOID MmReferencePage(PVOID PhysicalAddress)
+
/*
 * FUNCTION: Return the page operation descriptor previously associated
 * with the physical page at PhysicalAddress (may be NULL if none —
 * VALIDATE_PAGEOP is expected to tolerate that; confirm).
 */
PMM_PAGEOP
MmGetSavedPageOp(IN ULONG_PTR PhysicalAddress)
{
  ULONG Start = PhysicalAddress / PAGESIZE;
  PMM_PAGEOP PageOp;

  VALIDATE_PHYSICAL_ADDRESS(PhysicalAddress);

  PageOp = MmPageArray[Start].PageOp;

  VALIDATE_PAGEOP(PageOp);

  return(PageOp);
}
+
+
+VOID MmReferencePage(IN ULONG_PTR PhysicalAddress)
+{
+ ULONG Start = PhysicalAddress / PAGESIZE;
+ KIRQL oldIrql;
+
+ VALIDATE_PHYSICAL_ADDRESS(PhysicalAddress);
- MmPageArray[Start].ReferenceCount++;
- KeReleaseSpinLock(&PageListLock, oldIrql);
+ DPRINT("MmReferencePage(PhysicalAddress %x)\n", PhysicalAddress);
+
+ if (PhysicalAddress == 0)
+ {
+ assert(FALSE);
+ }
+
+ KeAcquireSpinLock(&MiPageListLock, &oldIrql);
+
+ if (MM_PTYPE(MmPageArray[Start].Flags) == MM_PHYSICAL_PAGE_FREE)
+ {
+ assertmsg(FALSE, ("Referencing non-used page\n"));
+ }
+
+ MmPageArray[Start].ReferenceCount++;
+ KeReleaseSpinLock(&MiPageListLock, oldIrql);
}
/*
 * FUNCTION: Return the reference count of the physical page at
 * PhysicalAddress, read under the page list lock.  Bugchecks on a zero
 * address or a free page.
 */
ULONG
MmGetReferenceCountPage(IN ULONG_PTR PhysicalAddress)
{
  ULONG Start = PhysicalAddress / PAGESIZE;
  KIRQL oldIrql;
  ULONG RCount;

  VALIDATE_PHYSICAL_ADDRESS(PhysicalAddress);

  DPRINT("MmGetReferenceCountPage(PhysicalAddress %x)\n", PhysicalAddress);

  if (PhysicalAddress == 0)
    {
      KeBugCheck(0);
    }

  KeAcquireSpinLock(&MiPageListLock, &oldIrql);

  if (MM_PTYPE(MmPageArray[Start].Flags) == MM_PHYSICAL_PAGE_FREE)
    {
      DbgPrint("Getting reference count for free page\n");
      KeBugCheck(0);
    }

  RCount = MmPageArray[Start].ReferenceCount;

  KeReleaseSpinLock(&MiPageListLock, oldIrql);
  return(RCount);
}
/*
 * FUNCTION: Return TRUE if the physical page at PhysicalAddress is in
 * use (i.e. not on the free list).  Bugchecks on a zero address rather
 * than returning FALSE.
 * NOTE: reads Flags without the page list lock — racy by design?
 */
BOOLEAN
MmIsUsablePage(IN ULONG_PTR PhysicalAddress)
{
  ULONG Start = PhysicalAddress / PAGESIZE;

  VALIDATE_PHYSICAL_ADDRESS(PhysicalAddress);

  DPRINT("MmGetReferenceCountPage(PhysicalAddress %x)\n", PhysicalAddress);

  if (PhysicalAddress == 0)
    {
      KeBugCheck(0);
    }

  if (MM_PTYPE(MmPageArray[Start].Flags) == MM_PHYSICAL_PAGE_FREE)
    {
      return(FALSE);
    }

  return(TRUE);
}
/*
 * FUNCTION: Drop one reference on the physical page at PhysicalAddress.
 * When the count reaches zero the page is sanity-checked (no rmap
 * entries, not mapped, not locked, no swap entry) and returned to the
 * unzeroed free list.
 * NOTE(review): unconditionally does NrSystemPages-- on free; assumes
 * every freed page was accounted as a system page — confirm against
 * MmAllocPage's accounting.
 */
VOID MmDereferencePage(IN ULONG_PTR PhysicalAddress)
{
  ULONG Start = PhysicalAddress / PAGESIZE;
  KIRQL oldIrql;

  VALIDATE_PHYSICAL_ADDRESS(PhysicalAddress);

  DPRINT("MmDereferencePage(PhysicalAddress %x)\n", PhysicalAddress);

  if (PhysicalAddress == 0)
    {
      KeBugCheck(0);
    }

  KeAcquireSpinLock(&MiPageListLock, &oldIrql);

  if (MM_PTYPE(MmPageArray[Start].Flags) == MM_PHYSICAL_PAGE_FREE)
    {
      DbgPrint("Dereferencing free page\n");
      KeBugCheck(0);
    }

  MmPageArray[Start].ReferenceCount--;
  if (MmPageArray[Start].ReferenceCount == 0)
    {
      MmStats.NrFreePages++;
      MmStats.NrSystemPages--;
      /* Unlink from the consumer's used-page list */
      RemoveEntryList(&MmPageArray[Start].ListEntry);
      /* A page being freed must have no outstanding bookkeeping */
      if (MmPageArray[Start].RmapListHead != NULL)
        {
          DbgPrint("Freeing page with rmap entries.\n");
          KeBugCheck(0);
        }
      if (MmPageArray[Start].MapCount != 0)
        {
          DbgPrint("Freeing mapped page (0x%x count %d)\n",
            PhysicalAddress, MmPageArray[Start].MapCount);
          KeBugCheck(0);
        }
      if (MmPageArray[Start].LockCount > 0)
        {
          DbgPrint("Freeing locked page\n");
          KeBugCheck(0);
        }
      if (MmPageArray[Start].SavedSwapEntry != 0)
        {
          DbgPrint("Freeing page with swap entry.\n");
          KeBugCheck(0);
        }
      if (MmPageArray[Start].Flags == MM_PHYSICAL_PAGE_FREE)
        {
          DbgPrint("Freeing page with flags %x\n",
            MmPageArray[Start].Flags);
          KeBugCheck(0);
        }
      MmPageArray[Start].Flags = MM_PHYSICAL_PAGE_FREE;
      InsertTailList(&MiFreeUnzeroedPageListHead,
        &MmPageArray[Start].ListEntry);
    }
  KeReleaseSpinLock(&MiPageListLock, oldIrql);
}
-ULONG MmGetLockCountPage(PVOID PhysicalAddress)
+ULONG
+MiGetLockCountPage(IN ULONG_PTR PhysicalAddress)
{
- ULONG Start = (ULONG)PhysicalAddress / PAGESIZE;
- KIRQL oldIrql;
- ULONG LockCount;
-
- DPRINT("MmGetLockCountPage(PhysicalAddress %x)\n", PhysicalAddress);
+ ULONG Start = PhysicalAddress / PAGESIZE;
+ KIRQL oldIrql;
+ ULONG LockCount;
+
+ VALIDATE_PHYSICAL_ADDRESS(PhysicalAddress);
- if (((ULONG)PhysicalAddress) == 0)
- {
+ DPRINT("MmGetLockCountPage(PhysicalAddress %x)\n", PhysicalAddress);
+
+ if (PhysicalAddress == 0)
+ {
KeBugCheck(0);
- }
-
- KeAcquireSpinLock(&PageListLock, &oldIrql);
-
- if (MM_PTYPE(MmPageArray[Start].Flags) != MM_PHYSICAL_PAGE_USED)
- {
+ }
+
+ KeAcquireSpinLock(&MiPageListLock, &oldIrql);
+
+ if (MM_PTYPE(MmPageArray[Start].Flags) == MM_PHYSICAL_PAGE_FREE)
+ {
DbgPrint("Getting lock count for free page\n");
KeBugCheck(0);
- }
-
- LockCount = MmPageArray[Start].LockCount;
- KeReleaseSpinLock(&PageListLock, oldIrql);
-
- return(LockCount);
+ }
+
+ LockCount = MmPageArray[Start].LockCount;
+ KeReleaseSpinLock(&MiPageListLock, oldIrql);
+
+ return(LockCount);
}
-VOID MmLockPage(PVOID PhysicalAddress)
+
+VOID
+MmLockPage(IN ULONG_PTR PhysicalAddress)
{
- ULONG Start = (ULONG)PhysicalAddress / PAGESIZE;
- KIRQL oldIrql;
-
- DPRINT("MmLockPage(PhysicalAddress %x)\n", PhysicalAddress);
+ ULONG Start = PhysicalAddress / PAGESIZE;
+ KIRQL oldIrql;
+
+ VALIDATE_PHYSICAL_ADDRESS(PhysicalAddress);
- if (((ULONG)PhysicalAddress) == 0)
- {
+ DPRINT("MmLockPage(PhysicalAddress %x)\n", PhysicalAddress);
+
+ if (PhysicalAddress == 0)
+ {
KeBugCheck(0);
- }
-
- KeAcquireSpinLock(&PageListLock, &oldIrql);
-
- if (MM_PTYPE(MmPageArray[Start].Flags) != MM_PHYSICAL_PAGE_USED)
- {
+ }
+
+ KeAcquireSpinLock(&MiPageListLock, &oldIrql);
+
+ if (MM_PTYPE(MmPageArray[Start].Flags) == MM_PHYSICAL_PAGE_FREE)
+ {
DbgPrint("Locking free page\n");
KeBugCheck(0);
- }
-
- MmPageArray[Start].LockCount++;
- KeReleaseSpinLock(&PageListLock, oldIrql);
+ }
+
+ MmPageArray[Start].LockCount++;
+ KeReleaseSpinLock(&MiPageListLock, oldIrql);
}
-VOID MmUnlockPage(PVOID PhysicalAddress)
+
+VOID
+MmUnlockPage(IN ULONG_PTR PhysicalAddress)
{
- ULONG Start = (ULONG)PhysicalAddress / PAGESIZE;
- KIRQL oldIrql;
-
- DPRINT("MmUnlockPage(PhysicalAddress %x)\n", PhysicalAddress);
+ ULONG Start = PhysicalAddress / PAGESIZE;
+ KIRQL oldIrql;
+
+ VALIDATE_PHYSICAL_ADDRESS(PhysicalAddress);
- if (((ULONG)PhysicalAddress) == 0)
- {
+ DPRINT("MmUnlockPage(PhysicalAddress %x)\n", PhysicalAddress);
+
+ if (PhysicalAddress == 0)
+ {
KeBugCheck(0);
- }
-
- KeAcquireSpinLock(&PageListLock, &oldIrql);
-
- if (MM_PTYPE(MmPageArray[Start].Flags) != MM_PHYSICAL_PAGE_USED)
- {
+ }
+
+ KeAcquireSpinLock(&MiPageListLock, &oldIrql);
+
+ if (MM_PTYPE(MmPageArray[Start].Flags) == MM_PHYSICAL_PAGE_FREE)
+ {
DbgPrint("Unlocking free page\n");
KeBugCheck(0);
- }
-
- MmPageArray[Start].LockCount--;
- KeReleaseSpinLock(&PageListLock, oldIrql);
+ }
+
+ MmPageArray[Start].LockCount--;
+ KeReleaseSpinLock(&MiPageListLock, oldIrql);
}
-PVOID
+ULONG_PTR
MmAllocPage(ULONG Consumer, SWAPENTRY SavedSwapEntry)
{
ULONG offset;
DPRINT("MmAllocPage()\n");
- KeAcquireSpinLock(&PageListLock, &oldIrql);
- if (IsListEmpty(&FreeZeroedPageListHead))
+ KeAcquireSpinLock(&MiPageListLock, &oldIrql);
+ if (IsListEmpty(&MiFreeZeroedPageListHead))
{
- if (IsListEmpty(&FreeUnzeroedPageListHead))
+ if (IsListEmpty(&MiFreeUnzeroedPageListHead))
{
DPRINT1("MmAllocPage(): Out of memory\n");
- KeReleaseSpinLock(&PageListLock, oldIrql);
- return(NULL);
+ KeReleaseSpinLock(&MiPageListLock, oldIrql);
+ return(0);
}
- ListEntry = RemoveTailList(&FreeUnzeroedPageListHead);
+ ListEntry = RemoveTailList(&MiFreeUnzeroedPageListHead);
PageDescriptor = CONTAINING_RECORD(ListEntry, PHYSICAL_PAGE, ListEntry);
- KeReleaseSpinLock(&PageListLock, oldIrql);
+ KeReleaseSpinLock(&MiPageListLock, oldIrql);
NeedClear = TRUE;
}
else
{
- ListEntry = RemoveTailList(&FreeZeroedPageListHead);
- KeReleaseSpinLock(&PageListLock, oldIrql);
+ ListEntry = RemoveTailList(&MiFreeZeroedPageListHead);
+ KeReleaseSpinLock(&MiPageListLock, oldIrql);
PageDescriptor = CONTAINING_RECORD(ListEntry, PHYSICAL_PAGE, ListEntry);
}
PageDescriptor->LockCount = 0;
PageDescriptor->MapCount = 0;
PageDescriptor->SavedSwapEntry = SavedSwapEntry;
- ExInterlockedInsertTailList(&UsedPageListHeads[Consumer], ListEntry,
- &PageListLock);
-
+ ExInterlockedInsertTailList(&MiUsedPageListHeads[Consumer], ListEntry,
+ &MiPageListLock);
+
MmStats.NrSystemPages++;
MmStats.NrFreePages--;
offset = (ULONG)((ULONG)PageDescriptor - (ULONG)MmPageArray);
- offset = (offset / sizeof(PHYSICAL_PAGE)) * PAGESIZE;
+ offset = (offset / sizeof(PHYSICAL_PAGE)) * PAGESIZE;
if (NeedClear)
{
MiZeroPage(offset);
}
DPRINT("MmAllocPage() = %x\n",offset);
- return((PVOID)offset);
+ return(offset);
}
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-/* $Id: page.c,v 1.34 2002/01/08 00:49:01 dwelch Exp $
+/* $Id: page.c,v 1.35 2002/05/13 18:10:41 chorns Exp $
*
* PROJECT: ReactOS kernel
* FILE: ntoskrnl/mm/i386/page.c
/* GLOBALS *****************************************************************/
/* See pagefile.c for the layout of a swap entry PTE */
#define PA_BIT_PRESENT     (0)
#define PA_BIT_READWRITE   (1)
#define PA_BIT_USER        (2)
#define PA_BIT_WT          (3)
#define PA_BIT_CD          (4)
#define PA_BIT_ACCESSED    (5)
#define PA_BIT_DIRTY       (6)
#define PA_BIT_PROTOTYPE   (7)
#define PA_BIT_TRANSITION  (8)
#define PA_BIT_DEMAND_ZERO (9)

#define PA_PRESENT     (1 << PA_BIT_PRESENT)
#define PA_READWRITE   (1 << PA_BIT_READWRITE)
#define PA_USER        (1 << PA_BIT_USER)
#define PA_WT          (1 << PA_BIT_WT)
#define PA_CD          (1 << PA_BIT_CD)
#define PA_ACCESSED    (1 << PA_BIT_ACCESSED)
#define PA_DIRTY       (1 << PA_BIT_DIRTY)
#define PA_PROTOTYPE   (1 << PA_BIT_PROTOTYPE)
#define PA_TRANSITION  (1 << PA_BIT_TRANSITION)
#define PA_DEMAND_ZERO (1 << PA_BIT_DEMAND_ZERO)

/* A swap entry lives in bits 1-28 of a non-present PTE.  The expansions
 * are fully parenthesized so the macros compose safely inside larger
 * expressions, and IS_SWAPENTRY_PTE uses its own parameter (the previous
 * version referenced the caller's variable 'Pte' by accident and only
 * worked when the argument happened to have that name). */
#define GET_SWAPENTRY_FROM_PTE(pte)      (((pte) & 0x1ffffffe) >> 1)
#define SET_SWAPENTRY_IN_PTE(pte, entry) ((pte) = (((entry) << 1) & 0x1ffffffe))
#define IS_SWAPENTRY_PTE(pte)            ((!((pte) & PA_PRESENT)) && ((pte) & 0x1ffffffe))

#define PAGETABLE_MAP     (0xf0000000)
#define PAGEDIRECTORY_MAP (0xf0000000 + (PAGETABLE_MAP / (1024)))
ULONG MmGlobalKernelPageDirectory[1024] = {0, };
+#ifdef DBG
+
+PVOID MiBreakPointAddressLow = (PVOID)0x0;
+PVOID MiBreakPointAddressHigh = (PVOID)0x0;
+
+#endif /* DBG */
+
/* FUNCTIONS ***************************************************************/
+#ifdef DBG
+
+VOID
+MiDumpPTE(IN ULONG Value)
+{
+ if (Value & PA_PRESENT)
+ DbgPrint("Valid\n");
+ else
+ DbgPrint("Invalid\n");
+
+ if (Value & PA_READWRITE)
+ DbgPrint("Read/Write\n");
+ else
+ DbgPrint("Read only\n");
+
+ if (Value & PA_USER)
+ DbgPrint("User access\n");
+ else
+ DbgPrint("System access\n");
+
+ if (Value & PA_WT)
+ DbgPrint("Write through\n");
+ else
+ DbgPrint("Not write through\n");
+
+ if (Value & PA_CD)
+ DbgPrint("No cache\n");
+ else
+ DbgPrint("Cache\n");
+
+ if (Value & PA_ACCESSED)
+ DbgPrint("Accessed\n");
+ else
+ DbgPrint("Not accessed\n");
+
+ if (Value & PA_DIRTY)
+ DbgPrint("Dirty\n");
+ else
+ DbgPrint("Clean\n");
+
+ if (Value & PA_PROTOTYPE)
+ DbgPrint("Prototype\n");
+ else
+ DbgPrint("Not prototype\n");
+
+ if (Value & PA_TRANSITION)
+ DbgPrint("Transition\n");
+ else
+ DbgPrint("Not transition\n");
+
+ if (Value & PA_DEMAND_ZERO)
+ DbgPrint("Demand zero\n");
+ else
+ DbgPrint("Not demand zero\n");
+}
+
+
+/*
+ * Call from a debugger to have the OS break into the
+ * debugger when a PTE in this range is changed
+ */
/*
 * FUNCTION: Set the virtual address range [LowAddress, HighAddress] whose
 * PTE changes should break into the debugger (checked by the DBG-only
 * code in MmCreateVirtualMappingUnsafe).  Called manually from a
 * debugger session.
 */
VOID
DbgMmSetBreakPointAddressRange(IN PVOID LowAddress,
  IN PVOID HighAddress)
{
  MiBreakPointAddressLow = LowAddress;
  MiBreakPointAddressHigh = HighAddress;
}
+
+
+/*
+ * Single page version of DbgSetBreakPointAddressRange()
+ */
+VOID
+DbgMmSetBreakPointAddress(IN PVOID Address)
+{
+ MiBreakPointAddressLow = Address;
+ MiBreakPointAddressHigh = Address;
+}
+
+
+VOID
+MiValidatePhysicalAddress(IN ULONG_PTR PhysicalAddress)
+{
+ if (!MiInitialized)
+ return;
+
+ assertmsg((PhysicalAddress / PAGESIZE) < MmStats.NrTotalPages,
+ ("Bad physical address 0x%.08x\n", PhysicalAddress))
+}
+
+#endif /* DBG */
+
/*
 * FUNCTION: Return a pointer to the current process's page directory via
 * the self-mapping (page_dir).
 */
PULONG
MmGetPageDirectory(VOID)
{
   return((PULONG)page_dir);
}
+
static ULONG
ProtectToPTE(ULONG flProtect)
{
{
DPRINT("Mmi386ReleaseMmInfo(Process %x)\n",Process);
- MmDereferencePage(Process->Pcb.DirectoryTableBase[0]);
+ MmDereferencePage((ULONG_PTR) Process->Pcb.DirectoryTableBase[0]);
Process->Pcb.DirectoryTableBase[0] = NULL;
DPRINT("Finished Mmi386ReleaseMmInfo()\n");
return(STATUS_UNSUCCESSFUL);
}
PhysPageDirectory = (PULONG)(MmGetPhysicalAddress(PageDirectory)).u.LowPart;
+
+ VALIDATE_PHYSICAL_ADDRESS(PhysPageDirectory);
+
KProcess->DirectoryTableBase[0] = PhysPageDirectory;
CurrentPageDirectory = (PULONG)PAGEDIRECTORY_MAP;
KeAttachProcess(Process);
}
*(ADDR_TO_PDE(Address)) = 0;
- if (Address >= (PVOID)KERNEL_BASE)
+ if (Address >= (PVOID) KERNEL_BASE)
{
MmGlobalKernelPageDirectory[ADDR_TO_PDE_OFFSET(Address)] = 0;
}
PEPROCESS CurrentProcess = PsGetCurrentProcess();
PULONG PageTable;
ULONG i;
- ULONG npage;
+ ULONG_PTR npage;
if (Process != NULL && Process != CurrentProcess)
{
KeAttachProcess(Process);
}
- PageTable = (PULONG)PAGE_ROUND_DOWN((PVOID)ADDR_TO_PTE(Address));
+ PageTable = (PULONG) PAGE_ROUND_DOWN((PVOID) ADDR_TO_PTE(Address));
for (i = 0; i < 1024; i++)
{
if (PageTable[i] != 0)
}
npage = *(ADDR_TO_PDE(Address));
*(ADDR_TO_PDE(Address)) = 0;
- if (Address >= (PVOID)KERNEL_BASE)
+ if (Address >= (PVOID) KERNEL_BASE)
{
MmGlobalKernelPageDirectory[ADDR_TO_PDE_OFFSET(Address)] = 0;
}
- MmDereferencePage((PVOID)PAGE_MASK(npage));
+ MmDereferencePage((ULONG_PTR) PAGE_MASK(npage));
FLUSH_TLB;
if (Process != NULL && Process != CurrentProcess)
{
*/
{
PULONG Pde;
- ULONG Address = (ULONG)PAddress;
- ULONG npage;
+ ULONG_PTR Address = (ULONG)PAddress;
+ ULONG_PTR npage;
DPRINT("MmGetPageEntry(Address %x)\n", Address);
else
{
NTSTATUS Status;
- Status = MmRequestPageMemoryConsumer(MC_NPPOOL, MayWait, (PVOID*)&npage);
+ Status = MmRequestPageMemoryConsumer(MC_NPPOOL, MayWait, &npage);
if (!NT_SUCCESS(Status))
{
return(Status);
}
+
+ VALIDATE_PHYSICAL_ADDRESS(PAGE_MASK(npage));
+
(*Pde) = npage | 0x7;
if (Address >= KERNEL_BASE)
{
MmGlobalKernelPageDirectory[ADDR_TO_PDE_OFFSET(Address)] =
*Pde;
}
- memset((PVOID)PAGE_ROUND_DOWN(ADDR_TO_PTE(Address)), 0, PAGESIZE);
+ memset((PVOID) PAGE_ROUND_DOWN(ADDR_TO_PTE(Address)), 0, PAGESIZE);
FLUSH_TLB;
}
}
PULONG page_dir;
ULONG Address = (ULONG)PAddress;
- DPRINT("MmGetPageEntry(Address %x)\n", Address);
+ DPRINT("MmGetPageEntry1(Address %x)\n", Address);
page_dir = ADDR_TO_PDE(Address);
if ((*page_dir) == 0 &&
}
-ULONG MmGetPhysicalAddressForProcess(PEPROCESS Process,
- PVOID Address)
+ULONG
+MmGetPhysicalAddressForProcess(IN PEPROCESS Process,
+ IN PVOID Address)
{
- ULONG PageEntry;
-
- PageEntry = MmGetPageEntryForProcess(Process, Address);
-
- if (!(PageEntry & PA_PRESENT))
- {
- return(0);
- }
- return(PAGE_MASK(PageEntry));
+ ULONG PageEntry;
+
+ PageEntry = MmGetPageEntryForProcess(Process, Address);
+
+ VALIDATE_PHYSICAL_ADDRESS(PAGE_MASK(PageEntry));
+
+ return(PAGE_MASK(PageEntry));
}
VOID
-MmDisableVirtualMapping(PEPROCESS Process, PVOID Address, BOOL* WasDirty, ULONG* PhysicalAddr)
+MmDisableVirtualMapping(IN PEPROCESS Process,
+ IN PVOID Address,
+ OUT PBOOLEAN WasDirty,
+ OUT PULONG_PTR PhysicalAddr)
/*
- * FUNCTION: Delete a virtual mapping
+ * FUNCTION: Disable a virtual mapping
*/
{
ULONG Pte;
PULONG Pde;
PEPROCESS CurrentProcess = PsGetCurrentProcess();
- BOOLEAN WasValid;
/*
* If we are setting a page in another process we need to be in its
}
if ((*Pde) == 0)
{
+ DPRINT1("PDE for address 0x%.08x does not exist\n", Address);
KeBugCheck(0);
}
/*
- * Atomically set the entry to zero and get the old value.
+ * Reset the present bit
*/
Pte = *ADDR_TO_PTE(Address);
*ADDR_TO_PTE(Address) = Pte & (~PA_PRESENT);
FLUSH_TLB;
- WasValid = (PAGE_MASK(Pte) != 0);
- if (!WasValid)
- {
- KeBugCheck(0);
- }
/*
* If necessary go back to the original context
}
if (PhysicalAddr != NULL)
{
- *PhysicalAddr = PAGE_MASK(Pte);
+ VALIDATE_PHYSICAL_ADDRESS(PAGE_MASK(Pte));
+
+ *PhysicalAddr = PAGE_MASK(Pte);
}
}
VOID
-MmDeleteVirtualMapping(PEPROCESS Process, PVOID Address, BOOL FreePage,
- BOOL* WasDirty, ULONG* PhysicalAddr)
+MmDeleteVirtualMapping(PEPROCESS Process,
+ PVOID Address,
+ BOOLEAN FreePage,
+ PBOOLEAN WasDirty,
+ PULONG PhysicalAddr)
/*
* FUNCTION: Delete a virtual mapping
*/
WasValid = (PAGE_MASK(Pte) != 0);
if (WasValid)
{
- MmMarkPageUnmapped((PVOID)PAGE_MASK(Pte));
+ MmMarkPageUnmapped((ULONG_PTR) PAGE_MASK(Pte));
}
if (FreePage && WasValid)
{
- MmDereferencePage((PVOID)PAGE_MASK(Pte));
+ MmDereferencePage((ULONG_PTR) PAGE_MASK(Pte));
}
/*
}
if (PhysicalAddr != NULL)
{
+ VALIDATE_PHYSICAL_ADDRESS(PAGE_MASK(Pte));
+
*PhysicalAddr = PAGE_MASK(Pte);
}
}
VOID
-MmDeletePageFileMapping(PEPROCESS Process, PVOID Address,
- SWAPENTRY* SwapEntry)
+MmDeletePageFileMapping(IN PEPROCESS Process,
+ IN PVOID Address,
+ OUT PSWAPENTRY SwapEntry)
/*
* FUNCTION: Delete a virtual mapping
*/
ULONG Pte;
PULONG Pde;
PEPROCESS CurrentProcess = PsGetCurrentProcess();
- BOOLEAN WasValid;
/*
* If we are setting a page in another process we need to be in its
/*
* Decrement the reference count for this page table.
*/
- if (Process != NULL && WasValid &&
+ if (Process != NULL &&
Process->AddressSpace.PageTableRefCountTable != NULL &&
ADDR_TO_PAGE_TABLE(Address) < 768)
{
/*
* Return some information to the caller
*/
- *SwapEntry = Pte >> 1;
+ *SwapEntry = GET_SWAPENTRY_FROM_PTE(Pte);
}
BOOLEAN
NTSTATUS MmCreatePageTable(PVOID PAddress)
{
PULONG page_dir;
- ULONG Address = (ULONG)PAddress;
- ULONG npage;
+ ULONG_PTR Address = (ULONG)PAddress;
+ ULONG_PTR npage;
DPRINT("MmGetPageEntry(Address %x)\n", Address);
if ((*page_dir) == 0)
{
NTSTATUS Status;
- Status = MmRequestPageMemoryConsumer(MC_NPPOOL, FALSE, (PVOID*)&npage);
+ Status = MmRequestPageMemoryConsumer(MC_NPPOOL, FALSE, &npage);
if (!NT_SUCCESS(Status))
{
return(Status);
}
+
+ VALIDATE_PHYSICAL_ADDRESS(PAGE_MASK(npage));
+
(*page_dir) = npage | 0x7;
- memset((PVOID)PAGE_ROUND_DOWN(ADDR_TO_PTE(Address)), 0, PAGESIZE);
+ memset((PVOID) PAGE_ROUND_DOWN(ADDR_TO_PTE(Address)), 0, PAGESIZE);
FLUSH_TLB;
}
return(STATUS_SUCCESS);
{
PULONG page_tlb;
PULONG page_dir;
- ULONG Address = (ULONG)PAddress;
- ULONG npage;
+ ULONG_PTR Address = (ULONG)PAddress;
+ ULONG_PTR npage;
DPRINT("MmGetPageEntry(Address %x)\n", Address);
if ((*page_dir) == 0)
{
NTSTATUS Status;
- Status = MmRequestPageMemoryConsumer(MC_NPPOOL, FALSE, (PVOID*)&npage);
+ Status = MmRequestPageMemoryConsumer(MC_NPPOOL, FALSE, &npage);
if (!NT_SUCCESS(Status))
{
+ DPRINT1("\n");
KeBugCheck(0);
}
(*page_dir) = npage | 0x7;
- memset((PVOID)PAGE_ROUND_DOWN(ADDR_TO_PTE(Address)), 0, PAGESIZE);
+ memset((PVOID) PAGE_ROUND_DOWN(ADDR_TO_PTE(Address)), 0, PAGESIZE);
FLUSH_TLB;
}
page_tlb = ADDR_TO_PTE(Address);
return(page_tlb);
}
/*
 * FUNCTION: Return nonzero (PA_DIRTY) if the PTE for Address in Process
 * has the dirty bit set.
 */
BOOLEAN
MmIsPageDirty(PEPROCESS Process, PVOID Address)
{
   return((MmGetPageEntryForProcess(Process, Address)) & PA_DIRTY);
}
+
BOOLEAN
MmIsAccessedAndResetAccessPage(PEPROCESS Process, PVOID Address)
{
return(Accessed);
}
-VOID MmSetCleanPage(PEPROCESS Process, PVOID Address)
+
+VOID
+MmSetCleanPage(IN PEPROCESS Process,
+ IN PVOID Address)
{
PULONG PageEntry;
PEPROCESS CurrentProcess = PsGetCurrentProcess();
}
}
-VOID MmSetDirtyPage(PEPROCESS Process, PVOID Address)
+
+BOOLEAN
+MiPageState(IN PEPROCESS Process,
+ IN PVOID Address,
+ IN ULONG PageState)
+{
+ PULONG PageEntry;
+ BOOLEAN Value;
+ PEPROCESS CurrentProcess = PsGetCurrentProcess();
+
+ if (Process != NULL && Process != CurrentProcess)
+ {
+ KeAttachProcess(Process);
+ }
+
+ PageEntry = MmGetPageEntry(Address);
+
+ switch (PageState)
+ {
+ case PAGE_STATE_VALID:
+ Value = ((*PageEntry) & PA_PRESENT) != 0;
+ break;
+ case PAGE_STATE_PROTOTYPE:
+ Value = ((*PageEntry) & PA_PROTOTYPE) != 0;
+ break;
+ case PAGE_STATE_TRANSITION:
+ Value = ((*PageEntry) & PA_TRANSITION) != 0;
+ break;
+ case PAGE_STATE_DEMAND_ZERO:
+ Value = ((*PageEntry) & PA_DEMAND_ZERO) != 0;
+ break;
+ default:
+ DPRINT1("Unknown page state 0x%.08x\n", PageState);
+ KeBugCheck(0);
+ break;
+ }
+
+ if (Process != NULL && Process != CurrentProcess)
+ {
+ KeDetachProcess();
+ }
+
+ return Value;
+}
+
+
+VOID
+MiClearPageState(IN PEPROCESS Process,
+ IN PVOID Address,
+ IN ULONG PageState)
+{
+ PULONG PageEntry;
+ PEPROCESS CurrentProcess = PsGetCurrentProcess();
+
+ if (Process != NULL && Process != CurrentProcess)
+ {
+ KeAttachProcess(Process);
+ }
+ PageEntry = MmGetPageEntry(Address);
+
+ switch (PageState)
+ {
+ case PAGE_STATE_VALID:
+ (*PageEntry) = (*PageEntry) & (~PA_PRESENT);
+ break;
+ case PAGE_STATE_PROTOTYPE:
+ (*PageEntry) = (*PageEntry) & (~PA_PROTOTYPE);
+ break;
+ case PAGE_STATE_TRANSITION:
+ assertmsg(!((*PageEntry) & PA_PRESENT), ("Page 0x%.08x in transition is present\n", Address));
+ (*PageEntry) = (*PageEntry) & (~PA_TRANSITION);
+ break;
+ case PAGE_STATE_DEMAND_ZERO:
+ assertmsg(!((*PageEntry) & PA_PRESENT), ("Demand zero page 0x%.08x is present\n", Address));
+ (*PageEntry) = (*PageEntry) & (~PA_DEMAND_ZERO);
+ break;
+ default:
+ DPRINT1("Unknown page state 0x%.08x\n", PageState);
+ KeBugCheck(0);
+ break;
+ }
+
+ FLUSH_TLB;
+ if (Process != NULL && Process != CurrentProcess)
+ {
+ KeDetachProcess();
+ }
+}
+
+
+VOID
+MiSetPageState(IN PEPROCESS Process,
+ IN PVOID Address,
+ IN ULONG PageState)
+{
+ PULONG PageEntry;
+ PEPROCESS CurrentProcess = PsGetCurrentProcess();
+
+ if (Process != NULL && Process != CurrentProcess)
+ {
+ KeAttachProcess(Process);
+ }
+ PageEntry = MmGetPageEntry(Address);
+
+ switch (PageState)
+ {
+ case PAGE_STATE_VALID:
+ (*PageEntry) = (*PageEntry) | PA_PRESENT;
+ break;
+ case PAGE_STATE_PROTOTYPE:
+ (*PageEntry) = (*PageEntry) | PA_PROTOTYPE;
+ break;
+ case PAGE_STATE_TRANSITION:
+ assertmsg(!((*PageEntry) & PA_PRESENT), ("Page 0x%.08x in transition is present\n", Address));
+ (*PageEntry) = (*PageEntry) | PA_TRANSITION;
+ break;
+ case PAGE_STATE_DEMAND_ZERO:
+ assertmsg(!((*PageEntry) & PA_PRESENT), ("Demand zero page 0x%.08x is present\n", Address));
+ (*PageEntry) = (*PageEntry) | PA_DEMAND_ZERO;
+ break;
+ default:
+ DPRINT1("Unknown page state 0x%.08x\n", PageState);
+ KeBugCheck(0);
+ break;
+ }
+
+ FLUSH_TLB;
+ if (Process != NULL && Process != CurrentProcess)
+ {
+ KeDetachProcess();
+ }
+}
+
+
+VOID
+MmSetDirtyPage(IN PEPROCESS Process,
+ IN PVOID Address)
{
PULONG PageEntry;
PEPROCESS CurrentProcess = PsGetCurrentProcess();
}
}
-VOID MmEnableVirtualMapping(PEPROCESS Process, PVOID Address)
+
+VOID
+MmEnableVirtualMapping(IN PEPROCESS Process,
+ IN PVOID Address)
{
PULONG PageEntry;
PEPROCESS CurrentProcess = PsGetCurrentProcess();
}
}
+
/*
 * FUNCTION: Return nonzero if the PTE for Address in Process has the
 * present (valid) bit set.  PA_PRESENT is bit 0, so the result is 0/1.
 */
BOOLEAN MmIsPagePresent(PEPROCESS Process, PVOID Address)
{
   return((MmGetPageEntryForProcess1(Process, Address)) & PA_PRESENT);
}
/*
 * FUNCTION: Return TRUE if the PTE for Address in Process encodes a swap
 * (pagefile) entry: not present, but with a nonzero swap entry in
 * bits 1-28 (see IS_SWAPENTRY_PTE).
 */
BOOLEAN
MmIsPageSwapEntry(IN PEPROCESS Process,
  IN PVOID Address)
{
   ULONG Pte;
   Pte = MmGetPageEntryForProcess1(Process, Address);
   return(IS_SWAPENTRY_PTE(Pte));
}
NTSTATUS
CurrentProcess = NULL;
}
- if (Process == NULL && Address < (PVOID)KERNEL_BASE)
+ if (Process == NULL && Address < (PVOID) KERNEL_BASE)
{
DPRINT1("No process\n");
KeBugCheck(0);
}
- if (Process != NULL && Address >= (PVOID)KERNEL_BASE)
+ if (Process != NULL && Address >= (PVOID) KERNEL_BASE)
{
DPRINT1("Setting kernel address with process context\n");
KeBugCheck(0);
}
if (PAGE_MASK((*Pte)) != 0)
{
- MmMarkPageUnmapped((PVOID)PAGE_MASK((*Pte)));
+ MmMarkPageUnmapped((ULONG_PTR) PAGE_MASK((*Pte)));
}
*Pte = PhysicalAddress | Attributes;
if (Process != NULL &&
}
NTSTATUS
-MmCreatePageFileMapping(PEPROCESS Process,
- PVOID Address,
- SWAPENTRY SwapEntry)
+MmCreatePageFileMapping(IN PEPROCESS Process,
+ IN PVOID Address,
+ IN SWAPENTRY SwapEntry)
{
PEPROCESS CurrentProcess;
PULONG Pte;
CurrentProcess = NULL;
}
- if (Process == NULL && Address < (PVOID)KERNEL_BASE)
+ if (Process == NULL && Address < (PVOID) KERNEL_BASE)
{
DPRINT1("No process\n");
KeBugCheck(0);
}
- if (Process != NULL && Address >= (PVOID)KERNEL_BASE)
+ if (Process != NULL && Address >= (PVOID) KERNEL_BASE)
{
DPRINT1("Setting kernel address with process context\n");
KeBugCheck(0);
}
if (PAGE_MASK((*Pte)) != 0)
{
- MmMarkPageUnmapped((PVOID)PAGE_MASK((*Pte)));
+ MmMarkPageUnmapped((ULONG_PTR) PAGE_MASK((*Pte)));
}
- *Pte = SwapEntry << 1;
+ SET_SWAPENTRY_IN_PTE(*Pte, SwapEntry);
if (Process != NULL &&
Process->AddressSpace.PageTableRefCountTable != NULL &&
ADDR_TO_PAGE_TABLE(Address) < 768)
CurrentProcess = NULL;
}
- if (Process == NULL && Address < (PVOID)KERNEL_BASE)
+ if (Process == NULL && Address < (PVOID) KERNEL_BASE)
{
DPRINT1("No process\n");
KeBugCheck(0);
}
- if (Process != NULL && Address >= (PVOID)KERNEL_BASE)
+ if (Process != NULL && Address >= (PVOID) KERNEL_BASE)
{
DPRINT1("Setting kernel address with process context\n");
KeBugCheck(0);
}
- MmMarkPageMapped((PVOID)PhysicalAddress);
+ MmMarkPageMapped(PhysicalAddress);
Attributes = ProtectToPTE(flProtect);
if (!(Attributes & PA_PRESENT) && PhysicalAddress != 0)
Address, Attributes, flProtect);
KeBugCheck(0);
}
-
+
if (Process != NULL && Process != CurrentProcess)
{
KeAttachProcess(Process);
}
if (PAGE_MASK((*Pte)) != 0)
{
- MmMarkPageUnmapped((PVOID)PAGE_MASK((*Pte)));
+ MmMarkPageUnmapped((ULONG_PTR) PAGE_MASK((*Pte)));
}
+
+#ifdef DBG
+
+ if ((MiBreakPointAddressLow != NULL) && (MiBreakPointAddressHigh != NULL) &&
+ ((PAGE_ROUND_DOWN(Address) >= PAGE_ROUND_DOWN(MiBreakPointAddressLow))
+ && (PAGE_ROUND_DOWN(Address) < PAGE_ROUND_UP(MiBreakPointAddressHigh))))
+ {
+ DbgPrint("Changing PTE of virtual address 0x%.08x from PTE 0x%.08x\n", Address, *Pte);
+ MiDumpPTE(*Pte);
+ DbgPrint("To 0x%.08x\n", PhysicalAddress | Attributes);
+ MiDumpPTE(PhysicalAddress | Attributes);
+ //assert(FALSE);
+ }
+
+#endif /* DBG */
+
*Pte = PhysicalAddress | Attributes;
if (Process != NULL &&
Process->AddressSpace.PageTableRefCountTable != NULL &&
ULONG PhysicalAddress,
BOOLEAN MayWait)
{
- if (!MmIsUsablePage((PVOID)PhysicalAddress))
+ if (!MmIsUsablePage(PhysicalAddress))
{
DPRINT1("Page at address %x not usable\n", PhysicalAddress);
KeBugCheck(0);
Process, Address, flProtect);
Attributes = ProtectToPTE(flProtect);
- if (Process != CurrentProcess)
+
+ if ((Process != NULL) && (Process != CurrentProcess))
{
KeAttachProcess(Process);
}
PageEntry = MmGetPageEntry(Address);
(*PageEntry) = PAGE_MASK(*PageEntry) | Attributes;
FLUSH_TLB;
- if (Process != CurrentProcess)
+ if ((Process != NULL) && (Process != CurrentProcess))
{
KeDetachProcess();
}
Pte = *MmGetPageEntry(vaddr);
if (Pte & PA_PRESENT)
{
+ VALIDATE_PHYSICAL_ADDRESS(PAGE_MASK(Pte));
+
p.QuadPart = PAGE_MASK(Pte);
}
else
return p;
}
+#ifdef DBG
+
+/* Debug helper: fetches the PTE mapping Address in the given process
+ * and pretty-prints it through MiDumpPTE. */
+VOID
+MiDumpProcessPTE(IN PEPROCESS Process,
+  IN PVOID Address)
+{
+  ULONG Value;
+
+  Value = MmGetPageEntryForProcess1(Process, Address);
+
+  MiDumpPTE(Value);
+}
+
+#endif /* DBG */
/* EOF */
-/* $Id: kmap.c,v 1.14 2002/01/01 00:21:56 dwelch Exp $
+/* $Id: kmap.c,v 1.15 2002/05/13 18:10:40 chorns Exp $
*
* COPYRIGHT: See COPYING in the top level directory
* PROJECT: ReactOS kernel
KeReleaseSpinLock(&AllocMapLock, oldIrql);
}
-PVOID
+PVOID
ExAllocatePage(VOID)
{
- ULONG PhysPage;
+ ULONG_PTR Page;
NTSTATUS Status;
- Status = MmRequestPageMemoryConsumer(MC_NPPOOL, FALSE, (PVOID*)&PhysPage);
+ Status = MmRequestPageMemoryConsumer(MC_NPPOOL, FALSE, &Page);
if (!NT_SUCCESS(Status))
{
return(NULL);
}
- return(ExAllocatePageWithPhysPage(PhysPage));
+ return(ExAllocatePageWithPhysPage(Page));
}
NTSTATUS
-MiZeroPage(ULONG PhysPage)
+MiZeroPage(IN ULONG_PTR Page)
{
PVOID TempAddress;
- TempAddress = ExAllocatePageWithPhysPage(PhysPage);
+ TempAddress = ExAllocatePageWithPhysPage(Page);
if (TempAddress == NULL)
{
return(STATUS_NO_MEMORY);
}
+
memset(TempAddress, 0, PAGESIZE);
ExUnmapPage(TempAddress);
return(STATUS_SUCCESS);
}
PVOID
-ExAllocatePageWithPhysPage(ULONG PhysPage)
+ExAllocatePageWithPhysPage(IN ULONG_PTR Page)
{
KIRQL oldlvl;
- ULONG addr;
+ PVOID addr;
ULONG i;
NTSTATUS Status;
{
DPRINT("i %x\n",i);
AllocMap[i / 32] |= (1 << (i % 32));
- addr = (ULONG)(NonPagedPoolBase + (i*PAGESIZE));
+ addr = (PVOID) (NonPagedPoolBase + (i*PAGESIZE));
Status = MmCreateVirtualMapping(NULL,
- (PVOID)addr,
+ addr,
PAGE_READWRITE | PAGE_SYSTEM,
- PhysPage,
+ Page,
FALSE);
if (!NT_SUCCESS(Status))
{
KeBugCheck(0);
}
KeReleaseSpinLock(&AllocMapLock, oldlvl);
- return((PVOID)addr);
+ return(addr);
}
}
KeReleaseSpinLock(&AllocMapLock, oldlvl);
/*
* ReactOS kernel
- * Copyright (C) 1998, 1999, 2000, 2001 ReactOS Team
+ * Copyright (C) 1998-2002 ReactOS Team
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
/* INCLUDES *****************************************************************/
+#include <roscfg.h>
#include <ddk/ntddk.h>
#include <internal/mm.h>
#include <internal/ps.h>
#define TAG_MAREA TAG('M', 'A', 'R', 'E')
+#ifdef DBG
+PVOID MiMemoryAreaBugCheckAddress = (PVOID) NULL;
+#endif /* DBG */
+
+/* Define to track memory area references */
+//#define TRACK_MEMORY_AREA_REFERENCES
+
/* FUNCTIONS *****************************************************************/
VOID MmDumpMemoryAreas(PLIST_ENTRY ListHead)
DbgPrint("Finished MmDumpMemoryAreas()\n");
}
-MEMORY_AREA* MmOpenMemoryAreaByAddress(PMADDRESS_SPACE AddressSpace,
- PVOID Address)
+#ifdef DBG
+
+/* Debug check: verify that every present PTE inside the memory area
+ * carries protection attributes compatible with the area's attributes.
+ * For copy-on-write areas a downgraded PAGE_READONLY/PAGE_EXECUTE_READ
+ * PTE is accepted; anything else asserts. */
+VOID
+MiValidateMemoryAreaPTEs(IN PMEMORY_AREA MemoryArea)
+{
+  ULONG PteProtect;
+  ULONG i;
+
+  if (!MiInitialized)
+    return;
+
+  /* Iterate the pages covered by the area.  PAGE_ROUND_UP(Length)/PAGESIZE
+   * with '<' includes a trailing partial page without stepping one page
+   * past the area, which the previous 'i <= Length / PAGESIZE' bound did
+   * whenever Length was an exact multiple of PAGESIZE. */
+  for (i = 0; i < (PAGE_ROUND_UP(MemoryArea->Length) / PAGESIZE); i++)
+    {
+      if (MmIsPagePresent(MemoryArea->Process, MemoryArea->BaseAddress + (i * PAGESIZE)))
+        {
+          PteProtect = MmGetPageProtect(MemoryArea->Process, MemoryArea->BaseAddress + (i * PAGESIZE));
+          if (PteProtect != MemoryArea->Attributes)
+            {
+              if (MmIsCopyOnWriteMemoryArea(MemoryArea))
+                {
+                  /* COW pages may legitimately be mapped read-only until
+                   * the first write fault. */
+                  if ((PteProtect != PAGE_READONLY) && (PteProtect != PAGE_EXECUTE_READ))
+                    {
+                      DPRINT1("COW memory area attributes 0x%.08x\n", MemoryArea->Attributes);
+                      DbgMmDumpProtection(MemoryArea->Attributes);
+                      DPRINT1("PTE attributes 0x%.08x\n", PteProtect);
+                      DbgMmDumpProtection(PteProtect);
+                      assertmsg(FALSE, ("PTE attributes and memory area protection are different. Area 0x%.08x\n",
+                        MemoryArea->BaseAddress));
+                    }
+                }
+              else
+                {
+                  DPRINT1("Memory area attributes 0x%.08x\n", MemoryArea->Attributes);
+                  DbgMmDumpProtection(MemoryArea->Attributes);
+                  DPRINT1("PTE attributes 0x%.08x\n", PteProtect);
+                  DbgMmDumpProtection(PteProtect);
+                  assertmsg(FALSE, ("PTE attributes and memory area protection are different. Area 0x%.08x\n",
+                    MemoryArea->BaseAddress));
+                }
+            }
+        }
+    }
+}
+
+
+/* Debug check: validate a memory area pointer before use — non-NULL and
+ * carrying the TAG_MAREA magic stamped by MmCreateMemoryArea. */
+VOID
+MiValidateMemoryArea(IN PMEMORY_AREA MemoryArea)
+{
+  assertmsg(MemoryArea != NULL,
+    ("No memory area can exist at 0x%.08x\n", MemoryArea));
+
+  assertmsg(MemoryArea->Magic == TAG_MAREA,
+    ("Bad magic (0x%.08x) for memory area (0x%.08x). It should be 0x%.08x\n",
+      MemoryArea->Magic, MemoryArea, TAG_MAREA));
+
+  /* FIXME: Can cause page faults and deadlock on the address space lock */
+  //MiValidateMemoryAreaPTEs(MemoryArea);
+}
+
+#endif /* DBG */
+
+/* Pushes the memory area's protection attributes down onto every page of
+ * the area that is currently mapped.  Pages not yet present pick up the
+ * attributes when they are faulted in. */
+VOID
+MmApplyMemoryAreaProtection(IN PMEMORY_AREA MemoryArea)
+{
+  ULONG i;
+
+  if (!MiInitialized)
+    return;
+
+  /* PAGE_ROUND_UP(Length)/PAGESIZE with '<' covers a trailing partial page
+   * without stepping one page past the area, which the previous
+   * 'i <= Length / PAGESIZE' bound did when Length was page-aligned. */
+  for (i = 0; i < (PAGE_ROUND_UP(MemoryArea->Length) / PAGESIZE); i++)
+    {
+      if (MmIsPagePresent(MemoryArea->Process, MemoryArea->BaseAddress + (i * PAGESIZE)))
+        {
+          MmSetPageProtect(MemoryArea->Process,
+                           MemoryArea->BaseAddress + (i * PAGESIZE),
+                           MemoryArea->Attributes);
+        }
+    }
+}
+
+
+/*
+ * NOTE: If the memory area is found, then it is referenced. The caller must
+ * call MmCloseMemoryArea() after use.
+ */
+PMEMORY_AREA
+MmOpenMemoryAreaByAddress(IN PMADDRESS_SPACE AddressSpace,
+ IN PVOID Address)
{
PLIST_ENTRY current_entry;
MEMORY_AREA* current;
DPRINT("MmOpenMemoryAreaByAddress(AddressSpace %x, Address %x)\n",
AddressSpace, Address);
-
+
// MmDumpMemoryAreas(&AddressSpace->MAreaListHead);
-
+
previous_entry = &AddressSpace->MAreaListHead;
current_entry = AddressSpace->MAreaListHead.Flink;
while (current_entry != &AddressSpace->MAreaListHead)
(current->BaseAddress + current->Length) > Address)
{
DPRINT("%s() = %x\n",__FUNCTION__,current);
+ MmReferenceMemoryArea(current);
return(current);
}
if (current->BaseAddress > Address)
return(NULL);
}
+/*
+ * NOTE: If the memory area is found, then it is referenced. The caller must
+ * call MmCloseMemoryArea() after use.
+ */
MEMORY_AREA* MmOpenMemoryAreaByRegion(PMADDRESS_SPACE AddressSpace,
PVOID Address,
ULONG Length)
{
DPRINT("Finished MmOpenMemoryAreaByRegion() = %x\n",
current);
+ MmReferenceMemoryArea(current);
return(current);
}
Extent = (ULONG)current->BaseAddress + current->Length;
{
DPRINT("Finished MmOpenMemoryAreaByRegion() = %x\n",
current);
+ MmReferenceMemoryArea(current);
return(current);
}
if (current->BaseAddress <= Address &&
{
DPRINT("Finished MmOpenMemoryAreaByRegion() = %x\n",
current);
+ MmReferenceMemoryArea(current);
return(current);
}
if (current->BaseAddress >= (Address+Length))
return(NULL);
}
+
+/* Releases the reference taken by MmOpenMemoryAreaByAddress() or
+ * MmOpenMemoryAreaByRegion(). */
+VOID
+MmCloseMemoryArea(IN PMEMORY_AREA MemoryArea)
+{
+  MmDereferenceMemoryArea(MemoryArea);
+}
+
+
static VOID MmInsertMemoryArea(PMADDRESS_SPACE AddressSpace,
MEMORY_AREA* marea)
{
PLIST_ENTRY inserted_entry = &marea->Entry;
MEMORY_AREA* current;
MEMORY_AREA* next;
-
+
DPRINT("MmInsertMemoryArea(marea %x)\n", marea);
DPRINT("marea->BaseAddress %x\n", marea->BaseAddress);
DPRINT("marea->Length %x\n", marea->Length);
}
if (current->BaseAddress < marea->BaseAddress &&
next->BaseAddress > marea->BaseAddress)
- {
+ {
DPRINT("Inserting before %x\n", current_entry);
inserted_entry->Flink = current_entry->Flink;
inserted_entry->Blink = current_entry;
return(STATUS_SUCCESS);
}
-NTSTATUS
-MmFreeMemoryArea(PMADDRESS_SPACE AddressSpace,
- PVOID BaseAddress,
- ULONG Length,
- VOID (*FreePage)(PVOID Context, MEMORY_AREA* MemoryArea, PVOID Address,
- ULONG PhysAddr, SWAPENTRY SwapEntry, BOOLEAN Dirty),
- PVOID FreePageContext)
+/* NOTE: The address space lock must be held when called */
+NTSTATUS
+MmFreeMemoryArea(IN PMADDRESS_SPACE AddressSpace,
+ IN PVOID BaseAddress,
+ IN ULONG Length,
+ IN PFREE_MEMORY_AREA_PAGE_CALLBACK FreePage,
+ IN PVOID FreePageContext)
{
- MEMORY_AREA* MemoryArea;
- ULONG i;
-
- DPRINT("MmFreeMemoryArea(AddressSpace %x, BaseAddress %x, Length %x,"
- "FreePageContext %d)\n",AddressSpace,BaseAddress,Length,FreePageContext);
-
- MemoryArea = MmOpenMemoryAreaByAddress(AddressSpace,
- BaseAddress);
- if (MemoryArea == NULL)
- {
- KeBugCheck(0);
- return(STATUS_UNSUCCESSFUL);
- }
+ MEMORY_AREA* MemoryArea;
+ ULONG i;
+
+ DPRINT("MmFreeMemoryArea(AddressSpace %x, BaseAddress %x, Length %x, "
+ "FreePageContext %d)\n",AddressSpace,BaseAddress,Length,FreePageContext);
+
+ MemoryArea = MmOpenMemoryAreaByAddress(AddressSpace, BaseAddress);
+ if (MemoryArea == NULL)
+ {
+ assertmsg(FALSE, ("Freeing non-existant memory area at 0x%.08x\n", BaseAddress));
+ return(STATUS_UNSUCCESSFUL);
+ }
+
+ MmCloseMemoryArea(MemoryArea);
+ InterlockedDecrement(&MemoryArea->ReferenceCount);
+#if 0
+ assertmsg(MemoryArea->ReferenceCount == 0,
+ ("Memory area at address 0x%.08x has %d outstanding references\n",
+ BaseAddress, MemoryArea->ReferenceCount));
+#endif
for (i=0; i<(PAGE_ROUND_UP(MemoryArea->Length)/PAGESIZE); i++)
{
- ULONG PhysAddr = 0;
- BOOL Dirty;
+ ULONG_PTR PhysicalPage = 0;
+ BOOLEAN Dirty = FALSE;
SWAPENTRY SwapEntry = 0;
+ PVOID VirtualPage = NULL;
- if (MmIsPageSwapEntry(AddressSpace->Process,
- MemoryArea->BaseAddress + (i * PAGESIZE)))
+ VirtualPage = MemoryArea->BaseAddress + (i * PAGESIZE);
+
+#ifdef DBG
+ if ((MiMemoryAreaBugCheckAddress != NULL)
+ && ((MiMemoryAreaBugCheckAddress >= VirtualPage)
+ && MiMemoryAreaBugCheckAddress < VirtualPage + PAGESIZE))
+ {
+ assertmsg(FALSE, ("VirtualPage 0x%.08x MiMemoryAreaBugCheckAddress 0x%.08x \n",
+ VirtualPage));
+ }
+#endif
+
+ if (FreePage != NULL)
+ {
+ FreePage(TRUE, FreePageContext, MemoryArea,
+ VirtualPage, 0, 0, FALSE);
+ }
+
+ if (MmIsPageSwapEntry(AddressSpace->Process, VirtualPage))
{
MmDeletePageFileMapping(AddressSpace->Process,
- MemoryArea->BaseAddress + (i * PAGESIZE),
+ VirtualPage,
&SwapEntry);
}
else
{
MmDeleteVirtualMapping(AddressSpace->Process,
- MemoryArea->BaseAddress + (i*PAGESIZE),
- FALSE, &Dirty, &PhysAddr);
+ VirtualPage,
+ FALSE, &Dirty, &PhysicalPage);
}
if (FreePage != NULL)
{
- FreePage(FreePageContext, MemoryArea,
- MemoryArea->BaseAddress + (i * PAGESIZE), PhysAddr, SwapEntry, Dirty);
+ FreePage(FALSE, FreePageContext, MemoryArea,
+ VirtualPage, PhysicalPage, SwapEntry, Dirty);
}
}
PMEMORY_AREA Result;
PMEMORY_AREA Split;
- Result = ExAllocatePoolWithTag(NonPagedPool, sizeof(MEMORY_AREA),
+ Result = ExAllocatePoolWithTag(NonPagedPool, sizeof(MEMORY_AREA),
TAG_MAREA);
RtlZeroMemory(Result,sizeof(MEMORY_AREA));
Result->Type = NewType;
Result->Length = Length;
Result->Attributes = NewAttributes;
Result->LockCount = 0;
+ Result->ReferenceCount = 1;
Result->Process = Process;
-
+
if (BaseAddress == OriginalMemoryArea->BaseAddress)
{
OriginalMemoryArea->BaseAddress = BaseAddress + Length;
return(Result);
}
-
+
Split = ExAllocatePoolWithTag(NonPagedPool, sizeof(MEMORY_AREA),
TAG_MAREA);
RtlCopyMemory(Split,OriginalMemoryArea,sizeof(MEMORY_AREA));
return(Split);
}
-NTSTATUS MmCreateMemoryArea(PEPROCESS Process,
- PMADDRESS_SPACE AddressSpace,
- ULONG Type,
- PVOID* BaseAddress,
- ULONG Length,
- ULONG Attributes,
- MEMORY_AREA** Result,
- BOOL FixedAddress)
+NTSTATUS
+MmCreateMemoryArea(IN PEPROCESS Process,
+ IN PMADDRESS_SPACE AddressSpace,
+ IN ULONG Type,
+ IN OUT PVOID* BaseAddress,
+ IN ULONG Length,
+ IN ULONG Attributes,
+ OUT PMEMORY_AREA* Result,
+ IN BOOLEAN FixedAddress)
/*
* FUNCTION: Create a memory area
* ARGUMENTS:
* Length = Length to allocate
* Attributes = Protection attributes for the memory area
* Result = Receives a pointer to the memory area on exit
+ * FixedAddress = Wether the memory area must be based at BaseAddress or not
* RETURNS: Status
* NOTES: Lock the address space before calling this function
*/
{
+ PMEMORY_AREA MemoryArea;
+
DPRINT("MmCreateMemoryArea(Type %d, BaseAddress %x,"
"*BaseAddress %x, Length %x, Attributes %x, Result %x)\n",
Type,BaseAddress,*BaseAddress,Length,Attributes,Result);
else
{
(*BaseAddress) = (PVOID)PAGE_ROUND_DOWN((*BaseAddress));
- if (MmOpenMemoryAreaByRegion(AddressSpace,
- *BaseAddress,
- Length)!=NULL)
+ MemoryArea = MmOpenMemoryAreaByRegion(AddressSpace, *BaseAddress, Length);
+ if (MemoryArea)
{
+ MmCloseMemoryArea(MemoryArea);
DPRINT("Memory area already occupied\n");
return(STATUS_CONFLICTING_ADDRESSES);
}
}
-
+
+ DPRINT("MmCreateMemoryArea(*BaseAddress %x)\n", *BaseAddress);
+
*Result = ExAllocatePoolWithTag(NonPagedPool, sizeof(MEMORY_AREA),
TAG_MAREA);
RtlZeroMemory(*Result,sizeof(MEMORY_AREA));
+ SET_MAGIC(*Result, TAG_MAREA)
(*Result)->Type = Type;
(*Result)->BaseAddress = *BaseAddress;
(*Result)->Length = Length;
(*Result)->Attributes = Attributes;
(*Result)->LockCount = 0;
+ (*Result)->ReferenceCount = 1;
(*Result)->Process = Process;
-
+
+ MmApplyMemoryAreaProtection(*Result);
+
MmInsertMemoryArea(AddressSpace, *Result);
DPRINT("MmCreateMemoryArea() succeeded\n");
return(STATUS_SUCCESS);
}
+
+#ifdef DBG
+
+/* DBG build: take a reference on a memory area, validating it first.
+ * FileName/LineNumber identify the caller for the optional
+ * TRACK_MEMORY_AREA_REFERENCES trace. */
+VOID
+MiReferenceMemoryArea(IN PMEMORY_AREA MemoryArea,
+  IN LPSTR FileName,
+  IN ULONG LineNumber)
+{
+  VALIDATE_MEMORY_AREA(MemoryArea);
+
+  InterlockedIncrement(&MemoryArea->ReferenceCount);
+
+#ifdef TRACK_MEMORY_AREA_REFERENCES
+  DbgPrint("(0x%.08x)(%s:%d) Referencing memory area 0x%.08x (New ref.count %d)\n",
+    KeGetCurrentThread(), FileName, LineNumber,
+    MemoryArea->BaseAddress,
+    MemoryArea->ReferenceCount);
+#endif /* TRACK_MEMORY_AREA_REFERENCES */
+}
+
+
+/* DBG build: drop a reference on a memory area, validating it first.
+ * The count must stay positive here — the final reference is released
+ * by MmFreeMemoryArea with a raw decrement, never through this path. */
+VOID
+MiDereferenceMemoryArea(IN PMEMORY_AREA MemoryArea,
+  IN LPSTR FileName,
+  IN ULONG LineNumber)
+{
+  LONG NewCount;
+
+  VALIDATE_MEMORY_AREA(MemoryArea);
+
+  /* Use the interlocked result directly; re-reading ReferenceCount after
+   * the decrement races with concurrent (de)references and could make
+   * both the trace and the assert report a stale value. */
+  NewCount = InterlockedDecrement(&MemoryArea->ReferenceCount);
+
+#ifdef TRACK_MEMORY_AREA_REFERENCES
+  DbgPrint("(0x%.08x)(%s:%d) Dereferencing memory area 0x%.08x (New ref.count %d)\n",
+    KeGetCurrentThread(), FileName, LineNumber,
+    MemoryArea->BaseAddress,
+    NewCount);
+#endif /* TRACK_MEMORY_AREA_REFERENCES */
+
+  assertmsg(NewCount > 0,
+    ("No outstanding references on memory area (0x%.08x)\n", MemoryArea));
+}
+
+#else /* !DBG */
+
+/* Free build: plain interlocked reference, no validation or tracing. */
+VOID
+MiReferenceMemoryArea(IN PMEMORY_AREA MemoryArea)
+{
+  InterlockedIncrement(&MemoryArea->ReferenceCount);
+}
+
+
+/* Free build: plain interlocked dereference, no validation or tracing. */
+VOID
+MiDereferenceMemoryArea(IN PMEMORY_AREA MemoryArea)
+{
+  InterlockedDecrement(&MemoryArea->ReferenceCount);
+}
+
+#endif /* !DBG */
-/* $Id: mdl.c,v 1.37 2002/05/07 22:35:02 hbirr Exp $
+/* $Id: mdl.c,v 1.38 2002/05/13 18:10:40 chorns Exp $
*
* COPYRIGHT: See COPYING in the top level directory
* PROJECT: ReactOS kernel
PVOID
MmGetMdlPageAddress(PMDL Mdl, PVOID Offset)
{
- PULONG MdlPages;
+ PULONG_PTR MdlPages;
- MdlPages = (PULONG)(Mdl + 1);
+ MdlPages = (PULONG_PTR)(Mdl + 1);
- return((PVOID)MdlPages[((ULONG)Offset) / PAGESIZE]);
+ return((PVOID) MdlPages[((ULONG_PTR) Offset) / PAGESIZE]);
}
VOID STDCALL
* MDL is updated
*/
{
+ PULONG_PTR MdlPages;
ULONG i;
- PULONG MdlPages;
/*
* FIXME: I don't know whether this right, but it looks sensible
}
/*
- * FIXME: Seems sensible
+ * FIXME: Seems sensible
*/
if (!(Mdl->MdlFlags & MDL_PAGES_LOCKED))
{
return;
}
- MdlPages = (PULONG)(Mdl + 1);
+ MdlPages = (PULONG_PTR) (Mdl + 1);
for (i=0; i<(PAGE_ROUND_UP(Mdl->ByteCount+Mdl->ByteOffset)/PAGESIZE); i++)
{
- MmUnlockPage((PVOID)MdlPages[i]);
- MmDereferencePage((PVOID)MdlPages[i]);
+ MmUnlockPage(MdlPages[i]);
+ MmDereferencePage(MdlPages[i]);
}
Mdl->MdlFlags = Mdl->MdlFlags & (~MDL_PAGES_LOCKED);
}
{
PVOID Base;
ULONG i;
- PULONG MdlPages;
+ PULONG_PTR MdlPages;
MEMORY_AREA* Result;
NTSTATUS Status;
Base = NULL;
Status = MmCreateMemoryArea(NULL,
- MmGetKernelAddressSpace(),
- MEMORY_AREA_MDL_MAPPING,
- &Base,
- Mdl->ByteCount + Mdl->ByteOffset,
- 0,
- &Result,
- FALSE);
+ MmGetKernelAddressSpace(),
+ MEMORY_AREA_MDL_MAPPING,
+ &Base,
+ Mdl->ByteCount + Mdl->ByteOffset,
+ PAGE_READWRITE,
+ &Result,
+ FALSE);
if (!NT_SUCCESS(Status))
{
MmUnlockAddressSpace(MmGetKernelAddressSpace());
KeBugCheck(0);
- return(STATUS_SUCCESS);
+ return(NULL);
}
MmUnlockAddressSpace(MmGetKernelAddressSpace());
for (i=0; i<(PAGE_ROUND_UP(Mdl->ByteCount+Mdl->ByteOffset)/PAGESIZE); i++)
{
Status = MmCreateVirtualMapping(NULL,
- (PVOID)((ULONG)Base+(i*PAGESIZE)),
- PAGE_READWRITE,
+ (PVOID)((ULONG_PTR) Base+(i*PAGESIZE)),
+ Result->Attributes,
MdlPages[i],
TRUE);
if (!NT_SUCCESS(Status))
VOID
-MmBuildMdlFromPages(PMDL Mdl, PULONG Pages)
+MmBuildMdlFromPages(IN PMDL Mdl,
+ IN PULONG_PTR Pages)
{
ULONG i;
- PULONG MdlPages;
+ PULONG_PTR MdlPages;
Mdl->MdlFlags = Mdl->MdlFlags |
(MDL_PAGES_LOCKED | MDL_IO_PAGE_READ);
- MdlPages = (PULONG)(Mdl + 1);
+ MdlPages = (PULONG_PTR) (Mdl + 1);
for (i=0;i<(PAGE_ROUND_UP(Mdl->ByteOffset+Mdl->ByteCount)/PAGESIZE);i++)
{
*/
MmLockAddressSpace(&Mdl->Process->AddressSpace);
- MdlPages = (ULONG *)(Mdl + 1);
+ MdlPages = (ULONG *)(Mdl + 1);
NrPages = PAGE_ROUND_UP(Mdl->ByteOffset + Mdl->ByteCount) / PAGESIZE;
for (i = 0; i < NrPages; i++)
{
PVOID Address;
-
- Address = Mdl->StartVa + (i*PAGESIZE);
-
+
+ Address = Mdl->StartVa + (i*PAGESIZE);
+
if (!MmIsPagePresent(NULL, Address))
{
Status = MmNotPresentFault(Mode, (ULONG)Address, TRUE);
{
for (j = 0; j < i; j++)
{
- MmUnlockPage((PVOID)MdlPages[j]);
- MmDereferencePage((PVOID)MdlPages[j]);
+ MmUnlockPage((ULONG_PTR) MdlPages[j]);
+ MmDereferencePage((ULONG_PTR) MdlPages[j]);
}
ExRaiseStatus(Status);
}
}
else
{
- MmLockPage((PVOID)MmGetPhysicalAddressForProcess(NULL, Address));
+ MmLockPage(MmGetPhysicalAddressForProcess(NULL, Address));
}
if ((Operation == IoWriteAccess || Operation == IoModifyAccess) &&
(!(MmGetPageProtect(NULL, (PVOID)Address) & PAGE_READWRITE)))
{
for (j = 0; j < i; j++)
{
- MmUnlockPage((PVOID)MdlPages[j]);
- MmDereferencePage((PVOID)MdlPages[j]);
+ MmUnlockPage((ULONG_PTR) MdlPages[j]);
+ MmDereferencePage((ULONG_PTR) MdlPages[j]);
}
ExRaiseStatus(Status);
}
}
MdlPages[i] = MmGetPhysicalAddressForProcess(NULL, Address);
- MmReferencePage((PVOID)MdlPages[i]);
+ MmReferencePage((ULONG_PTR) MdlPages[i]);
}
MmUnlockAddressSpace(&Mdl->Process->AddressSpace);
if (Mdl->Process != CurrentProcess)
}
/* EOF */
-
-
-
-
-
-
-
-
-
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-/* $Id: mm.c,v 1.54 2002/01/23 23:39:26 chorns Exp $
+/* $Id: mm.c,v 1.55 2002/05/13 18:10:40 chorns Exp $
*
* COPYRIGHT: See COPYING in the top directory
* PROJECT: ReactOS kernel
/* GLOBALS *****************************************************************/
-PVOID EXPORTED MmUserProbeAddress = NULL;
+PVOID EXPORTED MmUserProbeAddress = NULL;
PVOID EXPORTED MmHighestUserAddress = NULL;
-MM_STATS MmStats;
+MM_STATS MmStats;
extern PVOID MmSharedDataPagePhysicalAddress;
/* FUNCTIONS ****************************************************************/
+#ifdef DBG
+
+/* Debug helper: print a human-readable line for each protection flag set
+ * in Value (Win32 PAGE_* constants are distinct bits, so simple bit tests
+ * suffice). */
+VOID
+DbgMmDumpProtection(IN ULONG Value)
+{
+  if (Value & PAGE_NOACCESS)
+    DbgPrint("No access\n");
+
+  if (Value & PAGE_GUARD)
+    DbgPrint("Guard\n");
+
+  if (Value & PAGE_READWRITE)
+    DbgPrint("Read/Write\n");
+
+  if (Value & PAGE_EXECUTE_READWRITE)
+    DbgPrint("Execute/Read/Write\n");
+
+  if (Value & PAGE_READONLY)
+    DbgPrint("Read only\n");
+
+  if (Value & PAGE_EXECUTE)
+    DbgPrint("Execute\n");
+
+  if (Value & PAGE_EXECUTE_READ)
+    DbgPrint("Execute/Read\n");
+
+  if (Value & PAGE_SYSTEM)
+    DbgPrint("System\n");
+
+  if (Value & PAGE_NOCACHE)
+    DbgPrint("No cache\n");
+
+  /* Fixed copy-paste bug: this branch previously printed "No cache" for
+   * PAGE_WRITETHROUGH, duplicating the PAGE_NOCACHE message. */
+  if (Value & PAGE_WRITETHROUGH)
+    DbgPrint("Write through\n");
+}
+
+#endif /* DBG */
+
+
NTSTATUS MmReleaseMemoryArea(PEPROCESS Process, PMEMORY_AREA Marea)
{
NTSTATUS Status;
NTSTATUS MmCommitPagedPoolAddress(PVOID Address)
{
NTSTATUS Status;
- PVOID AllocatedPage;
+ ULONG_PTR AllocatedPage;
Status = MmRequestPageMemoryConsumer(MC_PPOOL, FALSE, &AllocatedPage);
if (!NT_SUCCESS(Status))
{
-/* $Id: mminit.c,v 1.31 2002/04/26 13:11:55 ekohl Exp $
+/* $Id: mminit.c,v 1.32 2002/05/13 18:10:40 chorns Exp $
*
* COPYRIGHT: See COPYING in the top directory
* PROJECT: ReactOS kernel
/* GLOBALS *****************************************************************/
-/*
- * Size of extended memory (kb) (fixed for now)
- */
-#define EXTENDED_MEMORY_SIZE (3*1024*1024)
-
/*
* Compiler defined symbols
*/
static MEMORY_AREA* kernel_shared_data_desc = NULL;
static MEMORY_AREA* MiPagedPoolDescriptor = NULL;
-PVOID MmSharedDataPagePhysicalAddress = NULL;
+ULONG_PTR MmSharedDataPagePhysicalAddress = 0;
+
+BOOLEAN MiInitialized = FALSE;
/* FUNCTIONS ****************************************************************/
return(MmSystemSize);
}
-VOID MiShutdownMemoryManager(VOID)
+VOID
+MiShutdownMemoryManager()
{
}
-VOID MmInitVirtualMemory(ULONG LastKernelAddress,
- ULONG KernelLength)
-/*
- * FUNCTION: Intialize the memory areas list
- * ARGUMENTS:
- * bp = Pointer to the boot parameters
- * kernel_len = Length of the kernel
- */
+VOID
+MmInitVirtualMemory(IN ULONG LastKernelAddress,
+ IN ULONG KernelLength)
{
PVOID BaseAddress;
ULONG Length;
LastKernelAddress = PAGE_ROUND_UP(LastKernelAddress);
MmInitMemoryAreas();
- ExInitNonPagedPool(LastKernelAddress + PAGESIZE);
+ ExInitNonPagedPool((PVOID) (LastKernelAddress + PAGESIZE));
/*
* Setup the system area descriptor list
Status = MmCreateVirtualMapping(NULL,
(PVOID)KI_USER_SHARED_DATA,
PAGE_READWRITE,
- (ULONG)MmSharedDataPagePhysicalAddress,
+ MmSharedDataPagePhysicalAddress,
TRUE);
if (!NT_SUCCESS(Status))
{
MmInitializeMemoryConsumer(MC_USER, MmTrimUserMemory);
}
+/*
+ * Called at DISPATCH_LEVEL
+ */
VOID MmInit1(ULONG FirstKrnlPhysAddr,
ULONG LastKrnlPhysAddr,
ULONG LastKernelAddress,
*/
MmUserProbeAddress = (PVOID)0x7fff0000;
MmHighestUserAddress = (PVOID)0x7ffeffff;
-
+
/*
* Initialize memory managment statistics
*/
#ifdef BIOS_MEM_FIX
MmStats.NrTotalPages += 16;
#endif
+
DbgPrint("Used memory %dKb\n", (MmStats.NrTotalPages * PAGESIZE) / 1024);
LastKernelAddress = (ULONG)MmInitializePageList(
* Intialize memory areas
*/
MmInitVirtualMemory(LastKernelAddress, kernel_len);
+
+ MiInitialized = TRUE;
}
+/*
+ * Called at DISPATCH_LEVEL
+ */
VOID MmInit2(VOID)
{
- MmInitSectionImplementation();
- MmInitPagingFile();
+ MmInitSectionImplementation();
+ MmInitPagingFile();
}
+/*
+ * Called at PASSIVE_LEVEL
+ */
VOID MmInit3(VOID)
{
- MmInitPagerThread();
- MmCreatePhysicalMemorySection();
- MmInitializeRmapList();
+ MmCreatePhysicalMemorySection();
+ MmInitializeRmapList();
+ MmInitPagerThread();
- /* FIXME: Read parameters from memory */
+ /* FIXME: Read parameters from memory */
}
-/* $Id: ncache.c,v 1.15 2002/01/01 00:21:56 dwelch Exp $
+/* $Id: ncache.c,v 1.16 2002/05/13 18:10:40 chorns Exp $
*
* COPYRIGHT: See COPYING in the top level directory
* PROJECT: ReactOS kernel
PAGE_WRITETHROUGH;
for (i = 0; i <= (NumberOfBytes / PAGESIZE); i++)
{
- PVOID NPage;
+ ULONG_PTR NPage;
Status = MmRequestPageMemoryConsumer(MC_NPPOOL, TRUE, &NPage);
MmCreateVirtualMapping (NULL,
return ((PVOID)Result);
}
-VOID STATIC
-MmFreeNonCachedPage(PVOID Context, MEMORY_AREA* MemoryArea, PVOID Address, ULONG PhysAddr,
- SWAPENTRY SwapEntry, BOOLEAN Dirty)
+VOID
+MmFreeNonCachedPage(IN BOOLEAN Before,
+ IN PVOID Context,
+ IN PMEMORY_AREA MemoryArea,
+ IN PVOID Address,
+ IN ULONG_PTR PhysicalAddress,
+ IN SWAPENTRY SwapEntry,
+ IN BOOLEAN Dirty)
{
- assert(SwapEntry == 0);
- if (PhysAddr != 0)
+ if (!Before)
{
- MmDereferencePage((PVOID)PhysAddr);
+ assert(SwapEntry == 0);
+ if (PhysicalAddress != 0)
+ {
+ MmDereferencePage(PhysicalAddress);
+ }
}
}
-/* $Id: npool.c,v 1.55 2002/01/01 05:09:50 dwelch Exp $
+/* $Id: npool.c,v 1.56 2002/05/13 18:10:40 chorns Exp $
*
* COPYRIGHT: See COPYING in the top level directory
* PROJECT: ReactOS kernel
/*
* Memory managment initalized symbol for the base of the pool
*/
-static unsigned int kernel_pool_base = 0;
+static PVOID kernel_pool_base = 0;
/*
* Head of the list of free blocks
#endif /* TAG_STATISTICS_TRACKING */
VOID
-ExInitNonPagedPool(ULONG BaseAddress)
+ExInitNonPagedPool(IN PVOID BaseAddress)
{
kernel_pool_base = BaseAddress;
KeInitializeSpinLock(&MmNpoolLock);
KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
}
- if (base_addr < (kernel_pool_base) ||
- (base_addr+current->Size) > (kernel_pool_base)+NONPAGED_POOL_SIZE)
+ if (base_addr < (ULONG_PTR) (kernel_pool_base) ||
+ (base_addr+current->Size) > (ULONG_PTR) (kernel_pool_base)+NONPAGED_POOL_SIZE)
{
DbgPrint("Block %x found outside pool area\n",current);
DbgPrint("Size %d\n",current->Size);
- DbgPrint("Limits are %x %x\n",kernel_pool_base,
- kernel_pool_base+NONPAGED_POOL_SIZE);
+ DbgPrint("Limits are %x %x\n", kernel_pool_base,
+ (ULONG_PTR) kernel_pool_base + NONPAGED_POOL_SIZE);
KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
}
blocks_seen++;
current);
KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
}
- if (base_addr < (kernel_pool_base) ||
+ if (base_addr < (ULONG_PTR) (kernel_pool_base) ||
(base_addr+current->Size) >
- (kernel_pool_base)+NONPAGED_POOL_SIZE)
+ (ULONG_PTR) (kernel_pool_base) + NONPAGED_POOL_SIZE)
{
DbgPrint("Block %x found outside pool area\n",current);
for(;;);
ULONG end;
ULONG i;
- start = (ULONG)blk;
- end = (ULONG)blk + sizeof(BLOCK_HDR) + blk->Size;
+ start = (ULONG_PTR) blk;
+ end = (ULONG_PTR) blk + sizeof(BLOCK_HDR) + blk->Size;
/*
* If the block doesn't contain a whole page then there is nothing to do
NTSTATUS Status;
KIRQL oldIrql;
- start = (ULONG)MiAllocNonPagedPoolRegion(nr_pages);
+ start = (ULONG_PTR) MiAllocNonPagedPoolRegion(nr_pages);
DPRINT("growing heap for block size %d, ",size);
DPRINT("start %x\n",start);
for (i=0;i<nr_pages;i++)
{
- PVOID Page;
+ ULONG_PTR Page;
+
/* FIXME: Check whether we can really wait here. */
Status = MmRequestPageMemoryConsumer(MC_NPPOOL, TRUE, &Page);
if (!NT_SUCCESS(Status))
Status = MmCreateVirtualMapping(NULL,
(PVOID)(start + (i*PAGESIZE)),
PAGE_READWRITE,
- (ULONG)Page,
+ Page,
FALSE);
if (!NT_SUCCESS(Status))
{
return;
}
- DPRINT("freeing block %x\n",blk);
-
- POOL_TRACE("ExFreePool(block %x), size %d, caller %x\n",block,blk->size,
+ DPRINT("freeing block %x\n",block);
+
+ POOL_TRACE("ExFreePool(block %x), size %d, caller %x\n",block,block->size,
((PULONG)&block)[-1]);
KeAcquireSpinLock(&MmNpoolLock, &oldIrql);
}
DPRINT("freeing block %x\n",blk);
-
+
POOL_TRACE("ExFreePool(block %x), size %d, caller %x\n",block,blk->size,
((PULONG)&block)[-1]);
ExAllocateWholePageBlock(ULONG UserSize)
{
PVOID Address;
- PVOID Page;
+ ULONG_PTR Page;
ULONG i;
ULONG Size;
ULONG NrPages;
for (i = 0; i < NrPages; i++)
{
Page = MmAllocPage(MC_NPPOOL, 0);
- if (Page == NULL)
+ if (Page == 0)
{
KeBugCheck(0);
}
MmCreateVirtualMapping(NULL,
Address + (i * PAGESIZE),
PAGE_READWRITE | PAGE_SYSTEM,
- (ULONG)Page,
+ Page,
TRUE);
}
- *((PULONG)((ULONG)Address + (NrPages * PAGESIZE) - Size)) = NrPages;
- return((PVOID)((ULONG)Address + (NrPages * PAGESIZE) - UserSize));
+ *((PULONG)((ULONG_PTR) Address + (NrPages * PAGESIZE) - Size)) = NrPages;
+ return((PVOID)((ULONG_PTR) Address + (NrPages * PAGESIZE) - UserSize));
}
VOID STDCALL
{
ULONG NrPages;
- if ((ULONG)Addr < kernel_pool_base ||
- (ULONG)Addr >= (kernel_pool_base + NONPAGED_POOL_SIZE))
+ if ((ULONG_PTR)Addr < (ULONG_PTR) kernel_pool_base ||
+ (ULONG_PTR)Addr >= ((ULONG_PTR) kernel_pool_base + NONPAGED_POOL_SIZE))
{
DbgPrint("Block %x found outside pool area\n", Addr);
KeBugCheck(0);
}
- NrPages = *(PULONG)((ULONG)Addr - sizeof(ULONG));
- MiFreeNonPagedPoolRegion((PVOID)PAGE_ROUND_DOWN((ULONG)Addr), NrPages, TRUE);
+ NrPages = *(PULONG)((ULONG_PTR) Addr - sizeof(ULONG));
+ MiFreeNonPagedPoolRegion((PVOID)PAGE_ROUND_DOWN((ULONG_PTR) Addr), NrPages, TRUE);
}
#endif /* WHOLE_PAGE_ALLOCATIONS */
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-/* $Id: pagefile.c,v 1.18 2002/03/18 16:15:08 ekohl Exp $
+/* $Id: pagefile.c,v 1.19 2002/05/13 18:10:40 chorns Exp $
*
* PROJECT: ReactOS kernel
* FILE: ntoskrnl/mm/pagefile.c
/*
* Translate between a swap entry and a file and offset pair.
*/
-#define FILE_FROM_ENTRY(i) ((i) >> 24)
+#define PTE_SWAP_FILE_MASK 0x0f000000
+#define PTE_SWAP_FILE_BIT 24
+#define FILE_FROM_ENTRY(i) (((i) & PTE_SWAP_FILE_MASK) >> PTE_SWAP_FILE_BIT)
#define OFFSET_FROM_ENTRY(i) (((i) & 0xffffff) - 1)
-#define ENTRY_FROM_FILE_OFFSET(i, j) (((i) << 24) | ((j) + 1))
+#define ENTRY_FROM_FILE_OFFSET(i, j) (((i) << PTE_SWAP_FILE_BIT) | ((j) + 1))
/* FUNCTIONS *****************************************************************/
+#ifdef DBG
+
+VOID
+MiValidateSwapEntry(SWAPENTRY Entry)
+/*
+ * FUNCTION: Debug-build sanity check of a swap entry: the paging file
+ *           number must be in range and the offset must lie within that
+ *           file's allocation bitmap. A zero entry is ignored.
+ */
+{
+  ULONG i;
+  ULONG off;
+
+  if (Entry != 0)
+    {
+      DPRINT("MiValidateSwapEntry(SwapEntry 0x%.08x)\n", Entry);
+
+      /* Decode both fields up front: the first assert message below
+         previously read 'off' before it was assigned. */
+      i = FILE_FROM_ENTRY(Entry);
+      off = OFFSET_FROM_ENTRY(Entry);
+
+      assertmsg(i < MAX_PAGING_FILES,
+        ("Bad SwapEntry (0x%.08x). Wrong paging file number (%d, 0x%.08x)\n", Entry, i, off));
+
+      assertmsg(off / 32 <= PagingFileList[i]->AllocMapSize,
+        ("Bad SwapEntry (0x%.08x). Wrong paging file offset (%d, 0x%.08x)\n", Entry, i, off));
+  }
+}
+
+#endif
+
NTSTATUS MmWriteToSwapPage(SWAPENTRY SwapEntry, PMDL Mdl)
{
ULONG i, offset;
LARGE_INTEGER file_offset;
IO_STATUS_BLOCK Iosb;
NTSTATUS Status;
-
+
if (SwapEntry == 0)
{
KeBugCheck(0);
return(STATUS_UNSUCCESSFUL);
}
-
+
i = FILE_FROM_ENTRY(SwapEntry);
offset = OFFSET_FROM_ENTRY(SwapEntry);
file_offset.QuadPart = offset * 4096;
+ if (file_offset.QuadPart > PagingFileList[i]->MaximumSize.QuadPart)
+ {
+ DPRINT1("Bad swap file offset 0x%.08x\n", file_offset.u.LowPart);
+ KeBugCheck(0);
+ }
+
Status = IoPageWrite(PagingFileList[i]->FileObject,
Mdl,
&file_offset,
IO_STATUS_BLOCK Iosb;
NTSTATUS Status;
+ DPRINT("MmReadFromSwapPage(SwapEntry 0x%.08x)\n", SwapEntry);
+
+ VALIDATE_SWAP_ENTRY(SwapEntry);
+
if (SwapEntry == 0)
{
KeBugCheck(0);
}
file_offset.QuadPart = offset * 4096;
+
+ if (file_offset.QuadPart > PagingFileList[i]->MaximumSize.QuadPart)
+ {
+ DPRINT1("Bad swap file offset 0x%.08x\n", file_offset.u.LowPart);
+ KeBugCheck(0);
+ }
Status = IoPageRead(PagingFileList[i]->FileObject,
Mdl,
&file_offset,
&Iosb,
TRUE);
+ DPRINT("MmReadFromSwapPage() Status 0x%.8X\n", Status);
return(Status);
}
ULONG i;
ULONG off;
KIRQL oldIrql;
-
+
i = FILE_FROM_ENTRY(Entry);
+
+ assertmsg(i < MAX_PAGING_FILES,
+ ("Bad SwapEntry (0x%.08x). Wrong paging file number (%d, 0x%.08x)\n", Entry, i, off));
+
off = OFFSET_FROM_ENTRY(Entry);
+
+ assertmsg(off / 32 <= PagingFileList[i]->AllocMapSize,
+ ("Bad SwapEntry (0x%.08x). Wrong paging file offset (%d, 0x%.08x)\n", Entry, i, off));
KeAcquireSpinLock(&PagingFileListLock, &oldIrql);
if (PagingFileList[i] == NULL)
KeBugCheck(0);
}
KeAcquireSpinLockAtDpcLevel(&PagingFileList[i]->AllocMapLock);
-
+
PagingFileList[i]->AllocMap[off / 32] &= (~(1 << (off % 32)));
-
PagingFileList[i]->FreePages++;
PagingFileList[i]->UsedPages--;
SL_OPEN_PAGING_FILE);
if (!NT_SUCCESS(Status))
{
+ DPRINT1("Failed to open swap file (Status 0x%.08x)\n", Status);
return(Status);
}
NULL);
if (!NT_SUCCESS(Status))
{
+ DPRINT1("Failed to write to swap file (Status 0x%.08x)\n", Status);
NtClose(FileHandle);
return(Status);
}
NULL);
if (!NT_SUCCESS(Status))
{
+ DPRINT1("Failed to reference swap file (Status 0x%.08x)\n", Status);
NtClose(FileHandle);
return(Status);
}
MiFreeSwapPages = MiFreeSwapPages + PagingFile->FreePages;
MiPagingFileCount++;
KeReleaseSpinLock(&PagingFileListLock, oldIrql);
+
+ DPRINT("Successfully opened swap file\n");
return(STATUS_SUCCESS);
}
/* EOF */
-
-
-
-
-
-
-
-/* $Id: pageop.c,v 1.7 2002/02/18 18:41:23 hbirr Exp $
+/* $Id: pageop.c,v 1.8 2002/05/13 18:10:40 chorns Exp $
*
* COPYRIGHT: See COPYING in the top level directory
* PROJECT: ReactOS kernel
/* FUNCTIONS *****************************************************************/
+#ifdef DBG
+
+VOID
+MiValidatePageOp(IN PMM_PAGEOP PageOp)
+/*
+ * FUNCTION: Debug-build sanity check of a page operation descriptor:
+ *           the magic must match TAG_MM_PAGEOP and the operation type
+ *           must be within the legal range. A NULL descriptor is ignored.
+ */
+{
+  if (PageOp != NULL)
+    {
+      DPRINT("MiValidatePageOp(PageOp 0x%.08x)\n", PageOp);
+
+      /* Both messages have two conversion specifiers; the original code
+         supplied only one argument, so pass PageOp for the first '%x'. */
+      assertmsg(PageOp->Magic == TAG_MM_PAGEOP,
+        ("Bad PageOp (0x%.08x). Wrong magic (0x%.08x)\n", PageOp, PageOp->Magic));
+
+      assertmsg((PageOp->OpType >= MM_PAGEOP_MINIMUM) && (PageOp->OpType <= MM_PAGEOP_MAXIMUM),
+        ("Bad PageOp (0x%.08x). Wrong type (%d)\n", PageOp, PageOp->OpType));
+  }
+}
+
+#endif /* DBG */
+
VOID
-MmReleasePageOp(PMM_PAGEOP PageOp)
+MmReleasePageOp(IN PMM_PAGEOP PageOp)
/*
* FUNCTION: Release a reference to a page operation descriptor
*/
KIRQL oldIrql;
PMM_PAGEOP PrevPageOp;
+ assert(PageOp);
+ VALIDATE_PAGEOP(PageOp);
+
KeAcquireSpinLock(&MmPageOpHashTableLock, &oldIrql);
PageOp->ReferenceCount--;
if (PageOp->ReferenceCount > 0)
PrevPageOp = PrevPageOp->Next;
}
KeReleaseSpinLock(&MmPageOpHashTableLock, oldIrql);
+ CPRINT("PageOp (0x%.08x) not found\n", PageOp);
KeBugCheck(0);
}
+
PMM_PAGEOP
-MmGetPageOp(PMEMORY_AREA MArea, ULONG Pid, PVOID Address,
- PMM_SECTION_SEGMENT Segment, ULONG Offset, ULONG OpType)
- /*
- * FUNCTION: Get a page operation descriptor corresponding to
- * the memory area and either the segment, offset pair or the
- * pid, address pair.
- */
+MmGetPageOp(IN PMEMORY_AREA MArea,
+ IN ULONG Pid,
+ IN PVOID Address,
+ IN PMM_SECTION_SEGMENT Segment,
+ IN ULONG Offset,
+ IN ULONG OpType)
+/*
+ * FUNCTION: Get a page operation descriptor corresponding to
+ * the memory area and either the segment, offset pair or the
+ * pid, address pair. Create a new pageop if one does not exist.
+ * FIXME: Make Offset 64-bit
+ */
{
ULONG Hash;
KIRQL oldIrql;
PMM_PAGEOP PageOp;
/*
- * Calcuate the hash value for pageop structure
+ * Calculate the hash value for pageop structure
*/
if (MArea->Type == MEMORY_AREA_SECTION_VIEW_COMMIT)
{
}
else
{
+ assertmsg(((ULONG_PTR)Address % PAGESIZE) == 0,
+ ("Address must be page aligned (0x%.08x)\n", Address));
+
Hash = (((ULONG)Pid) | (((ULONG)Address) / PAGESIZE));
}
Hash = Hash % PAGEOP_HASH_TABLE_SIZE;
*/
if (PageOp != NULL)
{
+ VALIDATE_PAGEOP(PageOp);
+
PageOp->ReferenceCount++;
KeReleaseSpinLock(&MmPageOpHashTableLock, oldIrql);
return(PageOp);
return(NULL);
}
+ SET_MAGIC(PageOp, TAG_MM_PAGEOP);
+
if (MArea->Type != MEMORY_AREA_SECTION_VIEW_COMMIT)
{
PageOp->Pid = Pid;
KeInitializeEvent(&PageOp->CompletionEvent, NotificationEvent, FALSE);
MmPageOpHashTable[Hash] = PageOp;
+ VALIDATE_PAGEOP(PageOp);
+
KeReleaseSpinLock(&MmPageOpHashTableLock, oldIrql);
return(PageOp);
}
+PMM_PAGEOP
+MmGotPageOp(IN PMEMORY_AREA MArea,
+  IN ULONG Pid,
+  IN PVOID Address,
+  IN PMM_SECTION_SEGMENT Segment,
+  IN ULONG Offset)
+/*
+ * FUNCTION: Get a page operation descriptor corresponding to
+ * the memory area and either the segment, offset pair or the
+ * pid, address pair. Returns NULL if a pageop does not exist.
+ * On success the descriptor's reference count has been incremented
+ * under the hash table lock; the caller must balance it with
+ * MmReleasePageOp.
+ * FIXME: Make Offset 64-bit
+ */
+{
+  ULONG Hash;
+  KIRQL oldIrql;
+  PMM_PAGEOP PageOp;
+  /*
+   * Calculate the hash value for pageop structure.
+   * NOTE: this must mirror the hashing in MmGetPageOp so that both
+   * functions look up the same bucket for the same key.
+   */
+  if (MArea->Type == MEMORY_AREA_SECTION_VIEW_COMMIT)
+    {
+      Hash = (((ULONG)Segment) | (((ULONG)Offset) / PAGESIZE));
+    }
+  else
+    {
+      assertmsg(((ULONG_PTR)Address % PAGESIZE) == 0,
+        ("Address must be page aligned (0x%.08x)\n", Address));
+      Hash = (((ULONG)Pid) | (((ULONG)Address) / PAGESIZE));
+    }
+  Hash = Hash % PAGEOP_HASH_TABLE_SIZE;
+  KeAcquireSpinLock(&MmPageOpHashTableLock, &oldIrql);
+  /*
+   * Check for an existing pageop structure. Section views are keyed
+   * by (segment, offset); all other areas by (process id, address).
+   */
+  PageOp = MmPageOpHashTable[Hash];
+  while (PageOp != NULL)
+    {
+      if (MArea->Type == MEMORY_AREA_SECTION_VIEW_COMMIT)
+        {
+          if (PageOp->Segment == Segment &&
+              PageOp->Offset == Offset)
+            {
+              break;
+            }
+        }
+      else
+        {
+          if (PageOp->Pid == Pid &&
+              PageOp->Address == Address)
+            {
+              break;
+            }
+        }
+      PageOp = PageOp->Next;
+    }
+
+  /*
+   * If we found an existing pageop then increment the reference count
+   * and return it.
+   */
+  if (PageOp != NULL)
+    {
+      VALIDATE_PAGEOP(PageOp);
+      PageOp->ReferenceCount++;
+      KeReleaseSpinLock(&MmPageOpHashTableLock, oldIrql);
+      return(PageOp);
+    }
+  KeReleaseSpinLock(&MmPageOpHashTableLock, oldIrql);
+
+  /*
+   * Otherwise return NULL. Unlike MmGetPageOp, no descriptor is created.
+   */
+  return(NULL);
+}
-/* $Id: ppool.c,v 1.7 2002/02/14 00:07:23 hbirr Exp $
+/* $Id: ppool.c,v 1.8 2002/05/13 18:10:40 chorns Exp $
*
* COPYRIGHT: See COPYING in the top level directory
* PROJECT: ReactOS kernel
PMM_PPOOL_FREE_BLOCK_HEADER BestPreviousBlock;
PVOID BlockAddress;
+ assert_irql(APC_LEVEL);
+
/*
* Don't bother allocating anything for a zero-byte block.
*/
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-/* $Id: rmap.c,v 1.3 2002/01/08 00:49:00 dwelch Exp $
+/* $Id: rmap.c,v 1.4 2002/05/13 18:10:40 chorns Exp $
*
* COPYRIGHT: See COPYING in the top directory
* PROJECT: ReactOS kernel
* FILE: ntoskrnl/mm/rmap.c
* PURPOSE: kernel memory managment functions
* PROGRAMMER: David Welch (welch@cwcom.net)
+ * Casper S. Hornstrup (chorns@users.sourceforge.net)
* UPDATE HISTORY:
* Created 27/12/01
*/
struct _MM_RMAP_ENTRY* Next;
PEPROCESS Process;
PVOID Address;
+ ULONG ReferenceCount;
} MM_RMAP_ENTRY, *PMM_RMAP_ENTRY;
/* GLOBALS ******************************************************************/
-static FAST_MUTEX RmapListLock;
+static FAST_MUTEX MiRmapListLock;
/* FUNCTIONS ****************************************************************/
+#ifdef DBG
+
+VOID
+MiValidateRmapList(struct _MM_RMAP_ENTRY* RmapList)
+/*
+ * FUNCTION: Debug-build check of a physical page's rmap list: every
+ *           entry must hold a live reference and no process may appear
+ *           more than once in the list. An empty list is valid.
+ */
+{
+  PMM_RMAP_ENTRY CurrentEntry = RmapList;
+
+  while (CurrentEntry != NULL)
+    {
+      PMM_RMAP_ENTRY Entry = RmapList;
+      ULONG Count = 0;
+
+      /* ReferenceCount is unsigned, so the old '>= 0' test was always
+         true, and it inspected the list head instead of the entry being
+         walked. Check the current entry for a live reference. */
+      assertmsg(CurrentEntry->ReferenceCount > 0,
+        ("Bad reference count %d for rmap entry\n",
+        CurrentEntry->ReferenceCount));
+
+      /* Count how many entries name this entry's process; exactly one
+         (this entry itself) is expected. */
+      while (Entry != NULL)
+        {
+          if (Entry->Process == CurrentEntry->Process)
+            {
+              Count++;
+            }
+          Entry = Entry->Next;
+        }
+
+      assertmsg(Count == 1, ("%d rmap entries for process 0x%.08x\n",
+        Count, CurrentEntry->Process));
+
+      CurrentEntry = CurrentEntry->Next;
+    }
+}
+
+#endif /* DBG */
+
+
VOID
MmInitializeRmapList(VOID)
{
- ExInitializeFastMutex(&RmapListLock);
+ ExInitializeFastMutex(&MiRmapListLock);
}
+
NTSTATUS
-MmPageOutPhysicalAddress(PVOID PhysicalAddress)
+MmPageOutPhysicalAddress(ULONG_PTR PhysicalAddress)
{
PMM_RMAP_ENTRY entry;
PMEMORY_AREA MemoryArea;
LARGE_INTEGER Offset;
NTSTATUS Status;
- ExAcquireFastMutex(&RmapListLock);
+ ExAcquireFastMutex(&MiRmapListLock);
entry = MmGetRmapListHeadPage(PhysicalAddress);
if (entry == NULL)
{
- ExReleaseFastMutex(&RmapListLock);
+ ExReleaseFastMutex(&MiRmapListLock);
return(STATUS_UNSUCCESSFUL);
}
Process = entry->Process;
/*
* Get or create a pageop
*/
- PageOp = MmGetPageOp(MemoryArea, 0, 0,
- MemoryArea->Data.SectionData.Segment,
+ PageOp = MmGetPageOp(MemoryArea, 0, 0,
+ MemoryArea->Data.SectionData.Segment,
Offset.u.LowPart, MM_PAGEOP_PAGEOUT);
if (PageOp == NULL)
{
{
MmReleasePageOp(PageOp);
MmUnlockAddressSpace(&Process->AddressSpace);
- ExReleaseFastMutex(&RmapListLock);
+ ExReleaseFastMutex(&MiRmapListLock);
return(STATUS_UNSUCCESSFUL);
}
-
+
/*
* Release locks now we have a page op.
*/
MmUnlockAddressSpace(&Process->AddressSpace);
- ExReleaseFastMutex(&RmapListLock);
+ ExReleaseFastMutex(&MiRmapListLock);
/*
* Do the actual page out work.
*/
- Status = MmPageOutSectionView(&Process->AddressSpace, MemoryArea,
+ Status = MmPageOutSectionView(&Process->AddressSpace, MemoryArea,
Address, PageOp);
}
else if (Type == MEMORY_AREA_VIRTUAL_MEMORY)
{
MmReleasePageOp(PageOp);
MmUnlockAddressSpace(&Process->AddressSpace);
- ExReleaseFastMutex(&RmapListLock);
+ ExReleaseFastMutex(&MiRmapListLock);
return(STATUS_UNSUCCESSFUL);
}
* Release locks now we have a page op.
*/
MmUnlockAddressSpace(&Process->AddressSpace);
- ExReleaseFastMutex(&RmapListLock);
+ ExReleaseFastMutex(&MiRmapListLock);
/*
* Do the actual page out work.
*/
- Status = MmPageOutVirtualMemory(&Process->AddressSpace, MemoryArea,
+ Status = MmPageOutVirtualMemory(&Process->AddressSpace, MemoryArea,
Address, PageOp);
}
else
return(Status);
}
+
VOID
-MmInsertRmap(PVOID PhysicalAddress, PEPROCESS Process, PVOID Address)
+MmReferenceRmap(IN PMM_RMAP_ENTRY RmapEntry)
{
- PMM_RMAP_ENTRY current_entry;
- PMM_RMAP_ENTRY new_entry;
+ InterlockedIncrement(&RmapEntry->ReferenceCount);
+}
- Address = (PVOID)PAGE_ROUND_DOWN(Address);
- new_entry = ExAllocatePool(NonPagedPool, sizeof(MM_RMAP_ENTRY));
- if (new_entry == NULL)
+VOID
+MmDereferenceRmap(IN PMM_RMAP_ENTRY RmapEntry)
+/*
+ * FUNCTION: Drop one reference from an rmap entry. The final reference
+ * is apparently released by MmDeleteRmap, so the count must remain
+ * positive here -- TODO confirm against the callers.
+ * NOTE(review): ReferenceCount is declared ULONG while
+ * InterlockedDecrement operates on a signed LONG; the decrement and the
+ * assert's re-read are also not a single atomic step -- verify intended.
+ */
+{
+  InterlockedDecrement(&RmapEntry->ReferenceCount);
+
+  assertmsg(RmapEntry->ReferenceCount > 0, ("Bad reference count (%d) for "
+    "Process (0x%.08x) Addresss (0x%.08x)\n",
+    RmapEntry->ReferenceCount, RmapEntry->Process, RmapEntry->Address));
+}
+
+
+VOID
+MiDisableAllRmaps(IN ULONG_PTR PhysicalAddress,
+ IN PBOOLEAN Modified)
+{
+ PMM_RMAP_ENTRY CurrentEntry;
+ PMM_RMAP_ENTRY NextEntry;
+ BOOLEAN WasDirty;
+
+ ExAcquireFastMutex(&MiRmapListLock);
+
+ CurrentEntry = MmGetRmapListHeadPage(PhysicalAddress);
+
+ assertmsg(CurrentEntry != NULL, ("MiDisableAllRmaps: No rmaps.\n"))
+
+ *Modified = FALSE;
+ while (CurrentEntry != NULL)
{
- KeBugCheck(0);
+ NextEntry = CurrentEntry->Next;
+
+ MmLockAddressSpace(&CurrentEntry->Process->AddressSpace);
+
+ MmDisableVirtualMapping(CurrentEntry->Process,
+ CurrentEntry->Address,
+ &WasDirty,
+ NULL);
+
+ if (WasDirty)
+ {
+ *Modified = TRUE;
+ }
+
+ MmUnlockAddressSpace(&CurrentEntry->Process->AddressSpace);
+
+ CurrentEntry = NextEntry;
+ }
+
+ ExReleaseFastMutex(&MiRmapListLock);
+}
+
+
+VOID
+MiEnableAllRmaps(IN ULONG_PTR PhysicalAddress,
+  IN BOOLEAN Modified)
+/*
+ * FUNCTION: Re-enable the virtual mapping of the given physical page in
+ *           every process that maps it, optionally marking each mapping
+ *           dirty. The rmap list lock is held across the whole walk.
+ */
+{
+  PMM_RMAP_ENTRY Entry;
+  PMM_RMAP_ENTRY Successor;
+
+  ExAcquireFastMutex(&MiRmapListLock);
+
+  Entry = MmGetRmapListHeadPage(PhysicalAddress);
+
+  assertmsg(Entry != NULL, ("MiEnableAllRmaps: No rmaps.\n"))
+
+  for (; Entry != NULL; Entry = Successor)
+    {
+      /* Remember the link before touching the entry. */
+      Successor = Entry->Next;
+
+      MmLockAddressSpace(&Entry->Process->AddressSpace);
+
+      MmEnableVirtualMapping(Entry->Process, Entry->Address);
+
+      if (Modified)
+        {
+          MmSetDirtyPage(Entry->Process, Entry->Address);
+        }
+
+      MmUnlockAddressSpace(&Entry->Process->AddressSpace);
+    }
+
+  ExReleaseFastMutex(&MiRmapListLock);
+}
+
+
+VOID
+MiGetDirtyAllRmaps(IN ULONG_PTR PhysicalAddress,
+  OUT PBOOLEAN Dirty)
+/*
+ * FUNCTION: Report whether ANY mapping of the given physical page is
+ *           dirty. *Dirty is set TRUE if at least one process's mapping
+ *           has the dirty bit set, FALSE otherwise. All entries are
+ *           visited even after a dirty mapping is found.
+ */
+{
+  PMM_RMAP_ENTRY CurrentEntry;
+  PMM_RMAP_ENTRY NextEntry;
+  BOOLEAN WasDirty;
+
+  ExAcquireFastMutex(&MiRmapListLock);
+
+  CurrentEntry = MmGetRmapListHeadPage(PhysicalAddress);
+
+  assertmsg(CurrentEntry != NULL, ("MiGetDirtyAllRmaps: No rmaps.\n"))
+
+  *Dirty = FALSE;
+  while (CurrentEntry != NULL)
+    {
+      /* Capture the link before dropping into the per-process work. */
+      NextEntry = CurrentEntry->Next;
+
+      MmLockAddressSpace(&CurrentEntry->Process->AddressSpace);
+
+      WasDirty = MmIsPageDirty(CurrentEntry->Process,
+        CurrentEntry->Address);
+
+      MmUnlockAddressSpace(&CurrentEntry->Process->AddressSpace);
+
+      if (WasDirty)
+        {
+          *Dirty = TRUE;
+        }
+
+      CurrentEntry = NextEntry;
+    }
+
+  ExReleaseFastMutex(&MiRmapListLock);
+}
+
+
+VOID
+MiSetDirtyAllRmaps(IN ULONG_PTR PhysicalAddress,
+  IN BOOLEAN Dirty)
+/*
+ * FUNCTION: Force the dirty bit of every mapping of the given physical
+ *           page to the requested state: set for each process when Dirty
+ *           is TRUE, cleared when it is FALSE.
+ */
+{
+  PMM_RMAP_ENTRY Entry;
+  PMM_RMAP_ENTRY Successor;
+
+  ExAcquireFastMutex(&MiRmapListLock);
+
+  Entry = MmGetRmapListHeadPage(PhysicalAddress);
+
+  assertmsg(Entry != NULL, ("MiSetDirtyAllRmaps: No rmaps.\n"))
+
+  for (; Entry != NULL; Entry = Successor)
+    {
+      /* Remember the link before touching the entry. */
+      Successor = Entry->Next;
+
+      MmLockAddressSpace(&Entry->Process->AddressSpace);
+
+      if (Dirty)
+        {
+          MmSetDirtyPage(Entry->Process, Entry->Address);
+        }
+      else
+        {
+          MmSetCleanPage(Entry->Process, Entry->Address);
+        }
+
+      MmUnlockAddressSpace(&Entry->Process->AddressSpace);
+    }
+
+  ExReleaseFastMutex(&MiRmapListLock);
+}
+
+
+VOID
+MiGetPageStateAllRmaps(IN ULONG_PTR PhysicalAddress,
+  IN ULONG PageState,
+  OUT PBOOLEAN Result)
+/*
+ * FUNCTION: Report whether ANY mapping of the given physical page has
+ *           the queried PageState bits set (as decided by MiPageState).
+ *           *Result is TRUE if at least one process's mapping matches,
+ *           FALSE otherwise. All entries are visited even after a match.
+ */
+{
+  PMM_RMAP_ENTRY CurrentEntry;
+  PMM_RMAP_ENTRY NextEntry;
+  ULONG State;
+
+  ExAcquireFastMutex(&MiRmapListLock);
+
+  CurrentEntry = MmGetRmapListHeadPage(PhysicalAddress);
+
+  assertmsg(CurrentEntry != NULL, ("MiGetPageStateAllRmaps: No rmaps.\n"))
+
+  *Result = FALSE;
+  while (CurrentEntry != NULL)
+    {
+      /* Capture the link before dropping into the per-process work. */
+      NextEntry = CurrentEntry->Next;
+
+      MmLockAddressSpace(&CurrentEntry->Process->AddressSpace);
+
+      State = MiPageState(CurrentEntry->Process,
+        CurrentEntry->Address,
+        PageState);
+
+      MmUnlockAddressSpace(&CurrentEntry->Process->AddressSpace);
+
+      if (State)
+        {
+          *Result = TRUE;
+        }
+
+      CurrentEntry = NextEntry;
+    }
+
+  ExReleaseFastMutex(&MiRmapListLock);
+}
+
+
+VOID
+MiClearPageStateAllRmaps(IN ULONG_PTR PhysicalAddress,
+ IN ULONG PageState)
+{
+ PMM_RMAP_ENTRY CurrentEntry;
+ PMM_RMAP_ENTRY NextEntry;
+
+ ExAcquireFastMutex(&MiRmapListLock);
+
+ CurrentEntry = MmGetRmapListHeadPage(PhysicalAddress);
+
+ assertmsg(CurrentEntry != NULL, ("MiClearPageStateAllRmaps: No rmaps.\n"))
+
+ while (CurrentEntry != NULL)
+ {
+ NextEntry = CurrentEntry->Next;
+
+ MmLockAddressSpace(&CurrentEntry->Process->AddressSpace);
+
+ MiClearPageState(CurrentEntry->Process,
+ CurrentEntry->Address,
+ PageState);
+
+ MmUnlockAddressSpace(&CurrentEntry->Process->AddressSpace);
+
+ CurrentEntry = NextEntry;
}
- new_entry->Address = Address;
- new_entry->Process = Process;
- if (MmGetPhysicalAddressForProcess(Process, Address) !=
- (ULONG)PhysicalAddress)
+ ExReleaseFastMutex(&MiRmapListLock);
+}
+
+
+VOID
+MiSetPageStateAllRmaps(IN ULONG_PTR PhysicalAddress,
+ IN ULONG PageState)
+{
+ PMM_RMAP_ENTRY CurrentEntry;
+ PMM_RMAP_ENTRY NextEntry;
+
+ ExAcquireFastMutex(&MiRmapListLock);
+
+ CurrentEntry = MmGetRmapListHeadPage(PhysicalAddress);
+
+ assertmsg(CurrentEntry != NULL, ("MiSetPageStateAllRmaps: No rmaps.\n"))
+
+ while (CurrentEntry != NULL)
{
- DPRINT1("Insert rmap (%d, 0x%.8X) 0x%.8X which doesn't match physical "
- "address 0x%.8X\n", Process->UniqueProcessId, Address,
- MmGetPhysicalAddressForProcess(Process, Address),
+ NextEntry = CurrentEntry->Next;
+
+ MmLockAddressSpace(&CurrentEntry->Process->AddressSpace);
+
+ MiSetPageState(CurrentEntry->Process,
+ CurrentEntry->Address,
+ PageState);
+
+ MmUnlockAddressSpace(&CurrentEntry->Process->AddressSpace);
+
+ CurrentEntry = NextEntry;
+ }
+
+ ExReleaseFastMutex(&MiRmapListLock);
+}
+
+
+VOID
+MmInsertRmap(ULONG_PTR PhysicalAddress, PEPROCESS Process, PVOID Address)
+{
+ PMM_RMAP_ENTRY Current;
+ PMM_RMAP_ENTRY New;
+
+ Address = (PVOID)PAGE_ROUND_DOWN(Address);
+
+ New = ExAllocatePool(NonPagedPool, sizeof(MM_RMAP_ENTRY));
+ assert(New);
+ New->ReferenceCount = 1;
+ New->Address = Address;
+ New->Process = Process;
+
+ if (MmGetPhysicalAddressForProcess(Process, Address) !=
+ (ULONG_PTR)PhysicalAddress)
+ {
+ DPRINT("Insert rmap (%d, 0x%.8X) 0x%.8X which doesn't match physical "
+ "address 0x%.8X\n", Process->UniqueProcessId, Address,
+ MmGetPhysicalAddressForProcess(Process, Address),
PhysicalAddress)
KeBugCheck(0);
}
- ExAcquireFastMutex(&RmapListLock);
- current_entry = MmGetRmapListHeadPage(PhysicalAddress);
- new_entry->Next = current_entry;
- MmSetRmapListHeadPage(PhysicalAddress, new_entry);
- ExReleaseFastMutex(&RmapListLock);
+ ExAcquireFastMutex(&MiRmapListLock);
+ Current = MmGetRmapListHeadPage(PhysicalAddress);
+ New->Next = Current;
+ MmSetRmapListHeadPage(PhysicalAddress, New);
+ MmSetRmapCallback(PhysicalAddress, NULL, NULL);
+ ExReleaseFastMutex(&MiRmapListLock);
}
VOID
-MmDeleteAllRmaps(PVOID PhysicalAddress, PVOID Context,
- VOID (*DeleteMapping)(PVOID Context, PEPROCESS Process,
+MmDeleteAllRmaps(ULONG_PTR PhysicalAddress, PVOID Context,
+ VOID (*DeleteMapping)(PVOID Context, PEPROCESS Process,
PVOID Address))
{
PMM_RMAP_ENTRY current_entry;
PMM_RMAP_ENTRY previous_entry;
- ExAcquireFastMutex(&RmapListLock);
+ ExAcquireFastMutex(&MiRmapListLock);
current_entry = MmGetRmapListHeadPage(PhysicalAddress);
if (current_entry == NULL)
{
current_entry = current_entry->Next;
if (DeleteMapping)
{
- DeleteMapping(Context, previous_entry->Process,
+ DeleteMapping(Context, previous_entry->Process,
previous_entry->Address);
}
ExFreePool(previous_entry);
}
MmSetRmapListHeadPage(PhysicalAddress, NULL);
- ExReleaseFastMutex(&RmapListLock);
+ ExReleaseFastMutex(&MiRmapListLock);
}
+
VOID
-MmDeleteRmap(PVOID PhysicalAddress, PEPROCESS Process, PVOID Address)
+MmDeleteRmap(IN ULONG_PTR PhysicalAddress,
+ IN PEPROCESS Process,
+ IN PVOID Address)
{
- PMM_RMAP_ENTRY current_entry, previous_entry;
+ PMM_RMAP_ENTRY Current;
+ PMM_RMAP_ENTRY Previous;
- ExAcquireFastMutex(&RmapListLock);
- previous_entry = NULL;
- current_entry = MmGetRmapListHeadPage(PhysicalAddress);
- while (current_entry != NULL)
+ ExAcquireFastMutex(&MiRmapListLock);
+ Previous = NULL;
+ Current = MmGetRmapListHeadPage(PhysicalAddress);
+ while (Current != NULL)
{
- if (current_entry->Process == Process &&
- current_entry->Address == Address)
- {
- if (previous_entry == NULL)
- {
- MmSetRmapListHeadPage(PhysicalAddress, current_entry->Next);
- ExReleaseFastMutex(&RmapListLock);
- ExFreePool(current_entry);
- }
- else
- {
- previous_entry->Next = current_entry->Next;
- ExReleaseFastMutex(&RmapListLock);
- ExFreePool(current_entry);
- }
- return;
- }
- previous_entry = current_entry;
- current_entry = current_entry->Next;
+ if (Current->Process == Process && Current->Address == Address)
+ {
+ Current->ReferenceCount--;
+ assertmsg(Current->ReferenceCount == 0, ("Rmap has outstanding references (%d) for Page (0x%.08x) "
+ "Process (0x%.08x) Addresss (0x%.08x)\n",
+ Current->ReferenceCount, PhysicalAddress, Process, Address));
+
+ if (Previous == NULL)
+ {
+ MmSetRmapListHeadPage(PhysicalAddress, Current->Next);
+ ExReleaseFastMutex(&MiRmapListLock);
+ ExFreePool(Current);
+ }
+ else
+ {
+ Previous->Next = Current->Next;
+ ExReleaseFastMutex(&MiRmapListLock);
+ ExFreePool(Current);
+ }
+ return;
+ }
+ Previous = Current;
+ Current = Current->Next;
}
- KeBugCheck(0);
+ assertmsg(FALSE, ("No rmap entry for Page (0x%.08x) Process (0x%.08x) Addresss (0x%.08x)\n",
+ PhysicalAddress, Process, Address));
}
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-/* $Id: section.c,v 1.80 2002/05/07 22:53:05 hbirr Exp $
+/* $Id: section.c,v 1.81 2002/05/13 18:10:40 chorns Exp $
*
* PROJECT: ReactOS kernel
* FILE: ntoskrnl/mm/section.c
NTSTATUS
MiReadPage(PMEMORY_AREA MemoryArea,
PLARGE_INTEGER Offset,
- PVOID* Page)
+ PULONG_PTR Page)
/*
* FUNCTION: Read a page for a section backed memory area.
* PARAMETERS:
*/
Addr = MmGetPhysicalAddress(BaseAddress +
Offset->QuadPart - BaseOffset);
- (*Page) = (PVOID)(ULONG)Addr.QuadPart;
+ (*Page) = (ULONG_PTR)Addr.QuadPart;
MmReferencePage((*Page));
CcRosReleaseCacheSegment(Fcb->Bcb, CacheSeg, TRUE, FALSE, TRUE);
BOOLEAN Locked)
{
LARGE_INTEGER Offset;
- PVOID Page;
+ ULONG_PTR Page;
NTSTATUS Status;
ULONG PAddress;
PSECTION_OBJECT Section;
{
if (Locked)
{
- MmLockPage((PVOID)MmGetPhysicalAddressForProcess(NULL, Address));
+ MmLockPage(MmGetPhysicalAddressForProcess(NULL, Address));
}
return(STATUS_SUCCESS);
}
Section = MemoryArea->Data.SectionData.Section;
MmLockSection(Section);
MmLockSectionSegment(Segment);
-
+
/*
* Get or create a page operation descriptor
*/
return(STATUS_MM_RESTART_OPERATION);
}
- Page = (PVOID)(PAGE_FROM_SSE(Entry));
+ Page = (ULONG_PTR)(PAGE_FROM_SSE(Entry));
MmReferencePage(Page);
MmSharePageEntrySectionSegment(Segment, Offset.u.LowPart);
}
if (Locked)
{
- MmLockPage((PVOID)MmGetPhysicalAddressForProcess(NULL, Address));
+ MmLockPage(MmGetPhysicalAddressForProcess(NULL, Address));
}
MmUnlockSectionSegment(Segment);
MmUnlockSection(Section);
*/
if (Locked)
{
- MmLockPage((PVOID)MmGetPhysicalAddressForProcess(NULL, Address));
+ MmLockPage(MmGetPhysicalAddressForProcess(NULL, Address));
}
PageOp->Status = STATUS_SUCCESS;
KeSetEvent(&PageOp->CompletionEvent, IO_NO_INCREMENT, FALSE);
*/
if (Locked)
{
- MmLockPage((PVOID)MmGetPhysicalAddressForProcess(NULL, Address));
+ MmLockPage(MmGetPhysicalAddressForProcess(NULL, Address));
}
/*
(PVOID)PAGE_ROUND_DOWN(Address));
if (Locked)
{
- MmLockPage((PVOID)MmGetPhysicalAddressForProcess(NULL, Address));
+ MmLockPage(MmGetPhysicalAddressForProcess(NULL, Address));
}
/*
}
if (Locked)
{
- MmLockPage((PVOID)MmGetPhysicalAddressForProcess(NULL, Address));
+ MmLockPage(MmGetPhysicalAddressForProcess(NULL, Address));
}
PageOp->Status = STATUS_SUCCESS;
KeSetEvent(&PageOp->CompletionEvent, IO_NO_INCREMENT, FALSE);
}
if (Locked)
{
- MmLockPage((PVOID)MmGetPhysicalAddressForProcess(NULL, Address));
+ MmLockPage(MmGetPhysicalAddressForProcess(NULL, Address));
}
PageOp->Status = STATUS_SUCCESS;
KeSetEvent(&PageOp->CompletionEvent, IO_NO_INCREMENT, FALSE);
* If the section offset is already in-memory and valid then just
* take another reference to the page
*/
-
- Page = (PVOID)PAGE_FROM_SSE(Entry);
+
+ Page = (ULONG_PTR)PAGE_FROM_SSE(Entry);
MmReferencePage(Page);
MmSharePageEntrySectionSegment(Segment, Offset.QuadPart);
Status = MmCreateVirtualMapping(PsGetCurrentProcess(),
Address,
Attributes,
- (ULONG)Page,
+ Page,
FALSE);
- MmInsertRmap(Page, PsGetCurrentProcess(),
+ MmInsertRmap(Page, PsGetCurrentProcess(),
(PVOID)PAGE_ROUND_DOWN(Address));
if (!NT_SUCCESS(Status))
{
}
if (Locked)
{
- MmLockPage((PVOID)MmGetPhysicalAddressForProcess(NULL, Address));
+ MmLockPage(MmGetPhysicalAddressForProcess(NULL, Address));
}
PageOp->Status = STATUS_SUCCESS;
KeSetEvent(&PageOp->CompletionEvent, IO_NO_INCREMENT, FALSE);
}
}
-NTSTATUS
+NTSTATUS
MmAccessFaultSectionView(PMADDRESS_SPACE AddressSpace,
- MEMORY_AREA* MemoryArea,
+ MEMORY_AREA* MemoryArea,
PVOID Address,
BOOLEAN Locked)
{
PMM_SECTION_SEGMENT Segment;
PSECTION_OBJECT Section;
- ULONG OldPage;
- PVOID NewPage;
+ ULONG_PTR OldPage;
+ ULONG_PTR NewPage;
PVOID NewAddress;
NTSTATUS Status;
ULONG PAddress;
Address,
MemoryArea->Attributes,
(ULONG)NewPage,
- FALSE);
- MmInsertRmap(NewPage, PsGetCurrentProcess(),
+ FALSE);
+ MmInsertRmap(NewPage, PsGetCurrentProcess(),
(PVOID)PAGE_ROUND_DOWN(Address));
if (!NT_SUCCESS(Status))
{
}
if (Locked)
{
- MmLockPage((PVOID)MmGetPhysicalAddressForProcess(NULL, Address));
+ MmLockPage(MmGetPhysicalAddressForProcess(NULL, Address));
}
/*
* Unshare the old page.
*/
MmUnsharePageEntrySectionSegment(Section, Segment, Offset.QuadPart, FALSE);
- MmDeleteRmap((PVOID)OldPage, PsGetCurrentProcess(),
+ MmDeleteRmap(OldPage, PsGetCurrentProcess(),
(PVOID)PAGE_ROUND_DOWN(Address));
- MmDereferencePage((PVOID)OldPage);
+ MmDereferencePage(OldPage);
PageOp->Status = STATUS_SUCCESS;
KeSetEvent(&PageOp->CompletionEvent, IO_NO_INCREMENT, FALSE);
MmPageOutDeleteMapping(PVOID Context, PEPROCESS Process, PVOID Address)
{
MM_SECTION_PAGEOUT_CONTEXT* PageOutContext;
- BOOL WasDirty;
- PVOID PhysicalAddress;
+ BOOLEAN WasDirty;
+ ULONG_PTR PhysicalAddress;
PageOutContext = (MM_SECTION_PAGEOUT_CONTEXT*)Context;
MmDeleteVirtualMapping(Process,
Address,
FALSE,
&WasDirty,
- (PULONG)&PhysicalAddress);
+ (PULONG_PTR)&PhysicalAddress);
if (WasDirty)
{
PageOutContext->WasDirty = TRUE;
LARGE_INTEGER Offset;
PSECTION_OBJECT Section;
PMM_SECTION_SEGMENT Segment;
- PVOID PhysicalAddress;
+ ULONG_PTR PhysicalAddress;
MM_SECTION_PAGEOUT_CONTEXT Context;
SWAPENTRY SwapEntry;
PMDL Mdl;
AddressSpace->Process->UniqueProcessId, Address);
KeBugCheck(0);
}
- PhysicalAddress =
- (PVOID)MmGetPhysicalAddressForProcess(AddressSpace->Process,
+ PhysicalAddress =
+ MmGetPhysicalAddressForProcess(AddressSpace->Process,
Address);
SwapEntry = MmGetSavedSwapEntryPage(PhysicalAddress);
Context.WasDirty = FALSE;
if (Segment->Characteristics & IMAGE_SECTION_CHAR_BSS ||
IS_SWAP_FROM_SSE(Entry) ||
- (PVOID)(PAGE_FROM_SSE(Entry)) != PhysicalAddress)
+ (ULONG_PTR)(PAGE_FROM_SSE(Entry)) != PhysicalAddress)
{
Context.Private = Private = TRUE;
}
}
}
}
-
+
PageOp->Status = STATUS_SUCCESS;
KeSetEvent(&PageOp->CompletionEvent, IO_NO_INCREMENT, FALSE);
MmReleasePageOp(PageOp);
if (DirectMapped && !Private)
{
assert(SwapEntry == 0);
- MmDereferencePage((PVOID)PhysicalAddress);
+ MmDereferencePage(PhysicalAddress);
PageOp->Status = STATUS_SUCCESS;
KeSetEvent(&PageOp->CompletionEvent, IO_NO_INCREMENT, FALSE);
MmReleasePageOp(PageOp);
/*
* If necessary, allocate an entry in the paging file for this page
*/
- SwapEntry = MmGetSavedSwapEntryPage((PVOID)PhysicalAddress);
+ SwapEntry = MmGetSavedSwapEntryPage(PhysicalAddress);
if (SwapEntry == 0)
{
SwapEntry = MmAllocSwapPage();
(ULONG)PhysicalAddress,
FALSE);
MmSetDirtyPage(MemoryArea->Process, Address);
- MmInsertRmap(PhysicalAddress,
+ MmInsertRmap(PhysicalAddress,
MemoryArea->Process,
Address);
MmSetPageEntrySectionSegment(Segment, Offset.QuadPart,
InheritDisposition,
AllocationType,
Protect);
-
+
ObDereferenceObject(Section);
ObDereferenceObject(Process);
-
+
return(Status);
}
-VOID STATIC
-MmFreeSectionPage(PVOID Context, MEMORY_AREA* MemoryArea, PVOID Address, ULONG PhysAddr,
- SWAPENTRY SwapEntry, BOOLEAN Dirty)
+VOID
+MmFreeSectionPage (IN BOOLEAN Before,
+ IN PVOID Context,
+ IN PMEMORY_AREA MemoryArea,
+ IN PVOID Address,
+ IN ULONG_PTR PhysAddr,
+ IN SWAPENTRY SwapEntry,
+ IN BOOLEAN Dirty)
{
PMEMORY_AREA MArea;
ULONG Entry;
+ if (Before)
+ return;
+
MArea = (PMEMORY_AREA)Context;
if (SwapEntry != 0)
else if (PhysAddr != 0)
{
ULONG Offset;
-
- Offset =
- ((ULONG)PAGE_ROUND_DOWN(Address) - (ULONG)MArea->BaseAddress) +
+
+ Offset =
+ ((ULONG)PAGE_ROUND_DOWN(Address) - (ULONG)MArea->BaseAddress) +
MArea->Data.SectionData.ViewOffset;
Entry = MmGetPageEntrySectionSegment(MArea->Data.SectionData.Segment,
if (IS_SWAP_FROM_SSE(Entry))
{
KeBugCheck(0);
- }
+ }
else if (PhysAddr != (PAGE_FROM_SSE(Entry)))
{
/*
* Just dereference private pages
*/
- MmDeleteRmap((PVOID)PhysAddr, MArea->Process, Address);
- MmDereferencePage((PVOID)PhysAddr);
+ MmDeleteRmap(PhysAddr, MArea->Process, Address);
+ MmDereferencePage(PhysAddr);
}
else
{
MArea->Data.SectionData.Segment,
Offset,
Dirty);
- MmDeleteRmap((PVOID)PhysAddr, MArea->Process, Address);
- MmDereferencePage((PVOID)PhysAddr);
+ MmDeleteRmap(PhysAddr, MArea->Process, Address);
+ MmDereferencePage(PhysAddr);
}
}
}
KIRQL oldIrql;
AddressSpace = &Process->AddressSpace;
-
+
DPRINT("Opening memory area Process %x BaseAddress %x\n",
Process, BaseAddress);
MmLockAddressSpace(AddressSpace);
MmUnlockAddressSpace(AddressSpace);
return(STATUS_UNSUCCESSFUL);
}
-
+
MmLockSection(MemoryArea->Data.SectionData.Section);
MmLockSectionSegment(MemoryArea->Data.SectionData.Segment);
Section = MemoryArea->Data.SectionData.Section;
DPRINT("Result %p\n",Result);
for (i = 0; (i <= (Length / PAGESIZE)); i++)
{
- PVOID Page;
+ ULONG_PTR Page;
Status = MmRequestPageMemoryConsumer(MC_NPPOOL, TRUE, &Page);
if (!NT_SUCCESS(Status))
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-/* $Id: slab.c,v 1.2 2002/01/03 22:52:29 dwelch Exp $
+/* $Id: slab.c,v 1.3 2002/05/13 18:10:41 chorns Exp $
*
* COPYRIGHT: See COPYING in the top directory
* PROJECT: ReactOS kernel
ExGrowSlabCache(PSLAB_CACHE Slab)
{
PSLAB_CACHE_PAGE SlabPage;
- PVOID PhysicalPage;
+ ULONG_PTR PhysicalPage;
PVOID Page;
NTSTATUS Status;
ULONG i;
PSLAB_CACHE_BUFCTL BufCtl;
PVOID Object;
-
+
Status = MmRequestPageMemoryConsumer(MC_NPPOOL, TRUE, &PhysicalPage);
if (!NT_SUCCESS(Status))
{
return(NULL);
}
- Page = ExAllocatePageWithPhysPage((ULONG)PhysicalPage);
+ Page = ExAllocatePageWithPhysPage(PhysicalPage);
if (Page == NULL)
{
MmReleasePageMemoryConsumer(MC_NPPOOL, PhysicalPage);
while (current_entry != &Slab->PageListHead)
{
PVOID Base;
- PVOID PhysicalPage;
+ ULONG_PTR PhysicalPage;
current = CONTAINING_RECORD(current_entry,
SLAB_CACHE_PAGE,
PageListEntry);
- Base = (PVOID)current + sizeof(SLAB_CACHE_PAGE) - PAGESIZE;
+ Base = (PVOID)(current + sizeof(SLAB_CACHE_PAGE) - PAGESIZE);
if (Slab->Destructor != NULL)
{
for (i = 0; i < Slab->ObjectsPerPage; i++)
{
- Object = Base + (i * Slab->ObjectSize) +
- sizeof(SLAB_CACHE_BUFCTL);
+ Object = (PVOID)(Base + (i * Slab->ObjectSize) +
+ sizeof(SLAB_CACHE_BUFCTL));
Slab->Destructor(Object, Slab->BaseSize);
}
}
- PhysicalPage = (PVOID)MmGetPhysicalAddressForProcess(NULL, Base);
+ PhysicalPage = MmGetPhysicalAddressForProcess(NULL, Base);
ExUnmapPage(Base);
MmReleasePageMemoryConsumer(MC_NPPOOL, PhysicalPage);
}
-/* $Id: virtual.c,v 1.56 2002/05/07 22:36:46 hbirr Exp $
+/* $Id: virtual.c,v 1.57 2002/05/13 18:10:41 chorns Exp $
*
* COPYRIGHT: See COPYING in the top directory
* PROJECT: ReactOS kernel
/* FUNCTIONS *****************************************************************/
-PMM_SEGMENT
+PMM_SEGMENT
MmGetSegmentForAddress(PMEMORY_AREA MArea,
PVOID Address,
PVOID* PCurrentAddress)
/*
* FUNCTION: Get the segment corresponding to a particular memory area and
- * address.
+ * address.
* ARGUMENTS:
* MArea (IN) = The memory area
* Address (IN) = The address to get the segment for
}
-NTSTATUS
+NTSTATUS
MmPageOutVirtualMemory(PMADDRESS_SPACE AddressSpace,
PMEMORY_AREA MemoryArea,
PVOID Address,
PMM_PAGEOP PageOp)
{
- PVOID PhysicalAddress;
- BOOL WasDirty;
+ ULONG_PTR PhysicalAddress;
+ BOOLEAN WasDirty;
SWAPENTRY SwapEntry;
NTSTATUS Status;
PMDL Mdl;
*/
if ((MemoryArea->Attributes & PAGE_READONLY) ||
(MemoryArea->Attributes & PAGE_EXECUTE_READ))
- {
+ {
MmDeleteVirtualMapping(MemoryArea->Process, Address, FALSE,
NULL, (PULONG)&PhysicalAddress);
MmDeleteAllRmaps(PhysicalAddress, NULL, NULL);
KeBugCheck(0);
}
MmReleasePageMemoryConsumer(MC_USER, PhysicalAddress);
-
+
PageOp->Status = STATUS_SUCCESS;
KeSetEvent(&PageOp->CompletionEvent, IO_NO_INCREMENT, FALSE);
MmReleasePageOp(PageOp);
* Otherwise this is read-write data
*/
MmDisableVirtualMapping(MemoryArea->Process, Address,
- &WasDirty, (PULONG)&PhysicalAddress);
+ &WasDirty, (PULONG_PTR)&PhysicalAddress);
if (PhysicalAddress == 0)
{
KeBugCheck(0);
/*
* If necessary, allocate an entry in the paging file for this page
*/
- SwapEntry = MmGetSavedSwapEntryPage((PVOID)PhysicalAddress);
+ SwapEntry = MmGetSavedSwapEntryPage(PhysicalAddress);
if (SwapEntry == 0)
{
SwapEntry = MmAllocSwapPage();
return(STATUS_UNSUCCESSFUL);
}
}
-
+
/*
* Write the page to the pagefile
*/
Status = MmWriteToSwapPage(SwapEntry, Mdl);
if (!NT_SUCCESS(Status))
{
- DPRINT1("MM: Failed to write to swap page (Status was 0x%.8X)\n",
+ DPRINT1("MM: Failed to write to swap page (Status was 0x%.8X)\n",
Status);
MmEnableVirtualMapping(MemoryArea->Process, Address);
PageOp->Status = STATUS_UNSUCCESSFUL;
NTSTATUS
MmNotPresentFaultVirtualMemory(PMADDRESS_SPACE AddressSpace,
- MEMORY_AREA* MemoryArea,
+ MEMORY_AREA* MemoryArea,
PVOID Address,
BOOLEAN Locked)
/*
* NOTES: This function is called with the address space lock held.
*/
{
- PVOID Page;
+ ULONG_PTR Page;
NTSTATUS Status;
PMM_SEGMENT Segment;
PVOID CurrentAddress;
PMM_PAGEOP PageOp;
-
+
/*
* There is a window between taking the page fault and locking the
* address space when another thread could load the page so we check
{
if (Locked)
{
- MmLockPage((PVOID)MmGetPhysicalAddressForProcess(NULL, Address));
- }
+ MmLockPage(MmGetPhysicalAddressForProcess(NULL, Address));
+ }
return(STATUS_SUCCESS);
}
MmLockAddressSpace(AddressSpace);
if (Locked)
{
- MmLockPage((PVOID)MmGetPhysicalAddressForProcess(NULL, Address));
+ MmLockPage(MmGetPhysicalAddressForProcess(NULL, Address));
}
MmReleasePageOp(PageOp);
return(STATUS_SUCCESS);
(ULONG)Page,
TRUE);
MmLockAddressSpace(AddressSpace);
- }
+ }
if (!NT_SUCCESS(Status))
{
DPRINT1("MmCreateVirtualMapping failed, not out of memory\n");
*/
if (Locked)
{
- MmLockPage((PVOID)MmGetPhysicalAddressForProcess(NULL, Address));
+ MmLockPage(MmGetPhysicalAddressForProcess(NULL, Address));
}
PageOp->Status = STATUS_SUCCESS;
KeSetEvent(&PageOp->CompletionEvent, IO_NO_INCREMENT, FALSE);
BaseAddress + (i * PAGESIZE)))
{
SWAPENTRY SwapEntry;
-
+
MmDeletePageFileMapping(AddressSpace->Process,
BaseAddress + (i * PAGESIZE),
&SwapEntry);
FALSE, NULL, NULL);
if (PhysicalAddr.u.LowPart != 0)
{
- MmDeleteRmap((PVOID)PhysicalAddr.u.LowPart, AddressSpace->Process,
+ MmDeleteRmap((ULONG_PTR)PhysicalAddr.u.LowPart, AddressSpace->Process,
BaseAddress + (i * PAGESIZE));
- MmDereferencePage((PVOID)(ULONG)(PhysicalAddr.u.LowPart));
+ MmDereferencePage((ULONG_PTR)(PhysicalAddr.u.LowPart));
}
}
}
ULONG OldType;
ULONG OldProtect;
ULONG OldLength;
-
+
DPRINT("MmSplitSegment()\n");
/*
* Save the type and protection and length of the current segment
FirstSegment->Protect,
Type,
Protect);
-
+
CurrentAddress = FirstAddress + FirstSegment->Length +
RegionSegment->Length;
}
OldLength,
OldType,
OldProtect,
- Type,
+ Type,
Protect);
CurrentSegment = CONTAINING_RECORD(CurrentEntry,
{
KeBugCheck(0);
}
-
+
if (BaseAddress >= CurrentAddress &&
(BaseAddress + RegionSize) <= (CurrentAddress + CurrentSegment->Length))
{
* PAGE_EXECUTE_READ, PAGE_EXECUTE_READWRITE, PAGE_GUARD,
* PAGE_NOACCESS
* REMARKS:
- * This function maps to the win32 VirtualAllocEx. Virtual memory is
+ * This function maps to the win32 VirtualAllocEx. Virtual memory is
* process based so the protocol starts with a ProcessHandle. I
* splitted the functionality of obtaining the actual address and
* specifying the start address in two parameters ( BaseAddress and
ObDereferenceObject(Process);
return(STATUS_UNSUCCESSFUL);
}
-
+
Status = MmCreateMemoryArea(Process,
&Process->AddressSpace,
MEMORY_AREA_VIRTUAL_MEMORY,
{
MmReserveSwapPages(RegionSize);
}
-
+
*UBaseAddress = BaseAddress;
*URegionSize = RegionSize;
DPRINT("*UBaseAddress %x *URegionSize %x\n", BaseAddress, RegionSize);
-
+
MmUnlockAddressSpace(AddressSpace);
ObDereferenceObject(Process);
return(STATUS_SUCCESS);
}
-NTSTATUS STDCALL
+NTSTATUS STDCALL
NtFlushVirtualMemory(IN HANDLE ProcessHandle,
IN PVOID BaseAddress,
IN ULONG NumberOfBytesToFlush,
/*
* FUNCTION: Flushes virtual memory to file
* ARGUMENTS:
- * ProcessHandle = Points to the process that allocated the virtual
+ * ProcessHandle = Points to the process that allocated the virtual
* memory
* BaseAddress = Points to the memory address
* NumberOfBytesToFlush = Limits the range to flush,
* NumberOfBytesFlushed = Actual number of bytes flushed
- * RETURNS: Status
+ * RETURNS: Status
*/
{
UNIMPLEMENTED;
}
-
-VOID STATIC
-MmFreeVirtualMemoryPage(PVOID Context,
- MEMORY_AREA* MemoryArea,
- PVOID Address,
- ULONG PhysicalAddr,
- SWAPENTRY SwapEntry,
- BOOLEAN Dirty)
+VOID
+MmFreeVirtualMemoryPage (IN BOOLEAN Before,
+ IN PVOID Context,
+ IN PMEMORY_AREA MemoryArea,
+ IN PVOID Address,
+ IN ULONG_PTR PhysicalAddress,
+ IN SWAPENTRY SwapEntry,
+ IN BOOLEAN Dirty)
{
PEPROCESS Process = (PEPROCESS)Context;
-
- if (PhysicalAddr != 0)
+
+ if (Before)
+ return;
+
+ if (PhysicalAddress != 0)
{
- MmDeleteRmap((PVOID)PhysicalAddr, Process, Address);
- MmDereferencePage((PVOID)PhysicalAddr);
+ MmDeleteRmap(PhysicalAddress, Process, Address);
+ MmDereferencePage(PhysicalAddress);
}
else if (SwapEntry != 0)
{
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-/* $Id: wset.c,v 1.11 2001/12/31 01:53:45 dwelch Exp $
+/* $Id: wset.c,v 1.12 2002/05/13 18:10:41 chorns Exp $
*
* PROJECT: ReactOS kernel
* FILE: ntoskrnl/mm/wset.c
NTSTATUS
MmTrimUserMemory(ULONG Target, ULONG Priority, PULONG NrFreedPages)
{
- PVOID CurrentPhysicalAddress;
- PVOID NextPhysicalAddress;
+ ULONG_PTR CurrentPhysicalAddress;
+ ULONG_PTR NextPhysicalAddress;
NTSTATUS Status;
(*NrFreedPages) = 0;
CurrentPhysicalAddress = MmGetLRUFirstUserPage();
- while (CurrentPhysicalAddress != NULL && Target > 0)
+ while (CurrentPhysicalAddress != 0 && Target > 0)
{
NextPhysicalAddress = MmGetLRUNextUserPage(CurrentPhysicalAddress);
-/* $Id: object.c,v 1.47 2002/05/07 22:39:26 hbirr Exp $
+/* $Id: object.c,v 1.48 2002/05/13 18:10:41 chorns Exp $
*
* COPYRIGHT: See COPYING in the top level directory
* PROJECT: ReactOS kernel
Path = ObjectAttributes->ObjectName->Buffer;
- if (Path[0] == 0)
+ /* Treat a missing or empty path as "resolve to the current object".
+ Must be ||, not &&: with && a NULL Path would be dereferenced by
+ the short-circuited second operand, and a non-NULL empty path
+ would never match. */
+ if ((Path == NULL) || (Path[0] == 0))
{
*ReturnedObject = CurrentObject;
return(STATUS_SUCCESS);
return(STATUS_UNSUCCESSFUL);
}
- if (Path)
- {
RtlCreateUnicodeString (&PathString, Path);
current = PathString.Buffer;
- }
- else
- {
- RtlInitUnicodeString (&PathString, NULL);
- current = NULL;
- }
RootObject = CurrentObject;
DPRINT("ObCreateObject(Handle %x, ObjectAttributes %x, Type %x)\n",
Handle, ObjectAttributes, Type);
+
if (ObjectAttributes != NULL &&
- ObjectAttributes->ObjectName != NULL)
+ ObjectAttributes->ObjectName != NULL &&
+ ObjectAttributes->ObjectName->Buffer != NULL)
{
DPRINT("ObjectAttributes->ObjectName->Buffer %S\n",
ObjectAttributes->ObjectName->Buffer);
}
if (ObjectAttributes != NULL &&
- ObjectAttributes->ObjectName != NULL)
+ ObjectAttributes->ObjectName != NULL &&
+ ObjectAttributes->ObjectName->Buffer != NULL)
{
Status = ObFindObject(ObjectAttributes,
&Parent,
{
RtlInitUnicodeString(&RemainingPath, NULL);
}
-
RtlMapGenericMask(&DesiredAccess,
Type->Mapping);
Parent,
RemainingPath.Buffer,
ObjectAttributes);
+
if (!NT_SUCCESS(Status))
{
if (ObjectAttached == TRUE)
return(Status);
}
}
+
RtlFreeUnicodeString( &RemainingPath );
*Object = HEADER_TO_BODY(Header);
-/* $Id: thread.c,v 1.90 2002/03/08 17:04:03 hbirr Exp $
+/* $Id: thread.c,v 1.91 2002/05/13 18:10:41 chorns Exp $
*
* COPYRIGHT: See COPYING in the top level directory
* PROJECT: ReactOS kernel
/* FUNCTIONS ***************************************************************/
+/*
+ * FUNCTION: Set the priority of the thread referenced by ThreadHandle.
+ * ARGUMENTS:
+ * ThreadHandle = Handle to the target thread (THREAD_ALL_ACCESS required)
+ * Priority = New priority for the thread
+ * RETURNS: Status
+ */
+NTSTATUS
+PiSetPriorityThread(IN HANDLE ThreadHandle,
+		     IN KPRIORITY Priority)
+{
+  PETHREAD Thread;
+  NTSTATUS Status;
+
+  Status = ObReferenceObjectByHandle(ThreadHandle,
+				     THREAD_ALL_ACCESS,
+				     PsThreadType,
+				     KernelMode,
+				     (PVOID*) &Thread,
+				     NULL);
+
+  if (!NT_SUCCESS(Status))
+    {
+      return(Status);
+    }
+
+  KeSetPriorityThread(&Thread->Tcb, Priority);
+  /* Drop the reference taken by ObReferenceObjectByHandle above;
+   calling ObReferenceObject here instead would leak a reference
+   and keep the thread object alive forever. */
+  ObDereferenceObject(Thread);
+  return(STATUS_SUCCESS);
+}
+
PKTHREAD STDCALL KeGetCurrentThread(VOID)
{
return(KeGetCurrentKPCR()->CurrentThread);
static PETHREAD PsScanThreadList (KPRIORITY Priority, ULONG Affinity)
{
-#if 0
- PLIST_ENTRY current_entry;
- PETHREAD current;
-
- current_entry = RemoveHeadList(&PriorityListHead[Priority]);
- if (current_entry != &PriorityListHead[Priority])
- {
- current = CONTAINING_RECORD(current_entry, ETHREAD,
- Tcb.QueueListEntry);
- }
- else
- {
- current = NULL;
- }
-
- return(current);
-#else
PLIST_ENTRY current_entry;
PETHREAD current;
current_entry = current_entry->Flink;
}
return(NULL);
-#endif
}
}
for (i = 0; i < (StackSize / PAGESIZE); i++)
{
- PVOID Page;
+ ULONG_PTR Page;
Status = MmRequestPageMemoryConsumer(MC_NPPOOL, TRUE, &Page);
if (!NT_SUCCESS(Status))
{
Status = MmCreateVirtualMapping(NULL,
KernelStack + (i * PAGESIZE),
PAGE_EXECUTE_READWRITE,
- (ULONG)Page,
+ Page,
TRUE);
}
return(KernelStack);
-/* $Id: mem.c,v 1.11 2001/03/16 10:58:47 dwelch Exp $
+/* $Id: mem.c,v 1.12 2002/05/13 18:10:41 chorns Exp $
*
* COPYRIGHT: See COPYING in the top level directory
* PROJECT: ReactOS kernel
{
NTSTATUS Status;
+ //assertmsg(KeGetCurrentIrql() < DISPATCH_LEVEL, ("MmCopyToCaller() called at >= DISPATCH_LEVEL\n"));
+
if (ExGetPreviousMode() == UserMode)
{
if ((ULONG)Dest >= KERNEL_BASE)
{
NTSTATUS Status;
+ //assertmsg(KeGetCurrentIrql() < DISPATCH_LEVEL, ("MmCopyFromCaller() called at >= DISPATCH_LEVEL\n"));
+
if (ExGetPreviousMode() == UserMode)
{
if ((ULONG)Src >= KERNEL_BASE)