* PURPOSE: Cache manager
*
* PROGRAMMERS: David Welch (welch@mcmail.com)
+ * Pierre Schweitzer (pierre@reactos.org)
*/
/* NOTES **********************************************************************
static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
static NPAGED_LOOKASIDE_LIST VacbLookasideList;
-/* Counters:
- * - Amount of pages flushed by lazy writer
- * - Number of times lazy writer ran
- */
-ULONG CcLazyWritePages = 0;
-ULONG CcLazyWriteIos = 0;
-
/* Internal vars (MS):
* - Threshold above which lazy writer will start action
* - Amount of dirty pages
+ * - List for deferred writes
+ * - Spin lock protecting the deferred write list
+ * - List for "clean" shared cache maps
*/
ULONG CcDirtyPageThreshold = 0;
ULONG CcTotalDirtyPages = 0;
-
-/* Internal vars (ROS):
- * - Event to notify lazy writer to shutdown
- * - Event to inform watchers lazy writer is done for this loop
- */
-KEVENT iLazyWriterShutdown;
-KEVENT iLazyWriterNotify;
+LIST_ENTRY CcDeferredWrites;
+KSPIN_LOCK CcDeferredWriteSpinLock;
+LIST_ENTRY CcCleanSharedCacheMapList;
#if DBG
-static void CcRosVacbIncRefCount_(PROS_VACB vacb, const char* file, int line)
+VOID CcRosVacbIncRefCount_(PROS_VACB vacb, PCSTR file, INT line)
{
++vacb->ReferenceCount;
if (vacb->SharedCacheMap->Trace)
file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
}
}
-static void CcRosVacbDecRefCount_(PROS_VACB vacb, const char* file, int line)
+VOID CcRosVacbDecRefCount_(PROS_VACB vacb, PCSTR file, INT line)
{
+ ASSERT(vacb->ReferenceCount != 0);
--vacb->ReferenceCount;
+ ASSERT(!(vacb->ReferenceCount == 0 && vacb->Dirty));
if (vacb->SharedCacheMap->Trace)
{
DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
}
}
-#define CcRosVacbIncRefCount(vacb) CcRosVacbIncRefCount_(vacb,__FILE__,__LINE__)
-#define CcRosVacbDecRefCount(vacb) CcRosVacbDecRefCount_(vacb,__FILE__,__LINE__)
-#else
-#define CcRosVacbIncRefCount(vacb) (++((vacb)->ReferenceCount))
-#define CcRosVacbDecRefCount(vacb) (--((vacb)->ReferenceCount))
#endif
NTSTATUS
PROS_VACB Vacb)
{
NTSTATUS Status;
- KIRQL oldIrql;
Status = CcWriteVirtualAddress(Vacb);
if (NT_SUCCESS(Status))
{
- KeAcquireGuardedMutex(&ViewLock);
- KeAcquireSpinLock(&Vacb->SharedCacheMap->CacheMapLock, &oldIrql);
-
- Vacb->Dirty = FALSE;
- RemoveEntryList(&Vacb->DirtyVacbListEntry);
- CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
- CcRosVacbDecRefCount(Vacb);
-
- KeReleaseSpinLock(&Vacb->SharedCacheMap->CacheMapLock, oldIrql);
- KeReleaseGuardedMutex(&ViewLock);
+ CcRosUnmarkDirtyVacb(Vacb, TRUE);
}
return Status;
return STATUS_SUCCESS;
}
-/* FIXME: Someday this could somewhat implement write-behind/read-ahead */
-VOID
-NTAPI
-CciLazyWriter(PVOID Unused)
-{
- LARGE_INTEGER OneSecond;
-
- OneSecond.QuadPart = (LONGLONG)-1*1000*1000*10;
-
- while (TRUE)
- {
- NTSTATUS Status;
- ULONG Target, Count = 0;
-
- /* One per second or until we have to stop */
- Status = KeWaitForSingleObject(&iLazyWriterShutdown,
- Executive,
- KernelMode,
- FALSE,
- &OneSecond);
-
- /* If we succeeed, we've to stop running! */
- if (Status == STATUS_SUCCESS)
- {
- break;
- }
-
- /* We're not sleeping anymore */
- KeClearEvent(&iLazyWriterNotify);
-
- /* Our target is one-eighth of the dirty pages */
- Target = CcTotalDirtyPages / 8;
- if (Target != 0)
- {
- /* Flush! */
- DPRINT("Lazy writer starting (%d)\n", Target);
- CcRosFlushDirtyPages(Target, &Count, FALSE, TRUE);
-
- /* And update stats */
- CcLazyWritePages += Count;
- ++CcLazyWriteIos;
- DPRINT("Lazy writer done (%d)\n", Count);
- }
-
- /* Inform people waiting on us that we're done */
- KeSetEvent(&iLazyWriterNotify, IO_DISK_INCREMENT, FALSE);
- }
-}
-
NTSTATUS
CcRosTrimCache (
ULONG Target,
CcRosVacbDecRefCount(current);
/* Check if we can free this entry now */
- if (current->ReferenceCount == 0)
+ if (current->ReferenceCount < 2)
{
ASSERT(!current->Dirty);
ASSERT(!current->MappedCount);
+ ASSERT(current->ReferenceCount == 1);
RemoveEntryList(&current->CacheMapVacbListEntry);
RemoveEntryList(&current->VacbLruListEntry);
current = CONTAINING_RECORD(current_entry,
ROS_VACB,
CacheMapVacbListEntry);
+ CcRosVacbDecRefCount(current);
CcRosInternalFreeVacb(current);
}
BOOLEAN Dirty,
BOOLEAN Mapped)
{
- BOOLEAN WasDirty;
- KIRQL oldIrql;
-
ASSERT(SharedCacheMap);
DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
SharedCacheMap, Vacb, Valid);
- KeAcquireGuardedMutex(&ViewLock);
- KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
-
Vacb->Valid = Valid;
- WasDirty = Vacb->Dirty;
- Vacb->Dirty = Vacb->Dirty || Dirty;
-
- if (!WasDirty && Vacb->Dirty)
+ if (Dirty && !Vacb->Dirty)
{
- InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
- CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+ CcRosMarkDirtyVacb(Vacb);
}
if (Mapped)
{
CcRosVacbIncRefCount(Vacb);
}
- if (!WasDirty && Vacb->Dirty)
- {
- CcRosVacbIncRefCount(Vacb);
- }
- KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
- KeReleaseGuardedMutex(&ViewLock);
+ ASSERT(Vacb->ReferenceCount > 0);
+
CcRosReleaseVacbLock(Vacb);
return STATUS_SUCCESS;
KeAcquireGuardedMutex(&ViewLock);
KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
- if (!Vacb->Dirty)
- {
- InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
- CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
- }
- else
- {
- CcRosVacbDecRefCount(Vacb);
- }
+ ASSERT(!Vacb->Dirty);
+
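+ /* Insert the VACB into the dirty list, charge a full view worth of dirty
+ * pages and keep an extra reference for as long as it stays dirty */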
+ InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
+ CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+ Vacb->SharedCacheMap->DirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+ CcRosVacbIncRefCount(Vacb);
/* Move to the tail of the LRU list */
RemoveEntryList(&Vacb->VacbLruListEntry);
KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
KeReleaseGuardedMutex(&ViewLock);
+
+ /* Schedule a lazy writer run now that we have dirty VACBs */
+ oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
+ if (!LazyWriter.ScanActive)
+ {
+ CcScheduleLazyWriteScan(FALSE);
+ }
+ KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
+}
+
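+/*
+ * Removes the VACB from the dirty VACB list, updates the global and per-map
+ * dirty page counters and drops the reference that was taken when the VACB
+ * was marked dirty. When LockViews is TRUE, the view lock and the cache map
+ * lock are acquired and released here.
+ */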
+VOID
+NTAPI
+CcRosUnmarkDirtyVacb (
+ PROS_VACB Vacb,
+ BOOLEAN LockViews)
+{
+ KIRQL oldIrql;
+ PROS_SHARED_CACHE_MAP SharedCacheMap;
+
+ SharedCacheMap = Vacb->SharedCacheMap;
+
+ if (LockViews)
+ {
+ KeAcquireGuardedMutex(&ViewLock);
+ KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
+ }
+
+ ASSERT(Vacb->Dirty);
+
+ Vacb->Dirty = FALSE;
+
+ RemoveEntryList(&Vacb->DirtyVacbListEntry);
+ CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+ Vacb->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+ CcRosVacbDecRefCount(Vacb);
+
+ if (LockViews)
+ {
+ KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
+ KeReleaseGuardedMutex(&ViewLock);
+ }
}
NTSTATUS
KeBugCheck(CACHE_MANAGER);
}
- CcRosMarkDirtyVacb(Vacb);
-
-
- CcRosReleaseVacbLock(Vacb);
+ CcRosReleaseVacb(SharedCacheMap, Vacb, Vacb->Valid, TRUE, FALSE);
return STATUS_SUCCESS;
}
+/*
+ * Note: despite its name, this is not the inverse of
+ * CcRosMapVacbInKernelSpace()
+ */
NTSTATUS
NTAPI
CcRosUnmapVacb (
BOOLEAN NowDirty)
{
PROS_VACB Vacb;
- BOOLEAN WasDirty;
- KIRQL oldIrql;
ASSERT(SharedCacheMap);
return STATUS_UNSUCCESSFUL;
}
- KeAcquireGuardedMutex(&ViewLock);
- KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
-
- WasDirty = Vacb->Dirty;
- Vacb->Dirty = Vacb->Dirty || NowDirty;
-
+ ASSERT(Vacb->MappedCount != 0);
Vacb->MappedCount--;
- if (!WasDirty && NowDirty)
- {
- InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
- CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
- }
-
- CcRosVacbDecRefCount(Vacb);
- if (!WasDirty && NowDirty)
- {
- CcRosVacbIncRefCount(Vacb);
- }
if (Vacb->MappedCount == 0)
{
CcRosVacbDecRefCount(Vacb);
}
- KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
- KeReleaseGuardedMutex(&ViewLock);
- CcRosReleaseVacbLock(Vacb);
+ CcRosReleaseVacb(SharedCacheMap, Vacb, Vacb->Valid, NowDirty, FALSE);
return STATUS_SUCCESS;
}
static
NTSTATUS
-CcRosMapVacb(
+CcRosMapVacbInKernelSpace(
PROS_VACB Vacb)
{
ULONG i;
current->MappedCount = 0;
current->DirtyVacbListEntry.Flink = NULL;
current->DirtyVacbListEntry.Blink = NULL;
- current->ReferenceCount = 1;
+ current->ReferenceCount = 0;
current->PinCount = 0;
KeInitializeMutex(&current->Mutex, 0);
CcRosAcquireVacbLock(current, NULL);
}
KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
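+ /* Keep a reference for the VACB while it is linked on the lists */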
+ CcRosVacbIncRefCount(current);
KeReleaseGuardedMutex(&ViewLock);
MI_SET_USAGE(MI_USAGE_CACHE);
#if MI_TRACE_PFNS
if ((SharedCacheMap->FileObject) && (SharedCacheMap->FileObject->FileName.Buffer))
{
- PWCHAR pos = NULL;
+ PWCHAR pos;
ULONG len = 0;
pos = wcsrchr(SharedCacheMap->FileObject->FileName.Buffer, '\\');
if (pos)
}
#endif
- Status = CcRosMapVacb(current);
+ Status = CcRosMapVacbInKernelSpace(current);
if (!NT_SUCCESS(Status))
{
RemoveEntryList(&current->CacheMapVacbListEntry);
ExFreeToNPagedLookasideList(&VacbLookasideList, current);
}
+ /* Reference it to allow release */
+ CcRosVacbIncRefCount(current);
+
return Status;
}
DPRINT("*BaseAddress %p\n", *BaseAddress);
*Vacb = current;
*BaseOffset = current->FileOffset.QuadPart;
+
+ ASSERT(current->ReferenceCount > 1);
+
return STATUS_SUCCESS;
}
NULL);
MmUnlockAddressSpace(MmGetKernelAddressSpace());
+ if (Vacb->PinCount != 0 || Vacb->ReferenceCount != 0)
+ {
+ DPRINT1("Invalid free: %ld, %ld\n", Vacb->ReferenceCount, Vacb->PinCount);
+ if (Vacb->SharedCacheMap->FileObject && Vacb->SharedCacheMap->FileObject->FileName.Length)
+ {
+ DPRINT1("For file: %wZ\n", &Vacb->SharedCacheMap->FileObject->FileName);
+ }
+ }
+
+ ASSERT(Vacb->PinCount == 0);
+ ASSERT(Vacb->ReferenceCount == 0);
ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
return STATUS_SUCCESS;
}
LONGLONG RemainingLength;
PROS_VACB current;
NTSTATUS Status;
- KIRQL oldIrql;
CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
SectionObjectPointers, FileOffset, Length);
}
}
- CcRosReleaseVacbLock(current);
-
- KeAcquireGuardedMutex(&ViewLock);
- KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
- CcRosVacbDecRefCount(current);
- KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
- KeReleaseGuardedMutex(&ViewLock);
+ CcRosReleaseVacb(SharedCacheMap, current, current->Valid, current->Dirty, FALSE);
}
Offset.QuadPart += VACB_MAPPING_GRANULARITY;
SharedCacheMap->OpenCount--;
if (SharedCacheMap->OpenCount == 0)
{
+ KIRQL OldIrql;
+
FileObject->SectionObjectPointer->SharedCacheMap = NULL;
/*
while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
{
current_entry = RemoveTailList(&SharedCacheMap->CacheMapVacbListHead);
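+ /* Drop the cache map lock before acquiring the VACB lock, so that we
+ * never wait on the VACB mutex while holding a spin lock */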
+ KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
+
current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
+ CcRosAcquireVacbLock(current, NULL);
RemoveEntryList(&current->VacbLruListEntry);
if (current->Dirty)
{
- RemoveEntryList(¤t->DirtyVacbListEntry);
- CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+ KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
+ CcRosUnmarkDirtyVacb(current, FALSE);
+ KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
DPRINT1("Freeing dirty VACB\n");
}
InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
+ CcRosReleaseVacbLock(current);
+
+ KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
}
#if DBG
SharedCacheMap->Trace = FALSE;
{
current_entry = RemoveTailList(&FreeList);
current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
+ CcRosVacbDecRefCount(current);
CcRosInternalFreeVacb(current);
}
+
+ OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
+ RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
+ KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
+
ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
KeAcquireGuardedMutex(&ViewLock);
}
* has been closed.
*/
{
+ KIRQL OldIrql;
+ PPRIVATE_CACHE_MAP PrivateMap;
PROS_SHARED_CACHE_MAP SharedCacheMap;
KeAcquireGuardedMutex(&ViewLock);
if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
{
SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
- if (FileObject->PrivateCacheMap != NULL)
+
+ /* Closing the handle, so kill the private cache map.
+ * Before you even try to remove it from the FO, always
+ * lock the master lock, to be sure not to race
+ * with a potentially ongoing read-ahead!
+ */
+ OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
+ PrivateMap = FileObject->PrivateCacheMap;
+ FileObject->PrivateCacheMap = NULL;
+ KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
+
+ if (PrivateMap != NULL)
{
- FileObject->PrivateCacheMap = NULL;
+ /* Remove it from the file */
+ KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
+ RemoveEntryList(&PrivateMap->PrivateLinks);
+ KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);
+
+ /* And free it. */
+ if (PrivateMap != &SharedCacheMap->PrivateCacheMap)
+ {
+ ExFreePoolWithTag(PrivateMap, TAG_PRIVATE_CACHE_MAP);
+ }
+ else
+ {
+ PrivateMap->NodeTypeCode = 0;
+ }
+
if (SharedCacheMap->OpenCount > 0)
{
SharedCacheMap->OpenCount--;
return STATUS_SUCCESS;
}
-NTSTATUS
-NTAPI
-CcTryToInitializeFileCache (
- PFILE_OBJECT FileObject)
-{
- PROS_SHARED_CACHE_MAP SharedCacheMap;
- NTSTATUS Status;
-
- KeAcquireGuardedMutex(&ViewLock);
-
- ASSERT(FileObject->SectionObjectPointer);
- SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
- if (SharedCacheMap == NULL)
- {
- Status = STATUS_UNSUCCESSFUL;
- }
- else
- {
- if (FileObject->PrivateCacheMap == NULL)
- {
- FileObject->PrivateCacheMap = SharedCacheMap;
- SharedCacheMap->OpenCount++;
- }
- Status = STATUS_SUCCESS;
- }
- KeReleaseGuardedMutex(&ViewLock);
-
- return Status;
-}
-
-
NTSTATUS
NTAPI
CcRosInitializeFileCache (
* FUNCTION: Initializes a shared cache map for a file object
*/
{
+ KIRQL OldIrql;
+ BOOLEAN Allocated;
PROS_SHARED_CACHE_MAP SharedCacheMap;
SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
DPRINT("CcRosInitializeFileCache(FileObject 0x%p, SharedCacheMap 0x%p)\n",
FileObject, SharedCacheMap);
+ Allocated = FALSE;
KeAcquireGuardedMutex(&ViewLock);
if (SharedCacheMap == NULL)
{
+ Allocated = TRUE;
SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
if (SharedCacheMap == NULL)
{
FILE_ALL_ACCESS,
NULL,
KernelMode);
+ SharedCacheMap->NodeTypeCode = NODE_TYPE_SHARED_MAP;
+ SharedCacheMap->NodeByteSize = sizeof(*SharedCacheMap);
SharedCacheMap->FileObject = FileObject;
SharedCacheMap->Callbacks = CallBacks;
SharedCacheMap->LazyWriteContext = LazyWriterContext;
SharedCacheMap->SectionSize = FileSizes->AllocationSize;
SharedCacheMap->FileSize = FileSizes->FileSize;
SharedCacheMap->PinAccess = PinAccess;
+ SharedCacheMap->DirtyPageThreshold = 0;
+ SharedCacheMap->DirtyPages = 0;
+ InitializeListHead(&SharedCacheMap->PrivateList);
KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;
+
+ OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
+ InsertTailList(&CcCleanSharedCacheMapList, &SharedCacheMap->SharedCacheMapLinks);
+ KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
}
if (FileObject->PrivateCacheMap == NULL)
{
- FileObject->PrivateCacheMap = SharedCacheMap;
+ PPRIVATE_CACHE_MAP PrivateMap;
+
+ /* Allocate the private cache map for this handle */
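+ /* Use the private cache map embedded in the shared cache map if it is
+ * still free; otherwise, allocate one from non paged pool */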
+ if (SharedCacheMap->PrivateCacheMap.NodeTypeCode != 0)
+ {
+ PrivateMap = ExAllocatePoolWithTag(NonPagedPool, sizeof(PRIVATE_CACHE_MAP), TAG_PRIVATE_CACHE_MAP);
+ }
+ else
+ {
+ PrivateMap = &SharedCacheMap->PrivateCacheMap;
+ }
+
+ if (PrivateMap == NULL)
+ {
+ /* If we also allocated the shared cache map for this file, kill it */
+ if (Allocated)
+ {
+ OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
+ RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
+ KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
+
+ FileObject->SectionObjectPointer->SharedCacheMap = NULL;
+ ObDereferenceObject(FileObject);
+ ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
+ }
+
+ KeReleaseGuardedMutex(&ViewLock);
+ return STATUS_INSUFFICIENT_RESOURCES;
+ }
+
+ /* Initialize it */
+ RtlZeroMemory(PrivateMap, sizeof(PRIVATE_CACHE_MAP));
+ PrivateMap->NodeTypeCode = NODE_TYPE_PRIVATE_MAP;
+ PrivateMap->ReadAheadMask = PAGE_SIZE - 1;
+ PrivateMap->FileObject = FileObject;
+ KeInitializeSpinLock(&PrivateMap->ReadAheadSpinLock);
+
+ /* Link it to the file */
+ KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
+ InsertTailList(&SharedCacheMap->PrivateList, &PrivateMap->PrivateLinks);
+ KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);
+
+ FileObject->PrivateCacheMap = PrivateMap;
SharedCacheMap->OpenCount++;
}
KeReleaseGuardedMutex(&ViewLock);
}
VOID
-NTAPI
-CcShutdownLazyWriter (
- VOID)
-{
- /* Simply set the event, lazy writer will stop when it's done */
- KeSetEvent(&iLazyWriterShutdown, IO_DISK_INCREMENT, FALSE);
-}
-
-BOOLEAN
INIT_FUNCTION
NTAPI
CcInitView (
VOID)
{
- HANDLE LazyWriter;
- NTSTATUS Status;
- OBJECT_ATTRIBUTES ObjectAttributes;
-
DPRINT("CcInitView()\n");
InitializeListHead(&DirtyVacbListHead);
InitializeListHead(&VacbLruListHead);
+ InitializeListHead(&CcDeferredWrites);
+ InitializeListHead(&CcCleanSharedCacheMapList);
+ KeInitializeSpinLock(&CcDeferredWriteSpinLock);
KeInitializeGuardedMutex(&ViewLock);
ExInitializeNPagedLookasideList(&iBcbLookasideList,
NULL,
MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);
- /* Initialize lazy writer events */
- KeInitializeEvent(&iLazyWriterShutdown, SynchronizationEvent, FALSE);
- KeInitializeEvent(&iLazyWriterNotify, NotificationEvent, FALSE);
+ CcInitCacheZeroPage();
+}
- /* Define lazy writer threshold, depending on system type */
- switch (MmQuerySystemSize())
+#if DBG && defined(KDBG)
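+/*
+ * KDBG extension: for every shared cache map on the clean list, print the
+ * amount of valid and dirty cache (in kb) together with the file name.
+ */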
+BOOLEAN
+ExpKdbgExtFileCache(ULONG Argc, PCHAR Argv[])
+{
+ PLIST_ENTRY ListEntry;
+ UNICODE_STRING NoName = RTL_CONSTANT_STRING(L"No name for File");
+
+ KdbpPrint(" Usage Summary (in kb)\n");
+ KdbpPrint("Shared\t\tValid\tDirty\tName\n");
+ /* No need to lock the spin lock here, we're in DBG */
+ for (ListEntry = CcCleanSharedCacheMapList.Flink;
+ ListEntry != &CcCleanSharedCacheMapList;
+ ListEntry = ListEntry->Flink)
{
- case MmSmallSystem:
- CcDirtyPageThreshold = MmNumberOfPhysicalPages / 8;
- break;
+ PLIST_ENTRY Vacbs;
+ ULONG Valid = 0, Dirty = 0;
+ PROS_SHARED_CACHE_MAP SharedCacheMap;
+ PUNICODE_STRING FileName;
- case MmMediumSystem:
- CcDirtyPageThreshold = MmNumberOfPhysicalPages / 4;
- break;
+ SharedCacheMap = CONTAINING_RECORD(ListEntry, ROS_SHARED_CACHE_MAP, SharedCacheMapLinks);
- case MmLargeSystem:
- CcDirtyPageThreshold = MmNumberOfPhysicalPages / 8 + MmNumberOfPhysicalPages / 4;
- break;
- }
+ /* Dirty size */
+ Dirty = (SharedCacheMap->DirtyPages * PAGE_SIZE) / 1024;
- /* Start the lazy writer thread */
- InitializeObjectAttributes(&ObjectAttributes,
- NULL,
- OBJ_KERNEL_HANDLE,
- NULL,
- NULL);
- Status = PsCreateSystemThread(&LazyWriter,
- THREAD_ALL_ACCESS,
- &ObjectAttributes,
- NULL,
- NULL,
- CciLazyWriter,
- NULL);
- if (!NT_SUCCESS(Status))
- {
- return FALSE;
+ /* First, sum up the valid size of all the associated VACBs */
+ for (Vacbs = SharedCacheMap->CacheMapVacbListHead.Flink;
+ Vacbs != &SharedCacheMap->CacheMapVacbListHead;
+ Vacbs = Vacbs->Flink)
+ {
+ PROS_VACB Vacb;
+
+ Vacb = CONTAINING_RECORD(Vacbs, ROS_VACB, CacheMapVacbListEntry);
+ if (Vacb->Valid)
+ {
+ Valid += VACB_MAPPING_GRANULARITY / 1024;
+ }
+ }
+
+ /* Setup name */
+ if (SharedCacheMap->FileObject != NULL &&
+ SharedCacheMap->FileObject->FileName.Length != 0)
+ {
+ FileName = &SharedCacheMap->FileObject->FileName;
+ }
+ else
+ {
+ FileName = &NoName;
+ }
+
+ /* And print */
+ KdbpPrint("%p\t%d\t%d\t%wZ\n", SharedCacheMap, Valid, Dirty, FileName);
}
- /* Handle is not needed */
- ObCloseHandle(LazyWriter, KernelMode);
+ return TRUE;
+}
- CcInitCacheZeroPage();
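+/*
+ * KDBG extension: print the dirty page counters and the thresholds used to
+ * decide whether cached writes should be throttled.
+ */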
+BOOLEAN
+ExpKdbgExtDefWrites(ULONG Argc, PCHAR Argv[])
+{
+ KdbpPrint("CcTotalDirtyPages:\t%lu (%lu Kb)\n", CcTotalDirtyPages,
+ (CcTotalDirtyPages * PAGE_SIZE) / 1024);
+ KdbpPrint("CcDirtyPageThreshold:\t%lu (%lu Kb)\n", CcDirtyPageThreshold,
+ (CcDirtyPageThreshold * PAGE_SIZE) / 1024);
+ KdbpPrint("MmAvailablePages:\t%lu (%lu Kb)\n", MmAvailablePages,
+ (MmAvailablePages * PAGE_SIZE) / 1024);
+ KdbpPrint("MmThrottleTop:\t\t%lu (%lu Kb)\n", MmThrottleTop,
+ (MmThrottleTop * PAGE_SIZE) / 1024);
+ KdbpPrint("MmThrottleBottom:\t%lu (%lu Kb)\n", MmThrottleBottom,
+ (MmThrottleBottom * PAGE_SIZE) / 1024);
+ KdbpPrint("MmModifiedPageListHead.Total:\t%lu (%lu Kb)\n", MmModifiedPageListHead.Total,
+ (MmModifiedPageListHead.Total * PAGE_SIZE) / 1024);
+
+ if (CcTotalDirtyPages >= CcDirtyPageThreshold)
+ {
+ KdbpPrint("CcTotalDirtyPages above the threshold, writes should be throttled\n");
+ }
+ else if (CcTotalDirtyPages + 64 >= CcDirtyPageThreshold)
+ {
+ KdbpPrint("CcTotalDirtyPages within 64 (max charge) pages of the threshold, writes may be throttled\n");
+ }
+ else
+ {
+ KdbpPrint("CcTotalDirtyPages below the threshold, writes should not be throttled\n");
+ }
return TRUE;
}
+#endif
/* EOF */