* PURPOSE: Cache manager
*
* PROGRAMMERS: David Welch (welch@mcmail.com)
+ * Pierre Schweitzer (pierre@reactos.org)
*/
/* NOTES **********************************************************************
/* Internal vars (MS):
* - Threshold above which lazy writer will start action
* - Amount of dirty pages
+ * - List for deferred writes
+ * - Spinlock when dealing with the deferred list
+ * - List for "clean" shared cache maps
*/
ULONG CcDirtyPageThreshold = 0;
ULONG CcTotalDirtyPages = 0;
+LIST_ENTRY CcDeferredWrites;
+KSPIN_LOCK CcDeferredWriteSpinLock;
+LIST_ENTRY CcCleanSharedCacheMapList;
/* Internal vars (ROS):
* - Event to notify lazy writer to shutdown
* - Event to inform watchers lazy writer is done for this loop
+ * - Lock for the CcCleanSharedCacheMapList list
*/
KEVENT iLazyWriterShutdown;
KEVENT iLazyWriterNotify;
+KSPIN_LOCK iSharedCacheMapLock;
#if DBG
static void CcRosVacbIncRefCount_(PROS_VACB vacb, const char* file, int line)
Vacb->Dirty = FALSE;
RemoveEntryList(&Vacb->DirtyVacbListEntry);
CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+ Vacb->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
CcRosVacbDecRefCount(Vacb);
KeReleaseSpinLock(&Vacb->SharedCacheMap->CacheMapLock, oldIrql);
ASSERT(current->Dirty);
/* One reference is added above */
- if (current->ReferenceCount > 2)
+ if ((current->ReferenceCount > 2 && current->PinCount == 0) ||
+ (current->ReferenceCount > 3 && current->PinCount > 1))
{
CcRosReleaseVacbLock(current);
current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
while (TRUE)
{
NTSTATUS Status;
+ PLIST_ENTRY ListEntry;
ULONG Target, Count = 0;
/* One per second or until we have to stop */
/* Inform people waiting on us that we're done */
KeSetEvent(&iLazyWriterNotify, IO_DISK_INCREMENT, FALSE);
+
+ /* Likely not optimal, but let's handle one deferred write now! */
+ ListEntry = ExInterlockedRemoveHeadList(&CcDeferredWrites, &CcDeferredWriteSpinLock);
+ if (ListEntry != NULL)
+ {
+ PROS_DEFERRED_WRITE_CONTEXT Context;
+
+ /* Extract the context */
+ Context = CONTAINING_RECORD(ListEntry, ROS_DEFERRED_WRITE_CONTEXT, CcDeferredWritesEntry);
+
+ /* Can we write now? */
+ if (CcCanIWrite(Context->FileObject, Context->BytesToWrite, FALSE, Context->Retrying))
+ {
+ /* Yes! Do it, and destroy the associated context */
+ Context->PostRoutine(Context->Context1, Context->Context2);
+ ExFreePoolWithTag(Context, 'CcDw');
+ }
+ else
+ {
+                /* Otherwise, requeue it at the tail so that it doesn't block
+                 * the others. There is clearly room for improvement here, but
+                 * given the poor algorithm currently in use, it's better than
+                 * nothing!
+                 */
+ ExInterlockedInsertTailList(&CcDeferredWrites,
+ &Context->CcDeferredWritesEntry,
+ &CcDeferredWriteSpinLock);
+ }
+ }
}
}
BOOLEAN Mapped)
{
BOOLEAN WasDirty;
- KIRQL oldIrql;
ASSERT(SharedCacheMap);
DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
SharedCacheMap, Vacb, Valid);
- KeAcquireGuardedMutex(&ViewLock);
- KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
-
Vacb->Valid = Valid;
- WasDirty = Vacb->Dirty;
- Vacb->Dirty = Vacb->Dirty || Dirty;
-
- if (!WasDirty && Vacb->Dirty)
+ WasDirty = FALSE;
+ if (Dirty)
{
- InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
- CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+ if (!Vacb->Dirty && Dirty)
+ {
+ CcRosMarkDirtyVacb(Vacb);
+ }
+ else
+ {
+ WasDirty = TRUE;
+ }
}
if (Mapped)
CcRosVacbIncRefCount(Vacb);
}
- KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
- KeReleaseGuardedMutex(&ViewLock);
CcRosReleaseVacbLock(Vacb);
return STATUS_SUCCESS;
{
InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+ Vacb->SharedCacheMap->DirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
}
else
{
{
PROS_VACB Vacb;
BOOLEAN WasDirty;
- KIRQL oldIrql;
ASSERT(SharedCacheMap);
return STATUS_UNSUCCESSFUL;
}
- KeAcquireGuardedMutex(&ViewLock);
- KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
-
- WasDirty = Vacb->Dirty;
- Vacb->Dirty = Vacb->Dirty || NowDirty;
-
- Vacb->MappedCount--;
-
- if (!WasDirty && NowDirty)
+ WasDirty = FALSE;
+ if (NowDirty)
{
- InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
- CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+ if (!Vacb->Dirty && NowDirty)
+ {
+ CcRosMarkDirtyVacb(Vacb);
+ }
+ else
+ {
+ WasDirty = TRUE;
+ }
}
+ Vacb->MappedCount--;
+
CcRosVacbDecRefCount(Vacb);
if (!WasDirty && NowDirty)
{
CcRosVacbDecRefCount(Vacb);
}
- KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
- KeReleaseGuardedMutex(&ViewLock);
CcRosReleaseVacbLock(Vacb);
return STATUS_SUCCESS;
#if MI_TRACE_PFNS
if ((SharedCacheMap->FileObject) && (SharedCacheMap->FileObject->FileName.Buffer))
{
- PWCHAR pos = NULL;
+ PWCHAR pos;
ULONG len = 0;
pos = wcsrchr(SharedCacheMap->FileObject->FileName.Buffer, '\\');
if (pos)
SharedCacheMap->OpenCount--;
if (SharedCacheMap->OpenCount == 0)
{
+ KIRQL OldIrql;
+
FileObject->SectionObjectPointer->SharedCacheMap = NULL;
/*
{
RemoveEntryList(¤t->DirtyVacbListEntry);
CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+ current->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
DPRINT1("Freeing dirty VACB\n");
}
InsertHeadList(&FreeList, ¤t->CacheMapVacbListEntry);
current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
CcRosInternalFreeVacb(current);
}
+
+ KeAcquireSpinLock(&iSharedCacheMapLock, &OldIrql);
+ RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
+ KeReleaseSpinLock(&iSharedCacheMapLock, OldIrql);
+
ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
KeAcquireGuardedMutex(&ViewLock);
}
KeAcquireGuardedMutex(&ViewLock);
if (SharedCacheMap == NULL)
{
+ KIRQL OldIrql;
+
SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
if (SharedCacheMap == NULL)
{
SharedCacheMap->SectionSize = FileSizes->AllocationSize;
SharedCacheMap->FileSize = FileSizes->FileSize;
SharedCacheMap->PinAccess = PinAccess;
+ SharedCacheMap->DirtyPageThreshold = 0;
+ SharedCacheMap->DirtyPages = 0;
KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;
+
+ KeAcquireSpinLock(&iSharedCacheMapLock, &OldIrql);
+ InsertTailList(&CcCleanSharedCacheMapList, &SharedCacheMap->SharedCacheMapLinks);
+ KeReleaseSpinLock(&iSharedCacheMapLock, OldIrql);
}
if (FileObject->PrivateCacheMap == NULL)
{
{
HANDLE LazyWriter;
NTSTATUS Status;
+ KPRIORITY Priority;
OBJECT_ATTRIBUTES ObjectAttributes;
DPRINT("CcInitView()\n");
InitializeListHead(&DirtyVacbListHead);
InitializeListHead(&VacbLruListHead);
+ InitializeListHead(&CcDeferredWrites);
+ InitializeListHead(&CcCleanSharedCacheMapList);
+ KeInitializeSpinLock(&CcDeferredWriteSpinLock);
+ KeInitializeSpinLock(&iSharedCacheMapLock);
KeInitializeGuardedMutex(&ViewLock);
ExInitializeNPagedLookasideList(&iBcbLookasideList,
NULL,
return FALSE;
}
+ Priority = 27;
+ Status = NtSetInformationThread(LazyWriter,
+ ThreadPriority,
+ &Priority,
+ sizeof(Priority));
+ ASSERT(NT_SUCCESS(Status));
+
/* Handle is not needed */
ObCloseHandle(LazyWriter, KernelMode);
return TRUE;
}
+#if DBG && defined(KDBG)
+/* KDBG debugger extension: dumps, for every shared cache map on the
+ * clean list, the amount of valid and dirty cached data (in kb) along
+ * with the associated file name. Always returns TRUE.
+ * Argc/Argv are accepted for the extension calling convention but are
+ * not used: the command takes no arguments.
+ */
+BOOLEAN
+ExpKdbgExtFileCache(ULONG Argc, PCHAR Argv[])
+{
+    PLIST_ENTRY ListEntry;
+    UNICODE_STRING NoName = RTL_CONSTANT_STRING(L"No name for File");
+
+    KdbpPrint(" Usage Summary (in kb)\n");
+    KdbpPrint("Shared\t\tValid\tDirty\tName\n");
+    /* No need to lock the spin lock here: we are running inside the
+     * kernel debugger, so the rest of the system is frozen.
+     */
+    for (ListEntry = CcCleanSharedCacheMapList.Flink;
+         ListEntry != &CcCleanSharedCacheMapList;
+         ListEntry = ListEntry->Flink)
+    {
+        PLIST_ENTRY Vacbs;
+        ULONG Valid = 0, Dirty = 0;
+        PROS_SHARED_CACHE_MAP SharedCacheMap;
+        PUNICODE_STRING FileName;
+
+        SharedCacheMap = CONTAINING_RECORD(ListEntry, ROS_SHARED_CACHE_MAP, SharedCacheMapLinks);
+
+        /* Dirty size, straight from the per-map dirty page counter */
+        Dirty = (SharedCacheMap->DirtyPages * PAGE_SIZE) / 1024;
+
+        /* Valid size: sum the mapping granularity for every valid VACB
+         * of this shared cache map.
+         */
+        for (Vacbs = SharedCacheMap->CacheMapVacbListHead.Flink;
+             Vacbs != &SharedCacheMap->CacheMapVacbListHead;
+             Vacbs = Vacbs->Flink)
+        {
+            PROS_VACB Vacb;
+
+            Vacb = CONTAINING_RECORD(Vacbs, ROS_VACB, CacheMapVacbListEntry);
+            if (Vacb->Valid)
+            {
+                Valid += VACB_MAPPING_GRANULARITY / 1024;
+            }
+        }
+
+        /* Setup name: fall back to a placeholder when the file object
+         * carries no name.
+         */
+        if (SharedCacheMap->FileObject != NULL &&
+            SharedCacheMap->FileObject->FileName.Length != 0)
+        {
+            FileName = &SharedCacheMap->FileObject->FileName;
+        }
+        else
+        {
+            FileName = &NoName;
+        }
+
+        /* And print. Valid and Dirty are ULONG: %lu, not %d */
+        KdbpPrint("%p\t%lu\t%lu\t%wZ\n", SharedCacheMap, Valid, Dirty, FileName);
+    }
+
+    return TRUE;
+}
+#endif
+
/* EOF */