* - List for deferred writes
* - Spinlock when dealing with the deferred list
* - List for "clean" shared cache maps
+ * - One second delay for lazy writer
*/
ULONG CcDirtyPageThreshold = 0;
ULONG CcTotalDirtyPages = 0;
LIST_ENTRY CcDeferredWrites;
KSPIN_LOCK CcDeferredWriteSpinLock;
LIST_ENTRY CcCleanSharedCacheMapList;
+LARGE_INTEGER CcIdleDelay = RTL_CONSTANT_LARGE_INTEGER((LONGLONG)-1*1000*1000*10);
/* Internal vars (ROS):
* - Event to notify lazy writer to shutdown
static void CcRosVacbDecRefCount_(PROS_VACB vacb, const char* file, int line)
{
--vacb->ReferenceCount;
+ ASSERT(!(vacb->ReferenceCount == 0 && vacb->Dirty));
if (vacb->SharedCacheMap->Trace)
{
DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
PROS_VACB Vacb)
{
NTSTATUS Status;
- KIRQL oldIrql;
Status = CcWriteVirtualAddress(Vacb);
if (NT_SUCCESS(Status))
{
- KeAcquireGuardedMutex(&ViewLock);
- KeAcquireSpinLock(&Vacb->SharedCacheMap->CacheMapLock, &oldIrql);
-
- Vacb->Dirty = FALSE;
- RemoveEntryList(&Vacb->DirtyVacbListEntry);
- CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
- Vacb->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
- CcRosVacbDecRefCount(Vacb);
-
- KeReleaseSpinLock(&Vacb->SharedCacheMap->CacheMapLock, oldIrql);
- KeReleaseGuardedMutex(&ViewLock);
+ CcRosUnmarkDirtyVacb(Vacb, TRUE);
}
return Status;
NTAPI
CciLazyWriter(PVOID Unused)
{
- LARGE_INTEGER OneSecond;
-
- OneSecond.QuadPart = (LONGLONG)-1*1000*1000*10;
-
while (TRUE)
{
NTSTATUS Status;
Executive,
KernelMode,
FALSE,
- &OneSecond);
+ &CcIdleDelay);
        /* If we succeed, we have to stop running! */
if (Status == STATUS_SUCCESS)
ListEntry = ExInterlockedRemoveHeadList(&CcDeferredWrites, &CcDeferredWriteSpinLock);
if (ListEntry != NULL)
{
- PROS_DEFERRED_WRITE_CONTEXT Context;
+ PDEFERRED_WRITE Context;
/* Extract the context */
- Context = CONTAINING_RECORD(ListEntry, ROS_DEFERRED_WRITE_CONTEXT, CcDeferredWritesEntry);
+ Context = CONTAINING_RECORD(ListEntry, DEFERRED_WRITE, DeferredWriteLinks);
+ ASSERT(Context->NodeTypeCode == NODE_TYPE_DEFERRED_WRITE);
/* Can we write now? */
- if (CcCanIWrite(Context->FileObject, Context->BytesToWrite, FALSE, Context->Retrying))
+ if (CcCanIWrite(Context->FileObject, Context->BytesToWrite, FALSE, TRUE))
{
/* Yes! Do it, and destroy the associated context */
Context->PostRoutine(Context->Context1, Context->Context2);
* It's better than nothing!
*/
ExInterlockedInsertTailList(&CcDeferredWrites,
- &Context->CcDeferredWritesEntry,
+ &Context->DeferredWriteLinks,
&CcDeferredWriteSpinLock);
}
}
BOOLEAN Dirty,
BOOLEAN Mapped)
{
- BOOLEAN WasDirty;
- KIRQL oldIrql;
-
ASSERT(SharedCacheMap);
DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
SharedCacheMap, Vacb, Valid);
- KeAcquireGuardedMutex(&ViewLock);
- KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
-
Vacb->Valid = Valid;
- WasDirty = Vacb->Dirty;
- Vacb->Dirty = Vacb->Dirty || Dirty;
-
- if (!WasDirty && Vacb->Dirty)
+ if (Dirty && !Vacb->Dirty)
{
- InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
- CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
- Vacb->SharedCacheMap->DirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+ CcRosMarkDirtyVacb(Vacb);
}
if (Mapped)
{
CcRosVacbIncRefCount(Vacb);
}
- if (!WasDirty && Vacb->Dirty)
- {
- CcRosVacbIncRefCount(Vacb);
- }
- KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
- KeReleaseGuardedMutex(&ViewLock);
CcRosReleaseVacbLock(Vacb);
return STATUS_SUCCESS;
KeAcquireGuardedMutex(&ViewLock);
KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
- if (!Vacb->Dirty)
- {
- InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
- CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
- Vacb->SharedCacheMap->DirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
- }
- else
- {
- CcRosVacbDecRefCount(Vacb);
- }
+ ASSERT(!Vacb->Dirty);
+
+ InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
+ CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+ Vacb->SharedCacheMap->DirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+ CcRosVacbIncRefCount(Vacb);
/* Move to the tail of the LRU list */
RemoveEntryList(&Vacb->VacbLruListEntry);
KeReleaseGuardedMutex(&ViewLock);
}
+/* Clear the dirty state of a VACB and update dirty-page accounting.
+ *
+ * Vacb      - the VACB to unmark; must currently be marked dirty
+ *             (asserted below).
+ * LockViews - if TRUE, acquire ViewLock and the owning shared cache
+ *             map's CacheMapLock around the update; if FALSE, the
+ *             caller must already hold both locks (e.g. teardown paths
+ *             that take CacheMapLock themselves).
+ */
+VOID
+NTAPI
+CcRosUnmarkDirtyVacb (
+ PROS_VACB Vacb,
+ BOOLEAN LockViews)
+{
+ KIRQL oldIrql;
+ PROS_SHARED_CACHE_MAP SharedCacheMap;
+
+ SharedCacheMap = Vacb->SharedCacheMap;
+
+ if (LockViews)
+ {
+ /* Lock order: guarded mutex first, then the per-map spin lock */
+ KeAcquireGuardedMutex(&ViewLock);
+ KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
+ }
+
+ /* Caller contract: only a dirty VACB may be unmarked */
+ ASSERT(Vacb->Dirty);
+
+ Vacb->Dirty = FALSE;
+
+ /* Remove from the global dirty list and fix up both the global and
+ * per-cache-map dirty page counters */
+ RemoveEntryList(&Vacb->DirtyVacbListEntry);
+ CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+ Vacb->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+ /* Drop the reference taken when the VACB was marked dirty */
+ CcRosVacbDecRefCount(Vacb);
+
+ if (LockViews)
+ {
+ KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
+ KeReleaseGuardedMutex(&ViewLock);
+ }
+}
+
NTSTATUS
NTAPI
CcRosMarkDirtyFile (
KeBugCheck(CACHE_MANAGER);
}
- CcRosMarkDirtyVacb(Vacb);
-
+ if (!Vacb->Dirty)
+ {
+ CcRosMarkDirtyVacb(Vacb);
+ }
CcRosReleaseVacbLock(Vacb);
BOOLEAN NowDirty)
{
PROS_VACB Vacb;
- BOOLEAN WasDirty;
- KIRQL oldIrql;
ASSERT(SharedCacheMap);
return STATUS_UNSUCCESSFUL;
}
- KeAcquireGuardedMutex(&ViewLock);
- KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
-
- WasDirty = Vacb->Dirty;
- Vacb->Dirty = Vacb->Dirty || NowDirty;
-
- Vacb->MappedCount--;
-
- if (!WasDirty && NowDirty)
+ if (NowDirty && !Vacb->Dirty)
{
- InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
- CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
- Vacb->SharedCacheMap->DirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+ CcRosMarkDirtyVacb(Vacb);
}
+ Vacb->MappedCount--;
+
CcRosVacbDecRefCount(Vacb);
- if (!WasDirty && NowDirty)
- {
- CcRosVacbIncRefCount(Vacb);
- }
if (Vacb->MappedCount == 0)
{
CcRosVacbDecRefCount(Vacb);
}
- KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
- KeReleaseGuardedMutex(&ViewLock);
CcRosReleaseVacbLock(Vacb);
return STATUS_SUCCESS;
while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
{
current_entry = RemoveTailList(&SharedCacheMap->CacheMapVacbListHead);
+ KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
+
current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
+ CcRosAcquireVacbLock(current, NULL);
RemoveEntryList(¤t->VacbLruListEntry);
if (current->Dirty)
{
- RemoveEntryList(¤t->DirtyVacbListEntry);
- CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
- current->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+ KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
+ CcRosUnmarkDirtyVacb(current, FALSE);
+ KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
DPRINT1("Freeing dirty VACB\n");
}
InsertHeadList(&FreeList, ¤t->CacheMapVacbListEntry);
+ CcRosReleaseVacbLock(current);
+
+ KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
}
#if DBG
SharedCacheMap->Trace = FALSE;
SharedCacheMap = CONTAINING_RECORD(ListEntry, ROS_SHARED_CACHE_MAP, SharedCacheMapLinks);
+ /* Dirty size */
+ Dirty = (SharedCacheMap->DirtyPages * PAGE_SIZE) / 1024;
+
/* First, count for all the associated VACB */
for (Vacbs = SharedCacheMap->CacheMapVacbListHead.Flink;
Vacbs != &SharedCacheMap->CacheMapVacbListHead;
PROS_VACB Vacb;
Vacb = CONTAINING_RECORD(Vacbs, ROS_VACB, CacheMapVacbListEntry);
- if (Vacb->Dirty)
- {
- Dirty += VACB_MAPPING_GRANULARITY / 1024;
- }
if (Vacb->Valid)
{
Valid += VACB_MAPPING_GRANULARITY / 1024;