* - List for deferred writes
* - Spinlock when dealing with the deferred list
* - List for "clean" shared cache maps
+ * - One second delay for lazy writer
*/
ULONG CcDirtyPageThreshold = 0;
ULONG CcTotalDirtyPages = 0;
LIST_ENTRY CcDeferredWrites;
KSPIN_LOCK CcDeferredWriteSpinLock;
LIST_ENTRY CcCleanSharedCacheMapList;
+LARGE_INTEGER CcIdleDelay = {.QuadPart = (LONGLONG)-1*1000*1000*10};
/* Internal vars (ROS):
* - Event to notify lazy writer to shutdown
Vacb->Dirty = FALSE;
RemoveEntryList(&Vacb->DirtyVacbListEntry);
CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+ Vacb->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
CcRosVacbDecRefCount(Vacb);
KeReleaseSpinLock(&Vacb->SharedCacheMap->CacheMapLock, oldIrql);
ASSERT(current->Dirty);
/* One reference is added above */
- if (current->ReferenceCount > 2)
+ if ((current->ReferenceCount > 2 && current->PinCount == 0) ||
+ (current->ReferenceCount > 3 && current->PinCount > 1))
{
CcRosReleaseVacbLock(current);
current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
NTAPI
CciLazyWriter(PVOID Unused)
{
- LARGE_INTEGER OneSecond;
-
- OneSecond.QuadPart = (LONGLONG)-1*1000*1000*10;
-
while (TRUE)
{
NTSTATUS Status;
Executive,
KernelMode,
FALSE,
- &OneSecond);
+ &CcIdleDelay);
    /* If we succeed, we have to stop running! */
if (Status == STATUS_SUCCESS)
BOOLEAN Mapped)
{
BOOLEAN WasDirty;
- KIRQL oldIrql;
ASSERT(SharedCacheMap);
DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
SharedCacheMap, Vacb, Valid);
- KeAcquireGuardedMutex(&ViewLock);
- KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
-
Vacb->Valid = Valid;
- WasDirty = Vacb->Dirty;
- Vacb->Dirty = Vacb->Dirty || Dirty;
-
- if (!WasDirty && Vacb->Dirty)
+ WasDirty = FALSE;
+ if (Dirty)
{
- InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
- CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+ if (!Vacb->Dirty)
+ {
+ CcRosMarkDirtyVacb(Vacb);
+ }
+ else
+ {
+ WasDirty = TRUE;
+ }
}
if (Mapped)
CcRosVacbIncRefCount(Vacb);
}
- KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
- KeReleaseGuardedMutex(&ViewLock);
CcRosReleaseVacbLock(Vacb);
return STATUS_SUCCESS;
{
InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+ Vacb->SharedCacheMap->DirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
}
else
{
CcRosMarkDirtyVacb(Vacb);
-
CcRosReleaseVacbLock(Vacb);
return STATUS_SUCCESS;
{
PROS_VACB Vacb;
BOOLEAN WasDirty;
- KIRQL oldIrql;
ASSERT(SharedCacheMap);
return STATUS_UNSUCCESSFUL;
}
- KeAcquireGuardedMutex(&ViewLock);
- KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
-
- WasDirty = Vacb->Dirty;
- Vacb->Dirty = Vacb->Dirty || NowDirty;
-
- Vacb->MappedCount--;
-
- if (!WasDirty && NowDirty)
+ WasDirty = FALSE;
+ if (NowDirty)
{
- InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
- CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+ if (!Vacb->Dirty)
+ {
+ CcRosMarkDirtyVacb(Vacb);
+ }
+ else
+ {
+ WasDirty = TRUE;
+ }
}
+ Vacb->MappedCount--;
+
CcRosVacbDecRefCount(Vacb);
if (!WasDirty && NowDirty)
{
CcRosVacbDecRefCount(Vacb);
}
- KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
- KeReleaseGuardedMutex(&ViewLock);
CcRosReleaseVacbLock(Vacb);
return STATUS_SUCCESS;
#if MI_TRACE_PFNS
if ((SharedCacheMap->FileObject) && (SharedCacheMap->FileObject->FileName.Buffer))
{
- PWCHAR pos = NULL;
+ PWCHAR pos;
ULONG len = 0;
pos = wcsrchr(SharedCacheMap->FileObject->FileName.Buffer, '\\');
if (pos)
while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
{
current_entry = RemoveTailList(&SharedCacheMap->CacheMapVacbListHead);
+ KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
+
current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
+ CcRosAcquireVacbLock(current, NULL);
RemoveEntryList(¤t->VacbLruListEntry);
if (current->Dirty)
{
RemoveEntryList(¤t->DirtyVacbListEntry);
CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+ current->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
DPRINT1("Freeing dirty VACB\n");
}
InsertHeadList(&FreeList, ¤t->CacheMapVacbListEntry);
+ CcRosReleaseVacbLock(current);
+
+ KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
}
#if DBG
SharedCacheMap->Trace = FALSE;
SharedCacheMap->FileSize = FileSizes->FileSize;
SharedCacheMap->PinAccess = PinAccess;
SharedCacheMap->DirtyPageThreshold = 0;
+ SharedCacheMap->DirtyPages = 0;
KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;
PLIST_ENTRY ListEntry;
UNICODE_STRING NoName = RTL_CONSTANT_STRING(L"No name for File");
- KdbpPrint("Control\t\tValid\tDirty\tName\n");
+ KdbpPrint(" Usage Summary (in kb)\n");
+ KdbpPrint("Shared\t\tValid\tDirty\tName\n");
/* No need to lock the spin lock here, we're in DBG */
for (ListEntry = CcCleanSharedCacheMapList.Flink;
ListEntry != &CcCleanSharedCacheMapList;
SharedCacheMap = CONTAINING_RECORD(ListEntry, ROS_SHARED_CACHE_MAP, SharedCacheMapLinks);
+ /* Dirty size */
+ Dirty = (SharedCacheMap->DirtyPages * PAGE_SIZE) / 1024;
+
/* First, count for all the associated VACB */
for (Vacbs = SharedCacheMap->CacheMapVacbListHead.Flink;
Vacbs != &SharedCacheMap->CacheMapVacbListHead;
PROS_VACB Vacb;
Vacb = CONTAINING_RECORD(Vacbs, ROS_VACB, CacheMapVacbListEntry);
- if (Vacb->Dirty)
- {
- Dirty += VACB_MAPPING_GRANULARITY / 1024;
- }
if (Vacb->Valid)
{
Valid += VACB_MAPPING_GRANULARITY / 1024;