/* GLOBALS *****************************************************************/
-extern KGUARDED_MUTEX ViewLock;
-
NTSTATUS CcRosInternalFreeVacb(PROS_VACB Vacb);
/* FUNCTIONS *****************************************************************/
{
PROS_VACB Vacb;
PLIST_ENTRY Entry;
+ KIRQL oldIrql;
/* Assume no dirty data */
BOOLEAN Dirty = FALSE;
CCTRACE(CC_API_DEBUG, "Vpb=%p\n", Vpb);
- KeAcquireGuardedMutex(&ViewLock);
+ oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
/* Browse dirty VACBs */
for (Entry = DirtyVacbListHead.Flink; Entry != &DirtyVacbListHead; Entry = Entry->Flink)
}
}
- KeReleaseGuardedMutex(&ViewLock);
+ KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
return Dirty;
}
/* Assume success */
Success = TRUE;
- KeAcquireGuardedMutex(&ViewLock);
- KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
+ OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
+ KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
ListEntry = SharedCacheMap->CacheMapVacbListHead.Flink;
while (ListEntry != &SharedCacheMap->CacheMapVacbListHead)
{
/* This VACB is in range, so unlink it and mark for free */
ASSERT(Refs == 1 || Vacb->Dirty);
RemoveEntryList(&Vacb->VacbLruListEntry);
+ InitializeListHead(&Vacb->VacbLruListEntry);
if (Vacb->Dirty)
{
CcRosUnmarkDirtyVacb(Vacb, FALSE);
RemoveEntryList(&Vacb->CacheMapVacbListEntry);
InsertHeadList(&FreeList, &Vacb->CacheMapVacbListEntry);
}
- KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);
- KeReleaseGuardedMutex(&ViewLock);
+ KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
+ KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
while (!IsListEmpty(&FreeList))
{
+ ULONG Refs;
+
Vacb = CONTAINING_RECORD(RemoveHeadList(&FreeList),
ROS_VACB,
CacheMapVacbListEntry);
- CcRosVacbDecRefCount(Vacb);
- CcRosInternalFreeVacb(Vacb);
+ InitializeListHead(&Vacb->CacheMapVacbListEntry);
+ Refs = CcRosVacbDecRefCount(Vacb);
+ ASSERT(Refs == 0);
}
return Success;
0,
FALSE);
}
+ else
+ {
+ PROS_VACB LastVacb;
+
+ /*
+ * If the file (allocation) size has increased, check whether the growth
+ * is contained within a single VACB (the last one).
+ * If so, mark that VACB as invalid so that its next use triggers a read
+ * from the FSD, instead of returning stale or garbage data.
+ */
+
+ /* Check for allocation size and the last VACB */
+ if (SharedCacheMap->SectionSize.QuadPart < FileSizes->AllocationSize.QuadPart &&
+ SharedCacheMap->SectionSize.QuadPart % VACB_MAPPING_GRANULARITY)
+ {
+ LastVacb = CcRosLookupVacb(SharedCacheMap,
+ SharedCacheMap->SectionSize.QuadPart);
+ if (LastVacb != NULL)
+ {
+ /* Mark it as invalid */
+ CcRosReleaseVacb(SharedCacheMap, LastVacb, LastVacb->Dirty ? LastVacb->Valid : FALSE, FALSE, FALSE);
+ }
+ }
+
+ /* Check for file size and the last VACB */
+ if (SharedCacheMap->FileSize.QuadPart < FileSizes->FileSize.QuadPart &&
+ SharedCacheMap->FileSize.QuadPart % VACB_MAPPING_GRANULARITY)
+ {
+ LastVacb = CcRosLookupVacb(SharedCacheMap,
+ SharedCacheMap->FileSize.QuadPart);
+ if (LastVacb != NULL)
+ {
+ /* Mark it as invalid */
+ CcRosReleaseVacb(SharedCacheMap, LastVacb, LastVacb->Dirty ? LastVacb->Valid : FALSE, FALSE, FALSE);
+ }
+ }
+ }
KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldirql);
SharedCacheMap->SectionSize = FileSizes->AllocationSize;