BOOLEAN Dirty,
BOOLEAN Mapped)
{
- BOOLEAN WasDirty = CacheSeg->Dirty;
+ BOOLEAN WasDirty;
KIRQL oldIrql;
ASSERT(Bcb);
DPRINT("CcReleaseCacheSegment(Bcb 0x%p, CacheSeg 0x%p, Valid %d)\n",
Bcb, CacheSeg, Valid);
+ KeAcquireGuardedMutex(&ViewLock);
+ KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
+
CacheSeg->Valid = Valid;
+
+ WasDirty = CacheSeg->Dirty;
CacheSeg->Dirty = CacheSeg->Dirty || Dirty;
- KeAcquireGuardedMutex(&ViewLock);
if (!WasDirty && CacheSeg->Dirty)
{
InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
{
CacheSeg->MappedCount++;
}
- KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
CcRosCacheSegmentDecRefCount(CacheSeg);
if (Mapped && CacheSeg->MappedCount == 1)
{
{
CcRosCacheSegmentIncRefCount(CacheSeg);
}
+
KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
KeReleaseGuardedMutex(&ViewLock);
KeReleaseMutex(&CacheSeg->Mutex, 0);
ASSERT(Bcb);
DPRINT("CcRosLookupCacheSegment(Bcb 0x%p, FileOffset %d)\n", Bcb, FileOffset);
-
+
+ KeAcquireGuardedMutex(&ViewLock);
KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
+
current_entry = Bcb->BcbSegmentListHead.Flink;
while (current_entry != &Bcb->BcbSegmentListHead)
{
{
CcRosCacheSegmentIncRefCount(current);
KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
+ KeReleaseGuardedMutex(&ViewLock);
KeWaitForSingleObject(&current->Mutex,
Executive,
KernelMode,
}
current_entry = current_entry->Flink;
}
+
KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
+ KeReleaseGuardedMutex(&ViewLock);
+
return(NULL);
}
{
KeBugCheck(CACHE_MANAGER);
}
+
+ KeAcquireGuardedMutex(&ViewLock);
+ KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
+
if (!CacheSeg->Dirty)
- {
- KeAcquireGuardedMutex(&ViewLock);
+ {
InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
- KeReleaseGuardedMutex(&ViewLock);
- }
+ }
else
{
- KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
- CcRosCacheSegmentDecRefCount(CacheSeg);
- KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
+ CcRosCacheSegmentDecRefCount(CacheSeg);
}
- KeAcquireGuardedMutex(&ViewLock);
-
/* Move to the tail of the LRU list */
RemoveEntryList(&CacheSeg->CacheSegmentLRUListEntry);
InsertTailList(&CacheSegmentLRUListHead, &CacheSeg->CacheSegmentLRUListEntry);
- KeReleaseGuardedMutex(&ViewLock);
-
CacheSeg->Dirty = TRUE;
+
+ KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
+ KeReleaseGuardedMutex(&ViewLock);
KeReleaseMutex(&CacheSeg->Mutex, 0);
return(STATUS_SUCCESS);
return(STATUS_UNSUCCESSFUL);
}
+ KeAcquireGuardedMutex(&ViewLock);
+ KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
+
WasDirty = CacheSeg->Dirty;
CacheSeg->Dirty = CacheSeg->Dirty || NowDirty;
if (!WasDirty && NowDirty)
{
- KeAcquireGuardedMutex(&ViewLock);
InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
- KeReleaseGuardedMutex(&ViewLock);
}
- KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
CcRosCacheSegmentDecRefCount(CacheSeg);
if (!WasDirty && NowDirty)
{
{
CcRosCacheSegmentDecRefCount(CacheSeg);
}
- KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
+ KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
+ KeReleaseGuardedMutex(&ViewLock);
KeReleaseMutex(&CacheSeg->Mutex, 0);
return(STATUS_SUCCESS);
}
}
KeReleaseMutex(&current->Mutex, 0);
+
+ KeAcquireGuardedMutex(&ViewLock);
KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
CcRosCacheSegmentDecRefCount(current);
KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
+ KeReleaseGuardedMutex(&ViewLock);
}
Offset.QuadPart += Bcb->CacheSegmentSize;