if (!Wait)
{
KeAcquireSpinLock(&Bcb->BcbLock, &oldirql);
+ /* FIXME: this loop doesn't take into account areas that don't have
+ * a segment in the list yet */
current_entry = Bcb->BcbSegmentListHead.Flink;
while (current_entry != &Bcb->BcbSegmentListHead)
{
current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
BcbSegmentListEntry);
- if (!current->Valid && current->FileOffset < ReadOffset + Length
- && current->FileOffset + Bcb->CacheSegmentSize > ReadOffset)
+ if (!current->Valid &&
+ DoSegmentsIntersect(current->FileOffset, Bcb->CacheSegmentSize,
+ ReadOffset, Length))
{
KeReleaseSpinLock(&Bcb->BcbLock, oldirql);
IoStatus->Status = STATUS_UNSUCCESSFUL;
{
/* testing, if the requested datas are available */
KeAcquireSpinLock(&Bcb->BcbLock, &oldirql);
+ /* FIXME: this loop doesn't take into account areas that don't have
+ * a segment in the list yet */
current_entry = Bcb->BcbSegmentListHead.Flink;
while (current_entry != &Bcb->BcbSegmentListHead)
{
CacheSeg = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
BcbSegmentListEntry);
- if (!CacheSeg->Valid)
+ if (!CacheSeg->Valid &&
+ DoSegmentsIntersect(CacheSeg->FileOffset, Bcb->CacheSegmentSize,
+ WriteOffset, Length))
{
- if (((WriteOffset >= CacheSeg->FileOffset) &&
- (WriteOffset < CacheSeg->FileOffset + Bcb->CacheSegmentSize))
- || ((WriteOffset + Length > CacheSeg->FileOffset) &&
- (WriteOffset + Length <= CacheSeg->FileOffset +
- Bcb->CacheSegmentSize)))
- {
- KeReleaseSpinLock(&Bcb->BcbLock, oldirql);
- /* datas not available */
- return FALSE;
- }
+ KeReleaseSpinLock(&Bcb->BcbLock, oldirql);
+ /* datas not available */
+ return FALSE;
}
current_entry = current_entry->Flink;
}
{
/* testing, if the requested datas are available */
KeAcquireSpinLock(&Bcb->BcbLock, &oldirql);
+ /* FIXME: this loop doesn't take into account areas that don't have
+ * a segment in the list yet */
current_entry = Bcb->BcbSegmentListHead.Flink;
while (current_entry != &Bcb->BcbSegmentListHead)
{
CacheSeg = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
BcbSegmentListEntry);
- if (!CacheSeg->Valid)
+ if (!CacheSeg->Valid &&
+ DoSegmentsIntersect(CacheSeg->FileOffset, Bcb->CacheSegmentSize,
+ WriteOffset.u.LowPart, Length))
{
- if (((WriteOffset.u.LowPart >= CacheSeg->FileOffset) &&
- (WriteOffset.u.LowPart < CacheSeg->FileOffset + Bcb->CacheSegmentSize))
- || ((WriteOffset.u.LowPart + Length > CacheSeg->FileOffset) &&
- (WriteOffset.u.LowPart + Length <=
- CacheSeg->FileOffset + Bcb->CacheSegmentSize)))
- {
- KeReleaseSpinLock(&Bcb->BcbLock, oldirql);
- /* datas not available */
- return FALSE;
- }
+ KeReleaseSpinLock(&Bcb->BcbLock, oldirql);
+ /* datas not available */
+ return FALSE;
}
current_entry = current_entry->Flink;
}
/* One reference is added above */
if (current->ReferenceCount > 2)
{
- KeReleaseMutex(&current->Mutex, 0);
+ KeReleaseMutex(&current->Mutex, FALSE);
current->Bcb->Callbacks->ReleaseFromLazyWrite(
current->Bcb->LazyWriteContext);
CcRosCacheSegmentDecRefCount(current);
Status = CcRosFlushCacheSegment(current);
- KeReleaseMutex(&current->Mutex, 0);
+ KeReleaseMutex(&current->Mutex, FALSE);
current->Bcb->Callbacks->ReleaseFromLazyWrite(
current->Bcb->LazyWriteContext);
KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
KeReleaseGuardedMutex(&ViewLock);
- KeReleaseMutex(&CacheSeg->Mutex, 0);
+ KeReleaseMutex(&CacheSeg->Mutex, FALSE);
return(STATUS_SUCCESS);
}
{
current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
BcbSegmentListEntry);
- if ((current->FileOffset <= FileOffset) &&
- ((current->FileOffset + Bcb->CacheSegmentSize) > FileOffset))
+ if (IsPointInSegment(current->FileOffset, Bcb->CacheSegmentSize,
+ FileOffset))
{
CcRosCacheSegmentIncRefCount(current);
KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
KernelMode,
FALSE,
NULL);
- return(current);
+ return current;
}
current_entry = current_entry->Flink;
}
KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
KeReleaseGuardedMutex(&ViewLock);
- return(NULL);
+ return NULL;
}
NTSTATUS
KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
KeReleaseGuardedMutex(&ViewLock);
- KeReleaseMutex(&CacheSeg->Mutex, 0);
+ KeReleaseMutex(&CacheSeg->Mutex, FALSE);
return(STATUS_SUCCESS);
}
KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
KeReleaseGuardedMutex(&ViewLock);
- KeReleaseMutex(&CacheSeg->Mutex, 0);
+ KeReleaseMutex(&CacheSeg->Mutex, FALSE);
return(STATUS_SUCCESS);
}
{
current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
BcbSegmentListEntry);
- if (current->FileOffset <= FileOffset &&
- (current->FileOffset + Bcb->CacheSegmentSize) > FileOffset)
+ if (IsPointInSegment(current->FileOffset, Bcb->CacheSegmentSize,
+ FileOffset))
{
CcRosCacheSegmentIncRefCount(current);
KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
current );
}
#endif
- KeReleaseMutex(&(*CacheSeg)->Mutex, 0);
+ KeReleaseMutex(&(*CacheSeg)->Mutex, FALSE);
KeReleaseGuardedMutex(&ViewLock);
ExFreeToNPagedLookasideList(&CacheSegLookasideList, *CacheSeg);
*CacheSeg = current;
return(STATUS_SUCCESS);
}
-NTSTATUS
-NTAPI
-CcRosFreeCacheSegment (
- PBCB Bcb,
- PCACHE_SEGMENT CacheSeg)
-{
- NTSTATUS Status;
- KIRQL oldIrql;
-
- ASSERT(Bcb);
-
- DPRINT("CcRosFreeCacheSegment(Bcb 0x%p, CacheSeg 0x%p)\n",
- Bcb, CacheSeg);
-
- KeAcquireGuardedMutex(&ViewLock);
- KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
- RemoveEntryList(&CacheSeg->BcbSegmentListEntry);
- RemoveEntryList(&CacheSeg->CacheSegmentListEntry);
- RemoveEntryList(&CacheSeg->CacheSegmentLRUListEntry);
- if (CacheSeg->Dirty)
- {
- RemoveEntryList(&CacheSeg->DirtySegmentListEntry);
- DirtyPageCount -= Bcb->CacheSegmentSize / PAGE_SIZE;
-
- }
- KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
- KeReleaseGuardedMutex(&ViewLock);
-
- Status = CcRosInternalFreeCacheSegment(CacheSeg);
- return(Status);
-}
-
/*
* @implemented
*/
IoStatus->Status = Status;
}
}
- KeReleaseMutex(&current->Mutex, 0);
+ KeReleaseMutex(&current->Mutex, FALSE);
KeAcquireGuardedMutex(&ViewLock);
KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
*/
InitializeListHead(&FreeList);
KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
- current_entry = Bcb->BcbSegmentListHead.Flink;
while (!IsListEmpty(&Bcb->BcbSegmentListHead))
{
current_entry = RemoveTailList(&Bcb->BcbSegmentListHead);