PROS_VACB current;
BOOLEAN Locked;
NTSTATUS Status;
- LARGE_INTEGER ZeroTimeout;
DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target);
(*Count) = 0;
- ZeroTimeout.QuadPart = 0;
KeEnterCriticalRegion();
KeAcquireGuardedMutex(&ViewLock);
continue;
}
- Status = KeWaitForSingleObject(&current->Mutex,
- Executive,
- KernelMode,
- FALSE,
- Wait ? NULL : &ZeroTimeout);
- if (Status != STATUS_SUCCESS)
+ Locked = ExAcquireResourceExclusiveLite(&current->Lock, Wait);
+ if (!Locked)
{
current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
current->SharedCacheMap->LazyWriteContext);
/* One reference is added above */
if (current->ReferenceCount > 2)
{
- KeReleaseMutex(&current->Mutex, FALSE);
+ ExReleaseResourceLite(&current->Lock);
current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
current->SharedCacheMap->LazyWriteContext);
CcRosVacbDecRefCount(current);
Status = CcRosFlushVacb(current);
- KeReleaseMutex(&current->Mutex, FALSE);
+ ExReleaseResourceLite(&current->Lock);
current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
current->SharedCacheMap->LazyWriteContext);
KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
KeReleaseGuardedMutex(&ViewLock);
- KeReleaseMutex(&Vacb->Mutex, FALSE);
+ ExReleaseResourceLite(&Vacb->Lock);
return STATUS_SUCCESS;
}
CcRosVacbIncRefCount(current);
KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
KeReleaseGuardedMutex(&ViewLock);
- KeWaitForSingleObject(&current->Mutex,
- Executive,
- KernelMode,
- FALSE,
- NULL);
+ ExAcquireResourceExclusiveLite(&current->Lock, TRUE);
return current;
}
if (current->FileOffset.QuadPart > FileOffset)
KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
KeReleaseGuardedMutex(&ViewLock);
- KeReleaseMutex(&Vacb->Mutex, FALSE);
+ ExReleaseResourceLite(&Vacb->Lock);
return STATUS_SUCCESS;
}
KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
KeReleaseGuardedMutex(&ViewLock);
- KeReleaseMutex(&Vacb->Mutex, FALSE);
+ ExReleaseResourceLite(&Vacb->Lock);
return STATUS_SUCCESS;
}
current->DirtyVacbListEntry.Flink = NULL;
current->DirtyVacbListEntry.Blink = NULL;
current->ReferenceCount = 1;
- KeInitializeMutex(&current->Mutex, 0);
- KeWaitForSingleObject(&current->Mutex,
- Executive,
- KernelMode,
- FALSE,
- NULL);
+ ExInitializeResourceLite(&current->Lock);
+ ExAcquireResourceExclusiveLite(&current->Lock, TRUE);
KeAcquireGuardedMutex(&ViewLock);
*Vacb = current;
current);
}
#endif
- KeReleaseMutex(&(*Vacb)->Mutex, FALSE);
+ ExReleaseResourceLite(&(*Vacb)->Lock);
KeReleaseGuardedMutex(&ViewLock);
ExFreeToNPagedLookasideList(&VacbLookasideList, *Vacb);
*Vacb = current;
- KeWaitForSingleObject(&current->Mutex,
- Executive,
- KernelMode,
- FALSE,
- NULL);
+ ExAcquireResourceExclusiveLite(&current->Lock, TRUE);
return STATUS_SUCCESS;
}
if (current->FileOffset.QuadPart < FileOffset)
CcFreeCachePage,
NULL);
MmUnlockAddressSpace(MmGetKernelAddressSpace());
+ ExDeleteResourceLite(&Vacb->Lock);
ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
return STATUS_SUCCESS;
IoStatus->Status = Status;
}
}
- KeReleaseMutex(&current->Mutex, FALSE);
+ ExReleaseResourceLite(&current->Lock);
KeAcquireGuardedMutex(&ViewLock);
KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
* filesystems do because it is safe for us to use an offset with an
* alignment less than the file system block size.
*/
+ KeEnterCriticalRegion();
Status = CcRosGetVacb(SharedCacheMap,
FileOffset,
&BaseOffset,
&Vacb);
if (!NT_SUCCESS(Status))
{
+ KeLeaveCriticalRegion();
return(Status);
}
if (!UptoDate)
if (!NT_SUCCESS(Status))
{
CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
+ KeLeaveCriticalRegion();
return Status;
}
}
FileOffset - BaseOffset).LowPart >> PAGE_SHIFT;
CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, FALSE, TRUE);
+ KeLeaveCriticalRegion();
}
else
{
{
return(Status);
}
+ KeEnterCriticalRegion();
Status = CcRosGetVacb(SharedCacheMap,
FileOffset,
&BaseOffset,
&Vacb);
if (!NT_SUCCESS(Status))
{
+ KeLeaveCriticalRegion();
return(Status);
}
if (!UptoDate)
if (!NT_SUCCESS(Status))
{
CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
+ KeLeaveCriticalRegion();
return Status;
}
}
&Vacb);
if (!NT_SUCCESS(Status))
{
+ KeLeaveCriticalRegion();
return(Status);
}
if (!UptoDate)
if (!NT_SUCCESS(Status))
{
CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
+ KeLeaveCriticalRegion();
return Status;
}
}
}
MiUnmapPageInHyperSpace(Process, PageAddr, Irql);
CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, FALSE, FALSE);
+ KeLeaveCriticalRegion();
}
return(STATUS_SUCCESS);
}
BufferSize = PAGE_ROUND_UP(BufferSize);
/* Flush data since we're about to perform a non-cached read */
+ KeEnterCriticalRegion();
CcFlushCache(FileObject->SectionObjectPointer,
&FileOffset,
BufferSize,
&Iosb);
+ KeLeaveCriticalRegion();
/*
* It's ok to use paged pool, because this is a temporary buffer only used in