KSPIN_LOCK CcDeferredWriteSpinLock;
LIST_ENTRY CcCleanSharedCacheMapList;
-/* Internal vars (ROS):
- * - Lock for the CcCleanSharedCacheMapList list
- */
-KSPIN_LOCK iSharedCacheMapLock;
-
#if DBG
-static void CcRosVacbIncRefCount_(PROS_VACB vacb, const char* file, int line)
+VOID CcRosVacbIncRefCount_(PROS_VACB vacb, PCSTR file, INT line)
{
++vacb->ReferenceCount;
if (vacb->SharedCacheMap->Trace)
    {
        DPRINT1("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
                file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
}
}
-static void CcRosVacbDecRefCount_(PROS_VACB vacb, const char* file, int line)
+VOID CcRosVacbDecRefCount_(PROS_VACB vacb, PCSTR file, INT line)
{
ASSERT(vacb->ReferenceCount != 0);
--vacb->ReferenceCount;
    if (vacb->SharedCacheMap->Trace)
    {
        DPRINT1("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
                file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
}
}
-#define CcRosVacbIncRefCount(vacb) CcRosVacbIncRefCount_(vacb,__FILE__,__LINE__)
-#define CcRosVacbDecRefCount(vacb) CcRosVacbDecRefCount_(vacb,__FILE__,__LINE__)
-#else
-#define CcRosVacbIncRefCount(vacb) (++((vacb)->ReferenceCount))
-#define CcRosVacbDecRefCount(vacb) (--((vacb)->ReferenceCount))
#endif
NTSTATUS
ASSERT(current->Dirty);
/* One reference is added above */
- if ((current->ReferenceCount > 2 && current->PinCount == 0) ||
- (current->ReferenceCount > 3 && current->PinCount > 1))
+ if (current->ReferenceCount > 2)
{
CcRosReleaseVacbLock(current);
current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
CcRosVacbDecRefCount(current);
/* Check if we can free this entry now */
- if (current->ReferenceCount == 0)
+ if (current->ReferenceCount < 2)
{
ASSERT(!current->Dirty);
ASSERT(!current->MappedCount);
+ ASSERT(current->ReferenceCount == 1);
            RemoveEntryList(&current->CacheMapVacbListEntry);
            RemoveEntryList(&current->VacbLruListEntry);
current = CONTAINING_RECORD(current_entry,
ROS_VACB,
CacheMapVacbListEntry);
+ CcRosVacbDecRefCount(current);
CcRosInternalFreeVacb(current);
}
CcRosVacbIncRefCount(Vacb);
}
+ ASSERT(Vacb->ReferenceCount > 0);
+
CcRosReleaseVacbLock(Vacb);
return STATUS_SUCCESS;
KeBugCheck(CACHE_MANAGER);
}
- if (!Vacb->Dirty)
- {
- CcRosMarkDirtyVacb(Vacb);
- }
-
- CcRosReleaseVacbLock(Vacb);
+ CcRosReleaseVacb(SharedCacheMap, Vacb, Vacb->Valid, TRUE, FALSE);
return STATUS_SUCCESS;
}
+/*
+ * Note: this is not the contrary function of
+ * CcRosMapVacbInKernelSpace()
+ */
NTSTATUS
NTAPI
CcRosUnmapVacb (
return STATUS_UNSUCCESSFUL;
}
- if (NowDirty && !Vacb->Dirty)
- {
- CcRosMarkDirtyVacb(Vacb);
- }
-
ASSERT(Vacb->MappedCount != 0);
Vacb->MappedCount--;
- CcRosVacbDecRefCount(Vacb);
if (Vacb->MappedCount == 0)
{
CcRosVacbDecRefCount(Vacb);
}
- CcRosReleaseVacbLock(Vacb);
+ CcRosReleaseVacb(SharedCacheMap, Vacb, Vacb->Valid, NowDirty, FALSE);
return STATUS_SUCCESS;
}
static
NTSTATUS
-CcRosMapVacb(
+CcRosMapVacbInKernelSpace(
PROS_VACB Vacb)
{
ULONG i;
current->MappedCount = 0;
current->DirtyVacbListEntry.Flink = NULL;
current->DirtyVacbListEntry.Blink = NULL;
- current->ReferenceCount = 1;
+ current->ReferenceCount = 0;
current->PinCount = 0;
    KeInitializeMutex(&current->Mutex, 0);
CcRosAcquireVacbLock(current, NULL);
}
KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
+ CcRosVacbIncRefCount(current);
KeReleaseGuardedMutex(&ViewLock);
MI_SET_USAGE(MI_USAGE_CACHE);
}
#endif
- Status = CcRosMapVacb(current);
+ Status = CcRosMapVacbInKernelSpace(current);
if (!NT_SUCCESS(Status))
{
        RemoveEntryList(&current->CacheMapVacbListEntry);
ExFreeToNPagedLookasideList(&VacbLookasideList, current);
}
+ /* Reference it to allow release */
+ CcRosVacbIncRefCount(current);
+
return Status;
}
DPRINT("*BaseAddress %p\n", *BaseAddress);
*Vacb = current;
*BaseOffset = current->FileOffset.QuadPart;
+
+ ASSERT(current->ReferenceCount > 1);
+
return STATUS_SUCCESS;
}
NULL);
MmUnlockAddressSpace(MmGetKernelAddressSpace());
+ if (Vacb->PinCount != 0 || Vacb->ReferenceCount != 0)
+ {
+ DPRINT1("Invalid free: %ld, %ld\n", Vacb->ReferenceCount, Vacb->PinCount);
+ if (Vacb->SharedCacheMap->FileObject && Vacb->SharedCacheMap->FileObject->FileName.Length)
+ {
+ DPRINT1("For file: %wZ\n", &Vacb->SharedCacheMap->FileObject->FileName);
+ }
+ }
+
+ ASSERT(Vacb->PinCount == 0);
+ ASSERT(Vacb->ReferenceCount == 0);
ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
return STATUS_SUCCESS;
}
LONGLONG RemainingLength;
PROS_VACB current;
NTSTATUS Status;
- KIRQL oldIrql;
CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
SectionObjectPointers, FileOffset, Length);
}
}
- CcRosReleaseVacbLock(current);
-
- KeAcquireGuardedMutex(&ViewLock);
- KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
- CcRosVacbDecRefCount(current);
- KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
- KeReleaseGuardedMutex(&ViewLock);
+ CcRosReleaseVacb(SharedCacheMap, current, current->Valid, current->Dirty, FALSE);
}
Offset.QuadPart += VACB_MAPPING_GRANULARITY;
{
current_entry = RemoveTailList(&FreeList);
current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
+ CcRosVacbDecRefCount(current);
CcRosInternalFreeVacb(current);
}
- KeAcquireSpinLock(&iSharedCacheMapLock, &OldIrql);
+ OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
- KeReleaseSpinLock(&iSharedCacheMapLock, OldIrql);
+ KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
KeAcquireGuardedMutex(&ViewLock);
* has been closed.
*/
{
+ KIRQL OldIrql;
+ PPRIVATE_CACHE_MAP PrivateMap;
PROS_SHARED_CACHE_MAP SharedCacheMap;
KeAcquireGuardedMutex(&ViewLock);
if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
{
SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
- if (FileObject->PrivateCacheMap != NULL)
+
+ /* Closing the handle, so kill the private cache map
+ * Before you event try to remove it from FO, always
+ * lock the master lock, to be sure not to race
+ * with a potential read ahead ongoing!
+ */
+ OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
+ PrivateMap = FileObject->PrivateCacheMap;
+ FileObject->PrivateCacheMap = NULL;
+ KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
+
+ if (PrivateMap != NULL)
{
- FileObject->PrivateCacheMap = NULL;
+ /* Remove it from the file */
+ KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
+ RemoveEntryList(&PrivateMap->PrivateLinks);
+ KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);
+
+ /* And free it. */
+ if (PrivateMap != &SharedCacheMap->PrivateCacheMap)
+ {
+ ExFreePoolWithTag(PrivateMap, TAG_PRIVATE_CACHE_MAP);
+ }
+ else
+ {
+ PrivateMap->NodeTypeCode = 0;
+ }
+
if (SharedCacheMap->OpenCount > 0)
{
SharedCacheMap->OpenCount--;
* FUNCTION: Initializes a shared cache map for a file object
*/
{
+ KIRQL OldIrql;
+ BOOLEAN Allocated;
PROS_SHARED_CACHE_MAP SharedCacheMap;
SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
DPRINT("CcRosInitializeFileCache(FileObject 0x%p, SharedCacheMap 0x%p)\n",
FileObject, SharedCacheMap);
+ Allocated = FALSE;
KeAcquireGuardedMutex(&ViewLock);
if (SharedCacheMap == NULL)
{
- KIRQL OldIrql;
-
+ Allocated = TRUE;
SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
if (SharedCacheMap == NULL)
{
FILE_ALL_ACCESS,
NULL,
KernelMode);
+ SharedCacheMap->NodeTypeCode = NODE_TYPE_SHARED_MAP;
+ SharedCacheMap->NodeByteSize = sizeof(*SharedCacheMap);
SharedCacheMap->FileObject = FileObject;
SharedCacheMap->Callbacks = CallBacks;
SharedCacheMap->LazyWriteContext = LazyWriterContext;
SharedCacheMap->PinAccess = PinAccess;
SharedCacheMap->DirtyPageThreshold = 0;
SharedCacheMap->DirtyPages = 0;
+ InitializeListHead(&SharedCacheMap->PrivateList);
KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;
- KeAcquireSpinLock(&iSharedCacheMapLock, &OldIrql);
+ OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
InsertTailList(&CcCleanSharedCacheMapList, &SharedCacheMap->SharedCacheMapLinks);
- KeReleaseSpinLock(&iSharedCacheMapLock, OldIrql);
+ KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
}
if (FileObject->PrivateCacheMap == NULL)
{
- FileObject->PrivateCacheMap = SharedCacheMap;
+ PPRIVATE_CACHE_MAP PrivateMap;
+
+ /* Allocate the private cache map for this handle */
+ if (SharedCacheMap->PrivateCacheMap.NodeTypeCode != 0)
+ {
+ PrivateMap = ExAllocatePoolWithTag(NonPagedPool, sizeof(PRIVATE_CACHE_MAP), TAG_PRIVATE_CACHE_MAP);
+ }
+ else
+ {
+ PrivateMap = &SharedCacheMap->PrivateCacheMap;
+ }
+
+ if (PrivateMap == NULL)
+ {
+ /* If we also allocated the shared cache map for this file, kill it */
+ if (Allocated)
+ {
+ OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
+ RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
+ KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
+
+ FileObject->SectionObjectPointer->SharedCacheMap = NULL;
+ ObDereferenceObject(FileObject);
+ ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
+ }
+
+ KeReleaseGuardedMutex(&ViewLock);
+ return STATUS_INSUFFICIENT_RESOURCES;
+ }
+
+ /* Initialize it */
+ RtlZeroMemory(PrivateMap, sizeof(PRIVATE_CACHE_MAP));
+ PrivateMap->NodeTypeCode = NODE_TYPE_PRIVATE_MAP;
+ PrivateMap->ReadAheadMask = PAGE_SIZE - 1;
+ PrivateMap->FileObject = FileObject;
+ KeInitializeSpinLock(&PrivateMap->ReadAheadSpinLock);
+
+ /* Link it to the file */
+ KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
+ InsertTailList(&SharedCacheMap->PrivateList, &PrivateMap->PrivateLinks);
+ KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);
+
+ FileObject->PrivateCacheMap = PrivateMap;
SharedCacheMap->OpenCount++;
}
KeReleaseGuardedMutex(&ViewLock);
InitializeListHead(&CcDeferredWrites);
InitializeListHead(&CcCleanSharedCacheMapList);
KeInitializeSpinLock(&CcDeferredWriteSpinLock);
- KeInitializeSpinLock(&iSharedCacheMapLock);
KeInitializeGuardedMutex(&ViewLock);
ExInitializeNPagedLookasideList(&iBcbLookasideList,
NULL,
return TRUE;
}
+
+/* KDBG debugger extension handler: dumps the counters involved in
+ * deferred-write throttling (Cc dirty-page totals vs. the threshold,
+ * plus the Mm memory-pressure indicators) and states whether writes
+ * should currently be throttled.
+ * Argc/Argv: debugger command line, accepted for the extension ABI
+ * but not used by this command.
+ * Returns TRUE: the command is always considered fully handled.
+ */
+BOOLEAN
+ExpKdbgExtDefWrites(ULONG Argc, PCHAR Argv[])
+{
+    /* Print each counter both as a page count and in kilobytes */
+    KdbpPrint("CcTotalDirtyPages:\t%lu (%lu Kb)\n", CcTotalDirtyPages,
+              (CcTotalDirtyPages * PAGE_SIZE) / 1024);
+    KdbpPrint("CcDirtyPageThreshold:\t%lu (%lu Kb)\n", CcDirtyPageThreshold,
+              (CcDirtyPageThreshold * PAGE_SIZE) / 1024);
+    KdbpPrint("MmAvailablePages:\t%lu (%lu Kb)\n", MmAvailablePages,
+              (MmAvailablePages * PAGE_SIZE) / 1024);
+    KdbpPrint("MmThrottleTop:\t\t%lu (%lu Kb)\n", MmThrottleTop,
+              (MmThrottleTop * PAGE_SIZE) / 1024);
+    KdbpPrint("MmThrottleBottom:\t%lu (%lu Kb)\n", MmThrottleBottom,
+              (MmThrottleBottom * PAGE_SIZE) / 1024);
+    KdbpPrint("MmModifiedPageListHead.Total:\t%lu (%lu Kb)\n", MmModifiedPageListHead.Total,
+              (MmModifiedPageListHead.Total * PAGE_SIZE) / 1024);
+
+    /* Classify the current throttling state. The 64-page warning band
+     * matches the "max charge" wording in the message -- presumably the
+     * largest single charge a deferred write can make against the
+     * threshold; NOTE(review): confirm against CcCanIWrite. */
+    if (CcTotalDirtyPages >= CcDirtyPageThreshold)
+    {
+        KdbpPrint("CcTotalDirtyPages above the threshold, writes should be throttled\n");
+    }
+    else if (CcTotalDirtyPages + 64 >= CcDirtyPageThreshold)
+    {
+        KdbpPrint("CcTotalDirtyPages within 64 (max charge) pages of the threshold, writes may be throttled\n");
+    }
+    else
+    {
+        KdbpPrint("CcTotalDirtyPages below the threshold, writes should not be throttled\n");
+    }
+
+    return TRUE;
+}
#endif
/* EOF */