* PURPOSE: Cache manager
*
* PROGRAMMERS: David Welch (welch@mcmail.com)
+ * Pierre Schweitzer (pierre@reactos.org)
*/
/* NOTES **********************************************************************
/* GLOBALS *******************************************************************/
-static LIST_ENTRY DirtyVacbListHead;
+LIST_ENTRY DirtyVacbListHead;
static LIST_ENTRY VacbLruListHead;
-ULONG DirtyPageCount = 0;
KGUARDED_MUTEX ViewLock;
static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
static NPAGED_LOOKASIDE_LIST VacbLookasideList;
+/* Counters:
+ * - Amount of pages flushed by lazy writer
+ * - Number of times lazy writer ran
+ */
+ULONG CcLazyWritePages = 0;
+ULONG CcLazyWriteIos = 0;
+
+/* Internal vars (MS):
+ * - Threshold above which lazy writer will start action
+ * - Amount of dirty pages
+ * - List for deferred writes
+ * - Spinlock when dealing with the deferred list
+ * - List for "clean" shared cache maps
+ * - One second delay for lazy writer
+ */
+ULONG CcDirtyPageThreshold = 0;
+ULONG CcTotalDirtyPages = 0;
+LIST_ENTRY CcDeferredWrites;
+KSPIN_LOCK CcDeferredWriteSpinLock;
+LIST_ENTRY CcCleanSharedCacheMapList;
+/* Negative value = relative timeout, in 100ns units: -10,000,000 -> one second */
+LARGE_INTEGER CcIdleDelay = RTL_CONSTANT_LARGE_INTEGER((LONGLONG)-1*1000*1000*10);
+
+/* Internal vars (ROS):
+ * - Event to notify lazy writer to shutdown
+ * - Event to inform watchers lazy writer is done for this loop
+ * - Lock for the CcCleanSharedCacheMapList list
+ */
+KEVENT iLazyWriterShutdown;
+KEVENT iLazyWriterNotify;
+KSPIN_LOCK iSharedCacheMapLock;
+
#if DBG
static void CcRosVacbIncRefCount_(PROS_VACB vacb, const char* file, int line)
{
}
static void CcRosVacbDecRefCount_(PROS_VACB vacb, const char* file, int line)
{
+ ASSERT(vacb->ReferenceCount != 0);
--vacb->ReferenceCount;
+ ASSERT(!(vacb->ReferenceCount == 0 && vacb->Dirty));
if (vacb->SharedCacheMap->Trace)
{
DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
PROS_VACB Vacb)
{
NTSTATUS Status;
- KIRQL oldIrql;
Status = CcWriteVirtualAddress(Vacb);
if (NT_SUCCESS(Status))
{
- KeAcquireGuardedMutex(&ViewLock);
- KeAcquireSpinLock(&Vacb->SharedCacheMap->CacheMapLock, &oldIrql);
-
- Vacb->Dirty = FALSE;
- RemoveEntryList(&Vacb->DirtyVacbListEntry);
- DirtyPageCount -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
- CcRosVacbDecRefCount(Vacb);
-
- KeReleaseSpinLock(&Vacb->SharedCacheMap->CacheMapLock, oldIrql);
- KeReleaseGuardedMutex(&ViewLock);
+ CcRosUnmarkDirtyVacb(Vacb, TRUE);
}
return Status;
CcRosFlushDirtyPages (
ULONG Target,
PULONG Count,
- BOOLEAN Wait)
+ BOOLEAN Wait,
+ BOOLEAN CalledFromLazy)
{
PLIST_ENTRY current_entry;
PROS_VACB current;
CcRosVacbIncRefCount(current);
+ /* When performing lazy write, don't handle temporary files */
+ if (CalledFromLazy &&
+ BooleanFlagOn(current->SharedCacheMap->FileObject->Flags, FO_TEMPORARY_FILE))
+ {
+ CcRosVacbDecRefCount(current);
+ continue;
+ }
+
Locked = current->SharedCacheMap->Callbacks->AcquireForLazyWrite(
current->SharedCacheMap->LazyWriteContext, Wait);
if (!Locked)
continue;
}
- Status = KeWaitForSingleObject(¤t->Mutex,
- Executive,
- KernelMode,
- FALSE,
- Wait ? NULL : &ZeroTimeout);
+ Status = CcRosAcquireVacbLock(current,
+ Wait ? NULL : &ZeroTimeout);
if (Status != STATUS_SUCCESS)
{
current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
ASSERT(current->Dirty);
/* One reference is added above */
- if (current->ReferenceCount > 2)
+ if ((current->ReferenceCount > 2 && current->PinCount == 0) ||
+ (current->ReferenceCount > 3 && current->PinCount > 1))
{
- KeReleaseMutex(¤t->Mutex, FALSE);
+ CcRosReleaseVacbLock(current);
current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
current->SharedCacheMap->LazyWriteContext);
CcRosVacbDecRefCount(current);
Status = CcRosFlushVacb(current);
- KeReleaseMutex(¤t->Mutex, FALSE);
+ CcRosReleaseVacbLock(current);
current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
current->SharedCacheMap->LazyWriteContext);
KeAcquireGuardedMutex(&ViewLock);
CcRosVacbDecRefCount(current);
- if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
+ if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE) &&
+ (Status != STATUS_MEDIA_WRITE_PROTECTED))
{
DPRINT1("CC: Failed to flush VACB.\n");
}
else
{
- (*Count) += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
- Target -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+ ULONG PagesFreed;
+
+ /* How many pages did we free? */
+ PagesFreed = VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+ (*Count) += PagesFreed;
+
+ /* Make sure we don't overflow target! */
+ if (Target < PagesFreed)
+ {
+ /* If we would have, jump to zero directly */
+ Target = 0;
+ }
+ else
+ {
+ Target -= PagesFreed;
+ }
}
current_entry = DirtyVacbListHead.Flink;
return STATUS_SUCCESS;
}
+/* FIXME: Someday this could somewhat implement write-behind/read-ahead */
+/* Lazy writer worker thread.
+ * Loops until iLazyWriterShutdown is signaled: roughly once per second
+ * (CcIdleDelay) it flushes up to one-eighth of the dirty pages, updates the
+ * CcLazyWritePages/CcLazyWriteIos counters, signals iLazyWriterNotify for
+ * any waiters, then retries at most one queued deferred write.
+ */
+VOID
+NTAPI
+CciLazyWriter(PVOID Unused)
+{
+ while (TRUE)
+ {
+ NTSTATUS Status;
+ PLIST_ENTRY ListEntry;
+ ULONG Target, Count = 0;
+
+ /* Sleep for CcIdleDelay (one second), or wake early if we have to stop */
+ Status = KeWaitForSingleObject(&iLazyWriterShutdown,
+ Executive,
+ KernelMode,
+ FALSE,
+ &CcIdleDelay);
+
+ /* If the wait succeeded (i.e. not a timeout), we have to stop running! */
+ if (Status == STATUS_SUCCESS)
+ {
+ break;
+ }
+
+ /* We're not sleeping anymore */
+ KeClearEvent(&iLazyWriterNotify);
+
+ /* Our target is one-eighth of the dirty pages */
+ Target = CcTotalDirtyPages / 8;
+ if (Target != 0)
+ {
+ /* Flush! (no wait, and mark the call as coming from the lazy writer) */
+ DPRINT("Lazy writer starting (%d)\n", Target);
+ CcRosFlushDirtyPages(Target, &Count, FALSE, TRUE);
+
+ /* And update stats */
+ CcLazyWritePages += Count;
+ ++CcLazyWriteIos;
+ DPRINT("Lazy writer done (%d)\n", Count);
+ }
+
+ /* Inform people waiting on us that we're done */
+ KeSetEvent(&iLazyWriterNotify, IO_DISK_INCREMENT, FALSE);
+
+ /* Likely not optimal, but let's handle one deferred write now! */
+ ListEntry = ExInterlockedRemoveHeadList(&CcDeferredWrites, &CcDeferredWriteSpinLock);
+ if (ListEntry != NULL)
+ {
+ PDEFERRED_WRITE Context;
+
+ /* Extract the context */
+ Context = CONTAINING_RECORD(ListEntry, DEFERRED_WRITE, DeferredWriteLinks);
+ ASSERT(Context->NodeTypeCode == NODE_TYPE_DEFERRED_WRITE);
+
+ /* Can we write now? */
+ if (CcCanIWrite(Context->FileObject, Context->BytesToWrite, FALSE, TRUE))
+ {
+ /* Yes! Do it, and destroy the associated context */
+ Context->PostRoutine(Context->Context1, Context->Context2);
+ ExFreePoolWithTag(Context, 'CcDw');
+ }
+ else
+ {
+ /* Otherwise, requeue it at the tail so that it doesn't block others.
+ * This clearly could be improved, but given the poor algorithm used now,
+ * it's better than nothing!
+ */
+ ExInterlockedInsertTailList(&CcDeferredWrites,
+ &Context->DeferredWriteLinks,
+ &CcDeferredWriteSpinLock);
+ }
+ }
+ }
+}
+
NTSTATUS
CcRosTrimCache (
ULONG Target,
if ((Target > 0) && !FlushedPages)
{
/* Flush dirty pages to disk */
- CcRosFlushDirtyPages(Target, &PagesFreed, FALSE);
+ CcRosFlushDirtyPages(Target, &PagesFreed, FALSE, FALSE);
FlushedPages = TRUE;
/* We can only swap as many pages as we flushed */
BOOLEAN Dirty,
BOOLEAN Mapped)
{
- BOOLEAN WasDirty;
- KIRQL oldIrql;
-
ASSERT(SharedCacheMap);
DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
SharedCacheMap, Vacb, Valid);
- KeAcquireGuardedMutex(&ViewLock);
- KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
-
Vacb->Valid = Valid;
- WasDirty = Vacb->Dirty;
- Vacb->Dirty = Vacb->Dirty || Dirty;
-
- if (!WasDirty && Vacb->Dirty)
+ if (Dirty && !Vacb->Dirty)
{
- InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
- DirtyPageCount += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+ CcRosMarkDirtyVacb(Vacb);
}
if (Mapped)
{
CcRosVacbIncRefCount(Vacb);
}
- if (!WasDirty && Vacb->Dirty)
- {
- CcRosVacbIncRefCount(Vacb);
- }
- KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
- KeReleaseGuardedMutex(&ViewLock);
- KeReleaseMutex(&Vacb->Mutex, FALSE);
+ CcRosReleaseVacbLock(Vacb);
return STATUS_SUCCESS;
}
CcRosVacbIncRefCount(current);
KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
KeReleaseGuardedMutex(&ViewLock);
- KeWaitForSingleObject(¤t->Mutex,
- Executive,
- KernelMode,
- FALSE,
- NULL);
+ CcRosAcquireVacbLock(current, NULL);
return current;
}
if (current->FileOffset.QuadPart > FileOffset)
return NULL;
}
-NTSTATUS
+/* Mark a VACB dirty: queue it on the global dirty list, account its pages in
+ * both the global (CcTotalDirtyPages) and per-map (DirtyPages) counters, and
+ * take an extra reference that CcRosUnmarkDirtyVacb() later releases.
+ * Acquires ViewLock and the map's CacheMapLock itself; the VACB must not
+ * already be dirty (asserted).
+ */
+VOID
NTAPI
CcRosMarkDirtyVacb (
+ PROS_VACB Vacb)
+{
+ KIRQL oldIrql;
+ PROS_SHARED_CACHE_MAP SharedCacheMap;
+
+ SharedCacheMap = Vacb->SharedCacheMap;
+
+ KeAcquireGuardedMutex(&ViewLock);
+ KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
+
+ ASSERT(!Vacb->Dirty);
+
+ InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
+ CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+ Vacb->SharedCacheMap->DirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+ /* Keep the VACB referenced while it sits on the dirty list */
+ CcRosVacbIncRefCount(Vacb);
+
+ /* Move to the tail of the LRU list */
+ RemoveEntryList(&Vacb->VacbLruListEntry);
+ InsertTailList(&VacbLruListHead, &Vacb->VacbLruListEntry);
+
+ Vacb->Dirty = TRUE;
+
+ KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
+ KeReleaseGuardedMutex(&ViewLock);
+}
+
+/* Undo CcRosMarkDirtyVacb(): remove the VACB from the dirty list, subtract
+ * its pages from the global and per-map dirty counters, and drop the
+ * dirty-list reference. The VACB must be dirty (asserted).
+ * When LockViews is FALSE, the caller must already hold ViewLock and the
+ * map's CacheMapLock.
+ */
+VOID
+NTAPI
+CcRosUnmarkDirtyVacb (
+ PROS_VACB Vacb,
+ BOOLEAN LockViews)
+{
+ KIRQL oldIrql;
+ PROS_SHARED_CACHE_MAP SharedCacheMap;
+
+ SharedCacheMap = Vacb->SharedCacheMap;
+
+ if (LockViews)
+ {
+ KeAcquireGuardedMutex(&ViewLock);
+ KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
+ }
+
+ ASSERT(Vacb->Dirty);
+
+ Vacb->Dirty = FALSE;
+
+ RemoveEntryList(&Vacb->DirtyVacbListEntry);
+ CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+ Vacb->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+ /* Release the reference taken when the VACB was marked dirty */
+ CcRosVacbDecRefCount(Vacb);
+
+ if (LockViews)
+ {
+ KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
+ KeReleaseGuardedMutex(&ViewLock);
+ }
+}
+
+NTSTATUS
+NTAPI
+CcRosMarkDirtyFile (
PROS_SHARED_CACHE_MAP SharedCacheMap,
LONGLONG FileOffset)
{
PROS_VACB Vacb;
- KIRQL oldIrql;
ASSERT(SharedCacheMap);
KeBugCheck(CACHE_MANAGER);
}
- KeAcquireGuardedMutex(&ViewLock);
- KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
-
if (!Vacb->Dirty)
{
- InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
- DirtyPageCount += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
- }
- else
- {
- CcRosVacbDecRefCount(Vacb);
+ CcRosMarkDirtyVacb(Vacb);
}
- /* Move to the tail of the LRU list */
- RemoveEntryList(&Vacb->VacbLruListEntry);
- InsertTailList(&VacbLruListHead, &Vacb->VacbLruListEntry);
-
- Vacb->Dirty = TRUE;
-
- KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
- KeReleaseGuardedMutex(&ViewLock);
- KeReleaseMutex(&Vacb->Mutex, FALSE);
+ CcRosReleaseVacbLock(Vacb);
return STATUS_SUCCESS;
}
BOOLEAN NowDirty)
{
PROS_VACB Vacb;
- BOOLEAN WasDirty;
- KIRQL oldIrql;
ASSERT(SharedCacheMap);
return STATUS_UNSUCCESSFUL;
}
- KeAcquireGuardedMutex(&ViewLock);
- KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
-
- WasDirty = Vacb->Dirty;
- Vacb->Dirty = Vacb->Dirty || NowDirty;
-
- Vacb->MappedCount--;
-
- if (!WasDirty && NowDirty)
+ if (NowDirty && !Vacb->Dirty)
{
- InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
- DirtyPageCount += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+ CcRosMarkDirtyVacb(Vacb);
}
+ ASSERT(Vacb->MappedCount != 0);
+ Vacb->MappedCount--;
+
CcRosVacbDecRefCount(Vacb);
- if (!WasDirty && NowDirty)
- {
- CcRosVacbIncRefCount(Vacb);
- }
if (Vacb->MappedCount == 0)
{
CcRosVacbDecRefCount(Vacb);
}
- KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
- KeReleaseGuardedMutex(&ViewLock);
- KeReleaseMutex(&Vacb->Mutex, FALSE);
+ CcRosReleaseVacbLock(Vacb);
return STATUS_SUCCESS;
}
VACB_MAPPING_GRANULARITY,
PAGE_READWRITE,
(PMEMORY_AREA*)&Vacb->MemoryArea,
- FALSE,
0,
PAGE_SIZE);
MmUnlockAddressSpace(MmGetKernelAddressSpace());
if (!NT_SUCCESS(Status))
{
- KeBugCheck(CACHE_MANAGER);
+ DPRINT1("MmCreateMemoryArea failed with %lx for VACB %p\n", Status, Vacb);
+ return Status;
}
ASSERT(((ULONG_PTR)Vacb->BaseAddress % PAGE_SIZE) == 0);
{
PFN_NUMBER PageFrameNumber;
+ MI_SET_USAGE(MI_USAGE_CACHE);
Status = MmRequestPageMemoryConsumer(MC_CACHE, TRUE, &PageFrameNumber);
if (PageFrameNumber == 0)
{
DPRINT("CcRosCreateVacb()\n");
- if (FileOffset >= SharedCacheMap->FileSize.QuadPart)
+ if (FileOffset >= SharedCacheMap->SectionSize.QuadPart)
{
*Vacb = NULL;
return STATUS_INVALID_PARAMETER;
current->DirtyVacbListEntry.Flink = NULL;
current->DirtyVacbListEntry.Blink = NULL;
current->ReferenceCount = 1;
+ current->PinCount = 0;
KeInitializeMutex(¤t->Mutex, 0);
- KeWaitForSingleObject(¤t->Mutex,
- Executive,
- KernelMode,
- FALSE,
- NULL);
+ CcRosAcquireVacbLock(current, NULL);
KeAcquireGuardedMutex(&ViewLock);
*Vacb = current;
current);
}
#endif
- KeReleaseMutex(&(*Vacb)->Mutex, FALSE);
+ CcRosReleaseVacbLock(*Vacb);
KeReleaseGuardedMutex(&ViewLock);
ExFreeToNPagedLookasideList(&VacbLookasideList, *Vacb);
*Vacb = current;
- KeWaitForSingleObject(¤t->Mutex,
- Executive,
- KernelMode,
- FALSE,
- NULL);
+ CcRosAcquireVacbLock(current, NULL);
return STATUS_SUCCESS;
}
if (current->FileOffset.QuadPart < FileOffset)
#if MI_TRACE_PFNS
if ((SharedCacheMap->FileObject) && (SharedCacheMap->FileObject->FileName.Buffer))
{
- PWCHAR pos = NULL;
+ PWCHAR pos;
ULONG len = 0;
pos = wcsrchr(SharedCacheMap->FileObject->FileName.Buffer, '\\');
- len = wcslen(pos) * sizeof(WCHAR);
- if (pos) snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
+ if (pos)
+ {
+ len = wcslen(pos) * sizeof(WCHAR);
+ snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
+ }
+ else
+ {
+ snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%wZ", &SharedCacheMap->FileObject->FileName);
+ }
}
#endif
Status = CcRosMapVacb(current);
+ if (!NT_SUCCESS(Status))
+ {
+ RemoveEntryList(¤t->CacheMapVacbListEntry);
+ RemoveEntryList(¤t->VacbLruListEntry);
+ CcRosReleaseVacbLock(current);
+ ExFreeToNPagedLookasideList(&VacbLookasideList, current);
+ }
return Status;
}
NTSTATUS Status;
KIRQL oldIrql;
+ CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
+ SectionObjectPointers, FileOffset, Length);
+
DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
SectionObjectPointers, FileOffset, Length, IoStatus);
IoStatus->Status = Status;
}
}
- KeReleaseMutex(¤t->Mutex, FALSE);
+
+ CcRosReleaseVacbLock(current);
KeAcquireGuardedMutex(&ViewLock);
KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
ASSERT(SharedCacheMap);
- SharedCacheMap->RefCount++;
+ SharedCacheMap->OpenCount++;
KeReleaseGuardedMutex(&ViewLock);
CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);
KeAcquireGuardedMutex(&ViewLock);
- SharedCacheMap->RefCount--;
- if (SharedCacheMap->RefCount == 0)
+ SharedCacheMap->OpenCount--;
+ if (SharedCacheMap->OpenCount == 0)
{
+ KIRQL OldIrql;
+
FileObject->SectionObjectPointer->SharedCacheMap = NULL;
/*
while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
{
current_entry = RemoveTailList(&SharedCacheMap->CacheMapVacbListHead);
+ KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
+
current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
+ CcRosAcquireVacbLock(current, NULL);
RemoveEntryList(¤t->VacbLruListEntry);
if (current->Dirty)
{
- RemoveEntryList(¤t->DirtyVacbListEntry);
- DirtyPageCount -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+ KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
+ CcRosUnmarkDirtyVacb(current, FALSE);
+ KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
DPRINT1("Freeing dirty VACB\n");
}
InsertHeadList(&FreeList, ¤t->CacheMapVacbListEntry);
+ CcRosReleaseVacbLock(current);
+
+ KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
}
#if DBG
SharedCacheMap->Trace = FALSE;
current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
CcRosInternalFreeVacb(current);
}
+
+ KeAcquireSpinLock(&iSharedCacheMapLock, &OldIrql);
+ RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
+ KeReleaseSpinLock(&iSharedCacheMapLock, OldIrql);
+
ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
KeAcquireGuardedMutex(&ViewLock);
}
KeAcquireGuardedMutex(&ViewLock);
SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
ASSERT(SharedCacheMap);
- ASSERT(SharedCacheMap->RefCount != 0);
- SharedCacheMap->RefCount++;
+ ASSERT(SharedCacheMap->OpenCount != 0);
+ SharedCacheMap->OpenCount++;
KeReleaseGuardedMutex(&ViewLock);
}
DPRINT("CcRosRemoveIfClosed()\n");
KeAcquireGuardedMutex(&ViewLock);
SharedCacheMap = SectionObjectPointer->SharedCacheMap;
- if (SharedCacheMap && SharedCacheMap->RefCount == 0)
+ if (SharedCacheMap && SharedCacheMap->OpenCount == 0)
{
CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap);
}
KeAcquireGuardedMutex(&ViewLock);
SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
ASSERT(SharedCacheMap);
- if (SharedCacheMap->RefCount > 0)
+ if (SharedCacheMap->OpenCount > 0)
{
- SharedCacheMap->RefCount--;
- if (SharedCacheMap->RefCount == 0)
+ SharedCacheMap->OpenCount--;
+ if (SharedCacheMap->OpenCount == 0)
{
MmFreeSectionSegments(SharedCacheMap->FileObject);
CcRosDeleteFileCache(FileObject, SharedCacheMap);
if (FileObject->PrivateCacheMap != NULL)
{
FileObject->PrivateCacheMap = NULL;
- if (SharedCacheMap->RefCount > 0)
+ if (SharedCacheMap->OpenCount > 0)
{
- SharedCacheMap->RefCount--;
- if (SharedCacheMap->RefCount == 0)
+ SharedCacheMap->OpenCount--;
+ if (SharedCacheMap->OpenCount == 0)
{
MmFreeSectionSegments(SharedCacheMap->FileObject);
CcRosDeleteFileCache(FileObject, SharedCacheMap);
if (FileObject->PrivateCacheMap == NULL)
{
FileObject->PrivateCacheMap = SharedCacheMap;
- SharedCacheMap->RefCount++;
+ SharedCacheMap->OpenCount++;
}
Status = STATUS_SUCCESS;
}
CcRosInitializeFileCache (
PFILE_OBJECT FileObject,
PCC_FILE_SIZES FileSizes,
+ BOOLEAN PinAccess,
PCACHE_MANAGER_CALLBACKS CallBacks,
PVOID LazyWriterContext)
/*
KeAcquireGuardedMutex(&ViewLock);
if (SharedCacheMap == NULL)
{
+ KIRQL OldIrql;
+
SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
if (SharedCacheMap == NULL)
{
KeReleaseGuardedMutex(&ViewLock);
- return STATUS_UNSUCCESSFUL;
+ return STATUS_INSUFFICIENT_RESOURCES;
}
RtlZeroMemory(SharedCacheMap, sizeof(*SharedCacheMap));
ObReferenceObjectByPointer(FileObject,
SharedCacheMap->LazyWriteContext = LazyWriterContext;
SharedCacheMap->SectionSize = FileSizes->AllocationSize;
SharedCacheMap->FileSize = FileSizes->FileSize;
+ SharedCacheMap->PinAccess = PinAccess;
+ SharedCacheMap->DirtyPageThreshold = 0;
+ SharedCacheMap->DirtyPages = 0;
KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;
+
+ KeAcquireSpinLock(&iSharedCacheMapLock, &OldIrql);
+ InsertTailList(&CcCleanSharedCacheMapList, &SharedCacheMap->SharedCacheMapLinks);
+ KeReleaseSpinLock(&iSharedCacheMapLock, OldIrql);
}
if (FileObject->PrivateCacheMap == NULL)
{
FileObject->PrivateCacheMap = SharedCacheMap;
- SharedCacheMap->RefCount++;
+ SharedCacheMap->OpenCount++;
}
KeReleaseGuardedMutex(&ViewLock);
IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
{
PROS_SHARED_CACHE_MAP SharedCacheMap;
+
+ CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p\n", SectionObjectPointers);
+
if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
{
SharedCacheMap = SectionObjectPointers->SharedCacheMap;
}
VOID
+NTAPI
+CcShutdownLazyWriter (
+ VOID)
+{
+ /* Simply set the event, lazy writer will stop when it's done.
+ * NOTE(review): this is asynchronous — it does not wait for the lazy
+ * writer thread to actually exit; confirm callers don't rely on a
+ * synchronous teardown.
+ */
+ KeSetEvent(&iLazyWriterShutdown, IO_DISK_INCREMENT, FALSE);
+}
+
+/* Cache manager view initialization: sets up the global lists, locks,
+ * lookaside lists and lazy writer machinery. Now returns a BOOLEAN so that
+ * failure to start the lazy writer thread can be reported to the caller.
+ */
+BOOLEAN
INIT_FUNCTION
NTAPI
CcInitView (
    VOID)
{
+ HANDLE LazyWriter;
+ NTSTATUS Status;
+ KPRIORITY Priority;
+ OBJECT_ATTRIBUTES ObjectAttributes;
+
    DPRINT("CcInitView()\n");
    InitializeListHead(&DirtyVacbListHead);
    InitializeListHead(&VacbLruListHead);
+ InitializeListHead(&CcDeferredWrites);
+ InitializeListHead(&CcCleanSharedCacheMapList);
+ KeInitializeSpinLock(&CcDeferredWriteSpinLock);
+ KeInitializeSpinLock(&iSharedCacheMapLock);
    KeInitializeGuardedMutex(&ViewLock);
    ExInitializeNPagedLookasideList(&iBcbLookasideList,
                                    NULL,
    MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);
+ /* Initialize lazy writer events */
+ KeInitializeEvent(&iLazyWriterShutdown, SynchronizationEvent, FALSE);
+ KeInitializeEvent(&iLazyWriterNotify, NotificationEvent, FALSE);
+
+ /* Define lazy writer threshold, depending on system type.
+ * NOTE(review): no default case — if MmQuerySystemSize() ever returned
+ * another value, CcDirtyPageThreshold would stay 0; confirm intended.
+ */
+ switch (MmQuerySystemSize())
+ {
+ case MmSmallSystem:
+ CcDirtyPageThreshold = MmNumberOfPhysicalPages / 8;
+ break;
+
+ case MmMediumSystem:
+ CcDirtyPageThreshold = MmNumberOfPhysicalPages / 4;
+ break;
+
+ case MmLargeSystem:
+ /* 3/8 of physical memory on large systems */
+ CcDirtyPageThreshold = MmNumberOfPhysicalPages / 8 + MmNumberOfPhysicalPages / 4;
+ break;
+ }
+
+ /* Start the lazy writer thread */
+ InitializeObjectAttributes(&ObjectAttributes,
+ NULL,
+ OBJ_KERNEL_HANDLE,
+ NULL,
+ NULL);
+ Status = PsCreateSystemThread(&LazyWriter,
+ THREAD_ALL_ACCESS,
+ &ObjectAttributes,
+ NULL,
+ NULL,
+ CciLazyWriter,
+ NULL);
+ if (!NT_SUCCESS(Status))
+ {
+ /* NOTE(review): resources initialized above (lookaside lists, events)
+ * are not torn down on this failure path — confirm the caller
+ * bugchecks or halts boot so this cannot leak. */
+ return FALSE;
+ }
+
+ /* Priority 27 is in the NT real-time range (16-31); presumably chosen to
+ * match the Windows lazy writer — TODO confirm */
+ Priority = 27;
+ Status = NtSetInformationThread(LazyWriter,
+ ThreadPriority,
+ &Priority,
+ sizeof(Priority));
+ ASSERT(NT_SUCCESS(Status));
+
+ /* Handle is not needed */
+ ObCloseHandle(LazyWriter, KernelMode);
+
    CcInitCacheZeroPage();
+
+ return TRUE;
+}
+
+#if DBG && defined(KDBG)
+/* KDBG debugger extension: dumps, per shared cache map, the amount of valid
+ * and dirty cached data (in kb) together with the backing file name.
+ * Walks CcCleanSharedCacheMapList without locking (debugger context only).
+ */
+BOOLEAN
+ExpKdbgExtFileCache(ULONG Argc, PCHAR Argv[])
+{
+ PLIST_ENTRY ListEntry;
+ UNICODE_STRING NoName = RTL_CONSTANT_STRING(L"No name for File");
+
+ KdbpPrint(" Usage Summary (in kb)\n");
+ KdbpPrint("Shared\t\tValid\tDirty\tName\n");
+ /* No need to lock the spin lock here, we're in DBG */
+ for (ListEntry = CcCleanSharedCacheMapList.Flink;
+ ListEntry != &CcCleanSharedCacheMapList;
+ ListEntry = ListEntry->Flink)
+ {
+ PLIST_ENTRY Vacbs;
+ ULONG Valid = 0, Dirty = 0;
+ PROS_SHARED_CACHE_MAP SharedCacheMap;
+ PUNICODE_STRING FileName;
+
+ SharedCacheMap = CONTAINING_RECORD(ListEntry, ROS_SHARED_CACHE_MAP, SharedCacheMapLinks);
+
+ /* Dirty size */
+ Dirty = (SharedCacheMap->DirtyPages * PAGE_SIZE) / 1024;
+
+ /* First, count for all the associated VACB */
+ for (Vacbs = SharedCacheMap->CacheMapVacbListHead.Flink;
+ Vacbs != &SharedCacheMap->CacheMapVacbListHead;
+ Vacbs = Vacbs->Flink)
+ {
+ PROS_VACB Vacb;
+
+ Vacb = CONTAINING_RECORD(Vacbs, ROS_VACB, CacheMapVacbListEntry);
+ if (Vacb->Valid)
+ {
+ Valid += VACB_MAPPING_GRANULARITY / 1024;
+ }
+ }
+
+ /* Setup name */
+ if (SharedCacheMap->FileObject != NULL &&
+ SharedCacheMap->FileObject->FileName.Length != 0)
+ {
+ FileName = &SharedCacheMap->FileObject->FileName;
+ }
+ else
+ {
+ FileName = &NoName;
+ }
+
+ /* And print.
+ * NOTE(review): Valid/Dirty are ULONG printed with %d — consider %lu. */
+ KdbpPrint("%p\t%d\t%d\t%wZ\n", SharedCacheMap, Valid, Dirty, FileName);
+ }
+
+ return TRUE;
}
+#endif
/* EOF */