* PURPOSE: Cache manager
*
* PROGRAMMERS: David Welch (welch@mcmail.com)
+ * Pierre Schweitzer (pierre@reactos.org)
*/
/* NOTES **********************************************************************
/* GLOBALS *******************************************************************/
-/*
- * If CACHE_BITMAP is defined, the cache manager uses one large memory region
- * within the kernel address space and allocate/deallocate space from this block
- * over a bitmap. If CACHE_BITMAP is used, the size of the mdl mapping region
- * must be reduced (ntoskrnl\mm\mdl.c, MI_MDLMAPPING_REGION_SIZE).
- */
-//#define CACHE_BITMAP
-
-static LIST_ENTRY DirtySegmentListHead;
-static LIST_ENTRY CacheSegmentListHead;
-static LIST_ENTRY CacheSegmentLRUListHead;
-static LIST_ENTRY ClosedListHead;
-ULONG DirtyPageCount=0;
+LIST_ENTRY DirtyVacbListHead;
+static LIST_ENTRY VacbLruListHead;
KGUARDED_MUTEX ViewLock;
-#ifdef CACHE_BITMAP
-#define CI_CACHESEG_MAPPING_REGION_SIZE (128*1024*1024)
-
-static PVOID CiCacheSegMappingRegionBase = NULL;
-static RTL_BITMAP CiCacheSegMappingRegionAllocMap;
-static ULONG CiCacheSegMappingRegionHint;
-static KSPIN_LOCK CiCacheSegMappingRegionLock;
-#endif
-
NPAGED_LOOKASIDE_LIST iBcbLookasideList;
-static NPAGED_LOOKASIDE_LIST BcbLookasideList;
-static NPAGED_LOOKASIDE_LIST CacheSegLookasideList;
+static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
+static NPAGED_LOOKASIDE_LIST VacbLookasideList;
+
+/* Counters:
+ * - Amount of pages flushed by lazy writer
+ * - Number of times lazy writer ran
+ */
+ULONG CcLazyWritePages = 0;
+ULONG CcLazyWriteIos = 0;
+
+/* Internal vars (MS):
+ * - Threshold above which lazy writer will start action
+ * - Amount of dirty pages
+ * - List for deferred writes
+ * - Spinlock when dealing with the deferred list
+ * - List for "clean" shared cache maps
+ */
+ULONG CcDirtyPageThreshold = 0;
+ULONG CcTotalDirtyPages = 0;
+LIST_ENTRY CcDeferredWrites;
+KSPIN_LOCK CcDeferredWriteSpinLock;
+LIST_ENTRY CcCleanSharedCacheMapList;
+
+/* Internal vars (ROS):
+ * - Event to notify lazy writer to shutdown
+ * - Event to inform watchers lazy writer is done for this loop
+ * - Lock for the CcCleanSharedCacheMapList list
+ */
+KEVENT iLazyWriterShutdown;
+KEVENT iLazyWriterNotify;
+KSPIN_LOCK iSharedCacheMapLock;
#if DBG
-static void CcRosCacheSegmentIncRefCount_ ( PCACHE_SEGMENT cs, const char* file, int line )
+static void CcRosVacbIncRefCount_(PROS_VACB vacb, const char* file, int line)
{
- ++cs->ReferenceCount;
- if ( cs->Bcb->Trace )
+ ++vacb->ReferenceCount;
+ if (vacb->SharedCacheMap->Trace)
{
- DbgPrint("(%s:%i) CacheSegment %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
- file, line, cs, cs->ReferenceCount, cs->Dirty, cs->PageOut );
+ DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
+ file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
}
}
-static void CcRosCacheSegmentDecRefCount_ ( PCACHE_SEGMENT cs, const char* file, int line )
+static void CcRosVacbDecRefCount_(PROS_VACB vacb, const char* file, int line)
{
- --cs->ReferenceCount;
- if ( cs->Bcb->Trace )
+ --vacb->ReferenceCount;
+ if (vacb->SharedCacheMap->Trace)
{
- DbgPrint("(%s:%i) CacheSegment %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
- file, line, cs, cs->ReferenceCount, cs->Dirty, cs->PageOut );
+ DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
+ file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
}
}
-#define CcRosCacheSegmentIncRefCount(cs) CcRosCacheSegmentIncRefCount_(cs,__FILE__,__LINE__)
-#define CcRosCacheSegmentDecRefCount(cs) CcRosCacheSegmentDecRefCount_(cs,__FILE__,__LINE__)
+#define CcRosVacbIncRefCount(vacb) CcRosVacbIncRefCount_(vacb,__FILE__,__LINE__)
+#define CcRosVacbDecRefCount(vacb) CcRosVacbDecRefCount_(vacb,__FILE__,__LINE__)
#else
-#define CcRosCacheSegmentIncRefCount(cs) (++((cs)->ReferenceCount))
-#define CcRosCacheSegmentDecRefCount(cs) (--((cs)->ReferenceCount))
+#define CcRosVacbIncRefCount(vacb) (++((vacb)->ReferenceCount))
+#define CcRosVacbDecRefCount(vacb) (--((vacb)->ReferenceCount))
#endif
NTSTATUS
-CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg);
+CcRosInternalFreeVacb(PROS_VACB Vacb);
/* FUNCTIONS *****************************************************************/
VOID
NTAPI
CcRosTraceCacheMap (
- PBCB Bcb,
+ PROS_SHARED_CACHE_MAP SharedCacheMap,
BOOLEAN Trace )
{
#if DBG
KIRQL oldirql;
PLIST_ENTRY current_entry;
- PCACHE_SEGMENT current;
+ PROS_VACB current;
- if ( !Bcb )
+ if (!SharedCacheMap)
return;
- Bcb->Trace = Trace;
+ SharedCacheMap->Trace = Trace;
- if ( Trace )
+ if (Trace)
{
- DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", Bcb );
+ DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
KeAcquireGuardedMutex(&ViewLock);
- KeAcquireSpinLock(&Bcb->BcbLock, &oldirql);
+ KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldirql);
- current_entry = Bcb->BcbSegmentListHead.Flink;
- while (current_entry != &Bcb->BcbSegmentListHead)
+ current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
+ while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
{
- current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
+ current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
current_entry = current_entry->Flink;
- DPRINT1(" CacheSegment 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
+ DPRINT1(" VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
current, current->ReferenceCount, current->Dirty, current->PageOut );
}
- KeReleaseSpinLock(&Bcb->BcbLock, oldirql);
+ KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldirql);
KeReleaseGuardedMutex(&ViewLock);
}
else
{
- DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", Bcb );
+ DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
}
#else
- Bcb = Bcb;
- Trace = Trace;
+ UNREFERENCED_PARAMETER(SharedCacheMap);
+ UNREFERENCED_PARAMETER(Trace);
#endif
}
NTSTATUS
NTAPI
-CcRosFlushCacheSegment (
- PCACHE_SEGMENT CacheSegment)
+CcRosFlushVacb (
+ PROS_VACB Vacb)
{
NTSTATUS Status;
KIRQL oldIrql;
- Status = WriteCacheSegment(CacheSegment);
+ Status = CcWriteVirtualAddress(Vacb);
if (NT_SUCCESS(Status))
{
KeAcquireGuardedMutex(&ViewLock);
- KeAcquireSpinLock(&CacheSegment->Bcb->BcbLock, &oldIrql);
+ KeAcquireSpinLock(&Vacb->SharedCacheMap->CacheMapLock, &oldIrql);
- CacheSegment->Dirty = FALSE;
- RemoveEntryList(&CacheSegment->DirtySegmentListEntry);
- DirtyPageCount -= CacheSegment->Bcb->CacheSegmentSize / PAGE_SIZE;
- CcRosCacheSegmentDecRefCount(CacheSegment);
+ Vacb->Dirty = FALSE;
+ RemoveEntryList(&Vacb->DirtyVacbListEntry);
+ CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+ Vacb->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+ CcRosVacbDecRefCount(Vacb);
- KeReleaseSpinLock(&CacheSegment->Bcb->BcbLock, oldIrql);
+ KeReleaseSpinLock(&Vacb->SharedCacheMap->CacheMapLock, oldIrql);
KeReleaseGuardedMutex(&ViewLock);
}
- return(Status);
+ return Status;
}
NTSTATUS
CcRosFlushDirtyPages (
ULONG Target,
PULONG Count,
- BOOLEAN Wait)
+ BOOLEAN Wait,
+ BOOLEAN CalledFromLazy)
{
PLIST_ENTRY current_entry;
- PCACHE_SEGMENT current;
- ULONG PagesPerSegment;
+ PROS_VACB current;
BOOLEAN Locked;
NTSTATUS Status;
LARGE_INTEGER ZeroTimeout;
KeEnterCriticalRegion();
KeAcquireGuardedMutex(&ViewLock);
- current_entry = DirtySegmentListHead.Flink;
- if (current_entry == &DirtySegmentListHead)
+ current_entry = DirtyVacbListHead.Flink;
+ if (current_entry == &DirtyVacbListHead)
{
DPRINT("No Dirty pages\n");
}
- while ((current_entry != &DirtySegmentListHead) && (Target > 0))
+ while ((current_entry != &DirtyVacbListHead) && (Target > 0))
{
- current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
- DirtySegmentListEntry);
+ current = CONTAINING_RECORD(current_entry,
+ ROS_VACB,
+ DirtyVacbListEntry);
current_entry = current_entry->Flink;
- CcRosCacheSegmentIncRefCount(current);
+ CcRosVacbIncRefCount(current);
+
+ /* When performing lazy write, don't handle temporary files */
+ if (CalledFromLazy &&
+ BooleanFlagOn(current->SharedCacheMap->FileObject->Flags, FO_TEMPORARY_FILE))
+ {
+ CcRosVacbDecRefCount(current);
+ continue;
+ }
- Locked = current->Bcb->Callbacks->AcquireForLazyWrite(
- current->Bcb->LazyWriteContext, Wait);
+ Locked = current->SharedCacheMap->Callbacks->AcquireForLazyWrite(
+ current->SharedCacheMap->LazyWriteContext, Wait);
if (!Locked)
{
- CcRosCacheSegmentDecRefCount(current);
+ CcRosVacbDecRefCount(current);
continue;
}
- Status = KeWaitForSingleObject(¤t->Mutex,
- Executive,
- KernelMode,
- FALSE,
- Wait ? NULL : &ZeroTimeout);
+ Status = CcRosAcquireVacbLock(current,
+ Wait ? NULL : &ZeroTimeout);
if (Status != STATUS_SUCCESS)
{
- current->Bcb->Callbacks->ReleaseFromLazyWrite(
- current->Bcb->LazyWriteContext);
- CcRosCacheSegmentDecRefCount(current);
+ current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
+ current->SharedCacheMap->LazyWriteContext);
+ CcRosVacbDecRefCount(current);
continue;
}
ASSERT(current->Dirty);
/* One reference is added above */
- if (current->ReferenceCount > 2)
+ if ((current->ReferenceCount > 2 && current->PinCount == 0) ||
+ (current->ReferenceCount > 3 && current->PinCount > 1))
{
- KeReleaseMutex(¤t->Mutex, 0);
- current->Bcb->Callbacks->ReleaseFromLazyWrite(
- current->Bcb->LazyWriteContext);
- CcRosCacheSegmentDecRefCount(current);
+ CcRosReleaseVacbLock(current);
+ current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
+ current->SharedCacheMap->LazyWriteContext);
+ CcRosVacbDecRefCount(current);
continue;
}
- PagesPerSegment = current->Bcb->CacheSegmentSize / PAGE_SIZE;
-
KeReleaseGuardedMutex(&ViewLock);
- Status = CcRosFlushCacheSegment(current);
+ Status = CcRosFlushVacb(current);
- KeReleaseMutex(¤t->Mutex, 0);
- current->Bcb->Callbacks->ReleaseFromLazyWrite(
- current->Bcb->LazyWriteContext);
+ CcRosReleaseVacbLock(current);
+ current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
+ current->SharedCacheMap->LazyWriteContext);
KeAcquireGuardedMutex(&ViewLock);
- CcRosCacheSegmentDecRefCount(current);
+ CcRosVacbDecRefCount(current);
- if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
+ if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE) &&
+ (Status != STATUS_MEDIA_WRITE_PROTECTED))
{
- DPRINT1("CC: Failed to flush cache segment.\n");
+ DPRINT1("CC: Failed to flush VACB.\n");
}
else
{
- (*Count) += PagesPerSegment;
- Target -= PagesPerSegment;
+ ULONG PagesFreed;
+
+ /* How many pages did we free? */
+ PagesFreed = VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+ (*Count) += PagesFreed;
+
+            /* Make sure we don't wrap Target below zero (it's unsigned)! */
+ if (Target < PagesFreed)
+ {
+ /* If we would have, jump to zero directly */
+ Target = 0;
+ }
+ else
+ {
+ Target -= PagesFreed;
+ }
}
- current_entry = DirtySegmentListHead.Flink;
+ current_entry = DirtyVacbListHead.Flink;
}
KeReleaseGuardedMutex(&ViewLock);
KeLeaveCriticalRegion();
DPRINT("CcRosFlushDirtyPages() finished\n");
- return(STATUS_SUCCESS);
+ return STATUS_SUCCESS;
+}
+
+/* FIXME: Someday this could somewhat implement write-behind/read-ahead */
+VOID
+NTAPI
+CciLazyWriter(PVOID Unused)
+{
+ LARGE_INTEGER OneSecond;
+
+ OneSecond.QuadPart = (LONGLONG)-1*1000*1000*10;
+
+ while (TRUE)
+ {
+ NTSTATUS Status;
+ PLIST_ENTRY ListEntry;
+ ULONG Target, Count = 0;
+
+ /* One per second or until we have to stop */
+ Status = KeWaitForSingleObject(&iLazyWriterShutdown,
+ Executive,
+ KernelMode,
+ FALSE,
+ &OneSecond);
+
+        /* If the wait succeeded, we have to stop running! */
+ if (Status == STATUS_SUCCESS)
+ {
+ break;
+ }
+
+ /* We're not sleeping anymore */
+ KeClearEvent(&iLazyWriterNotify);
+
+ /* Our target is one-eighth of the dirty pages */
+ Target = CcTotalDirtyPages / 8;
+ if (Target != 0)
+ {
+ /* Flush! */
+ DPRINT("Lazy writer starting (%d)\n", Target);
+ CcRosFlushDirtyPages(Target, &Count, FALSE, TRUE);
+
+ /* And update stats */
+ CcLazyWritePages += Count;
+ ++CcLazyWriteIos;
+ DPRINT("Lazy writer done (%d)\n", Count);
+ }
+
+ /* Inform people waiting on us that we're done */
+ KeSetEvent(&iLazyWriterNotify, IO_DISK_INCREMENT, FALSE);
+
+ /* Likely not optimal, but let's handle one deferred write now! */
+ ListEntry = ExInterlockedRemoveHeadList(&CcDeferredWrites, &CcDeferredWriteSpinLock);
+ if (ListEntry != NULL)
+ {
+ PROS_DEFERRED_WRITE_CONTEXT Context;
+
+ /* Extract the context */
+ Context = CONTAINING_RECORD(ListEntry, ROS_DEFERRED_WRITE_CONTEXT, CcDeferredWritesEntry);
+
+ /* Can we write now? */
+ if (CcCanIWrite(Context->FileObject, Context->BytesToWrite, FALSE, Context->Retrying))
+ {
+ /* Yes! Do it, and destroy the associated context */
+ Context->PostRoutine(Context->Context1, Context->Context2);
+ ExFreePoolWithTag(Context, 'CcDw');
+ }
+ else
+ {
+            /* Otherwise, requeue it at the tail so that it doesn't block the others.
+             * There is clearly room for improvement here, but given the poor
+             * algorithm currently in use, it's better than nothing!
+             */
+ ExInterlockedInsertTailList(&CcDeferredWrites,
+ &Context->CcDeferredWritesEntry,
+ &CcDeferredWriteSpinLock);
+ }
+ }
+ }
}
NTSTATUS
*/
{
PLIST_ENTRY current_entry;
- PCACHE_SEGMENT current;
- ULONG PagesPerSegment;
+ PROS_VACB current;
ULONG PagesFreed;
KIRQL oldIrql;
LIST_ENTRY FreeList;
retry:
KeAcquireGuardedMutex(&ViewLock);
- current_entry = CacheSegmentLRUListHead.Flink;
- while (current_entry != &CacheSegmentLRUListHead)
+ current_entry = VacbLruListHead.Flink;
+ while (current_entry != &VacbLruListHead)
{
- current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
- CacheSegmentLRUListEntry);
+ current = CONTAINING_RECORD(current_entry,
+ ROS_VACB,
+ VacbLruListEntry);
current_entry = current_entry->Flink;
- KeAcquireSpinLock(¤t->Bcb->BcbLock, &oldIrql);
+ KeAcquireSpinLock(¤t->SharedCacheMap->CacheMapLock, &oldIrql);
- /* Reference the cache segment */
- CcRosCacheSegmentIncRefCount(current);
+ /* Reference the VACB */
+ CcRosVacbIncRefCount(current);
/* Check if it's mapped and not dirty */
if (current->MappedCount > 0 && !current->Dirty)
{
/* We have to break these locks because Cc sucks */
- KeReleaseSpinLock(¤t->Bcb->BcbLock, oldIrql);
+ KeReleaseSpinLock(¤t->SharedCacheMap->CacheMapLock, oldIrql);
KeReleaseGuardedMutex(&ViewLock);
- /* Page out the segment */
- for (i = 0; i < current->Bcb->CacheSegmentSize / PAGE_SIZE; i++)
+ /* Page out the VACB */
+ for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++)
{
Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);
/* Reacquire the locks */
KeAcquireGuardedMutex(&ViewLock);
- KeAcquireSpinLock(¤t->Bcb->BcbLock, &oldIrql);
+ KeAcquireSpinLock(¤t->SharedCacheMap->CacheMapLock, &oldIrql);
}
- /* Dereference the cache segment */
- CcRosCacheSegmentDecRefCount(current);
+ /* Dereference the VACB */
+ CcRosVacbDecRefCount(current);
/* Check if we can free this entry now */
if (current->ReferenceCount == 0)
ASSERT(!current->Dirty);
ASSERT(!current->MappedCount);
- RemoveEntryList(¤t->BcbSegmentListEntry);
- RemoveEntryList(¤t->CacheSegmentListEntry);
- RemoveEntryList(¤t->CacheSegmentLRUListEntry);
- InsertHeadList(&FreeList, ¤t->BcbSegmentListEntry);
+ RemoveEntryList(¤t->CacheMapVacbListEntry);
+ RemoveEntryList(¤t->VacbLruListEntry);
+ InsertHeadList(&FreeList, ¤t->CacheMapVacbListEntry);
/* Calculate how many pages we freed for Mm */
- PagesPerSegment = current->Bcb->CacheSegmentSize / PAGE_SIZE;
- PagesFreed = min(PagesPerSegment, Target);
+ PagesFreed = min(VACB_MAPPING_GRANULARITY / PAGE_SIZE, Target);
Target -= PagesFreed;
(*NrFreed) += PagesFreed;
}
- KeReleaseSpinLock(¤t->Bcb->BcbLock, oldIrql);
+ KeReleaseSpinLock(¤t->SharedCacheMap->CacheMapLock, oldIrql);
}
KeReleaseGuardedMutex(&ViewLock);
if ((Target > 0) && !FlushedPages)
{
/* Flush dirty pages to disk */
- CcRosFlushDirtyPages(Target, &PagesFreed, FALSE);
+ CcRosFlushDirtyPages(Target, &PagesFreed, FALSE, FALSE);
FlushedPages = TRUE;
/* We can only swap as many pages as we flushed */
while (!IsListEmpty(&FreeList))
{
current_entry = RemoveHeadList(&FreeList);
- current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
- BcbSegmentListEntry);
- CcRosInternalFreeCacheSegment(current);
+ current = CONTAINING_RECORD(current_entry,
+ ROS_VACB,
+ CacheMapVacbListEntry);
+ CcRosInternalFreeVacb(current);
}
DPRINT("Evicted %lu cache pages\n", (*NrFreed));
- return(STATUS_SUCCESS);
+ return STATUS_SUCCESS;
}
NTSTATUS
NTAPI
-CcRosReleaseCacheSegment (
- PBCB Bcb,
- PCACHE_SEGMENT CacheSeg,
+CcRosReleaseVacb (
+ PROS_SHARED_CACHE_MAP SharedCacheMap,
+ PROS_VACB Vacb,
BOOLEAN Valid,
BOOLEAN Dirty,
BOOLEAN Mapped)
{
BOOLEAN WasDirty;
- KIRQL oldIrql;
-
- ASSERT(Bcb);
- DPRINT("CcReleaseCacheSegment(Bcb 0x%p, CacheSeg 0x%p, Valid %u)\n",
- Bcb, CacheSeg, Valid);
+ ASSERT(SharedCacheMap);
- KeAcquireGuardedMutex(&ViewLock);
- KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
+ DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
+ SharedCacheMap, Vacb, Valid);
- CacheSeg->Valid = Valid;
+ Vacb->Valid = Valid;
- WasDirty = CacheSeg->Dirty;
- CacheSeg->Dirty = CacheSeg->Dirty || Dirty;
-
- if (!WasDirty && CacheSeg->Dirty)
+ WasDirty = FALSE;
+ if (Dirty)
{
- InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
- DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
+ if (!Vacb->Dirty && Dirty)
+ {
+ CcRosMarkDirtyVacb(Vacb);
+ }
+ else
+ {
+ WasDirty = TRUE;
+ }
}
if (Mapped)
{
- CacheSeg->MappedCount++;
+ Vacb->MappedCount++;
}
- CcRosCacheSegmentDecRefCount(CacheSeg);
- if (Mapped && (CacheSeg->MappedCount == 1))
+ CcRosVacbDecRefCount(Vacb);
+ if (Mapped && (Vacb->MappedCount == 1))
{
- CcRosCacheSegmentIncRefCount(CacheSeg);
+ CcRosVacbIncRefCount(Vacb);
}
- if (!WasDirty && CacheSeg->Dirty)
+ if (!WasDirty && Vacb->Dirty)
{
- CcRosCacheSegmentIncRefCount(CacheSeg);
+ CcRosVacbIncRefCount(Vacb);
}
- KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
- KeReleaseGuardedMutex(&ViewLock);
- KeReleaseMutex(&CacheSeg->Mutex, 0);
+ CcRosReleaseVacbLock(Vacb);
- return(STATUS_SUCCESS);
+ return STATUS_SUCCESS;
}
-/* Returns with Cache Segment Lock Held! */
-PCACHE_SEGMENT
+/* Returns with VACB Lock Held! */
+PROS_VACB
NTAPI
-CcRosLookupCacheSegment (
- PBCB Bcb,
- ULONG FileOffset)
+CcRosLookupVacb (
+ PROS_SHARED_CACHE_MAP SharedCacheMap,
+ LONGLONG FileOffset)
{
PLIST_ENTRY current_entry;
- PCACHE_SEGMENT current;
+ PROS_VACB current;
KIRQL oldIrql;
- ASSERT(Bcb);
+ ASSERT(SharedCacheMap);
- DPRINT("CcRosLookupCacheSegment(Bcb -x%p, FileOffset %lu)\n", Bcb, FileOffset);
+ DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
+ SharedCacheMap, FileOffset);
KeAcquireGuardedMutex(&ViewLock);
- KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
+ KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
- current_entry = Bcb->BcbSegmentListHead.Flink;
- while (current_entry != &Bcb->BcbSegmentListHead)
+ current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
+ while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
{
- current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
- BcbSegmentListEntry);
- if ((current->FileOffset <= FileOffset) &&
- ((current->FileOffset + Bcb->CacheSegmentSize) > FileOffset))
+ current = CONTAINING_RECORD(current_entry,
+ ROS_VACB,
+ CacheMapVacbListEntry);
+ if (IsPointInRange(current->FileOffset.QuadPart,
+ VACB_MAPPING_GRANULARITY,
+ FileOffset))
{
- CcRosCacheSegmentIncRefCount(current);
- KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
+ CcRosVacbIncRefCount(current);
+ KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
KeReleaseGuardedMutex(&ViewLock);
- KeWaitForSingleObject(¤t->Mutex,
- Executive,
- KernelMode,
- FALSE,
- NULL);
- return(current);
+ CcRosAcquireVacbLock(current, NULL);
+ return current;
}
+ if (current->FileOffset.QuadPart > FileOffset)
+ break;
current_entry = current_entry->Flink;
}
- KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
+ KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
KeReleaseGuardedMutex(&ViewLock);
- return(NULL);
+ return NULL;
}
-NTSTATUS
+VOID
NTAPI
-CcRosMarkDirtyCacheSegment (
- PBCB Bcb,
- ULONG FileOffset)
+CcRosMarkDirtyVacb (
+ PROS_VACB Vacb)
{
- PCACHE_SEGMENT CacheSeg;
KIRQL oldIrql;
+ PROS_SHARED_CACHE_MAP SharedCacheMap;
- ASSERT(Bcb);
-
- DPRINT("CcRosMarkDirtyCacheSegment(Bcb 0x%p, FileOffset %lu)\n", Bcb, FileOffset);
-
- CacheSeg = CcRosLookupCacheSegment(Bcb, FileOffset);
- if (CacheSeg == NULL)
- {
- KeBugCheck(CACHE_MANAGER);
- }
+ SharedCacheMap = Vacb->SharedCacheMap;
KeAcquireGuardedMutex(&ViewLock);
- KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
+ KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
- if (!CacheSeg->Dirty)
+ if (!Vacb->Dirty)
{
- InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
- DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
+ InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
+ CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+ Vacb->SharedCacheMap->DirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
}
else
{
- CcRosCacheSegmentDecRefCount(CacheSeg);
+ CcRosVacbDecRefCount(Vacb);
}
/* Move to the tail of the LRU list */
- RemoveEntryList(&CacheSeg->CacheSegmentLRUListEntry);
- InsertTailList(&CacheSegmentLRUListHead, &CacheSeg->CacheSegmentLRUListEntry);
+ RemoveEntryList(&Vacb->VacbLruListEntry);
+ InsertTailList(&VacbLruListHead, &Vacb->VacbLruListEntry);
- CacheSeg->Dirty = TRUE;
+ Vacb->Dirty = TRUE;
- KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
+ KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
KeReleaseGuardedMutex(&ViewLock);
- KeReleaseMutex(&CacheSeg->Mutex, 0);
+}
+
+NTSTATUS
+NTAPI
+CcRosMarkDirtyFile (
+ PROS_SHARED_CACHE_MAP SharedCacheMap,
+ LONGLONG FileOffset)
+{
+ PROS_VACB Vacb;
+
+ ASSERT(SharedCacheMap);
+
+ DPRINT("CcRosMarkDirtyVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
+ SharedCacheMap, FileOffset);
- return(STATUS_SUCCESS);
+ Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
+ if (Vacb == NULL)
+ {
+ KeBugCheck(CACHE_MANAGER);
+ }
+
+ CcRosMarkDirtyVacb(Vacb);
+
+
+ CcRosReleaseVacbLock(Vacb);
+
+ return STATUS_SUCCESS;
}
NTSTATUS
NTAPI
-CcRosUnmapCacheSegment (
- PBCB Bcb,
- ULONG FileOffset,
+CcRosUnmapVacb (
+ PROS_SHARED_CACHE_MAP SharedCacheMap,
+ LONGLONG FileOffset,
BOOLEAN NowDirty)
{
- PCACHE_SEGMENT CacheSeg;
+ PROS_VACB Vacb;
BOOLEAN WasDirty;
- KIRQL oldIrql;
- ASSERT(Bcb);
+ ASSERT(SharedCacheMap);
- DPRINT("CcRosUnmapCacheSegment(Bcb 0x%p, FileOffset %lu, NowDirty %u)\n",
- Bcb, FileOffset, NowDirty);
+ DPRINT("CcRosUnmapVacb(SharedCacheMap 0x%p, FileOffset %I64u, NowDirty %u)\n",
+ SharedCacheMap, FileOffset, NowDirty);
- CacheSeg = CcRosLookupCacheSegment(Bcb, FileOffset);
- if (CacheSeg == NULL)
+ Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
+ if (Vacb == NULL)
{
- return(STATUS_UNSUCCESSFUL);
+ return STATUS_UNSUCCESSFUL;
}
- KeAcquireGuardedMutex(&ViewLock);
- KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
-
- WasDirty = CacheSeg->Dirty;
- CacheSeg->Dirty = CacheSeg->Dirty || NowDirty;
+ WasDirty = FALSE;
+ if (NowDirty)
+ {
+ if (!Vacb->Dirty && NowDirty)
+ {
+ CcRosMarkDirtyVacb(Vacb);
+ }
+ else
+ {
+ WasDirty = TRUE;
+ }
+ }
- CacheSeg->MappedCount--;
+ Vacb->MappedCount--;
+ CcRosVacbDecRefCount(Vacb);
if (!WasDirty && NowDirty)
{
- InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
- DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
+ CcRosVacbIncRefCount(Vacb);
}
-
- CcRosCacheSegmentDecRefCount(CacheSeg);
- if (!WasDirty && NowDirty)
+ if (Vacb->MappedCount == 0)
{
- CcRosCacheSegmentIncRefCount(CacheSeg);
+ CcRosVacbDecRefCount(Vacb);
}
- if (CacheSeg->MappedCount == 0)
+
+ CcRosReleaseVacbLock(Vacb);
+
+ return STATUS_SUCCESS;
+}
+
+static
+NTSTATUS
+CcRosMapVacb(
+ PROS_VACB Vacb)
+{
+ ULONG i;
+ NTSTATUS Status;
+ ULONG_PTR NumberOfPages;
+
+ /* Create a memory area. */
+ MmLockAddressSpace(MmGetKernelAddressSpace());
+ Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
+ 0, // nothing checks for VACB mareas, so set to 0
+ &Vacb->BaseAddress,
+ VACB_MAPPING_GRANULARITY,
+ PAGE_READWRITE,
+ (PMEMORY_AREA*)&Vacb->MemoryArea,
+ 0,
+ PAGE_SIZE);
+ MmUnlockAddressSpace(MmGetKernelAddressSpace());
+ if (!NT_SUCCESS(Status))
{
- CcRosCacheSegmentDecRefCount(CacheSeg);
+ DPRINT1("MmCreateMemoryArea failed with %lx for VACB %p\n", Status, Vacb);
+ return Status;
}
- KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
- KeReleaseGuardedMutex(&ViewLock);
- KeReleaseMutex(&CacheSeg->Mutex, 0);
+ ASSERT(((ULONG_PTR)Vacb->BaseAddress % PAGE_SIZE) == 0);
+ ASSERT((ULONG_PTR)Vacb->BaseAddress > (ULONG_PTR)MmSystemRangeStart);
- return(STATUS_SUCCESS);
+ /* Create a virtual mapping for this memory area */
+ NumberOfPages = BYTES_TO_PAGES(VACB_MAPPING_GRANULARITY);
+ for (i = 0; i < NumberOfPages; i++)
+ {
+ PFN_NUMBER PageFrameNumber;
+
+ MI_SET_USAGE(MI_USAGE_CACHE);
+ Status = MmRequestPageMemoryConsumer(MC_CACHE, TRUE, &PageFrameNumber);
+ if (PageFrameNumber == 0)
+ {
+ DPRINT1("Unable to allocate page\n");
+ KeBugCheck(MEMORY_MANAGEMENT);
+ }
+
+ Status = MmCreateVirtualMapping(NULL,
+ (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE)),
+ PAGE_READWRITE,
+ &PageFrameNumber,
+ 1);
+ if (!NT_SUCCESS(Status))
+ {
+ DPRINT1("Unable to create virtual mapping\n");
+ KeBugCheck(MEMORY_MANAGEMENT);
+ }
+ }
+
+ return STATUS_SUCCESS;
}
static
NTSTATUS
-CcRosCreateCacheSegment (
- PBCB Bcb,
- ULONG FileOffset,
- PCACHE_SEGMENT* CacheSeg)
+CcRosCreateVacb (
+ PROS_SHARED_CACHE_MAP SharedCacheMap,
+ LONGLONG FileOffset,
+ PROS_VACB *Vacb)
{
- PCACHE_SEGMENT current;
- PCACHE_SEGMENT previous;
+ PROS_VACB current;
+ PROS_VACB previous;
PLIST_ENTRY current_entry;
NTSTATUS Status;
KIRQL oldIrql;
-#ifdef CACHE_BITMAP
- ULONG StartingOffset;
-#endif
- PHYSICAL_ADDRESS BoundaryAddressMultiple;
- ASSERT(Bcb);
+ ASSERT(SharedCacheMap);
- DPRINT("CcRosCreateCacheSegment()\n");
+ DPRINT("CcRosCreateVacb()\n");
- BoundaryAddressMultiple.QuadPart = 0;
- if (FileOffset >= Bcb->FileSize.u.LowPart)
+ if (FileOffset >= SharedCacheMap->SectionSize.QuadPart)
{
- CacheSeg = NULL;
+ *Vacb = NULL;
return STATUS_INVALID_PARAMETER;
}
- current = ExAllocateFromNPagedLookasideList(&CacheSegLookasideList);
+ current = ExAllocateFromNPagedLookasideList(&VacbLookasideList);
+ current->BaseAddress = NULL;
current->Valid = FALSE;
current->Dirty = FALSE;
current->PageOut = FALSE;
- current->FileOffset = ROUND_DOWN(FileOffset, Bcb->CacheSegmentSize);
- current->Bcb = Bcb;
+ current->FileOffset.QuadPart = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
+ current->SharedCacheMap = SharedCacheMap;
#if DBG
- if ( Bcb->Trace )
+ if (SharedCacheMap->Trace)
{
- DPRINT1("CacheMap 0x%p: new Cache Segment: 0x%p\n", Bcb, current );
+ DPRINT1("CacheMap 0x%p: new VACB: 0x%p\n", SharedCacheMap, current);
}
#endif
current->MappedCount = 0;
- current->DirtySegmentListEntry.Flink = NULL;
- current->DirtySegmentListEntry.Blink = NULL;
+ current->DirtyVacbListEntry.Flink = NULL;
+ current->DirtyVacbListEntry.Blink = NULL;
current->ReferenceCount = 1;
+ current->PinCount = 0;
KeInitializeMutex(¤t->Mutex, 0);
- KeWaitForSingleObject(¤t->Mutex,
- Executive,
- KernelMode,
- FALSE,
- NULL);
+ CcRosAcquireVacbLock(current, NULL);
KeAcquireGuardedMutex(&ViewLock);
- *CacheSeg = current;
- /* There is window between the call to CcRosLookupCacheSegment
- * and CcRosCreateCacheSegment. We must check if a segment on
- * the fileoffset exist. If there exist a segment, we release
- * our new created segment and return the existing one.
+ *Vacb = current;
+    /* There is a window between the call to CcRosLookupVacb
+     * and CcRosCreateVacb. We must check whether a VACB for this
+     * file offset already exists. If one does, we release
+     * our newly created VACB and return the existing one.
*/
- KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
- current_entry = Bcb->BcbSegmentListHead.Flink;
+ KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
+ current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
previous = NULL;
- while (current_entry != &Bcb->BcbSegmentListHead)
+ while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
{
- current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
- BcbSegmentListEntry);
- if (current->FileOffset <= FileOffset &&
- (current->FileOffset + Bcb->CacheSegmentSize) > FileOffset)
+ current = CONTAINING_RECORD(current_entry,
+ ROS_VACB,
+ CacheMapVacbListEntry);
+ if (IsPointInRange(current->FileOffset.QuadPart,
+ VACB_MAPPING_GRANULARITY,
+ FileOffset))
{
- CcRosCacheSegmentIncRefCount(current);
- KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
+ CcRosVacbIncRefCount(current);
+ KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
#if DBG
- if ( Bcb->Trace )
+ if (SharedCacheMap->Trace)
{
- DPRINT1("CacheMap 0x%p: deleting newly created Cache Segment 0x%p ( found existing one 0x%p )\n",
- Bcb,
- (*CacheSeg),
- current );
+ DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
+ SharedCacheMap,
+ (*Vacb),
+ current);
}
#endif
- KeReleaseMutex(&(*CacheSeg)->Mutex, 0);
+ CcRosReleaseVacbLock(*Vacb);
KeReleaseGuardedMutex(&ViewLock);
- ExFreeToNPagedLookasideList(&CacheSegLookasideList, *CacheSeg);
- *CacheSeg = current;
- KeWaitForSingleObject(¤t->Mutex,
- Executive,
- KernelMode,
- FALSE,
- NULL);
+ ExFreeToNPagedLookasideList(&VacbLookasideList, *Vacb);
+ *Vacb = current;
+ CcRosAcquireVacbLock(current, NULL);
return STATUS_SUCCESS;
}
- if (current->FileOffset < FileOffset)
+ if (current->FileOffset.QuadPart < FileOffset)
{
- if (previous == NULL)
- {
- previous = current;
- }
- else
- {
- if (previous->FileOffset < current->FileOffset)
- {
- previous = current;
- }
- }
+ ASSERT(previous == NULL ||
+ previous->FileOffset.QuadPart < current->FileOffset.QuadPart);
+ previous = current;
}
+ if (current->FileOffset.QuadPart > FileOffset)
+ break;
current_entry = current_entry->Flink;
}
- /* There was no existing segment. */
- current = *CacheSeg;
+ /* There was no existing VACB. */
+ current = *Vacb;
if (previous)
{
- InsertHeadList(&previous->BcbSegmentListEntry, ¤t->BcbSegmentListEntry);
+ InsertHeadList(&previous->CacheMapVacbListEntry, ¤t->CacheMapVacbListEntry);
}
else
{
- InsertHeadList(&Bcb->BcbSegmentListHead, ¤t->BcbSegmentListEntry);
+ InsertHeadList(&SharedCacheMap->CacheMapVacbListHead, ¤t->CacheMapVacbListEntry);
}
- KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
- InsertTailList(&CacheSegmentListHead, ¤t->CacheSegmentListEntry);
- InsertTailList(&CacheSegmentLRUListHead, ¤t->CacheSegmentLRUListEntry);
+ KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
+ InsertTailList(&VacbLruListHead, ¤t->VacbLruListEntry);
KeReleaseGuardedMutex(&ViewLock);
-#ifdef CACHE_BITMAP
- KeAcquireSpinLock(&CiCacheSegMappingRegionLock, &oldIrql);
-
- StartingOffset = RtlFindClearBitsAndSet(&CiCacheSegMappingRegionAllocMap, Bcb->CacheSegmentSize / PAGE_SIZE, CiCacheSegMappingRegionHint);
-
- if (StartingOffset == 0xffffffff)
- {
- DPRINT1("Out of CacheSeg mapping space\n");
- KeBugCheck(CACHE_MANAGER);
- }
-
- current->BaseAddress = CiCacheSegMappingRegionBase + StartingOffset * PAGE_SIZE;
-
- if (CiCacheSegMappingRegionHint == StartingOffset)
- {
- CiCacheSegMappingRegionHint += Bcb->CacheSegmentSize / PAGE_SIZE;
- }
-
- KeReleaseSpinLock(&CiCacheSegMappingRegionLock, oldIrql);
-#else
- MmLockAddressSpace(MmGetKernelAddressSpace());
- current->BaseAddress = NULL;
- Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
- 0, // nothing checks for cache_segment mareas, so set to 0
- ¤t->BaseAddress,
- Bcb->CacheSegmentSize,
- PAGE_READWRITE,
- (PMEMORY_AREA*)¤t->MemoryArea,
- FALSE,
- 0,
- BoundaryAddressMultiple);
- MmUnlockAddressSpace(MmGetKernelAddressSpace());
- if (!NT_SUCCESS(Status))
- {
- KeBugCheck(CACHE_MANAGER);
- }
-#endif
- /* Create a virtual mapping for this memory area */
MI_SET_USAGE(MI_USAGE_CACHE);
#if MI_TRACE_PFNS
- PWCHAR pos = NULL;
- ULONG len = 0;
- if ((Bcb->FileObject) && (Bcb->FileObject->FileName.Buffer))
- {
- pos = wcsrchr(Bcb->FileObject->FileName.Buffer, '\\');
- len = wcslen(pos) * sizeof(WCHAR);
- if (pos) snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
- }
-#endif
-
- MmMapMemoryArea(current->BaseAddress, Bcb->CacheSegmentSize,
- MC_CACHE, PAGE_READWRITE);
-
- return(STATUS_SUCCESS);
-}
-
-NTSTATUS
-NTAPI
-CcRosGetCacheSegmentChain (
- PBCB Bcb,
- ULONG FileOffset,
- ULONG Length,
- PCACHE_SEGMENT* CacheSeg)
-{
- PCACHE_SEGMENT current;
- ULONG i;
- PCACHE_SEGMENT* CacheSegList;
- PCACHE_SEGMENT Previous = NULL;
-
- ASSERT(Bcb);
-
- DPRINT("CcRosGetCacheSegmentChain()\n");
-
- Length = ROUND_UP(Length, Bcb->CacheSegmentSize);
-
- CacheSegList = _alloca(sizeof(PCACHE_SEGMENT) *
- (Length / Bcb->CacheSegmentSize));
-
- /*
- * Look for a cache segment already mapping the same data.
- */
- for (i = 0; i < (Length / Bcb->CacheSegmentSize); i++)
+ if ((SharedCacheMap->FileObject) && (SharedCacheMap->FileObject->FileName.Buffer))
{
- ULONG CurrentOffset = FileOffset + (i * Bcb->CacheSegmentSize);
- current = CcRosLookupCacheSegment(Bcb, CurrentOffset);
- if (current != NULL)
+ PWCHAR pos;
+ ULONG len = 0;
+ pos = wcsrchr(SharedCacheMap->FileObject->FileName.Buffer, '\\');
+ if (pos)
{
- KeAcquireGuardedMutex(&ViewLock);
-
- /* Move to tail of LRU list */
- RemoveEntryList(&current->CacheSegmentLRUListEntry);
- InsertTailList(&CacheSegmentLRUListHead, &current->CacheSegmentLRUListEntry);
-
- KeReleaseGuardedMutex(&ViewLock);
-
- CacheSegList[i] = current;
+ len = wcslen(pos) * sizeof(WCHAR);
+ snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
}
else
{
- CcRosCreateCacheSegment(Bcb, CurrentOffset, &current);
- CacheSegList[i] = current;
+ snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%wZ", &SharedCacheMap->FileObject->FileName);
}
}
+#endif
- for (i = 0; i < (Length / Bcb->CacheSegmentSize); i++)
+ Status = CcRosMapVacb(current);
+ if (!NT_SUCCESS(Status))
{
- if (i == 0)
- {
- *CacheSeg = CacheSegList[i];
- Previous = CacheSegList[i];
- }
- else
- {
- Previous->NextInChain = CacheSegList[i];
- Previous = CacheSegList[i];
- }
+ RemoveEntryList(&current->CacheMapVacbListEntry);
+ RemoveEntryList(&current->VacbLruListEntry);
+ CcRosReleaseVacbLock(current);
+ ExFreeToNPagedLookasideList(&VacbLookasideList, current);
}
- ASSERT(Previous);
- Previous->NextInChain = NULL;
- return(STATUS_SUCCESS);
+ return Status;
}
NTSTATUS
NTAPI
-CcRosGetCacheSegment (
- PBCB Bcb,
- ULONG FileOffset,
- PULONG BaseOffset,
+CcRosGetVacb (
+ PROS_SHARED_CACHE_MAP SharedCacheMap,
+ LONGLONG FileOffset,
+ PLONGLONG BaseOffset,
PVOID* BaseAddress,
PBOOLEAN UptoDate,
- PCACHE_SEGMENT* CacheSeg)
+ PROS_VACB *Vacb)
{
- PCACHE_SEGMENT current;
+ PROS_VACB current;
NTSTATUS Status;
- ASSERT(Bcb);
+ ASSERT(SharedCacheMap);
- DPRINT("CcRosGetCacheSegment()\n");
+ DPRINT("CcRosGetVacb()\n");
/*
- * Look for a cache segment already mapping the same data.
+ * Look for a VACB already mapping the same data.
*/
- current = CcRosLookupCacheSegment(Bcb, FileOffset);
+ current = CcRosLookupVacb(SharedCacheMap, FileOffset);
if (current == NULL)
{
/*
- * Otherwise create a new segment.
+ * Otherwise create a new VACB.
*/
- Status = CcRosCreateCacheSegment(Bcb, FileOffset, &current);
+ Status = CcRosCreateVacb(SharedCacheMap, FileOffset, &current);
if (!NT_SUCCESS(Status))
{
return Status;
KeAcquireGuardedMutex(&ViewLock);
/* Move to the tail of the LRU list */
- RemoveEntryList(&current->CacheSegmentLRUListEntry);
- InsertTailList(&CacheSegmentLRUListHead, &current->CacheSegmentLRUListEntry);
+ RemoveEntryList(&current->VacbLruListEntry);
+ InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
KeReleaseGuardedMutex(&ViewLock);
/*
- * Return information about the segment to the caller.
+ * Return information about the VACB to the caller.
*/
*UptoDate = current->Valid;
*BaseAddress = current->BaseAddress;
DPRINT("*BaseAddress %p\n", *BaseAddress);
- *CacheSeg = current;
- *BaseOffset = current->FileOffset;
- return(STATUS_SUCCESS);
+ *Vacb = current;
+ *BaseOffset = current->FileOffset.QuadPart;
+ return STATUS_SUCCESS;
}
NTSTATUS
NTAPI
-CcRosRequestCacheSegment (
- PBCB Bcb,
- ULONG FileOffset,
+CcRosRequestVacb (
+ PROS_SHARED_CACHE_MAP SharedCacheMap,
+ LONGLONG FileOffset,
PVOID* BaseAddress,
PBOOLEAN UptoDate,
- PCACHE_SEGMENT* CacheSeg)
+ PROS_VACB *Vacb)
/*
- * FUNCTION: Request a page mapping for a BCB
+ * FUNCTION: Request a page mapping for a shared cache map
*/
{
- ULONG BaseOffset;
+ LONGLONG BaseOffset;
- ASSERT(Bcb);
+ ASSERT(SharedCacheMap);
- if ((FileOffset % Bcb->CacheSegmentSize) != 0)
+ if (FileOffset % VACB_MAPPING_GRANULARITY != 0)
{
- DPRINT1("Bad fileoffset %x should be multiple of %x",
- FileOffset, Bcb->CacheSegmentSize);
+ DPRINT1("Bad fileoffset %I64x should be multiple of %x",
+ FileOffset, VACB_MAPPING_GRANULARITY);
KeBugCheck(CACHE_MANAGER);
}
- return(CcRosGetCacheSegment(Bcb,
- FileOffset,
- &BaseOffset,
- BaseAddress,
- UptoDate,
- CacheSeg));
+ return CcRosGetVacb(SharedCacheMap,
+ FileOffset,
+ &BaseOffset,
+ BaseAddress,
+ UptoDate,
+ Vacb);
}
-#ifdef CACHE_BITMAP
-#else
+
static
VOID
CcFreeCachePage (
MmReleasePageMemoryConsumer(MC_CACHE, Page);
}
}
-#endif
+
NTSTATUS
-CcRosInternalFreeCacheSegment (
- PCACHE_SEGMENT CacheSeg)
+CcRosInternalFreeVacb (
+ PROS_VACB Vacb)
/*
- * FUNCTION: Releases a cache segment associated with a BCB
+ * FUNCTION: Releases a VACB associated with a shared cache map
*/
{
-#ifdef CACHE_BITMAP
- ULONG i;
- ULONG RegionSize;
- ULONG Base;
- PFN_NUMBER Page;
- KIRQL oldIrql;
-#endif
- DPRINT("Freeing cache segment 0x%p\n", CacheSeg);
+ DPRINT("Freeing VACB 0x%p\n", Vacb);
#if DBG
- if ( CacheSeg->Bcb->Trace )
+ if (Vacb->SharedCacheMap->Trace)
{
- DPRINT1("CacheMap 0x%p: deleting Cache Segment: 0x%p\n", CacheSeg->Bcb, CacheSeg );
+ DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb->SharedCacheMap, Vacb);
}
#endif
-#ifdef CACHE_BITMAP
- RegionSize = CacheSeg->Bcb->CacheSegmentSize / PAGE_SIZE;
-
- /* Unmap all the pages. */
- for (i = 0; i < RegionSize; i++)
- {
- MmDeleteVirtualMapping(NULL,
- CacheSeg->BaseAddress + (i * PAGE_SIZE),
- FALSE,
- NULL,
- &Page);
- MmReleasePageMemoryConsumer(MC_CACHE, Page);
- }
- KeAcquireSpinLock(&CiCacheSegMappingRegionLock, &oldIrql);
- /* Deallocate all the pages used. */
- Base = (ULONG)(CacheSeg->BaseAddress - CiCacheSegMappingRegionBase) / PAGE_SIZE;
-
- RtlClearBits(&CiCacheSegMappingRegionAllocMap, Base, RegionSize);
-
- CiCacheSegMappingRegionHint = min (CiCacheSegMappingRegionHint, Base);
-
- KeReleaseSpinLock(&CiCacheSegMappingRegionLock, oldIrql);
-#else
MmLockAddressSpace(MmGetKernelAddressSpace());
MmFreeMemoryArea(MmGetKernelAddressSpace(),
- CacheSeg->MemoryArea,
+ Vacb->MemoryArea,
CcFreeCachePage,
NULL);
MmUnlockAddressSpace(MmGetKernelAddressSpace());
-#endif
- ExFreeToNPagedLookasideList(&CacheSegLookasideList, CacheSeg);
- return(STATUS_SUCCESS);
-}
-
-NTSTATUS
-NTAPI
-CcRosFreeCacheSegment (
- PBCB Bcb,
- PCACHE_SEGMENT CacheSeg)
-{
- NTSTATUS Status;
- KIRQL oldIrql;
-
- ASSERT(Bcb);
-
- DPRINT("CcRosFreeCacheSegment(Bcb 0x%p, CacheSeg 0x%p)\n",
- Bcb, CacheSeg);
-
- KeAcquireGuardedMutex(&ViewLock);
- KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
- RemoveEntryList(&CacheSeg->BcbSegmentListEntry);
- RemoveEntryList(&CacheSeg->CacheSegmentListEntry);
- RemoveEntryList(&CacheSeg->CacheSegmentLRUListEntry);
- if (CacheSeg->Dirty)
- {
- RemoveEntryList(&CacheSeg->DirtySegmentListEntry);
- DirtyPageCount -= Bcb->CacheSegmentSize / PAGE_SIZE;
-
- }
- KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
- KeReleaseGuardedMutex(&ViewLock);
- Status = CcRosInternalFreeCacheSegment(CacheSeg);
- return(Status);
+ ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
+ return STATUS_SUCCESS;
}
/*
IN ULONG Length,
OUT PIO_STATUS_BLOCK IoStatus)
{
- PBCB Bcb;
+ PROS_SHARED_CACHE_MAP SharedCacheMap;
LARGE_INTEGER Offset;
- PCACHE_SEGMENT current;
+ LONGLONG RemainingLength;
+ PROS_VACB current;
NTSTATUS Status;
KIRQL oldIrql;
+ CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
+ SectionObjectPointers, FileOffset, Length);
+
DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
SectionObjectPointers, FileOffset, Length, IoStatus);
if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
{
- Bcb = (PBCB)SectionObjectPointers->SharedCacheMap;
- ASSERT(Bcb);
+ SharedCacheMap = SectionObjectPointers->SharedCacheMap;
+ ASSERT(SharedCacheMap);
if (FileOffset)
{
Offset = *FileOffset;
+ RemainingLength = Length;
}
else
{
- Offset.QuadPart = (LONGLONG)0;
- Length = Bcb->FileSize.u.LowPart;
+ Offset.QuadPart = 0;
+ RemainingLength = SharedCacheMap->FileSize.QuadPart;
}
if (IoStatus)
IoStatus->Information = 0;
}
- while (Length > 0)
+ while (RemainingLength > 0)
{
- current = CcRosLookupCacheSegment (Bcb, Offset.u.LowPart);
+ current = CcRosLookupVacb(SharedCacheMap, Offset.QuadPart);
if (current != NULL)
{
if (current->Dirty)
{
- Status = CcRosFlushCacheSegment(current);
+ Status = CcRosFlushVacb(current);
if (!NT_SUCCESS(Status) && IoStatus != NULL)
{
IoStatus->Status = Status;
}
}
- KeReleaseMutex(&current->Mutex, 0);
+
+ CcRosReleaseVacbLock(current);
KeAcquireGuardedMutex(&ViewLock);
- KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
- CcRosCacheSegmentDecRefCount(current);
- KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
+ KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
+ CcRosVacbDecRefCount(current);
+ KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
KeReleaseGuardedMutex(&ViewLock);
}
- Offset.QuadPart += Bcb->CacheSegmentSize;
- if (Length > Bcb->CacheSegmentSize)
- {
- Length -= Bcb->CacheSegmentSize;
- }
- else
- {
- Length = 0;
- }
+ Offset.QuadPart += VACB_MAPPING_GRANULARITY;
+ RemainingLength -= min(RemainingLength, VACB_MAPPING_GRANULARITY);
}
}
else
NTAPI
CcRosDeleteFileCache (
PFILE_OBJECT FileObject,
- PBCB Bcb)
+ PROS_SHARED_CACHE_MAP SharedCacheMap)
/*
- * FUNCTION: Releases the BCB associated with a file object
+ * FUNCTION: Releases the shared cache map associated with a file object
*/
{
PLIST_ENTRY current_entry;
- PCACHE_SEGMENT current;
+ PROS_VACB current;
LIST_ENTRY FreeList;
KIRQL oldIrql;
- ASSERT(Bcb);
+ ASSERT(SharedCacheMap);
- Bcb->RefCount++;
+ SharedCacheMap->OpenCount++;
KeReleaseGuardedMutex(&ViewLock);
CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);
KeAcquireGuardedMutex(&ViewLock);
- Bcb->RefCount--;
- if (Bcb->RefCount == 0)
+ SharedCacheMap->OpenCount--;
+ if (SharedCacheMap->OpenCount == 0)
{
- if (Bcb->BcbRemoveListEntry.Flink != NULL)
- {
- RemoveEntryList(&Bcb->BcbRemoveListEntry);
- Bcb->BcbRemoveListEntry.Flink = NULL;
- }
+ KIRQL OldIrql;
FileObject->SectionObjectPointer->SharedCacheMap = NULL;
/*
- * Release all cache segments.
+ * Release all VACBs
*/
InitializeListHead(&FreeList);
- KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
- current_entry = Bcb->BcbSegmentListHead.Flink;
- while (!IsListEmpty(&Bcb->BcbSegmentListHead))
+ KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
+ while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
{
- current_entry = RemoveTailList(&Bcb->BcbSegmentListHead);
- current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
- RemoveEntryList(&current->CacheSegmentListEntry);
- RemoveEntryList(&current->CacheSegmentLRUListEntry);
+ current_entry = RemoveTailList(&SharedCacheMap->CacheMapVacbListHead);
+ current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
+ RemoveEntryList(&current->VacbLruListEntry);
if (current->Dirty)
{
- RemoveEntryList(&current->DirtySegmentListEntry);
- DirtyPageCount -= Bcb->CacheSegmentSize / PAGE_SIZE;
- DPRINT1("Freeing dirty segment\n");
+ RemoveEntryList(&current->DirtyVacbListEntry);
+ CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+ current->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+ DPRINT1("Freeing dirty VACB\n");
}
- InsertHeadList(&FreeList, &current->BcbSegmentListEntry);
+ InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
}
#if DBG
- Bcb->Trace = FALSE;
+ SharedCacheMap->Trace = FALSE;
#endif
- KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
+ KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
KeReleaseGuardedMutex(&ViewLock);
- ObDereferenceObject (Bcb->FileObject);
+ ObDereferenceObject(SharedCacheMap->FileObject);
while (!IsListEmpty(&FreeList))
{
current_entry = RemoveTailList(&FreeList);
- current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
- CcRosInternalFreeCacheSegment(current);
+ current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
+ CcRosInternalFreeVacb(current);
}
- ExFreeToNPagedLookasideList(&BcbLookasideList, Bcb);
+
+ KeAcquireSpinLock(&iSharedCacheMapLock, &OldIrql);
+ RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
+ KeReleaseSpinLock(&iSharedCacheMapLock, OldIrql);
+
+ ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
KeAcquireGuardedMutex(&ViewLock);
}
- return(STATUS_SUCCESS);
+ return STATUS_SUCCESS;
}
VOID
CcRosReferenceCache (
PFILE_OBJECT FileObject)
{
- PBCB Bcb;
+ PROS_SHARED_CACHE_MAP SharedCacheMap;
KeAcquireGuardedMutex(&ViewLock);
- Bcb = (PBCB)FileObject->SectionObjectPointer->SharedCacheMap;
- ASSERT(Bcb);
- if (Bcb->RefCount == 0)
- {
- ASSERT(Bcb->BcbRemoveListEntry.Flink != NULL);
- RemoveEntryList(&Bcb->BcbRemoveListEntry);
- Bcb->BcbRemoveListEntry.Flink = NULL;
-
- }
- else
- {
- ASSERT(Bcb->BcbRemoveListEntry.Flink == NULL);
- }
- Bcb->RefCount++;
+ SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
+ ASSERT(SharedCacheMap);
+ ASSERT(SharedCacheMap->OpenCount != 0);
+ SharedCacheMap->OpenCount++;
KeReleaseGuardedMutex(&ViewLock);
}
VOID
NTAPI
-CcRosSetRemoveOnClose (
+CcRosRemoveIfClosed (
PSECTION_OBJECT_POINTERS SectionObjectPointer)
{
- PBCB Bcb;
- DPRINT("CcRosSetRemoveOnClose()\n");
+ PROS_SHARED_CACHE_MAP SharedCacheMap;
+ DPRINT("CcRosRemoveIfClosed()\n");
KeAcquireGuardedMutex(&ViewLock);
- Bcb = (PBCB)SectionObjectPointer->SharedCacheMap;
- if (Bcb)
+ SharedCacheMap = SectionObjectPointer->SharedCacheMap;
+ if (SharedCacheMap && SharedCacheMap->OpenCount == 0)
{
- Bcb->RemoveOnClose = TRUE;
- if (Bcb->RefCount == 0)
- {
- CcRosDeleteFileCache(Bcb->FileObject, Bcb);
- }
+ CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap);
}
KeReleaseGuardedMutex(&ViewLock);
}
CcRosDereferenceCache (
PFILE_OBJECT FileObject)
{
- PBCB Bcb;
+ PROS_SHARED_CACHE_MAP SharedCacheMap;
KeAcquireGuardedMutex(&ViewLock);
- Bcb = (PBCB)FileObject->SectionObjectPointer->SharedCacheMap;
- ASSERT(Bcb);
- if (Bcb->RefCount > 0)
+ SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
+ ASSERT(SharedCacheMap);
+ if (SharedCacheMap->OpenCount > 0)
{
- Bcb->RefCount--;
- if (Bcb->RefCount == 0)
+ SharedCacheMap->OpenCount--;
+ if (SharedCacheMap->OpenCount == 0)
{
- MmFreeSectionSegments(Bcb->FileObject);
- CcRosDeleteFileCache(FileObject, Bcb);
+ MmFreeSectionSegments(SharedCacheMap->FileObject);
+ CcRosDeleteFileCache(FileObject, SharedCacheMap);
}
}
KeReleaseGuardedMutex(&ViewLock);
* has been closed.
*/
{
- PBCB Bcb;
+ PROS_SHARED_CACHE_MAP SharedCacheMap;
KeAcquireGuardedMutex(&ViewLock);
if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
{
- Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
+ SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
if (FileObject->PrivateCacheMap != NULL)
{
FileObject->PrivateCacheMap = NULL;
- if (Bcb->RefCount > 0)
+ if (SharedCacheMap->OpenCount > 0)
{
- Bcb->RefCount--;
- if (Bcb->RefCount == 0)
+ SharedCacheMap->OpenCount--;
+ if (SharedCacheMap->OpenCount == 0)
{
- MmFreeSectionSegments(Bcb->FileObject);
- CcRosDeleteFileCache(FileObject, Bcb);
+ MmFreeSectionSegments(SharedCacheMap->FileObject);
+ CcRosDeleteFileCache(FileObject, SharedCacheMap);
}
}
}
}
KeReleaseGuardedMutex(&ViewLock);
- return(STATUS_SUCCESS);
+ return STATUS_SUCCESS;
}
NTSTATUS
CcTryToInitializeFileCache (
PFILE_OBJECT FileObject)
{
- PBCB Bcb;
+ PROS_SHARED_CACHE_MAP SharedCacheMap;
NTSTATUS Status;
KeAcquireGuardedMutex(&ViewLock);
ASSERT(FileObject->SectionObjectPointer);
- Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
- if (Bcb == NULL)
+ SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
+ if (SharedCacheMap == NULL)
{
Status = STATUS_UNSUCCESSFUL;
}
{
if (FileObject->PrivateCacheMap == NULL)
{
- FileObject->PrivateCacheMap = Bcb;
- Bcb->RefCount++;
- }
- if (Bcb->BcbRemoveListEntry.Flink != NULL)
- {
- RemoveEntryList(&Bcb->BcbRemoveListEntry);
- Bcb->BcbRemoveListEntry.Flink = NULL;
+ FileObject->PrivateCacheMap = SharedCacheMap;
+ SharedCacheMap->OpenCount++;
}
Status = STATUS_SUCCESS;
}
NTAPI
CcRosInitializeFileCache (
PFILE_OBJECT FileObject,
- ULONG CacheSegmentSize,
+ PCC_FILE_SIZES FileSizes,
+ BOOLEAN PinAccess,
PCACHE_MANAGER_CALLBACKS CallBacks,
PVOID LazyWriterContext)
/*
- * FUNCTION: Initializes a BCB for a file object
+ * FUNCTION: Initializes a shared cache map for a file object
*/
{
- PBCB Bcb;
+ PROS_SHARED_CACHE_MAP SharedCacheMap;
- Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
- DPRINT("CcRosInitializeFileCache(FileObject 0x%p, Bcb 0x%p, CacheSegmentSize %lu)\n",
- FileObject, Bcb, CacheSegmentSize);
+ SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
+ DPRINT("CcRosInitializeFileCache(FileObject 0x%p, SharedCacheMap 0x%p)\n",
+ FileObject, SharedCacheMap);
KeAcquireGuardedMutex(&ViewLock);
- if (Bcb == NULL)
+ if (SharedCacheMap == NULL)
{
- Bcb = ExAllocateFromNPagedLookasideList(&BcbLookasideList);
- if (Bcb == NULL)
+ KIRQL OldIrql;
+
+ SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
+ if (SharedCacheMap == NULL)
{
KeReleaseGuardedMutex(&ViewLock);
- return(STATUS_UNSUCCESSFUL);
+ return STATUS_INSUFFICIENT_RESOURCES;
}
- memset(Bcb, 0, sizeof(BCB));
+ RtlZeroMemory(SharedCacheMap, sizeof(*SharedCacheMap));
ObReferenceObjectByPointer(FileObject,
FILE_ALL_ACCESS,
NULL,
KernelMode);
- Bcb->FileObject = FileObject;
- Bcb->CacheSegmentSize = CacheSegmentSize;
- Bcb->Callbacks = CallBacks;
- Bcb->LazyWriteContext = LazyWriterContext;
- if (FileObject->FsContext)
- {
- Bcb->AllocationSize =
- ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->AllocationSize;
- Bcb->FileSize =
- ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->FileSize;
- }
- KeInitializeSpinLock(&Bcb->BcbLock);
- InitializeListHead(&Bcb->BcbSegmentListHead);
- FileObject->SectionObjectPointer->SharedCacheMap = Bcb;
+ SharedCacheMap->FileObject = FileObject;
+ SharedCacheMap->Callbacks = CallBacks;
+ SharedCacheMap->LazyWriteContext = LazyWriterContext;
+ SharedCacheMap->SectionSize = FileSizes->AllocationSize;
+ SharedCacheMap->FileSize = FileSizes->FileSize;
+ SharedCacheMap->PinAccess = PinAccess;
+ SharedCacheMap->DirtyPageThreshold = 0;
+ SharedCacheMap->DirtyPages = 0;
+ KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
+ InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
+ FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;
+
+ KeAcquireSpinLock(&iSharedCacheMapLock, &OldIrql);
+ InsertTailList(&CcCleanSharedCacheMapList, &SharedCacheMap->SharedCacheMapLinks);
+ KeReleaseSpinLock(&iSharedCacheMapLock, OldIrql);
}
if (FileObject->PrivateCacheMap == NULL)
{
- FileObject->PrivateCacheMap = Bcb;
- Bcb->RefCount++;
- }
- if (Bcb->BcbRemoveListEntry.Flink != NULL)
- {
- RemoveEntryList(&Bcb->BcbRemoveListEntry);
- Bcb->BcbRemoveListEntry.Flink = NULL;
+ FileObject->PrivateCacheMap = SharedCacheMap;
+ SharedCacheMap->OpenCount++;
}
KeReleaseGuardedMutex(&ViewLock);
- return(STATUS_SUCCESS);
+ return STATUS_SUCCESS;
}
/*
CcGetFileObjectFromSectionPtrs (
IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
{
- PBCB Bcb;
+ PROS_SHARED_CACHE_MAP SharedCacheMap;
+
+ CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p\n", SectionObjectPointers);
+
if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
{
- Bcb = (PBCB)SectionObjectPointers->SharedCacheMap;
- ASSERT(Bcb);
- return Bcb->FileObject;
+ SharedCacheMap = SectionObjectPointers->SharedCacheMap;
+ ASSERT(SharedCacheMap);
+ return SharedCacheMap->FileObject;
}
return NULL;
}
VOID
+NTAPI
+CcShutdownLazyWriter (
+ VOID)
+{
+ /* Simply set the event, lazy writer will stop when it's done */
+ KeSetEvent(&iLazyWriterShutdown, IO_DISK_INCREMENT, FALSE);
+}
+
+BOOLEAN
INIT_FUNCTION
NTAPI
CcInitView (
VOID)
{
-#ifdef CACHE_BITMAP
- PMEMORY_AREA marea;
- PVOID Buffer;
- PHYSICAL_ADDRESS BoundaryAddressMultiple;
-#endif
+ HANDLE LazyWriter;
+ NTSTATUS Status;
+ KPRIORITY Priority;
+ OBJECT_ATTRIBUTES ObjectAttributes;
DPRINT("CcInitView()\n");
-#ifdef CACHE_BITMAP
- BoundaryAddressMultiple.QuadPart = 0;
- CiCacheSegMappingRegionHint = 0;
- CiCacheSegMappingRegionBase = NULL;
- MmLockAddressSpace(MmGetKernelAddressSpace());
+ InitializeListHead(&DirtyVacbListHead);
+ InitializeListHead(&VacbLruListHead);
+ InitializeListHead(&CcDeferredWrites);
+ InitializeListHead(&CcCleanSharedCacheMapList);
+ KeInitializeSpinLock(&CcDeferredWriteSpinLock);
+ KeInitializeSpinLock(&iSharedCacheMapLock);
+ KeInitializeGuardedMutex(&ViewLock);
+ ExInitializeNPagedLookasideList(&iBcbLookasideList,
+ NULL,
+ NULL,
+ 0,
+ sizeof(INTERNAL_BCB),
+ TAG_BCB,
+ 20);
+ ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList,
+ NULL,
+ NULL,
+ 0,
+ sizeof(ROS_SHARED_CACHE_MAP),
+ TAG_SHARED_CACHE_MAP,
+ 20);
+ ExInitializeNPagedLookasideList(&VacbLookasideList,
+ NULL,
+ NULL,
+ 0,
+ sizeof(ROS_VACB),
+ TAG_VACB,
+ 20);
- Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
- MEMORY_AREA_CACHE_SEGMENT,
- &CiCacheSegMappingRegionBase,
- CI_CACHESEG_MAPPING_REGION_SIZE,
- PAGE_READWRITE,
- &marea,
- FALSE,
- 0,
- BoundaryAddressMultiple);
- MmUnlockAddressSpace(MmGetKernelAddressSpace());
- if (!NT_SUCCESS(Status))
+ MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);
+
+ /* Initialize lazy writer events */
+ KeInitializeEvent(&iLazyWriterShutdown, SynchronizationEvent, FALSE);
+ KeInitializeEvent(&iLazyWriterNotify, NotificationEvent, FALSE);
+
+ /* Define lazy writer threshold, depending on system type */
+ switch (MmQuerySystemSize())
{
- KeBugCheck(CACHE_MANAGER);
+ case MmSmallSystem:
+ CcDirtyPageThreshold = MmNumberOfPhysicalPages / 8;
+ break;
+
+ case MmMediumSystem:
+ CcDirtyPageThreshold = MmNumberOfPhysicalPages / 4;
+ break;
+
+ case MmLargeSystem:
+ CcDirtyPageThreshold = MmNumberOfPhysicalPages / 8 + MmNumberOfPhysicalPages / 4;
+ break;
}
- Buffer = ExAllocatePool(NonPagedPool, CI_CACHESEG_MAPPING_REGION_SIZE / (PAGE_SIZE * 8));
- if (!Buffer)
+ /* Start the lazy writer thread */
+ InitializeObjectAttributes(&ObjectAttributes,
+ NULL,
+ OBJ_KERNEL_HANDLE,
+ NULL,
+ NULL);
+ Status = PsCreateSystemThread(&LazyWriter,
+ THREAD_ALL_ACCESS,
+ &ObjectAttributes,
+ NULL,
+ NULL,
+ CciLazyWriter,
+ NULL);
+ if (!NT_SUCCESS(Status))
{
- KeBugCheck(CACHE_MANAGER);
+ return FALSE;
}
- RtlInitializeBitMap(&CiCacheSegMappingRegionAllocMap, Buffer, CI_CACHESEG_MAPPING_REGION_SIZE / PAGE_SIZE);
- RtlClearAllBits(&CiCacheSegMappingRegionAllocMap);
+ Priority = 27;
+ Status = NtSetInformationThread(LazyWriter,
+ ThreadPriority,
+ &Priority,
+ sizeof(Priority));
+ ASSERT(NT_SUCCESS(Status));
- KeInitializeSpinLock(&CiCacheSegMappingRegionLock);
-#endif
- InitializeListHead(&CacheSegmentListHead);
- InitializeListHead(&DirtySegmentListHead);
- InitializeListHead(&CacheSegmentLRUListHead);
- InitializeListHead(&ClosedListHead);
- KeInitializeGuardedMutex(&ViewLock);
- ExInitializeNPagedLookasideList (&iBcbLookasideList,
- NULL,
- NULL,
- 0,
- sizeof(INTERNAL_BCB),
- TAG_IBCB,
- 20);
- ExInitializeNPagedLookasideList (&BcbLookasideList,
- NULL,
- NULL,
- 0,
- sizeof(BCB),
- TAG_BCB,
- 20);
- ExInitializeNPagedLookasideList (&CacheSegLookasideList,
- NULL,
- NULL,
- 0,
- sizeof(CACHE_SEGMENT),
- TAG_CSEG,
- 20);
-
- MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);
+ /* Handle is not needed */
+ ObCloseHandle(LazyWriter, KernelMode);
CcInitCacheZeroPage();
+ return TRUE;
}
-/* EOF */
+#if DBG && defined(KDBG)
+BOOLEAN
+ExpKdbgExtFileCache(ULONG Argc, PCHAR Argv[])
+{
+ PLIST_ENTRY ListEntry;
+ UNICODE_STRING NoName = RTL_CONSTANT_STRING(L"No name for File");
+
+ KdbpPrint(" Usage Summary (in kb)\n");
+ KdbpPrint("Shared\t\tValid\tDirty\tName\n");
+ /* No need to lock the spin lock here, we're in DBG */
+ for (ListEntry = CcCleanSharedCacheMapList.Flink;
+ ListEntry != &CcCleanSharedCacheMapList;
+ ListEntry = ListEntry->Flink)
+ {
+ PLIST_ENTRY Vacbs;
+ ULONG Valid = 0, Dirty = 0;
+ PROS_SHARED_CACHE_MAP SharedCacheMap;
+ PUNICODE_STRING FileName;
+ SharedCacheMap = CONTAINING_RECORD(ListEntry, ROS_SHARED_CACHE_MAP, SharedCacheMapLinks);
+ /* Dirty size */
+ Dirty = (SharedCacheMap->DirtyPages * PAGE_SIZE) / 1024;
+ /* First, count for all the associated VACB */
+ for (Vacbs = SharedCacheMap->CacheMapVacbListHead.Flink;
+ Vacbs != &SharedCacheMap->CacheMapVacbListHead;
+ Vacbs = Vacbs->Flink)
+ {
+ PROS_VACB Vacb;
+ Vacb = CONTAINING_RECORD(Vacbs, ROS_VACB, CacheMapVacbListEntry);
+ if (Vacb->Valid)
+ {
+ Valid += VACB_MAPPING_GRANULARITY / 1024;
+ }
+ }
+ /* Setup name */
+ if (SharedCacheMap->FileObject != NULL &&
+ SharedCacheMap->FileObject->FileName.Length != 0)
+ {
+ FileName = &SharedCacheMap->FileObject->FileName;
+ }
+ else
+ {
+ FileName = &NoName;
+ }
+ /* And print */
+ KdbpPrint("%p\t%d\t%d\t%wZ\n", SharedCacheMap, Valid, Dirty, FileName);
+ }
+ return TRUE;
+}
+#endif
+/* EOF */