#define NDEBUG
#include <debug.h>
-#if defined (ALLOC_PRAGMA)
-#pragma alloc_text(INIT, CcInitView)
-#endif
-
/* GLOBALS *******************************************************************/
LIST_ENTRY DirtyVacbListHead;
static LIST_ENTRY VacbLruListHead;
-KGUARDED_MUTEX ViewLock;
-
NPAGED_LOOKASIDE_LIST iBcbLookasideList;
static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
static NPAGED_LOOKASIDE_LIST VacbLookasideList;
-/* Counters:
- * - Amount of pages flushed by lazy writer
- * - Number of times lazy writer ran
- */
-ULONG CcLazyWritePages = 0;
-ULONG CcLazyWriteIos = 0;
-
/* Internal vars (MS):
* - Threshold above which lazy writer will start action
* - Amount of dirty pages
* - List for deferred writes
* - Spinlock when dealing with the deferred list
* - List for "clean" shared cache maps
- * - One second delay for lazy writer
*/
ULONG CcDirtyPageThreshold = 0;
ULONG CcTotalDirtyPages = 0;
LIST_ENTRY CcDeferredWrites;
KSPIN_LOCK CcDeferredWriteSpinLock;
LIST_ENTRY CcCleanSharedCacheMapList;
-LARGE_INTEGER CcIdleDelay = RTL_CONSTANT_LARGE_INTEGER((LONGLONG)-1*1000*1000*10);
-
-/* Internal vars (ROS):
- * - Event to notify lazy writer to shutdown
- * - Event to inform watchers lazy writer is done for this loop
- * - Lock for the CcCleanSharedCacheMapList list
- */
-KEVENT iLazyWriterShutdown;
-KEVENT iLazyWriterNotify;
-KSPIN_LOCK iSharedCacheMapLock;
#if DBG
-static void CcRosVacbIncRefCount_(PROS_VACB vacb, const char* file, int line)
+/* Debug build only: atomically increment the VACB reference count and, when
+ * per-cache-map tracing is enabled, log the caller's file:line together with
+ * the new count. Returns the post-increment count. */
+ULONG CcRosVacbIncRefCount_(PROS_VACB vacb, PCSTR file, INT line)
{
-    ++vacb->ReferenceCount;
+    ULONG Refs;
+
+    /* The count is now manipulated lock-free with interlocked operations. */
+    Refs = InterlockedIncrement((PLONG)&vacb->ReferenceCount);
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
-                 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
+                 file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
    }
+
+    return Refs;
}
-static void CcRosVacbDecRefCount_(PROS_VACB vacb, const char* file, int line)
+/* Debug build only: atomically decrement the VACB reference count, freeing
+ * the VACB when the count drops to zero. Returns the post-decrement count. */
+ULONG CcRosVacbDecRefCount_(PROS_VACB vacb, PCSTR file, INT line)
{
-    --vacb->ReferenceCount;
-    ASSERT(!(vacb->ReferenceCount == 0 && vacb->Dirty));
-    if (vacb->SharedCacheMap->Trace)
+    ULONG Refs;
+    /* Capture these before the decrement: once the count reaches zero the
+     * VACB is freed below and must not be dereferenced any more. */
+    BOOLEAN VacbDirty = vacb->Dirty;
+    BOOLEAN VacbTrace = vacb->SharedCacheMap->Trace;
+    BOOLEAN VacbPageOut = vacb->PageOut;
+
+    Refs = InterlockedDecrement((PLONG)&vacb->ReferenceCount);
+    /* A dirty VACB still holds the reference taken by CcRosMarkDirtyVacb,
+     * so the count must never reach zero while it is dirty. */
+    ASSERT(!(Refs == 0 && VacbDirty));
+    if (VacbTrace)
    {
        DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
-                 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
+                 file, line, vacb, Refs, VacbDirty, VacbPageOut);
+    }
+
+    if (Refs == 0)
+    {
+        /* Last reference dropped: destroy the VACB. */
+        CcRosInternalFreeVacb(vacb);
    }
+
+    return Refs;
}
-#define CcRosVacbIncRefCount(vacb) CcRosVacbIncRefCount_(vacb,__FILE__,__LINE__)
-#define CcRosVacbDecRefCount(vacb) CcRosVacbDecRefCount_(vacb,__FILE__,__LINE__)
-#else
-#define CcRosVacbIncRefCount(vacb) (++((vacb)->ReferenceCount))
-#define CcRosVacbDecRefCount(vacb) (--((vacb)->ReferenceCount))
-#endif
+/* Debug build only: interlocked read of the current reference count (a no-op
+ * compare-exchange yields an atomic read); traces it when enabled. */
+ULONG CcRosVacbGetRefCount_(PROS_VACB vacb, PCSTR file, INT line)
+{
+    ULONG Refs;
-NTSTATUS
-CcRosInternalFreeVacb(PROS_VACB Vacb);
+    Refs = InterlockedCompareExchange((PLONG)&vacb->ReferenceCount, 0, 0);
+    if (vacb->SharedCacheMap->Trace)
+    {
+        DbgPrint("(%s:%i) VACB %p ==RefCount=%lu, Dirty %u, PageOut %lu\n",
+                 file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
+    }
+
+    return Refs;
+}
#endif
/* FUNCTIONS *****************************************************************/
+/* Toggle per-cache-map VACB tracing (DBG builds); when enabling, dumps the
+ * state of every VACB attached to the map under the locks.
+ * NOTE(review): this hunk elides context lines (local declarations such as
+ * oldirql/current_entry, loop braces, and the else-branch body) — the
+ * definition is incomplete in this view. */
VOID
-NTAPI
CcRosTraceCacheMap (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    BOOLEAN Trace )
{
    DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
-    KeAcquireGuardedMutex(&ViewLock);
-    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldirql);
+    /* The global ViewLock is replaced by the queued master spin lock; the
+     * per-map CacheMapLock is then taken at DPC level underneath it. */
+    oldirql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
+    KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
        current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
        current_entry = current_entry->Flink;
-        DPRINT1("  VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
-                current, current->ReferenceCount, current->Dirty, current->PageOut );
+        DPRINT1("  VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu, BaseAddress %p, FileOffset %I64d\n",
+                current, current->ReferenceCount, current->Dirty, current->PageOut, current->BaseAddress, current->FileOffset.QuadPart);
    }
-    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldirql);
-    KeReleaseGuardedMutex(&ViewLock);
+
+    KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
+    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldirql);
}
else
{
}
+/* Flush one VACB to disk through Mm. The VACB is unmarked dirty up front and
+ * re-marked dirty on failure; on success the cached ValidDataLength is
+ * advanced past the flushed range. Iosb (optional) receives the flush
+ * status/byte count from MmFlushSegment. */
NTSTATUS
-NTAPI
CcRosFlushVacb (
-    PROS_VACB Vacb)
+    _In_ PROS_VACB Vacb,
+    _Out_opt_ PIO_STATUS_BLOCK Iosb)
{
    NTSTATUS Status;
-    KIRQL oldIrql;
+    BOOLEAN HaveLock = FALSE;
+    PROS_SHARED_CACHE_MAP SharedCacheMap = Vacb->SharedCacheMap;
+
+    /* Optimistically drop the dirty state; restored below if the flush fails. */
+    CcRosUnmarkDirtyVacb(Vacb, TRUE);
-    Status = CcWriteVirtualAddress(Vacb);
-    if (NT_SUCCESS(Status))
+    /* Lock for flush, if we are not already the top-level */
+    if (IoGetTopLevelIrp() != (PIRP)FSRTL_CACHE_TOP_LEVEL_IRP)
    {
-        KeAcquireGuardedMutex(&ViewLock);
-        KeAcquireSpinLock(&Vacb->SharedCacheMap->CacheMapLock, &oldIrql);
+        Status = FsRtlAcquireFileForCcFlushEx(Vacb->SharedCacheMap->FileObject);
+        if (!NT_SUCCESS(Status))
+            goto quit;
+        HaveLock = TRUE;
+    }
-        Vacb->Dirty = FALSE;
-        RemoveEntryList(&Vacb->DirtyVacbListEntry);
-        CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
-        Vacb->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
-        CcRosVacbDecRefCount(Vacb);
+    /* Let Mm write the whole view back to the file. */
+    Status = MmFlushSegment(SharedCacheMap->FileObject->SectionObjectPointer,
+                            &Vacb->FileOffset,
+                            VACB_MAPPING_GRANULARITY,
+                            Iosb);
-        KeReleaseSpinLock(&Vacb->SharedCacheMap->CacheMapLock, oldIrql);
-        KeReleaseGuardedMutex(&ViewLock);
+    if (HaveLock)
+    {
+        FsRtlReleaseFileForCcFlush(Vacb->SharedCacheMap->FileObject);
+    }
+
+quit:
+    if (!NT_SUCCESS(Status))
+        CcRosMarkDirtyVacb(Vacb);
+    else
+    {
+        /* Update VDL */
+        if (SharedCacheMap->ValidDataLength.QuadPart < (Vacb->FileOffset.QuadPart + VACB_MAPPING_GRANULARITY))
+        {
+            SharedCacheMap->ValidDataLength.QuadPart = Vacb->FileOffset.QuadPart + VACB_MAPPING_GRANULARITY;
+        }
    }
    return Status;
}
+static
+NTSTATUS
+CcRosDeleteFileCache (
+    PFILE_OBJECT FileObject,
+    PROS_SHARED_CACHE_MAP SharedCacheMap,
+    PKIRQL OldIrql)
+/*
+ * FUNCTION: Releases the shared cache map associated with a file object
+ */
+/* NOTE: Entered with the master queued spin lock held (IRQL in *OldIrql);
+ * the lock is released while flushing/freeing and re-acquired before
+ * returning, storing the fresh IRQL back through OldIrql. */
+{
+    PLIST_ENTRY current_entry;
+
+    ASSERT(SharedCacheMap);
+    ASSERT(SharedCacheMap == FileObject->SectionObjectPointer->SharedCacheMap);
+    ASSERT(SharedCacheMap->OpenCount == 0);
+
+    /* Remove all VACBs from the global lists */
+    KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
+    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
+    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
+    {
+        PROS_VACB Vacb = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
+
+        RemoveEntryList(&Vacb->VacbLruListEntry);
+        InitializeListHead(&Vacb->VacbLruListEntry);
+
+        if (Vacb->Dirty)
+        {
+            CcRosUnmarkDirtyVacb(Vacb, FALSE);
+            /* Mark it as dirty again so we know that we have to flush before freeing it */
+            Vacb->Dirty = TRUE;
+        }
+
+        current_entry = current_entry->Flink;
+    }
+
+    /* Make sure there is no trace anymore of this map */
+    FileObject->SectionObjectPointer->SharedCacheMap = NULL;
+    RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
+
+    KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
+    KeReleaseQueuedSpinLock(LockQueueMasterLock, *OldIrql);
+
+    /* Now that we're out of the locks, free everything for real */
+    while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
+    {
+        PROS_VACB Vacb = CONTAINING_RECORD(RemoveHeadList(&SharedCacheMap->CacheMapVacbListHead), ROS_VACB, CacheMapVacbListEntry);
+        ULONG RefCount;
+
+        InitializeListHead(&Vacb->CacheMapVacbListEntry);
+
+        /* Flush to disk, if needed */
+        if (Vacb->Dirty)
+        {
+            IO_STATUS_BLOCK Iosb;
+            NTSTATUS Status;
+
+            Status = MmFlushSegment(FileObject->SectionObjectPointer, &Vacb->FileOffset, VACB_MAPPING_GRANULARITY, &Iosb);
+            if (!NT_SUCCESS(Status))
+            {
+                /* Complain. There's not much we can do */
+                DPRINT1("Failed to flush VACB to disk while deleting the cache entry. Status: 0x%08x\n", Status);
+            }
+            Vacb->Dirty = FALSE;
+        }
+
+        /* This should drop the last reference and free the VACB. */
+        RefCount = CcRosVacbDecRefCount(Vacb);
+#if DBG // CORE-14578
+        if (RefCount != 0)
+        {
+            DPRINT1("Leaking VACB %p attached to %p (%I64d)\n", Vacb, FileObject, Vacb->FileOffset.QuadPart);
+            DPRINT1("There are: %d references left\n", RefCount);
+            DPRINT1("Map: %d\n", Vacb->MappedCount);
+            DPRINT1("Dirty: %d\n", Vacb->Dirty);
+            if (FileObject->FileName.Length != 0)
+            {
+                DPRINT1("File was: %wZ\n", &FileObject->FileName);
+            }
+            else
+            {
+                DPRINT1("No name for the file\n");
+            }
+        }
+#else
+        (void)RefCount;
+#endif
+    }
+
+    /* Release the references we own */
+    if(SharedCacheMap->Section)
+        ObDereferenceObject(SharedCacheMap->Section);
+    ObDereferenceObject(SharedCacheMap->FileObject);
+
+    ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
+
+    /* Acquire the lock again for our caller */
+    *OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
+
+    return STATUS_SUCCESS;
+}
+
+/* Walk the global dirty-VACB list and flush VACBs until Target pages have
+ * been written (Target == MAXULONG means "flush everything"). CalledFromLazy
+ * skips temporary files and files that disabled write-behind.
+ * NOTE(review): context lines are elided in this hunk — in particular the
+ * declaration of 'Wait' and some braces come from elided context; the
+ * definition is incomplete in this view. */
NTSTATUS
-NTAPI
CcRosFlushDirtyPages (
    ULONG Target,
    PULONG Count,
    BOOLEAN CalledFromLazy)
{
    PLIST_ENTRY current_entry;
-    PROS_VACB current;
-    BOOLEAN Locked;
    NTSTATUS Status;
-    LARGE_INTEGER ZeroTimeout;
+    KIRQL OldIrql;
+    BOOLEAN FlushAll = (Target == MAXULONG);
    DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target);
    (*Count) = 0;
-    ZeroTimeout.QuadPart = 0;
    KeEnterCriticalRegion();
-    KeAcquireGuardedMutex(&ViewLock);
+    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    current_entry = DirtyVacbListHead.Flink;
    if (current_entry == &DirtyVacbListHead)
        DPRINT("No Dirty pages\n");
    }
-    while ((current_entry != &DirtyVacbListHead) && (Target > 0))
+    while (((current_entry != &DirtyVacbListHead) && (Target > 0)) || FlushAll)
    {
+        PROS_SHARED_CACHE_MAP SharedCacheMap;
+        PROS_VACB current;
+        BOOLEAN Locked;
+
+        /* In flush-all mode, restart from the head until the list is empty. */
+        if (current_entry == &DirtyVacbListHead)
+        {
+            ASSERT(FlushAll);
+            if (IsListEmpty(&DirtyVacbListHead))
+                break;
+            current_entry = DirtyVacbListHead.Flink;
+        }
+
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    DirtyVacbListEntry);
        CcRosVacbIncRefCount(current);
+        SharedCacheMap = current->SharedCacheMap;
+
        /* When performing lazy write, don't handle temporary files */
-        if (CalledFromLazy &&
-            BooleanFlagOn(current->SharedCacheMap->FileObject->Flags, FO_TEMPORARY_FILE))
+        if (CalledFromLazy && BooleanFlagOn(SharedCacheMap->FileObject->Flags, FO_TEMPORARY_FILE))
        {
            CcRosVacbDecRefCount(current);
            continue;
        }
-        Locked = current->SharedCacheMap->Callbacks->AcquireForLazyWrite(
-                     current->SharedCacheMap->LazyWriteContext, Wait);
-        if (!Locked)
+        /* Don't attempt to lazy write the files that asked not to */
+        if (CalledFromLazy && BooleanFlagOn(SharedCacheMap->Flags, WRITEBEHIND_DISABLED))
        {
            CcRosVacbDecRefCount(current);
            continue;
        }
-        Status = CcRosAcquireVacbLock(current,
-                                      Wait ? NULL : &ZeroTimeout);
-        if (Status != STATUS_SUCCESS)
+        ASSERT(current->Dirty);
+
+        /* Do not lazy-write the same file concurrently. Fastfat ASSERTS on that */
+        if (SharedCacheMap->Flags & SHARED_CACHE_MAP_IN_LAZYWRITE)
        {
-            current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
-                current->SharedCacheMap->LazyWriteContext);
            CcRosVacbDecRefCount(current);
            continue;
        }
-        ASSERT(current->Dirty);
+        SharedCacheMap->Flags |= SHARED_CACHE_MAP_IN_LAZYWRITE;
+
+        /* Keep a ref on the shared cache map */
+        SharedCacheMap->OpenCount++;
+
+        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
-        /* One reference is added above */
-        if ((current->ReferenceCount > 2 && current->PinCount == 0) ||
-            (current->ReferenceCount > 3 && current->PinCount > 1))
+        Locked = SharedCacheMap->Callbacks->AcquireForLazyWrite(SharedCacheMap->LazyWriteContext, Wait);
+        if (!Locked)
        {
-            CcRosReleaseVacbLock(current);
-            current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
-                current->SharedCacheMap->LazyWriteContext);
+            DPRINT("Not locked!");
+            ASSERT(!Wait);
            CcRosVacbDecRefCount(current);
+            OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
+            SharedCacheMap->Flags &= ~SHARED_CACHE_MAP_IN_LAZYWRITE;
+
+            if (--SharedCacheMap->OpenCount == 0)
+                CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap, &OldIrql);
+
            continue;
        }
-        KeReleaseGuardedMutex(&ViewLock);
-
-        Status = CcRosFlushVacb(current);
+        IO_STATUS_BLOCK Iosb;
+        Status = CcRosFlushVacb(current, &Iosb);
-        CcRosReleaseVacbLock(current);
-        current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
-            current->SharedCacheMap->LazyWriteContext);
+        SharedCacheMap->Callbacks->ReleaseFromLazyWrite(SharedCacheMap->LazyWriteContext);
-        KeAcquireGuardedMutex(&ViewLock);
+        /* We release the VACB before acquiring the lock again, because
+         * CcRosVacbDecRefCount might free the VACB, as CcRosFlushVacb dropped a
+         * Refcount. Freeing must be done outside of the lock.
+         * The refcount is decremented atomically. So this is OK. */
        CcRosVacbDecRefCount(current);
+        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
+
+        SharedCacheMap->Flags &= ~SHARED_CACHE_MAP_IN_LAZYWRITE;
+
+        if (--SharedCacheMap->OpenCount == 0)
+            CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap, &OldIrql);
        if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE) &&
            (Status != STATUS_MEDIA_WRITE_PROTECTED))
            ULONG PagesFreed;
            /* How many pages did we free? */
-            PagesFreed = VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+            PagesFreed = Iosb.Information / PAGE_SIZE;
            (*Count) += PagesFreed;
-            /* Make sure we don't overflow target! */
-            if (Target < PagesFreed)
-            {
-                /* If we would have, jump to zero directly */
-                Target = 0;
-            }
-            else
+            if (!Wait)
            {
-                Target -= PagesFreed;
+                /* Make sure we don't overflow target! */
+                if (Target < PagesFreed)
+                {
+                    /* If we would have, jump to zero directly */
+                    Target = 0;
+                }
+                else
+                {
+                    Target -= PagesFreed;
+                }
            }
        }
        current_entry = DirtyVacbListHead.Flink;
    }
-    KeReleaseGuardedMutex(&ViewLock);
+    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
    KeLeaveCriticalRegion();
    DPRINT("CcRosFlushDirtyPages() finished\n");
    return STATUS_SUCCESS;
}
-/* FIXME: Someday this could somewhat implement write-behind/read-ahead */
VOID
-NTAPI
-CciLazyWriter(PVOID Unused)
-{
- while (TRUE)
- {
- NTSTATUS Status;
- PLIST_ENTRY ListEntry;
- ULONG Target, Count = 0;
-
- /* One per second or until we have to stop */
- Status = KeWaitForSingleObject(&iLazyWriterShutdown,
- Executive,
- KernelMode,
- FALSE,
- &CcIdleDelay);
-
- /* If we succeeed, we've to stop running! */
- if (Status == STATUS_SUCCESS)
- {
- break;
- }
-
- /* We're not sleeping anymore */
- KeClearEvent(&iLazyWriterNotify);
-
- /* Our target is one-eighth of the dirty pages */
- Target = CcTotalDirtyPages / 8;
- if (Target != 0)
- {
- /* Flush! */
- DPRINT("Lazy writer starting (%d)\n", Target);
- CcRosFlushDirtyPages(Target, &Count, FALSE, TRUE);
-
- /* And update stats */
- CcLazyWritePages += Count;
- ++CcLazyWriteIos;
- DPRINT("Lazy writer done (%d)\n", Count);
- }
-
- /* Inform people waiting on us that we're done */
- KeSetEvent(&iLazyWriterNotify, IO_DISK_INCREMENT, FALSE);
-
- /* Likely not optimal, but let's handle one deferred write now! */
- ListEntry = ExInterlockedRemoveHeadList(&CcDeferredWrites, &CcDeferredWriteSpinLock);
- if (ListEntry != NULL)
- {
- PDEFERRED_WRITE Context;
-
- /* Extract the context */
- Context = CONTAINING_RECORD(ListEntry, DEFERRED_WRITE, DeferredWriteLinks);
- ASSERT(Context->NodeTypeCode == NODE_TYPE_DEFERRED_WRITE);
-
- /* Can we write now? */
- if (CcCanIWrite(Context->FileObject, Context->BytesToWrite, FALSE, TRUE))
- {
- /* Yes! Do it, and destroy the associated context */
- Context->PostRoutine(Context->Context1, Context->Context2);
- ExFreePoolWithTag(Context, 'CcDw');
- }
- else
- {
- /* Otherwise, requeue it, but in tail, so that it doesn't block others
- * This is clearly to improve, but given the poor algorithm used now
- * It's better than nothing!
- */
- ExInterlockedInsertTailList(&CcDeferredWrites,
- &Context->DeferredWriteLinks,
- &CcDeferredWriteSpinLock);
- }
- }
- }
-}
-
-NTSTATUS
-CcRosTrimCache (
-    ULONG Target,
-    ULONG Priority,
-    PULONG NrFreed)
+/* NOTE(review): this hunk elides context lines (locals, FreeList/FlushedPages
+ * declarations, the retry loop tail) — the definition is incomplete in this
+ * view. Mis-encoded "&current" occurrences (garbled to "¤t" by an HTML
+ * entity) have been repaired throughout this hunk. */
+CcRosTrimCache(
+    _In_ ULONG Target,
+    _Out_ PULONG NrFreed)
/*
 * FUNCTION: Try to free some memory from the file cache.
 * ARGUMENTS:
 *       Target - The number of pages to be freed.
- *       Priority - The priority of free (currently unused).
 *       NrFreed - Points to a variable where the number of pages
 *                 actually freed is returned.
 */
    *NrFreed = 0;
retry:
-    KeAcquireGuardedMutex(&ViewLock);
+    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    current_entry = VacbLruListHead.Flink;
    while (current_entry != &VacbLruListHead)
    {
+        ULONG Refs;
+
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    VacbLruListEntry);
        current_entry = current_entry->Flink;
-        KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
+        KeAcquireSpinLockAtDpcLevel(&current->SharedCacheMap->CacheMapLock);
        /* Reference the VACB */
        CcRosVacbIncRefCount(current);
        /* Check if it's mapped and not dirty */
-        if (current->MappedCount > 0 && !current->Dirty)
+        if (InterlockedCompareExchange((PLONG)&current->MappedCount, 0, 0) > 0 && !current->Dirty)
        {
-            /* We have to break these locks because Cc sucks */
-            KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
-            KeReleaseGuardedMutex(&ViewLock);
-
            /* Page out the VACB */
            for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++)
            {
                MmPageOutPhysicalAddress(Page);
            }
-
-            /* Reacquire the locks */
-            KeAcquireGuardedMutex(&ViewLock);
-            KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
        }
        /* Dereference the VACB */
-        CcRosVacbDecRefCount(current);
+        Refs = CcRosVacbDecRefCount(current);
        /* Check if we can free this entry now */
-        if (current->ReferenceCount == 0)
+        if (Refs < 2)
        {
            ASSERT(!current->Dirty);
            ASSERT(!current->MappedCount);
+            ASSERT(Refs == 1);
            RemoveEntryList(&current->CacheMapVacbListEntry);
            RemoveEntryList(&current->VacbLruListEntry);
+            InitializeListHead(&current->VacbLruListEntry);
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
            /* Calculate how many pages we freed for Mm */
            (*NrFreed) += PagesFreed;
        }
-        KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
+        KeReleaseSpinLockFromDpcLevel(&current->SharedCacheMap->CacheMapLock);
    }
-    KeReleaseGuardedMutex(&ViewLock);
+    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
    /* Try flushing pages if we haven't met our target */
    if ((Target > 0) && !FlushedPages)
    while (!IsListEmpty(&FreeList))
    {
+        ULONG Refs;
+
        current_entry = RemoveHeadList(&FreeList);
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
-        CcRosInternalFreeVacb(current);
+        /* Dropping the last reference frees the VACB (see CcRosVacbDecRefCount_). */
+        InitializeListHead(&current->CacheMapVacbListEntry);
+        Refs = CcRosVacbDecRefCount(current);
+        ASSERT(Refs == 0);
    }
    DPRINT("Evicted %lu cache pages\n", (*NrFreed));
-
-    return STATUS_SUCCESS;
}
+/* Drop the caller's reference on a VACB obtained via CcRosGetVacb/Lookup,
+ * optionally recording that it became dirty and/or newly mapped.
+ * NOTE(review): the body of the "if (Dirty && !Vacb->Dirty)" branch is
+ * elided in this hunk — the definition is incomplete in this view. */
NTSTATUS
-NTAPI
CcRosReleaseVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    PROS_VACB Vacb,
-    BOOLEAN Valid,
    BOOLEAN Dirty,
    BOOLEAN Mapped)
{
+    ULONG Refs;
    ASSERT(SharedCacheMap);
-    DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
-           SharedCacheMap, Vacb, Valid);
-
-    Vacb->Valid = Valid;
+    DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p)\n", SharedCacheMap, Vacb);
    if (Dirty && !Vacb->Dirty)
    {
        if (Mapped)
        {
-        Vacb->MappedCount++;
-    }
-    CcRosVacbDecRefCount(Vacb);
-    if (Mapped && (Vacb->MappedCount == 1))
-    {
-        CcRosVacbIncRefCount(Vacb);
+        /* First mapping takes an extra reference that is dropped on unmap. */
+        if (InterlockedIncrement((PLONG)&Vacb->MappedCount) == 1)
+        {
+            CcRosVacbIncRefCount(Vacb);
+        }
    }
-    CcRosReleaseVacbLock(Vacb);
+    Refs = CcRosVacbDecRefCount(Vacb);
+    ASSERT(Refs > 0);
    return STATUS_SUCCESS;
}
/* Returns with a reference held on the VACB (the old per-VACB lock is gone) */
+/* Look up the VACB covering FileOffset in the map's ordered VACB list.
+ * Returns NULL when no view covers the offset.
+ * NOTE(review): local declarations and part of the loop body (the range
+ * check and early-exit test) are elided in this hunk. */
PROS_VACB
-NTAPI
CcRosLookupVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset)
    DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
           SharedCacheMap, FileOffset);
-    KeAcquireGuardedMutex(&ViewLock);
-    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
+    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
+    KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
                             FileOffset))
        {
            /* Reference before dropping the locks so the VACB cannot vanish. */
            CcRosVacbIncRefCount(current);
-            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
-            KeReleaseGuardedMutex(&ViewLock);
-            CcRosAcquireVacbLock(current, NULL);
+            KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
+            KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
            return current;
        }
        if (current->FileOffset.QuadPart > FileOffset)
        current_entry = current_entry->Flink;
    }
-    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
-    KeReleaseGuardedMutex(&ViewLock);
+    KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
+    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
    return NULL;
}
+/* Put a clean VACB on the global dirty list, account its pages as dirty,
+ * take the dirty-list reference, and kick the lazy writer if it is idle.
+ * NOTE(review): local declarations (SharedCacheMap, oldIrql) are elided in
+ * this hunk. */
VOID
-NTAPI
CcRosMarkDirtyVacb (
    PROS_VACB Vacb)
{
    SharedCacheMap = Vacb->SharedCacheMap;
-    KeAcquireGuardedMutex(&ViewLock);
-    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
+    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
+    KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
    ASSERT(!Vacb->Dirty);
    InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
+    /* FIXME: There is no reason to account for the whole VACB. */
    CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    Vacb->SharedCacheMap->DirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+    /* The dirty list holds its own reference, dropped in CcRosUnmarkDirtyVacb. */
    CcRosVacbIncRefCount(Vacb);
    Vacb->Dirty = TRUE;
-    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
-    KeReleaseGuardedMutex(&ViewLock);
-}
-
-NTSTATUS
-NTAPI
-CcRosMarkDirtyFile (
-    PROS_SHARED_CACHE_MAP SharedCacheMap,
-    LONGLONG FileOffset)
-{
-    PROS_VACB Vacb;
-
-    ASSERT(SharedCacheMap);
-
-    DPRINT("CcRosMarkDirtyVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
-           SharedCacheMap, FileOffset);
-
-    Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
-    if (Vacb == NULL)
-    {
-        KeBugCheck(CACHE_MANAGER);
-    }
+    KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
-    if (!Vacb->Dirty)
+    /* Schedule a lazy writer run now that we have a dirty VACB */
+    if (!LazyWriter.ScanActive)
    {
-        CcRosMarkDirtyVacb(Vacb);
+        CcScheduleLazyWriteScan(FALSE);
    }
-
-    CcRosReleaseVacbLock(Vacb);
-
-    return STATUS_SUCCESS;
+    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
}
-NTSTATUS
-NTAPI
-CcRosUnmapVacb (
-    PROS_SHARED_CACHE_MAP SharedCacheMap,
-    LONGLONG FileOffset,
-    BOOLEAN NowDirty)
+/* Reverse of CcRosMarkDirtyVacb: take the VACB off the dirty list, undo the
+ * dirty-page accounting and drop the dirty-list reference. LockViews selects
+ * whether this routine acquires the master/map locks itself (TRUE) or the
+ * caller already holds them (FALSE, as in CcRosDeleteFileCache). */
+VOID
+CcRosUnmarkDirtyVacb (
+    PROS_VACB Vacb,
+    BOOLEAN LockViews)
{
-    PROS_VACB Vacb;
-
-    ASSERT(SharedCacheMap);
+    KIRQL oldIrql;
+    PROS_SHARED_CACHE_MAP SharedCacheMap;
-    DPRINT("CcRosUnmapVacb(SharedCacheMap 0x%p, FileOffset %I64u, NowDirty %u)\n",
-           SharedCacheMap, FileOffset, NowDirty);
+    SharedCacheMap = Vacb->SharedCacheMap;
-    Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
-    if (Vacb == NULL)
+    if (LockViews)
    {
-        return STATUS_UNSUCCESSFUL;
+        oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
+        KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
    }
-    if (NowDirty && !Vacb->Dirty)
-    {
-        CcRosMarkDirtyVacb(Vacb);
-    }
+    ASSERT(Vacb->Dirty);
+
+    Vacb->Dirty = FALSE;
+
+    /* Re-initialize the entry so a later RemoveEntryList is harmless. */
+    RemoveEntryList(&Vacb->DirtyVacbListEntry);
+    InitializeListHead(&Vacb->DirtyVacbListEntry);
-    Vacb->MappedCount--;
+    CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+    Vacb->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    CcRosVacbDecRefCount(Vacb);
-    if (Vacb->MappedCount == 0)
+
+    if (LockViews)
    {
-        CcRosVacbDecRefCount(Vacb);
+        KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
+        KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
    }
-
-    CcRosReleaseVacbLock(Vacb);
-
-    return STATUS_SUCCESS;
}
-static
-NTSTATUS
-CcRosMapVacb(
-    PROS_VACB Vacb)
+/* Scan the LRU list for one completely unused VACB (refcount 1, i.e. only
+ * the list reference), unlink it from all lists and free it by dropping the
+ * last reference. Returns TRUE if one was freed, FALSE if none was found.
+ * Mis-encoded "&current" occurrences (garbled to "¤t" by an HTML entity)
+ * have been repaired throughout this hunk. */
+BOOLEAN
+CcRosFreeOneUnusedVacb(
+    VOID)
{
-    ULONG i;
-    NTSTATUS Status;
-    ULONG_PTR NumberOfPages;
-
-    /* Create a memory area. */
-    MmLockAddressSpace(MmGetKernelAddressSpace());
-    Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
-                                0, // nothing checks for VACB mareas, so set to 0
-                                &Vacb->BaseAddress,
-                                VACB_MAPPING_GRANULARITY,
-                                PAGE_READWRITE,
-                                (PMEMORY_AREA*)&Vacb->MemoryArea,
-                                0,
-                                PAGE_SIZE);
-    MmUnlockAddressSpace(MmGetKernelAddressSpace());
-    if (!NT_SUCCESS(Status))
-    {
-        DPRINT1("MmCreateMemoryArea failed with %lx for VACB %p\n", Status, Vacb);
-        return Status;
-    }
+    KIRQL oldIrql;
+    PLIST_ENTRY current_entry;
+    PROS_VACB to_free = NULL;
-    ASSERT(((ULONG_PTR)Vacb->BaseAddress % PAGE_SIZE) == 0);
-    ASSERT((ULONG_PTR)Vacb->BaseAddress > (ULONG_PTR)MmSystemRangeStart);
+    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
-    /* Create a virtual mapping for this memory area */
-    NumberOfPages = BYTES_TO_PAGES(VACB_MAPPING_GRANULARITY);
-    for (i = 0; i < NumberOfPages; i++)
+    /* Browse all the available VACB */
+    current_entry = VacbLruListHead.Flink;
+    while ((current_entry != &VacbLruListHead) && (to_free == NULL))
    {
-        PFN_NUMBER PageFrameNumber;
+        ULONG Refs;
+        PROS_VACB current;
-        MI_SET_USAGE(MI_USAGE_CACHE);
-        Status = MmRequestPageMemoryConsumer(MC_CACHE, TRUE, &PageFrameNumber);
-        if (PageFrameNumber == 0)
-        {
-            DPRINT1("Unable to allocate page\n");
-            KeBugCheck(MEMORY_MANAGEMENT);
-        }
+        current = CONTAINING_RECORD(current_entry,
+                                    ROS_VACB,
+                                    VacbLruListEntry);
-        Status = MmCreateVirtualMapping(NULL,
-                                        (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE)),
-                                        PAGE_READWRITE,
-                                        &PageFrameNumber,
-                                        1);
-        if (!NT_SUCCESS(Status))
+        KeAcquireSpinLockAtDpcLevel(&current->SharedCacheMap->CacheMapLock);
+
+        /* Only deal with unused VACB, we will free them */
+        Refs = CcRosVacbGetRefCount(current);
+        if (Refs < 2)
        {
-            DPRINT1("Unable to create virtual mapping\n");
-            KeBugCheck(MEMORY_MANAGEMENT);
+            ASSERT(!current->Dirty);
+            ASSERT(!current->MappedCount);
+            ASSERT(Refs == 1);
+
+            /* Reset it, this is the one we want to free */
+            RemoveEntryList(&current->CacheMapVacbListEntry);
+            InitializeListHead(&current->CacheMapVacbListEntry);
+            RemoveEntryList(&current->VacbLruListEntry);
+            InitializeListHead(&current->VacbLruListEntry);
+
+            to_free = current;
        }
+
+        KeReleaseSpinLockFromDpcLevel(&current->SharedCacheMap->CacheMapLock);
+
+        current_entry = current_entry->Flink;
    }
-    return STATUS_SUCCESS;
+    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
+
+    /* And now, free the VACB that we found, if any. */
+    if (to_free == NULL)
+    {
+        return FALSE;
+    }
+
+    /* This must be its last ref */
+    NT_VERIFY(CcRosVacbDecRefCount(to_free) == 0);
+
+    return TRUE;
}
static
+/* Allocate and map a new VACB for FileOffset (rounded down to the view
+ * granularity), handling the race with a concurrent creator by returning the
+ * already-inserted VACB instead. When system space is exhausted, one unused
+ * VACB is evicted and the mapping retried.
+ * NOTE(review): the function signature line is elided in this hunk.
+ * Mis-encoded "&current" occurrences (garbled to "¤t" by an HTML entity)
+ * have been repaired throughout this hunk. */
    PLIST_ENTRY current_entry;
    NTSTATUS Status;
    KIRQL oldIrql;
+    ULONG Refs;
+    SIZE_T ViewSize = VACB_MAPPING_GRANULARITY;
    ASSERT(SharedCacheMap);
    DPRINT("CcRosCreateVacb()\n");
-    if (FileOffset >= SharedCacheMap->SectionSize.QuadPart)
-    {
-        *Vacb = NULL;
-        return STATUS_INVALID_PARAMETER;
-    }
-
    current = ExAllocateFromNPagedLookasideList(&VacbLookasideList);
    current->BaseAddress = NULL;
-    current->Valid = FALSE;
    current->Dirty = FALSE;
    current->PageOut = FALSE;
    current->FileOffset.QuadPart = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
    current->SharedCacheMap = SharedCacheMap;
-#if DBG
-    if (SharedCacheMap->Trace)
-    {
-        DPRINT1("CacheMap 0x%p: new VACB: 0x%p\n", SharedCacheMap, current);
-    }
-#endif
    current->MappedCount = 0;
-    current->DirtyVacbListEntry.Flink = NULL;
-    current->DirtyVacbListEntry.Blink = NULL;
-    current->ReferenceCount = 1;
-    current->PinCount = 0;
-    KeInitializeMutex(&current->Mutex, 0);
-    CcRosAcquireVacbLock(current, NULL);
-    KeAcquireGuardedMutex(&ViewLock);
+    current->ReferenceCount = 0;
+    InitializeListHead(&current->CacheMapVacbListEntry);
+    InitializeListHead(&current->DirtyVacbListEntry);
+    InitializeListHead(&current->VacbLruListEntry);
-    *Vacb = current;
-    /* There is window between the call to CcRosLookupVacb
-     * and CcRosCreateVacb. We must check if a VACB for the
-     * file offset exist. If there is a VACB, we release
-     * our newly created VACB and return the existing one.
-     */
-    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
-    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
+    CcRosVacbIncRefCount(current);
+
+    while (TRUE)
+    {
+        /* Map VACB in system space */
+        Status = MmMapViewInSystemSpaceEx(SharedCacheMap->Section, &current->BaseAddress, &ViewSize, &current->FileOffset, 0);
+        if (NT_SUCCESS(Status))
+        {
+            break;
+        }
+
+        /*
+         * If no space left, try to prune one unused VACB to recover space to map our VACB.
+         * If it succeeds, retry to map, otherwise just fail.
+         */
+        if (!CcRosFreeOneUnusedVacb())
+        {
+            ExFreeToNPagedLookasideList(&VacbLookasideList, current);
+            return Status;
+        }
+    }
+
+#if DBG
+    if (SharedCacheMap->Trace)
+    {
+        DPRINT1("CacheMap 0x%p: new VACB: 0x%p, file offset %I64d, BaseAddress %p\n",
+                SharedCacheMap, current, current->FileOffset.QuadPart, current->BaseAddress);
+    }
+#endif
+
+    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
+
+    *Vacb = current;
+    /* There is window between the call to CcRosLookupVacb
+     * and CcRosCreateVacb. We must check if a VACB for the
+     * file offset exist. If there is a VACB, we release
+     * our newly created VACB and return the existing one.
+     */
+    KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
+    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
    previous = NULL;
    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
    {
                             FileOffset))
        {
            /* Someone beat us to it: hand out the existing VACB. */
            CcRosVacbIncRefCount(current);
-            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
+            KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
#if DBG
            if (SharedCacheMap->Trace)
            {
                        current);
            }
#endif
-            CcRosReleaseVacbLock(*Vacb);
-            KeReleaseGuardedMutex(&ViewLock);
-            ExFreeToNPagedLookasideList(&VacbLookasideList, *Vacb);
+            KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
+
+            /* Dropping our only reference unmaps and frees the duplicate. */
+            Refs = CcRosVacbDecRefCount(*Vacb);
+            ASSERT(Refs == 0);
+
            *Vacb = current;
-            CcRosAcquireVacbLock(current, NULL);
            return STATUS_SUCCESS;
        }
        if (current->FileOffset.QuadPart < FileOffset)
        {
            InsertHeadList(&SharedCacheMap->CacheMapVacbListHead, &current->CacheMapVacbListEntry);
        }
-    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
+    KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
    InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
-    KeReleaseGuardedMutex(&ViewLock);
-    MI_SET_USAGE(MI_USAGE_CACHE);
-#if MI_TRACE_PFNS
-    if ((SharedCacheMap->FileObject) && (SharedCacheMap->FileObject->FileName.Buffer))
+    /* Reference it to allow release */
+    CcRosVacbIncRefCount(current);
+
+    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
+
+    return Status;
+}
+
+BOOLEAN
+CcRosEnsureVacbResident(
+ _In_ PROS_VACB Vacb,
+ _In_ BOOLEAN Wait,
+ _In_ BOOLEAN NoRead,
+ _In_ ULONG Offset,
+ _In_ ULONG Length
+)
+{
+ PVOID BaseAddress;
+
+ ASSERT((Offset + Length) <= VACB_MAPPING_GRANULARITY);
+
+#if 0
+ if ((Vacb->FileOffset.QuadPart + Offset) > Vacb->SharedCacheMap->SectionSize.QuadPart)
{
- PWCHAR pos;
- ULONG len = 0;
- pos = wcsrchr(SharedCacheMap->FileObject->FileName.Buffer, '\\');
- if (pos)
- {
- len = wcslen(pos) * sizeof(WCHAR);
- snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
- }
- else
- {
- snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%wZ", &SharedCacheMap->FileObject->FileName);
- }
+ DPRINT1("Vacb read beyond the file size!\n");
+ return FALSE;
}
#endif
- Status = CcRosMapVacb(current);
- if (!NT_SUCCESS(Status))
+ BaseAddress = (PVOID)((ULONG_PTR)Vacb->BaseAddress + Offset);
+
+ /* Check if the pages are resident */
+ if (!MmArePagesResident(NULL, BaseAddress, Length))
{
- RemoveEntryList(&current->CacheMapVacbListEntry);
- RemoveEntryList(&current->VacbLruListEntry);
- CcRosReleaseVacbLock(current);
- ExFreeToNPagedLookasideList(&VacbLookasideList, current);
+ if (!Wait)
+ {
+ return FALSE;
+ }
+
+ if (!NoRead)
+ {
+ PROS_SHARED_CACHE_MAP SharedCacheMap = Vacb->SharedCacheMap;
+ NTSTATUS Status = MmMakeDataSectionResident(SharedCacheMap->FileObject->SectionObjectPointer,
+ Vacb->FileOffset.QuadPart + Offset,
+ Length,
+ &SharedCacheMap->ValidDataLength);
+ if (!NT_SUCCESS(Status))
+ ExRaiseStatus(Status);
+ }
}
- return Status;
+ return TRUE;
}
+
NTSTATUS
-NTAPI
CcRosGetVacb (
PROS_SHARED_CACHE_MAP SharedCacheMap,
LONGLONG FileOffset,
- PLONGLONG BaseOffset,
- PVOID* BaseAddress,
- PBOOLEAN UptoDate,
PROS_VACB *Vacb)
{
PROS_VACB current;
NTSTATUS Status;
+ ULONG Refs;
+ KIRQL OldIrql;
ASSERT(SharedCacheMap);
}
}
- KeAcquireGuardedMutex(&ViewLock);
+ Refs = CcRosVacbGetRefCount(current);
+
+ OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
/* Move to the tail of the LRU list */
RemoveEntryList(&current->VacbLruListEntry);
InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
- KeReleaseGuardedMutex(&ViewLock);
+ KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
/*
- * Return information about the VACB to the caller.
+ * Return the VACB to the caller.
*/
- *UptoDate = current->Valid;
- *BaseAddress = current->BaseAddress;
- DPRINT("*BaseAddress %p\n", *BaseAddress);
*Vacb = current;
- *BaseOffset = current->FileOffset.QuadPart;
+
+ ASSERT(Refs > 1);
+
return STATUS_SUCCESS;
}
NTSTATUS
-NTAPI
CcRosRequestVacb (
PROS_SHARED_CACHE_MAP SharedCacheMap,
LONGLONG FileOffset,
- PVOID* BaseAddress,
- PBOOLEAN UptoDate,
PROS_VACB *Vacb)
/*
* FUNCTION: Request a page mapping for a shared cache map
*/
{
- LONGLONG BaseOffset;
ASSERT(SharedCacheMap);
return CcRosGetVacb(SharedCacheMap,
FileOffset,
- &BaseOffset,
- BaseAddress,
- UptoDate,
Vacb);
}
-static
-VOID
-CcFreeCachePage (
- PVOID Context,
- MEMORY_AREA* MemoryArea,
- PVOID Address,
- PFN_NUMBER Page,
- SWAPENTRY SwapEntry,
- BOOLEAN Dirty)
-{
- ASSERT(SwapEntry == 0);
- if (Page != 0)
- {
- ASSERT(MmGetReferenceCountPage(Page) == 1);
- MmReleasePageMemoryConsumer(MC_CACHE, Page);
- }
-}
-
NTSTATUS
CcRosInternalFreeVacb (
PROS_VACB Vacb)
* FUNCTION: Releases a VACB associated with a shared cache map
*/
{
+ NTSTATUS Status;
+
DPRINT("Freeing VACB 0x%p\n", Vacb);
#if DBG
if (Vacb->SharedCacheMap->Trace)
}
#endif
- MmLockAddressSpace(MmGetKernelAddressSpace());
- MmFreeMemoryArea(MmGetKernelAddressSpace(),
- Vacb->MemoryArea,
- CcFreeCachePage,
- NULL);
- MmUnlockAddressSpace(MmGetKernelAddressSpace());
+ if (Vacb->ReferenceCount != 0)
+ {
+ DPRINT1("Invalid free: %ld\n", Vacb->ReferenceCount);
+ if (Vacb->SharedCacheMap->FileObject && Vacb->SharedCacheMap->FileObject->FileName.Length)
+ {
+ DPRINT1("For file: %wZ\n", &Vacb->SharedCacheMap->FileObject->FileName);
+ }
+ }
+
+ ASSERT(Vacb->ReferenceCount == 0);
+ ASSERT(IsListEmpty(&Vacb->CacheMapVacbListEntry));
+ ASSERT(IsListEmpty(&Vacb->DirtyVacbListEntry));
+ ASSERT(IsListEmpty(&Vacb->VacbLruListEntry));
+
+ /* Delete the mapping */
+ Status = MmUnmapViewInSystemSpace(Vacb->BaseAddress);
+ if (!NT_SUCCESS(Status))
+ {
+ DPRINT1("Failed to unmap VACB from System address space! Status 0x%08X\n", Status);
+ ASSERT(FALSE);
+ /* Proceed with the deletion anyway */
+ }
+ RtlFillMemory(Vacb, sizeof(*Vacb), 0xfd);
ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
return STATUS_SUCCESS;
}
OUT PIO_STATUS_BLOCK IoStatus)
{
PROS_SHARED_CACHE_MAP SharedCacheMap;
- LARGE_INTEGER Offset;
- LONGLONG RemainingLength;
- PROS_VACB current;
+ LONGLONG FlushStart, FlushEnd;
NTSTATUS Status;
- KIRQL oldIrql;
- CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
- SectionObjectPointers, FileOffset, Length);
+ CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=0x%I64X Length=%lu\n",
+ SectionObjectPointers, FileOffset ? FileOffset->QuadPart : 0LL, Length);
- DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
- SectionObjectPointers, FileOffset, Length, IoStatus);
-
- if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
+ if (!SectionObjectPointers)
{
- SharedCacheMap = SectionObjectPointers->SharedCacheMap;
- ASSERT(SharedCacheMap);
- if (FileOffset)
- {
- Offset = *FileOffset;
- RemainingLength = Length;
- }
- else
- {
- Offset.QuadPart = 0;
- RemainingLength = SharedCacheMap->FileSize.QuadPart;
- }
-
- if (IoStatus)
- {
- IoStatus->Status = STATUS_SUCCESS;
- IoStatus->Information = 0;
- }
-
- while (RemainingLength > 0)
- {
- current = CcRosLookupVacb(SharedCacheMap, Offset.QuadPart);
- if (current != NULL)
- {
- if (current->Dirty)
- {
- Status = CcRosFlushVacb(current);
- if (!NT_SUCCESS(Status) && IoStatus != NULL)
- {
- IoStatus->Status = Status;
- }
- }
-
- CcRosReleaseVacbLock(current);
+ Status = STATUS_INVALID_PARAMETER;
+ goto quit;
+ }
- KeAcquireGuardedMutex(&ViewLock);
- KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
- CcRosVacbDecRefCount(current);
- KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
- KeReleaseGuardedMutex(&ViewLock);
- }
+ if (!SectionObjectPointers->SharedCacheMap)
+ {
+ /* Forward this to Mm */
+ MmFlushSegment(SectionObjectPointers, FileOffset, Length, IoStatus);
+ return;
+ }
- Offset.QuadPart += VACB_MAPPING_GRANULARITY;
- RemainingLength -= min(RemainingLength, VACB_MAPPING_GRANULARITY);
- }
+ SharedCacheMap = SectionObjectPointers->SharedCacheMap;
+ ASSERT(SharedCacheMap);
+ if (FileOffset)
+ {
+ FlushStart = FileOffset->QuadPart;
+ Status = RtlLongLongAdd(FlushStart, Length, &FlushEnd);
+ if (!NT_SUCCESS(Status))
+ goto quit;
}
else
{
- if (IoStatus)
- {
- IoStatus->Status = STATUS_INVALID_PARAMETER;
- }
+ FlushStart = 0;
+ FlushEnd = SharedCacheMap->FileSize.QuadPart;
}
-}
-
-NTSTATUS
-NTAPI
-CcRosDeleteFileCache (
- PFILE_OBJECT FileObject,
- PROS_SHARED_CACHE_MAP SharedCacheMap)
-/*
- * FUNCTION: Releases the shared cache map associated with a file object
- */
-{
- PLIST_ENTRY current_entry;
- PROS_VACB current;
- LIST_ENTRY FreeList;
- KIRQL oldIrql;
- ASSERT(SharedCacheMap);
-
- SharedCacheMap->OpenCount++;
- KeReleaseGuardedMutex(&ViewLock);
-
- CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);
-
- KeAcquireGuardedMutex(&ViewLock);
- SharedCacheMap->OpenCount--;
- if (SharedCacheMap->OpenCount == 0)
+ Status = STATUS_SUCCESS;
+ if (IoStatus)
{
- KIRQL OldIrql;
+ IoStatus->Information = 0;
+ }
- FileObject->SectionObjectPointer->SharedCacheMap = NULL;
+ /*
+ * We flush the VACBs that we find here.
+ * If there is no (dirty) VACB, it doesn't mean that there is no data to flush, so we call Mm to be sure.
+ * This is suboptimal, but this is due to the lack of granularity of how we track dirty cache data
+ */
+ while (FlushStart < FlushEnd)
+ {
+ BOOLEAN DirtyVacb = FALSE;
+ PROS_VACB vacb = CcRosLookupVacb(SharedCacheMap, FlushStart);
- /*
- * Release all VACBs
- */
- InitializeListHead(&FreeList);
- KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
- while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
+ if (vacb != NULL)
{
- current_entry = RemoveTailList(&SharedCacheMap->CacheMapVacbListHead);
- KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
-
- current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
- CcRosAcquireVacbLock(current, NULL);
- RemoveEntryList(&current->VacbLruListEntry);
- if (current->Dirty)
+ if (vacb->Dirty)
{
- RemoveEntryList(&current->DirtyVacbListEntry);
- CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
- current->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
- DPRINT1("Freeing dirty VACB\n");
+ IO_STATUS_BLOCK VacbIosb = { 0 };
+ Status = CcRosFlushVacb(vacb, &VacbIosb);
+ if (!NT_SUCCESS(Status))
+ {
+ goto quit;
+ }
+ DirtyVacb = TRUE;
+
+ if (IoStatus)
+ IoStatus->Information += VacbIosb.Information;
}
- InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
- CcRosReleaseVacbLock(current);
- KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
+ CcRosReleaseVacb(SharedCacheMap, vacb, FALSE, FALSE);
}
-#if DBG
- SharedCacheMap->Trace = FALSE;
-#endif
- KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
- KeReleaseGuardedMutex(&ViewLock);
- ObDereferenceObject(SharedCacheMap->FileObject);
-
- while (!IsListEmpty(&FreeList))
+ if (!DirtyVacb)
{
- current_entry = RemoveTailList(&FreeList);
- current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
- CcRosInternalFreeVacb(current);
- }
+ IO_STATUS_BLOCK MmIosb;
+ LARGE_INTEGER MmOffset;
- KeAcquireSpinLock(&iSharedCacheMapLock, &OldIrql);
- RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
- KeReleaseSpinLock(&iSharedCacheMapLock, OldIrql);
+ MmOffset.QuadPart = FlushStart;
- ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
- KeAcquireGuardedMutex(&ViewLock);
- }
- return STATUS_SUCCESS;
-}
+ if (FlushEnd - (FlushEnd % VACB_MAPPING_GRANULARITY) <= FlushStart)
+ {
+ /* The whole range fits within a VACB chunk. */
+ Status = MmFlushSegment(SectionObjectPointers, &MmOffset, FlushEnd - FlushStart, &MmIosb);
+ }
+ else
+ {
+ ULONG MmLength = VACB_MAPPING_GRANULARITY - (FlushStart % VACB_MAPPING_GRANULARITY);
+ Status = MmFlushSegment(SectionObjectPointers, &MmOffset, MmLength, &MmIosb);
+ }
-VOID
-NTAPI
-CcRosReferenceCache (
- PFILE_OBJECT FileObject)
-{
- PROS_SHARED_CACHE_MAP SharedCacheMap;
- KeAcquireGuardedMutex(&ViewLock);
- SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
- ASSERT(SharedCacheMap);
- ASSERT(SharedCacheMap->OpenCount != 0);
- SharedCacheMap->OpenCount++;
- KeReleaseGuardedMutex(&ViewLock);
-}
+ if (!NT_SUCCESS(Status))
+ goto quit;
-VOID
-NTAPI
-CcRosRemoveIfClosed (
- PSECTION_OBJECT_POINTERS SectionObjectPointer)
-{
- PROS_SHARED_CACHE_MAP SharedCacheMap;
- DPRINT("CcRosRemoveIfClosed()\n");
- KeAcquireGuardedMutex(&ViewLock);
- SharedCacheMap = SectionObjectPointer->SharedCacheMap;
- if (SharedCacheMap && SharedCacheMap->OpenCount == 0)
- {
- CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap);
- }
- KeReleaseGuardedMutex(&ViewLock);
-}
+ if (IoStatus)
+ IoStatus->Information += MmIosb.Information;
+ /* Update VDL */
+ if (SharedCacheMap->ValidDataLength.QuadPart < FlushEnd)
+ SharedCacheMap->ValidDataLength.QuadPart = FlushEnd;
+ }
-VOID
-NTAPI
-CcRosDereferenceCache (
- PFILE_OBJECT FileObject)
-{
- PROS_SHARED_CACHE_MAP SharedCacheMap;
- KeAcquireGuardedMutex(&ViewLock);
- SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
- ASSERT(SharedCacheMap);
- if (SharedCacheMap->OpenCount > 0)
- {
- SharedCacheMap->OpenCount--;
- if (SharedCacheMap->OpenCount == 0)
+ if (!NT_SUCCESS(RtlLongLongAdd(FlushStart, VACB_MAPPING_GRANULARITY, &FlushStart)))
{
- MmFreeSectionSegments(SharedCacheMap->FileObject);
- CcRosDeleteFileCache(FileObject, SharedCacheMap);
+ /* We're at the end of file ! */
+ break;
}
+
+ /* Round down to next VACB start now */
+ FlushStart -= FlushStart % VACB_MAPPING_GRANULARITY;
+ }
+
+quit:
+ if (IoStatus)
+ {
+ IoStatus->Status = Status;
}
- KeReleaseGuardedMutex(&ViewLock);
}
NTSTATUS
-NTAPI
CcRosReleaseFileCache (
PFILE_OBJECT FileObject)
/*
* has been closed.
*/
{
+ KIRQL OldIrql;
+ PPRIVATE_CACHE_MAP PrivateMap;
PROS_SHARED_CACHE_MAP SharedCacheMap;
- KeAcquireGuardedMutex(&ViewLock);
+ OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
{
SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
- if (FileObject->PrivateCacheMap != NULL)
+
+ /* Closing the handle, so kill the private cache map
+ * Before you even try to remove it from FO, always
+ * lock the master lock, to be sure not to race
+ * with a potential read ahead ongoing!
+ */
+ PrivateMap = FileObject->PrivateCacheMap;
+ FileObject->PrivateCacheMap = NULL;
+
+ if (PrivateMap != NULL)
{
- FileObject->PrivateCacheMap = NULL;
- if (SharedCacheMap->OpenCount > 0)
+ /* Remove it from the file */
+ KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
+ RemoveEntryList(&PrivateMap->PrivateLinks);
+ KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
+
+ /* And free it. */
+ if (PrivateMap != &SharedCacheMap->PrivateCacheMap)
{
- SharedCacheMap->OpenCount--;
- if (SharedCacheMap->OpenCount == 0)
- {
- MmFreeSectionSegments(SharedCacheMap->FileObject);
- CcRosDeleteFileCache(FileObject, SharedCacheMap);
- }
+ ExFreePoolWithTag(PrivateMap, TAG_PRIVATE_CACHE_MAP);
+ }
+ else
+ {
+ PrivateMap->NodeTypeCode = 0;
}
- }
- }
- KeReleaseGuardedMutex(&ViewLock);
- return STATUS_SUCCESS;
-}
-
-NTSTATUS
-NTAPI
-CcTryToInitializeFileCache (
- PFILE_OBJECT FileObject)
-{
- PROS_SHARED_CACHE_MAP SharedCacheMap;
- NTSTATUS Status;
- KeAcquireGuardedMutex(&ViewLock);
+ ASSERT(SharedCacheMap->OpenCount > 0);
- ASSERT(FileObject->SectionObjectPointer);
- SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
- if (SharedCacheMap == NULL)
- {
- Status = STATUS_UNSUCCESSFUL;
- }
- else
- {
- if (FileObject->PrivateCacheMap == NULL)
- {
- FileObject->PrivateCacheMap = SharedCacheMap;
- SharedCacheMap->OpenCount++;
+ SharedCacheMap->OpenCount--;
+ if (SharedCacheMap->OpenCount == 0)
+ {
+ CcRosDeleteFileCache(FileObject, SharedCacheMap, &OldIrql);
+ }
}
- Status = STATUS_SUCCESS;
}
- KeReleaseGuardedMutex(&ViewLock);
-
- return Status;
+ KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
+ return STATUS_SUCCESS;
}
-
NTSTATUS
-NTAPI
CcRosInitializeFileCache (
PFILE_OBJECT FileObject,
PCC_FILE_SIZES FileSizes,
* FUNCTION: Initializes a shared cache map for a file object
*/
{
+ KIRQL OldIrql;
+ BOOLEAN Allocated;
PROS_SHARED_CACHE_MAP SharedCacheMap;
- SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
- DPRINT("CcRosInitializeFileCache(FileObject 0x%p, SharedCacheMap 0x%p)\n",
- FileObject, SharedCacheMap);
+ DPRINT("CcRosInitializeFileCache(FileObject 0x%p)\n", FileObject);
+
+ OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
- KeAcquireGuardedMutex(&ViewLock);
+ Allocated = FALSE;
+ SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
if (SharedCacheMap == NULL)
{
- KIRQL OldIrql;
-
+ Allocated = TRUE;
SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
if (SharedCacheMap == NULL)
{
- KeReleaseGuardedMutex(&ViewLock);
return STATUS_INSUFFICIENT_RESOURCES;
}
RtlZeroMemory(SharedCacheMap, sizeof(*SharedCacheMap));
- ObReferenceObjectByPointer(FileObject,
- FILE_ALL_ACCESS,
- NULL,
- KernelMode);
+ SharedCacheMap->NodeTypeCode = NODE_TYPE_SHARED_MAP;
+ SharedCacheMap->NodeByteSize = sizeof(*SharedCacheMap);
SharedCacheMap->FileObject = FileObject;
SharedCacheMap->Callbacks = CallBacks;
SharedCacheMap->LazyWriteContext = LazyWriterContext;
SharedCacheMap->SectionSize = FileSizes->AllocationSize;
SharedCacheMap->FileSize = FileSizes->FileSize;
+ SharedCacheMap->ValidDataLength = FileSizes->ValidDataLength;
SharedCacheMap->PinAccess = PinAccess;
SharedCacheMap->DirtyPageThreshold = 0;
SharedCacheMap->DirtyPages = 0;
+ InitializeListHead(&SharedCacheMap->PrivateList);
KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
+ InitializeListHead(&SharedCacheMap->BcbList);
+
+ SharedCacheMap->Flags = SHARED_CACHE_MAP_IN_CREATION;
+
+ ObReferenceObjectByPointer(FileObject,
+ FILE_ALL_ACCESS,
+ NULL,
+ KernelMode);
+
FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;
- KeAcquireSpinLock(&iSharedCacheMapLock, &OldIrql);
- InsertTailList(&CcCleanSharedCacheMapList, &SharedCacheMap->SharedCacheMapLinks);
- KeReleaseSpinLock(&iSharedCacheMapLock, OldIrql);
+ //CcRosTraceCacheMap(SharedCacheMap, TRUE);
+ }
+ else if (SharedCacheMap->Flags & SHARED_CACHE_MAP_IN_CREATION)
+ {
+ /* The shared cache map is being created somewhere else. Wait for that to happen */
+ KEVENT Waiter;
+ PKEVENT PreviousWaiter = SharedCacheMap->CreateEvent;
+
+ KeInitializeEvent(&Waiter, NotificationEvent, FALSE);
+ SharedCacheMap->CreateEvent = &Waiter;
+
+ KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
+
+ KeWaitForSingleObject(&Waiter, Executive, KernelMode, FALSE, NULL);
+
+ if (PreviousWaiter)
+ KeSetEvent(PreviousWaiter, IO_NO_INCREMENT, FALSE);
+
+ OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
}
+
if (FileObject->PrivateCacheMap == NULL)
{
- FileObject->PrivateCacheMap = SharedCacheMap;
+ PPRIVATE_CACHE_MAP PrivateMap;
+
+ /* Allocate the private cache map for this handle */
+ if (SharedCacheMap->PrivateCacheMap.NodeTypeCode != 0)
+ {
+ PrivateMap = ExAllocatePoolWithTag(NonPagedPool, sizeof(PRIVATE_CACHE_MAP), TAG_PRIVATE_CACHE_MAP);
+ }
+ else
+ {
+ PrivateMap = &SharedCacheMap->PrivateCacheMap;
+ }
+
+ if (PrivateMap == NULL)
+ {
+ /* If we also allocated the shared cache map for this file, kill it */
+ if (Allocated)
+ {
+ RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
+
+ FileObject->SectionObjectPointer->SharedCacheMap = NULL;
+ ObDereferenceObject(FileObject);
+ ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
+ }
+
+ KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
+ return STATUS_INSUFFICIENT_RESOURCES;
+ }
+
+ /* Initialize it */
+ RtlZeroMemory(PrivateMap, sizeof(PRIVATE_CACHE_MAP));
+ PrivateMap->NodeTypeCode = NODE_TYPE_PRIVATE_MAP;
+ PrivateMap->ReadAheadMask = PAGE_SIZE - 1;
+ PrivateMap->FileObject = FileObject;
+ KeInitializeSpinLock(&PrivateMap->ReadAheadSpinLock);
+
+ /* Link it to the file */
+ KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
+ InsertTailList(&SharedCacheMap->PrivateList, &PrivateMap->PrivateLinks);
+ KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
+
+ FileObject->PrivateCacheMap = PrivateMap;
SharedCacheMap->OpenCount++;
}
- KeReleaseGuardedMutex(&ViewLock);
+
+ KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
+
+ /* Create the section */
+ if (Allocated)
+ {
+ NTSTATUS Status;
+
+ ASSERT(SharedCacheMap->Section == NULL);
+
+ Status = MmCreateSection(
+ &SharedCacheMap->Section,
+ SECTION_ALL_ACCESS,
+ NULL,
+ &SharedCacheMap->SectionSize,
+ PAGE_READWRITE,
+ SEC_RESERVE,
+ NULL,
+ FileObject);
+
+ ASSERT(NT_SUCCESS(Status));
+
+ if (!NT_SUCCESS(Status))
+ {
+ CcRosReleaseFileCache(FileObject);
+ return Status;
+ }
+
+ OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
+
+ InsertTailList(&CcCleanSharedCacheMapList, &SharedCacheMap->SharedCacheMapLinks);
+ SharedCacheMap->Flags &= ~SHARED_CACHE_MAP_IN_CREATION;
+
+ if (SharedCacheMap->CreateEvent)
+ {
+ KeSetEvent(SharedCacheMap->CreateEvent, IO_NO_INCREMENT, FALSE);
+ SharedCacheMap->CreateEvent = NULL;
+ }
+
+ KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
+ }
return STATUS_SUCCESS;
}
return NULL;
}
+CODE_SEG("INIT")
VOID
NTAPI
-CcShutdownLazyWriter (
- VOID)
-{
- /* Simply set the event, lazy writer will stop when it's done */
- KeSetEvent(&iLazyWriterShutdown, IO_DISK_INCREMENT, FALSE);
-}
-
-BOOLEAN
-INIT_FUNCTION
-NTAPI
CcInitView (
VOID)
{
- HANDLE LazyWriter;
- NTSTATUS Status;
- KPRIORITY Priority;
- OBJECT_ATTRIBUTES ObjectAttributes;
-
DPRINT("CcInitView()\n");
InitializeListHead(&DirtyVacbListHead);
InitializeListHead(&CcDeferredWrites);
InitializeListHead(&CcCleanSharedCacheMapList);
KeInitializeSpinLock(&CcDeferredWriteSpinLock);
- KeInitializeSpinLock(&iSharedCacheMapLock);
- KeInitializeGuardedMutex(&ViewLock);
ExInitializeNPagedLookasideList(&iBcbLookasideList,
NULL,
NULL,
TAG_VACB,
20);
- MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);
-
- /* Initialize lazy writer events */
- KeInitializeEvent(&iLazyWriterShutdown, SynchronizationEvent, FALSE);
- KeInitializeEvent(&iLazyWriterNotify, NotificationEvent, FALSE);
-
- /* Define lazy writer threshold, depending on system type */
- switch (MmQuerySystemSize())
- {
- case MmSmallSystem:
- CcDirtyPageThreshold = MmNumberOfPhysicalPages / 8;
- break;
-
- case MmMediumSystem:
- CcDirtyPageThreshold = MmNumberOfPhysicalPages / 4;
- break;
-
- case MmLargeSystem:
- CcDirtyPageThreshold = MmNumberOfPhysicalPages / 8 + MmNumberOfPhysicalPages / 4;
- break;
- }
-
- /* Start the lazy writer thread */
- InitializeObjectAttributes(&ObjectAttributes,
- NULL,
- OBJ_KERNEL_HANDLE,
- NULL,
- NULL);
- Status = PsCreateSystemThread(&LazyWriter,
- THREAD_ALL_ACCESS,
- &ObjectAttributes,
- NULL,
- NULL,
- CciLazyWriter,
- NULL);
- if (!NT_SUCCESS(Status))
- {
- return FALSE;
- }
-
- Priority = 27;
- Status = NtSetInformationThread(LazyWriter,
- ThreadPriority,
- &Priority,
- sizeof(Priority));
- ASSERT(NT_SUCCESS(Status));
-
- /* Handle is not needed */
- ObCloseHandle(LazyWriter, KernelMode);
-
CcInitCacheZeroPage();
-
- return TRUE;
}
#if DBG && defined(KDBG)
+
+#include <kdbg/kdb.h>
+
BOOLEAN
ExpKdbgExtFileCache(ULONG Argc, PCHAR Argv[])
{
UNICODE_STRING NoName = RTL_CONSTANT_STRING(L"No name for File");
KdbpPrint(" Usage Summary (in kb)\n");
- KdbpPrint("Shared\t\tValid\tDirty\tName\n");
+ KdbpPrint("Shared\t\tMapped\tDirty\tName\n");
/* No need to lock the spin lock here, we're in DBG */
for (ListEntry = CcCleanSharedCacheMapList.Flink;
ListEntry != &CcCleanSharedCacheMapList;
ListEntry = ListEntry->Flink)
{
PLIST_ENTRY Vacbs;
- ULONG Valid = 0, Dirty = 0;
+ ULONG Mapped = 0, Dirty = 0;
PROS_SHARED_CACHE_MAP SharedCacheMap;
PUNICODE_STRING FileName;
+ PWSTR Extra = L"";
SharedCacheMap = CONTAINING_RECORD(ListEntry, ROS_SHARED_CACHE_MAP, SharedCacheMapLinks);
Vacbs != &SharedCacheMap->CacheMapVacbListHead;
Vacbs = Vacbs->Flink)
{
- PROS_VACB Vacb;
-
- Vacb = CONTAINING_RECORD(Vacbs, ROS_VACB, CacheMapVacbListEntry);
- if (Vacb->Valid)
- {
- Valid += VACB_MAPPING_GRANULARITY / 1024;
- }
+ Mapped += VACB_MAPPING_GRANULARITY / 1024;
}
/* Setup name */
{
FileName = &SharedCacheMap->FileObject->FileName;
}
+ else if (SharedCacheMap->FileObject != NULL &&
+ SharedCacheMap->FileObject->FsContext != NULL &&
+ ((PFSRTL_COMMON_FCB_HEADER)(SharedCacheMap->FileObject->FsContext))->NodeTypeCode == 0x0502 &&
+ ((PFSRTL_COMMON_FCB_HEADER)(SharedCacheMap->FileObject->FsContext))->NodeByteSize == 0x1F8 &&
+ ((PUNICODE_STRING)(((PUCHAR)SharedCacheMap->FileObject->FsContext) + 0x100))->Length != 0)
+ {
+ FileName = (PUNICODE_STRING)(((PUCHAR)SharedCacheMap->FileObject->FsContext) + 0x100);
+ Extra = L" (FastFAT)";
+ }
else
{
FileName = &NoName;
}
/* And print */
- KdbpPrint("%p\t%d\t%d\t%wZ\n", SharedCacheMap, Valid, Dirty, FileName);
+ KdbpPrint("%p\t%d\t%d\t%wZ%S\n", SharedCacheMap, Mapped, Dirty, FileName, Extra);
}
return TRUE;
}
-#endif
+
+BOOLEAN
+ExpKdbgExtDefWrites(ULONG Argc, PCHAR Argv[])
+{
+ KdbpPrint("CcTotalDirtyPages:\t%lu (%lu Kb)\n", CcTotalDirtyPages,
+ (CcTotalDirtyPages * PAGE_SIZE) / 1024);
+ KdbpPrint("CcDirtyPageThreshold:\t%lu (%lu Kb)\n", CcDirtyPageThreshold,
+ (CcDirtyPageThreshold * PAGE_SIZE) / 1024);
+ KdbpPrint("MmAvailablePages:\t%lu (%lu Kb)\n", MmAvailablePages,
+ (MmAvailablePages * PAGE_SIZE) / 1024);
+ KdbpPrint("MmThrottleTop:\t\t%lu (%lu Kb)\n", MmThrottleTop,
+ (MmThrottleTop * PAGE_SIZE) / 1024);
+ KdbpPrint("MmThrottleBottom:\t%lu (%lu Kb)\n", MmThrottleBottom,
+ (MmThrottleBottom * PAGE_SIZE) / 1024);
+ KdbpPrint("MmModifiedPageListHead.Total:\t%lu (%lu Kb)\n", MmModifiedPageListHead.Total,
+ (MmModifiedPageListHead.Total * PAGE_SIZE) / 1024);
+
+ if (CcTotalDirtyPages >= CcDirtyPageThreshold)
+ {
+ KdbpPrint("CcTotalDirtyPages above the threshold, writes should be throttled\n");
+ }
+ else if (CcTotalDirtyPages + 64 >= CcDirtyPageThreshold)
+ {
+ KdbpPrint("CcTotalDirtyPages within 64 (max charge) pages of the threshold, writes may be throttled\n");
+ }
+ else
+ {
+ KdbpPrint("CcTotalDirtyPages below the threshold, writes should not be throttled\n");
+ }
+
+ return TRUE;
+}
+
+#endif // DBG && defined(KDBG)
/* EOF */