* PURPOSE: Cache manager
*
* PROGRAMMERS: David Welch (welch@mcmail.com)
+ * Pierre Schweitzer (pierre@reactos.org)
*/
/* NOTES **********************************************************************
/* GLOBALS *******************************************************************/
-static LIST_ENTRY DirtyVacbListHead;
+LIST_ENTRY DirtyVacbListHead;
static LIST_ENTRY VacbLruListHead;
-ULONG DirtyPageCount = 0;
KGUARDED_MUTEX ViewLock;
static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
static NPAGED_LOOKASIDE_LIST VacbLookasideList;
+/* Internal vars (MS):
+ * - Threshold above which lazy writer will start action
+ * - Amount of dirty pages
+ * - List for deferred writes
+ * - Spinlock when dealing with the deferred list
+ * - List for "clean" shared cache maps
+ */
+ULONG CcDirtyPageThreshold = 0;
+ULONG CcTotalDirtyPages = 0;
+LIST_ENTRY CcDeferredWrites;
+KSPIN_LOCK CcDeferredWriteSpinLock;
+LIST_ENTRY CcCleanSharedCacheMapList;
+
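+/* A minimal sketch of how these counters gate a write (the helper name is
+ * hypothetical and this is not the actual throttling implementation):
+ *
+ *     static BOOLEAN CcWouldThrottleWrite(ULONG Pages)
+ *     {
+ *         // Throttle once the dirty total would reach the threshold
+ *         return (CcTotalDirtyPages + Pages >= CcDirtyPageThreshold);
+ *     }
+ *
+ * A write that would be throttled is queued on CcDeferredWrites (protected by
+ * CcDeferredWriteSpinLock) and retried once enough dirty pages were flushed.
+ */
+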
#if DBG
-static void CcRosVacbIncRefCount_(PROS_VACB vacb, const char* file, int line)
+ULONG CcRosVacbIncRefCount_(PROS_VACB vacb, PCSTR file, INT line)
{
- ++vacb->ReferenceCount;
+ ULONG Refs;
+
+ Refs = InterlockedIncrement((PLONG)&vacb->ReferenceCount);
if (vacb->SharedCacheMap->Trace)
{
DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
- file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
+ file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
}
+
+ return Refs;
}
-static void CcRosVacbDecRefCount_(PROS_VACB vacb, const char* file, int line)
+ULONG CcRosVacbDecRefCount_(PROS_VACB vacb, PCSTR file, INT line)
{
- --vacb->ReferenceCount;
+ ULONG Refs;
+
+ Refs = InterlockedDecrement((PLONG)&vacb->ReferenceCount);
+ ASSERT(!(Refs == 0 && vacb->Dirty));
if (vacb->SharedCacheMap->Trace)
{
DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
- file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
+ file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
}
+
+ return Refs;
+}
+ULONG CcRosVacbGetRefCount_(PROS_VACB vacb, PCSTR file, INT line)
+{
+ ULONG Refs;
+
+ Refs = InterlockedCompareExchange((PLONG)&vacb->ReferenceCount, 0, 0);
+ if (vacb->SharedCacheMap->Trace)
+ {
+ DbgPrint("(%s:%i) VACB %p ==RefCount=%lu, Dirty %u, PageOut %lu\n",
+ file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
+ }
+
+ return Refs;
}
-#define CcRosVacbIncRefCount(vacb) CcRosVacbIncRefCount_(vacb,__FILE__,__LINE__)
-#define CcRosVacbDecRefCount(vacb) CcRosVacbDecRefCount_(vacb,__FILE__,__LINE__)
-#else
-#define CcRosVacbIncRefCount(vacb) (++((vacb)->ReferenceCount))
-#define CcRosVacbDecRefCount(vacb) (--((vacb)->ReferenceCount))
#endif
NTSTATUS
PROS_VACB Vacb)
{
NTSTATUS Status;
- KIRQL oldIrql;
Status = CcWriteVirtualAddress(Vacb);
if (NT_SUCCESS(Status))
{
- KeAcquireGuardedMutex(&ViewLock);
- KeAcquireSpinLock(&Vacb->SharedCacheMap->CacheMapLock, &oldIrql);
-
- Vacb->Dirty = FALSE;
- RemoveEntryList(&Vacb->DirtyVacbListEntry);
- DirtyPageCount -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
- CcRosVacbDecRefCount(Vacb);
-
- KeReleaseSpinLock(&Vacb->SharedCacheMap->CacheMapLock, oldIrql);
- KeReleaseGuardedMutex(&ViewLock);
+ CcRosUnmarkDirtyVacb(Vacb, TRUE);
}
return Status;
CcRosFlushDirtyPages (
ULONG Target,
PULONG Count,
- BOOLEAN Wait)
+ BOOLEAN Wait,
+ BOOLEAN CalledFromLazy)
{
PLIST_ENTRY current_entry;
PROS_VACB current;
CcRosVacbIncRefCount(current);
+ /* When performing lazy write, don't handle temporary files */
+ if (CalledFromLazy &&
+ BooleanFlagOn(current->SharedCacheMap->FileObject->Flags, FO_TEMPORARY_FILE))
+ {
+ CcRosVacbDecRefCount(current);
+ continue;
+ }
+
Locked = current->SharedCacheMap->Callbacks->AcquireForLazyWrite(
current->SharedCacheMap->LazyWriteContext, Wait);
if (!Locked)
continue;
}
- Status = KeWaitForSingleObject(&current->Mutex,
- Executive,
- KernelMode,
- FALSE,
- Wait ? NULL : &ZeroTimeout);
+ Status = CcRosAcquireVacbLock(current,
+ Wait ? NULL : &ZeroTimeout);
if (Status != STATUS_SUCCESS)
{
current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
ASSERT(current->Dirty);
/* One reference is added above */
- if (current->ReferenceCount > 2)
+ if (CcRosVacbGetRefCount(current) > 2)
{
- KeReleaseMutex(&current->Mutex, FALSE);
+ CcRosReleaseVacbLock(current);
current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
current->SharedCacheMap->LazyWriteContext);
CcRosVacbDecRefCount(current);
Status = CcRosFlushVacb(current);
- KeReleaseMutex(&current->Mutex, FALSE);
+ CcRosReleaseVacbLock(current);
current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
current->SharedCacheMap->LazyWriteContext);
KeAcquireGuardedMutex(&ViewLock);
CcRosVacbDecRefCount(current);
- if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
+ if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE) &&
+ (Status != STATUS_MEDIA_WRITE_PROTECTED))
{
DPRINT1("CC: Failed to flush VACB.\n");
}
else
{
- (*Count) += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
- Target -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+ ULONG PagesFreed;
+
+ /* How many pages did we free? */
+ PagesFreed = VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+ (*Count) += PagesFreed;
+
+ /* Make sure we don't underflow the target! */
+ if (Target < PagesFreed)
+ {
+ /* If we would have, jump to zero directly */
+ Target = 0;
+ }
+ else
+ {
+ Target -= PagesFreed;
+ }
}
current_entry = DirtyVacbListHead.Flink;
current_entry = VacbLruListHead.Flink;
while (current_entry != &VacbLruListHead)
{
+ ULONG Refs;
+
current = CONTAINING_RECORD(current_entry,
ROS_VACB,
VacbLruListEntry);
}
/* Dereference the VACB */
- CcRosVacbDecRefCount(current);
+ Refs = CcRosVacbDecRefCount(current);
/* Check if we can free this entry now */
- if (current->ReferenceCount == 0)
+ if (Refs < 2)
{
ASSERT(!current->Dirty);
ASSERT(!current->MappedCount);
+ ASSERT(Refs == 1);
RemoveEntryList(&current->CacheMapVacbListEntry);
RemoveEntryList(&current->VacbLruListEntry);
+ InitializeListHead(&current->VacbLruListEntry);
InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
/* Calculate how many pages we freed for Mm */
if ((Target > 0) && !FlushedPages)
{
/* Flush dirty pages to disk */
- CcRosFlushDirtyPages(Target, &PagesFreed, FALSE);
+ CcRosFlushDirtyPages(Target, &PagesFreed, FALSE, FALSE);
FlushedPages = TRUE;
/* We can only swap as many pages as we flushed */
current = CONTAINING_RECORD(current_entry,
ROS_VACB,
CacheMapVacbListEntry);
+ InitializeListHead(&current->CacheMapVacbListEntry);
+ CcRosVacbDecRefCount(current);
CcRosInternalFreeVacb(current);
}
BOOLEAN Dirty,
BOOLEAN Mapped)
{
- BOOLEAN WasDirty;
- KIRQL oldIrql;
-
+ ULONG Refs;
ASSERT(SharedCacheMap);
DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
SharedCacheMap, Vacb, Valid);
- KeAcquireGuardedMutex(&ViewLock);
- KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
-
Vacb->Valid = Valid;
- WasDirty = Vacb->Dirty;
- Vacb->Dirty = Vacb->Dirty || Dirty;
-
- if (!WasDirty && Vacb->Dirty)
+ if (Dirty && !Vacb->Dirty)
{
- InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
- DirtyPageCount += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+ CcRosMarkDirtyVacb(Vacb);
}
if (Mapped)
{
Vacb->MappedCount++;
}
- CcRosVacbDecRefCount(Vacb);
+ Refs = CcRosVacbDecRefCount(Vacb);
if (Mapped && (Vacb->MappedCount == 1))
{
CcRosVacbIncRefCount(Vacb);
}
- if (!WasDirty && Vacb->Dirty)
- {
- CcRosVacbIncRefCount(Vacb);
- }
- KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
- KeReleaseGuardedMutex(&ViewLock);
- KeReleaseMutex(&Vacb->Mutex, FALSE);
+ ASSERT(Refs > 0);
+
+ CcRosReleaseVacbLock(Vacb);
return STATUS_SUCCESS;
}
CcRosVacbIncRefCount(current);
KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
KeReleaseGuardedMutex(&ViewLock);
- KeWaitForSingleObject(&current->Mutex,
- Executive,
- KernelMode,
- FALSE,
- NULL);
+ CcRosAcquireVacbLock(current, NULL);
return current;
}
if (current->FileOffset.QuadPart > FileOffset)
return NULL;
}
-NTSTATUS
+VOID
NTAPI
CcRosMarkDirtyVacb (
- PROS_SHARED_CACHE_MAP SharedCacheMap,
- LONGLONG FileOffset)
+ PROS_VACB Vacb)
{
- PROS_VACB Vacb;
KIRQL oldIrql;
+ PROS_SHARED_CACHE_MAP SharedCacheMap;
- ASSERT(SharedCacheMap);
-
- DPRINT("CcRosMarkDirtyVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
- SharedCacheMap, FileOffset);
-
- Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
- if (Vacb == NULL)
- {
- KeBugCheck(CACHE_MANAGER);
- }
+ SharedCacheMap = Vacb->SharedCacheMap;
KeAcquireGuardedMutex(&ViewLock);
KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
- if (!Vacb->Dirty)
- {
- InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
- DirtyPageCount += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
- }
- else
- {
- CcRosVacbDecRefCount(Vacb);
- }
+ ASSERT(!Vacb->Dirty);
+
+ InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
+ CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+ Vacb->SharedCacheMap->DirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+ CcRosVacbIncRefCount(Vacb);
/* Move to the tail of the LRU list */
RemoveEntryList(&Vacb->VacbLruListEntry);
KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
KeReleaseGuardedMutex(&ViewLock);
- KeReleaseMutex(&Vacb->Mutex, FALSE);
+
+ /* Schedule a lazy writer run now that we have a dirty VACB */
+ oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
+ if (!LazyWriter.ScanActive)
+ {
+ CcScheduleLazyWriteScan(FALSE);
+ }
+ KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
+}
+
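+/* Reverse of CcRosMarkDirtyVacb: removes the VACB from the dirty bookkeeping.
+ * LockViews says whether ViewLock and the shared cache map's CacheMapLock must
+ * be acquired here; callers that already hold them (e.g. the delete-file-cache
+ * path) pass FALSE, while CcRosFlushVacb passes TRUE.
+ */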
+VOID
+NTAPI
+CcRosUnmarkDirtyVacb (
+ PROS_VACB Vacb,
+ BOOLEAN LockViews)
+{
+ KIRQL oldIrql;
+ PROS_SHARED_CACHE_MAP SharedCacheMap;
+
+ SharedCacheMap = Vacb->SharedCacheMap;
+
+ if (LockViews)
+ {
+ KeAcquireGuardedMutex(&ViewLock);
+ KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
+ }
+
+ ASSERT(Vacb->Dirty);
+
+ Vacb->Dirty = FALSE;
+
+ RemoveEntryList(&Vacb->DirtyVacbListEntry);
+ InitializeListHead(&Vacb->DirtyVacbListEntry);
+ CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+ Vacb->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+ CcRosVacbDecRefCount(Vacb);
+
+ if (LockViews)
+ {
+ KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
+ KeReleaseGuardedMutex(&ViewLock);
+ }
+}
+
+NTSTATUS
+NTAPI
+CcRosMarkDirtyFile (
+ PROS_SHARED_CACHE_MAP SharedCacheMap,
+ LONGLONG FileOffset)
+{
+ PROS_VACB Vacb;
+
+ ASSERT(SharedCacheMap);
+
+ DPRINT("CcRosMarkDirtyVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
+ SharedCacheMap, FileOffset);
+
+ Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
+ if (Vacb == NULL)
+ {
+ KeBugCheck(CACHE_MANAGER);
+ }
+
+ CcRosReleaseVacb(SharedCacheMap, Vacb, Vacb->Valid, TRUE, FALSE);
return STATUS_SUCCESS;
}
+/*
+ * Note: this is not the inverse of
+ * CcRosMapVacbInKernelSpace()
+ */
NTSTATUS
NTAPI
CcRosUnmapVacb (
BOOLEAN NowDirty)
{
PROS_VACB Vacb;
- BOOLEAN WasDirty;
- KIRQL oldIrql;
ASSERT(SharedCacheMap);
return STATUS_UNSUCCESSFUL;
}
- KeAcquireGuardedMutex(&ViewLock);
- KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
-
- WasDirty = Vacb->Dirty;
- Vacb->Dirty = Vacb->Dirty || NowDirty;
-
+ ASSERT(Vacb->MappedCount != 0);
Vacb->MappedCount--;
- if (!WasDirty && NowDirty)
+ if (Vacb->MappedCount == 0)
{
- InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
- DirtyPageCount += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+ CcRosVacbDecRefCount(Vacb);
}
- CcRosVacbDecRefCount(Vacb);
- if (!WasDirty && NowDirty)
+ CcRosReleaseVacb(SharedCacheMap, Vacb, Vacb->Valid, NowDirty, FALSE);
+
+ return STATUS_SUCCESS;
+}
+
+static
+NTSTATUS
+CcRosMapVacbInKernelSpace(
+ PROS_VACB Vacb)
+{
+ ULONG i;
+ NTSTATUS Status;
+ ULONG_PTR NumberOfPages;
+ PVOID BaseAddress = NULL;
+
+ /* Create a memory area. */
+ MmLockAddressSpace(MmGetKernelAddressSpace());
+ Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
+ 0, // nothing checks for VACB mareas, so set to 0
+ &BaseAddress,
+ VACB_MAPPING_GRANULARITY,
+ PAGE_READWRITE,
+ (PMEMORY_AREA*)&Vacb->MemoryArea,
+ 0,
+ PAGE_SIZE);
+ ASSERT(Vacb->BaseAddress == NULL);
+ Vacb->BaseAddress = BaseAddress;
+ MmUnlockAddressSpace(MmGetKernelAddressSpace());
+ if (!NT_SUCCESS(Status))
{
- CcRosVacbIncRefCount(Vacb);
+ DPRINT1("MmCreateMemoryArea failed with %lx for VACB %p\n", Status, Vacb);
+ return Status;
}
- if (Vacb->MappedCount == 0)
+
+ ASSERT(((ULONG_PTR)Vacb->BaseAddress % PAGE_SIZE) == 0);
+ ASSERT((ULONG_PTR)Vacb->BaseAddress > (ULONG_PTR)MmSystemRangeStart);
+ ASSERT((ULONG_PTR)Vacb->BaseAddress + VACB_MAPPING_GRANULARITY - 1 > (ULONG_PTR)MmSystemRangeStart);
+
+ /* Create a virtual mapping for this memory area */
+ NumberOfPages = BYTES_TO_PAGES(VACB_MAPPING_GRANULARITY);
+ for (i = 0; i < NumberOfPages; i++)
{
- CcRosVacbDecRefCount(Vacb);
- }
+ PFN_NUMBER PageFrameNumber;
- KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
- KeReleaseGuardedMutex(&ViewLock);
- KeReleaseMutex(&Vacb->Mutex, FALSE);
+ MI_SET_USAGE(MI_USAGE_CACHE);
+ Status = MmRequestPageMemoryConsumer(MC_CACHE, TRUE, &PageFrameNumber);
+ if (PageFrameNumber == 0)
+ {
+ DPRINT1("Unable to allocate page\n");
+ KeBugCheck(MEMORY_MANAGEMENT);
+ }
+
+ ASSERT(BaseAddress == Vacb->BaseAddress);
+ ASSERT(i * PAGE_SIZE < VACB_MAPPING_GRANULARITY);
+ ASSERT((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE) >= (ULONG_PTR)BaseAddress);
+ ASSERT((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE) > (ULONG_PTR)MmSystemRangeStart);
+
+ Status = MmCreateVirtualMapping(NULL,
+ (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE)),
+ PAGE_READWRITE,
+ &PageFrameNumber,
+ 1);
+ if (!NT_SUCCESS(Status))
+ {
+ DPRINT1("Unable to create virtual mapping\n");
+ KeBugCheck(MEMORY_MANAGEMENT);
+ }
+ }
return STATUS_SUCCESS;
}
DPRINT("CcRosCreateVacb()\n");
- if (FileOffset >= SharedCacheMap->FileSize.QuadPart)
+ if (FileOffset >= SharedCacheMap->SectionSize.QuadPart)
{
*Vacb = NULL;
return STATUS_INVALID_PARAMETER;
}
current = ExAllocateFromNPagedLookasideList(&VacbLookasideList);
+ current->BaseAddress = NULL;
current->Valid = FALSE;
current->Dirty = FALSE;
current->PageOut = FALSE;
}
#endif
current->MappedCount = 0;
- current->DirtyVacbListEntry.Flink = NULL;
- current->DirtyVacbListEntry.Blink = NULL;
- current->ReferenceCount = 1;
+ current->ReferenceCount = 0;
+ current->PinCount = 0;
KeInitializeMutex(&current->Mutex, 0);
- KeWaitForSingleObject(&current->Mutex,
- Executive,
- KernelMode,
- FALSE,
- NULL);
+ InitializeListHead(&current->CacheMapVacbListEntry);
+ InitializeListHead(&current->DirtyVacbListEntry);
+ InitializeListHead(&current->VacbLruListEntry);
+
+ CcRosVacbIncRefCount(current);
+
+ Status = CcRosMapVacbInKernelSpace(current);
+ if (!NT_SUCCESS(Status))
+ {
+ CcRosVacbDecRefCount(current);
+ ExFreeToNPagedLookasideList(&VacbLookasideList, current);
+ return Status;
+ }
+
+ CcRosAcquireVacbLock(current, NULL);
KeAcquireGuardedMutex(&ViewLock);
*Vacb = current;
current);
}
#endif
- KeReleaseMutex(&(*Vacb)->Mutex, FALSE);
+ CcRosVacbDecRefCount(*Vacb);
+ CcRosReleaseVacbLock(*Vacb);
KeReleaseGuardedMutex(&ViewLock);
- ExFreeToNPagedLookasideList(&VacbLookasideList, *Vacb);
+ CcRosInternalFreeVacb(*Vacb);
*Vacb = current;
- KeWaitForSingleObject(&current->Mutex,
- Executive,
- KernelMode,
- FALSE,
- NULL);
+ CcRosAcquireVacbLock(current, NULL);
return STATUS_SUCCESS;
}
if (current->FileOffset.QuadPart < FileOffset)
InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
KeReleaseGuardedMutex(&ViewLock);
- MmLockAddressSpace(MmGetKernelAddressSpace());
- current->BaseAddress = NULL;
- Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
- 0, // nothing checks for VACB mareas, so set to 0
- &current->BaseAddress,
- VACB_MAPPING_GRANULARITY,
- PAGE_READWRITE,
- (PMEMORY_AREA*)&current->MemoryArea,
- FALSE,
- 0,
- PAGE_SIZE);
- MmUnlockAddressSpace(MmGetKernelAddressSpace());
- if (!NT_SUCCESS(Status))
- {
- KeBugCheck(CACHE_MANAGER);
- }
-
- /* Create a virtual mapping for this memory area */
MI_SET_USAGE(MI_USAGE_CACHE);
#if MI_TRACE_PFNS
- PWCHAR pos = NULL;
- ULONG len = 0;
if ((SharedCacheMap->FileObject) && (SharedCacheMap->FileObject->FileName.Buffer))
{
+ PWCHAR pos;
+ ULONG len = 0;
pos = wcsrchr(SharedCacheMap->FileObject->FileName.Buffer, '\\');
- len = wcslen(pos) * sizeof(WCHAR);
- if (pos) snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
+ if (pos)
+ {
+ len = wcslen(pos) * sizeof(WCHAR);
+ snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
+ }
+ else
+ {
+ snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%wZ", &SharedCacheMap->FileObject->FileName);
+ }
}
#endif
- MmMapMemoryArea(current->BaseAddress, VACB_MAPPING_GRANULARITY,
- MC_CACHE, PAGE_READWRITE);
+ /* Reference it to allow release */
+ CcRosVacbIncRefCount(current);
- return STATUS_SUCCESS;
+ return Status;
}
NTSTATUS
{
PROS_VACB current;
NTSTATUS Status;
+ ULONG Refs;
ASSERT(SharedCacheMap);
}
}
+ Refs = CcRosVacbGetRefCount(current);
+
KeAcquireGuardedMutex(&ViewLock);
/* Move to the tail of the LRU list */
DPRINT("*BaseAddress %p\n", *BaseAddress);
*Vacb = current;
*BaseOffset = current->FileOffset.QuadPart;
+
+ ASSERT(Refs > 1);
+
return STATUS_SUCCESS;
}
NULL);
MmUnlockAddressSpace(MmGetKernelAddressSpace());
+ if (Vacb->PinCount != 0 || Vacb->ReferenceCount != 0)
+ {
+ DPRINT1("Invalid free: %ld, %ld\n", Vacb->ReferenceCount, Vacb->PinCount);
+ if (Vacb->SharedCacheMap->FileObject && Vacb->SharedCacheMap->FileObject->FileName.Length)
+ {
+ DPRINT1("For file: %wZ\n", &Vacb->SharedCacheMap->FileObject->FileName);
+ }
+ }
+
+ ASSERT(Vacb->PinCount == 0);
+ ASSERT(Vacb->ReferenceCount == 0);
+ ASSERT(IsListEmpty(&Vacb->CacheMapVacbListEntry));
+ ASSERT(IsListEmpty(&Vacb->DirtyVacbListEntry));
+ ASSERT(IsListEmpty(&Vacb->VacbLruListEntry));
+ /* Poison the VACB so stale pointers are easier to spot */
+ RtlFillMemory(Vacb, sizeof(*Vacb), 0xfd);
ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
return STATUS_SUCCESS;
}
LONGLONG RemainingLength;
PROS_VACB current;
NTSTATUS Status;
- KIRQL oldIrql;
+
+ CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
+ SectionObjectPointers, FileOffset, Length);
DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
SectionObjectPointers, FileOffset, Length, IoStatus);
IoStatus->Status = Status;
}
}
- KeReleaseMutex(&current->Mutex, FALSE);
- KeAcquireGuardedMutex(&ViewLock);
- KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
- CcRosVacbDecRefCount(current);
- KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
- KeReleaseGuardedMutex(&ViewLock);
+ CcRosReleaseVacb(SharedCacheMap, current, current->Valid, current->Dirty, FALSE);
}
Offset.QuadPart += VACB_MAPPING_GRANULARITY;
ASSERT(SharedCacheMap);
- SharedCacheMap->RefCount++;
+ SharedCacheMap->OpenCount++;
KeReleaseGuardedMutex(&ViewLock);
CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);
KeAcquireGuardedMutex(&ViewLock);
- SharedCacheMap->RefCount--;
- if (SharedCacheMap->RefCount == 0)
+ SharedCacheMap->OpenCount--;
+ if (SharedCacheMap->OpenCount == 0)
{
+ KIRQL OldIrql;
+
FileObject->SectionObjectPointer->SharedCacheMap = NULL;
/*
while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
{
current_entry = RemoveTailList(&SharedCacheMap->CacheMapVacbListHead);
+ KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
+
current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
+ CcRosAcquireVacbLock(current, NULL);
RemoveEntryList(&current->VacbLruListEntry);
+ InitializeListHead(&current->VacbLruListEntry);
if (current->Dirty)
{
- RemoveEntryList(&current->DirtyVacbListEntry);
- DirtyPageCount -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+ KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
+ CcRosUnmarkDirtyVacb(current, FALSE);
+ KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
DPRINT1("Freeing dirty VACB\n");
}
InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
+ CcRosReleaseVacbLock(current);
+
+ KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
}
#if DBG
SharedCacheMap->Trace = FALSE;
{
current_entry = RemoveTailList(&FreeList);
current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
+ InitializeListHead(&current->CacheMapVacbListEntry);
+ CcRosVacbDecRefCount(current);
CcRosInternalFreeVacb(current);
}
+
+ OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
+ RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
+ KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
+
ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
KeAcquireGuardedMutex(&ViewLock);
}
KeAcquireGuardedMutex(&ViewLock);
SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
ASSERT(SharedCacheMap);
- ASSERT(SharedCacheMap->RefCount != 0);
- SharedCacheMap->RefCount++;
+ ASSERT(SharedCacheMap->OpenCount != 0);
+ SharedCacheMap->OpenCount++;
KeReleaseGuardedMutex(&ViewLock);
}
DPRINT("CcRosRemoveIfClosed()\n");
KeAcquireGuardedMutex(&ViewLock);
SharedCacheMap = SectionObjectPointer->SharedCacheMap;
- if (SharedCacheMap && SharedCacheMap->RefCount == 0)
+ if (SharedCacheMap && SharedCacheMap->OpenCount == 0)
{
CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap);
}
KeAcquireGuardedMutex(&ViewLock);
SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
ASSERT(SharedCacheMap);
- if (SharedCacheMap->RefCount > 0)
+ if (SharedCacheMap->OpenCount > 0)
{
- SharedCacheMap->RefCount--;
- if (SharedCacheMap->RefCount == 0)
+ SharedCacheMap->OpenCount--;
+ if (SharedCacheMap->OpenCount == 0)
{
MmFreeSectionSegments(SharedCacheMap->FileObject);
CcRosDeleteFileCache(FileObject, SharedCacheMap);
* has been closed.
*/
{
+ KIRQL OldIrql;
+ PPRIVATE_CACHE_MAP PrivateMap;
PROS_SHARED_CACHE_MAP SharedCacheMap;
KeAcquireGuardedMutex(&ViewLock);
if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
{
SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
- if (FileObject->PrivateCacheMap != NULL)
+
+ /* Closing the handle, so kill the private cache map.
+ * Before you even try to remove it from the FO, always
+ * lock the master lock, to be sure not to race
+ * with a potential ongoing read ahead!
+ */
+ OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
+ PrivateMap = FileObject->PrivateCacheMap;
+ FileObject->PrivateCacheMap = NULL;
+ KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
+
+ if (PrivateMap != NULL)
{
- FileObject->PrivateCacheMap = NULL;
- if (SharedCacheMap->RefCount > 0)
+ /* Remove it from the file */
+ KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
+ RemoveEntryList(&PrivateMap->PrivateLinks);
+ KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);
+
+ /* And free it. */
+ if (PrivateMap != &SharedCacheMap->PrivateCacheMap)
{
- SharedCacheMap->RefCount--;
- if (SharedCacheMap->RefCount == 0)
+ ExFreePoolWithTag(PrivateMap, TAG_PRIVATE_CACHE_MAP);
+ }
+ else
+ {
+ PrivateMap->NodeTypeCode = 0;
+ }
+
+ if (SharedCacheMap->OpenCount > 0)
+ {
+ SharedCacheMap->OpenCount--;
+ if (SharedCacheMap->OpenCount == 0)
{
MmFreeSectionSegments(SharedCacheMap->FileObject);
CcRosDeleteFileCache(FileObject, SharedCacheMap);
return STATUS_SUCCESS;
}
-NTSTATUS
-NTAPI
-CcTryToInitializeFileCache (
- PFILE_OBJECT FileObject)
-{
- PROS_SHARED_CACHE_MAP SharedCacheMap;
- NTSTATUS Status;
-
- KeAcquireGuardedMutex(&ViewLock);
-
- ASSERT(FileObject->SectionObjectPointer);
- SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
- if (SharedCacheMap == NULL)
- {
- Status = STATUS_UNSUCCESSFUL;
- }
- else
- {
- if (FileObject->PrivateCacheMap == NULL)
- {
- FileObject->PrivateCacheMap = SharedCacheMap;
- SharedCacheMap->RefCount++;
- }
- Status = STATUS_SUCCESS;
- }
- KeReleaseGuardedMutex(&ViewLock);
-
- return Status;
-}
-
-
NTSTATUS
NTAPI
CcRosInitializeFileCache (
PFILE_OBJECT FileObject,
PCC_FILE_SIZES FileSizes,
+ BOOLEAN PinAccess,
PCACHE_MANAGER_CALLBACKS CallBacks,
PVOID LazyWriterContext)
/*
* FUNCTION: Initializes a shared cache map for a file object
*/
{
+ KIRQL OldIrql;
+ BOOLEAN Allocated;
PROS_SHARED_CACHE_MAP SharedCacheMap;
SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
DPRINT("CcRosInitializeFileCache(FileObject 0x%p, SharedCacheMap 0x%p)\n",
FileObject, SharedCacheMap);
+ Allocated = FALSE;
KeAcquireGuardedMutex(&ViewLock);
if (SharedCacheMap == NULL)
{
+ Allocated = TRUE;
SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
if (SharedCacheMap == NULL)
{
KeReleaseGuardedMutex(&ViewLock);
- return STATUS_UNSUCCESSFUL;
+ return STATUS_INSUFFICIENT_RESOURCES;
}
RtlZeroMemory(SharedCacheMap, sizeof(*SharedCacheMap));
ObReferenceObjectByPointer(FileObject,
FILE_ALL_ACCESS,
NULL,
KernelMode);
+ SharedCacheMap->NodeTypeCode = NODE_TYPE_SHARED_MAP;
+ SharedCacheMap->NodeByteSize = sizeof(*SharedCacheMap);
SharedCacheMap->FileObject = FileObject;
SharedCacheMap->Callbacks = CallBacks;
SharedCacheMap->LazyWriteContext = LazyWriterContext;
SharedCacheMap->SectionSize = FileSizes->AllocationSize;
SharedCacheMap->FileSize = FileSizes->FileSize;
+ SharedCacheMap->PinAccess = PinAccess;
+ SharedCacheMap->DirtyPageThreshold = 0;
+ SharedCacheMap->DirtyPages = 0;
+ InitializeListHead(&SharedCacheMap->PrivateList);
KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;
+
+ OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
+ InsertTailList(&CcCleanSharedCacheMapList, &SharedCacheMap->SharedCacheMapLinks);
+ KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
}
if (FileObject->PrivateCacheMap == NULL)
{
- FileObject->PrivateCacheMap = SharedCacheMap;
- SharedCacheMap->RefCount++;
+ PPRIVATE_CACHE_MAP PrivateMap;
+
+ /* Allocate the private cache map for this handle */
+ if (SharedCacheMap->PrivateCacheMap.NodeTypeCode != 0)
+ {
+ PrivateMap = ExAllocatePoolWithTag(NonPagedPool, sizeof(PRIVATE_CACHE_MAP), TAG_PRIVATE_CACHE_MAP);
+ }
+ else
+ {
+ PrivateMap = &SharedCacheMap->PrivateCacheMap;
+ }
+
+ if (PrivateMap == NULL)
+ {
+ /* If we also allocated the shared cache map for this file, kill it */
+ if (Allocated)
+ {
+ OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
+ RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
+ KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
+
+ FileObject->SectionObjectPointer->SharedCacheMap = NULL;
+ ObDereferenceObject(FileObject);
+ ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
+ }
+
+ KeReleaseGuardedMutex(&ViewLock);
+ return STATUS_INSUFFICIENT_RESOURCES;
+ }
+
+ /* Initialize it */
+ RtlZeroMemory(PrivateMap, sizeof(PRIVATE_CACHE_MAP));
+ PrivateMap->NodeTypeCode = NODE_TYPE_PRIVATE_MAP;
+ PrivateMap->ReadAheadMask = PAGE_SIZE - 1;
+ PrivateMap->FileObject = FileObject;
+ KeInitializeSpinLock(&PrivateMap->ReadAheadSpinLock);
+
+ /* Link it to the file */
+ KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
+ InsertTailList(&SharedCacheMap->PrivateList, &PrivateMap->PrivateLinks);
+ KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);
+
+ FileObject->PrivateCacheMap = PrivateMap;
+ SharedCacheMap->OpenCount++;
}
KeReleaseGuardedMutex(&ViewLock);
IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
{
PROS_SHARED_CACHE_MAP SharedCacheMap;
+
+ CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p\n", SectionObjectPointers);
+
if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
{
SharedCacheMap = SectionObjectPointers->SharedCacheMap;
InitializeListHead(&DirtyVacbListHead);
InitializeListHead(&VacbLruListHead);
+ InitializeListHead(&CcDeferredWrites);
+ InitializeListHead(&CcCleanSharedCacheMapList);
+ KeInitializeSpinLock(&CcDeferredWriteSpinLock);
KeInitializeGuardedMutex(&ViewLock);
ExInitializeNPagedLookasideList(&iBcbLookasideList,
NULL,
CcInitCacheZeroPage();
}
+#if DBG && defined(KDBG)
+BOOLEAN
+ExpKdbgExtFileCache(ULONG Argc, PCHAR Argv[])
+{
+ PLIST_ENTRY ListEntry;
+ UNICODE_STRING NoName = RTL_CONSTANT_STRING(L"No name for File");
+
+ KdbpPrint(" Usage Summary (in kb)\n");
+ KdbpPrint("Shared\t\tValid\tDirty\tName\n");
+ /* No need to lock the spin lock here, we're in DBG */
+ for (ListEntry = CcCleanSharedCacheMapList.Flink;
+ ListEntry != &CcCleanSharedCacheMapList;
+ ListEntry = ListEntry->Flink)
+ {
+ PLIST_ENTRY Vacbs;
+ ULONG Valid = 0, Dirty = 0;
+ PROS_SHARED_CACHE_MAP SharedCacheMap;
+ PUNICODE_STRING FileName;
+
+ SharedCacheMap = CONTAINING_RECORD(ListEntry, ROS_SHARED_CACHE_MAP, SharedCacheMapLinks);
+
+ /* Dirty size */
+ Dirty = (SharedCacheMap->DirtyPages * PAGE_SIZE) / 1024;
+
+ /* First, count the valid data across all the associated VACBs */
+ for (Vacbs = SharedCacheMap->CacheMapVacbListHead.Flink;
+ Vacbs != &SharedCacheMap->CacheMapVacbListHead;
+ Vacbs = Vacbs->Flink)
+ {
+ PROS_VACB Vacb;
+
+ Vacb = CONTAINING_RECORD(Vacbs, ROS_VACB, CacheMapVacbListEntry);
+ if (Vacb->Valid)
+ {
+ Valid += VACB_MAPPING_GRANULARITY / 1024;
+ }
+ }
+
+ /* Setup name */
+ if (SharedCacheMap->FileObject != NULL &&
+ SharedCacheMap->FileObject->FileName.Length != 0)
+ {
+ FileName = &SharedCacheMap->FileObject->FileName;
+ }
+ else
+ {
+ FileName = &NoName;
+ }
+
+ /* And print */
+ KdbpPrint("%p\t%d\t%d\t%wZ\n", SharedCacheMap, Valid, Dirty, FileName);
+ }
+
+ return TRUE;
+}
+
+BOOLEAN
+ExpKdbgExtDefWrites(ULONG Argc, PCHAR Argv[])
+{
+ KdbpPrint("CcTotalDirtyPages:\t%lu (%lu Kb)\n", CcTotalDirtyPages,
+ (CcTotalDirtyPages * PAGE_SIZE) / 1024);
+ KdbpPrint("CcDirtyPageThreshold:\t%lu (%lu Kb)\n", CcDirtyPageThreshold,
+ (CcDirtyPageThreshold * PAGE_SIZE) / 1024);
+ KdbpPrint("MmAvailablePages:\t%lu (%lu Kb)\n", MmAvailablePages,
+ (MmAvailablePages * PAGE_SIZE) / 1024);
+ KdbpPrint("MmThrottleTop:\t\t%lu (%lu Kb)\n", MmThrottleTop,
+ (MmThrottleTop * PAGE_SIZE) / 1024);
+ KdbpPrint("MmThrottleBottom:\t%lu (%lu Kb)\n", MmThrottleBottom,
+ (MmThrottleBottom * PAGE_SIZE) / 1024);
+ KdbpPrint("MmModifiedPageListHead.Total:\t%lu (%lu Kb)\n", MmModifiedPageListHead.Total,
+ (MmModifiedPageListHead.Total * PAGE_SIZE) / 1024);
+
+ if (CcTotalDirtyPages >= CcDirtyPageThreshold)
+ {
+ KdbpPrint("CcTotalDirtyPages above the threshold, writes should be throttled\n");
+ }
+ else if (CcTotalDirtyPages + 64 >= CcDirtyPageThreshold)
+ {
+ KdbpPrint("CcTotalDirtyPages within 64 (max charge) pages of the threshold, writes may be throttled\n");
+ }
+ else
+ {
+ KdbpPrint("CcTotalDirtyPages below the threshold, writes should not be throttled\n");
+ }
+
+ return TRUE;
+}
+#endif
+
/* EOF */