(*Count) = 0;
+ KeEnterCriticalRegion();
KeAcquireGuardedMutex(&ViewLock);
WriteCount[0] = WriteCount[1];
current_entry = current_entry->Flink;
Locked = current->Bcb->Callbacks->AcquireForLazyWrite(
- current->Bcb->LazyWriteContext, FALSE);
+ current->Bcb->LazyWriteContext, TRUE);
if (!Locked)
{
continue;
}
-
- Locked = ExTryToAcquirePushLockExclusive(¤t->Lock);
- if (!Locked)
- {
- current->Bcb->Callbacks->ReleaseFromLazyWrite(
- current->Bcb->LazyWriteContext);
- continue;
- }
-
+ KeWaitForSingleObject(¤t->Mutex,
+ Executive,
+ KernelMode,
+ FALSE,
+ NULL);
+
ASSERT(current->Dirty);
if (current->ReferenceCount > 1)
{
- ExReleasePushLock(¤t->Lock);
+ KeReleaseMutex(¤t->Mutex, 0);
current->Bcb->Callbacks->ReleaseFromLazyWrite(
current->Bcb->LazyWriteContext);
continue;
}
-
+
PagesPerSegment = current->Bcb->CacheSegmentSize / PAGE_SIZE;
KeReleaseGuardedMutex(&ViewLock);
Status = CcRosFlushCacheSegment(current);
- ExReleasePushLock(¤t->Lock);
+ KeReleaseMutex(¤t->Mutex, 0);
current->Bcb->Callbacks->ReleaseFromLazyWrite(
current->Bcb->LazyWriteContext);
}
KeReleaseGuardedMutex(&ViewLock);
+ KeLeaveCriticalRegion();
DPRINT("CcRosFlushDirtyPages() finished\n");
return(STATUS_SUCCESS);
ULONG PagesFreed;
KIRQL oldIrql;
LIST_ENTRY FreeList;
-
+ PFN_NUMBER Page;
+ ULONG i;
+ BOOLEAN FlushedPages = FALSE;
+
DPRINT("CcRosTrimCache(Target %d)\n", Target);
-
- *NrFreed = 0;
-
+
InitializeListHead(&FreeList);
+ *NrFreed = 0;
+
+retry:
KeAcquireGuardedMutex(&ViewLock);
+
current_entry = CacheSegmentLRUListHead.Flink;
- while (current_entry != &CacheSegmentLRUListHead && Target > 0)
+ while (current_entry != &CacheSegmentLRUListHead)
{
current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
CacheSegmentLRUListEntry);
current_entry = current_entry->Flink;
-
+
KeAcquireSpinLock(¤t->Bcb->BcbLock, &oldIrql);
- if (current->MappedCount > 0 && !current->Dirty && !current->PageOut)
+ /* Reference the cache segment */
+ CcRosCacheSegmentIncRefCount(current);
+
+ /* Check if it's mapped and not dirty */
+ if (current->MappedCount > 0 && !current->Dirty)
{
- ULONG i;
-
- CcRosCacheSegmentIncRefCount(current);
- current->PageOut = TRUE;
+ /* We have to break these locks because Cc sucks */
KeReleaseSpinLock(¤t->Bcb->BcbLock, oldIrql);
KeReleaseGuardedMutex(&ViewLock);
+
+ /* Page out the segment */
for (i = 0; i < current->Bcb->CacheSegmentSize / PAGE_SIZE; i++)
{
- PFN_NUMBER Page;
- Page = (PFN_NUMBER)(MmGetPhysicalAddress((char*)current->BaseAddress + i * PAGE_SIZE).QuadPart >> PAGE_SHIFT);
+ Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);
+
MmPageOutPhysicalAddress(Page);
}
+
+ /* Reacquire the locks */
KeAcquireGuardedMutex(&ViewLock);
KeAcquireSpinLock(¤t->Bcb->BcbLock, &oldIrql);
- CcRosCacheSegmentDecRefCount(current);
}
-
+
+ /* Dereference the cache segment */
+ CcRosCacheSegmentDecRefCount(current);
+
+ /* Check if we can free this entry now */
if (current->ReferenceCount == 0)
{
+ ASSERT(!current->Dirty);
+ ASSERT(!current->MappedCount);
+
+ RemoveEntryList(¤t->BcbSegmentListEntry);
+ RemoveEntryList(¤t->CacheSegmentListEntry);
+ RemoveEntryList(¤t->CacheSegmentLRUListEntry);
+ InsertHeadList(&FreeList, ¤t->BcbSegmentListEntry);
+
+ /* Calculate how many pages we freed for Mm */
PagesPerSegment = current->Bcb->CacheSegmentSize / PAGE_SIZE;
- // PagesFreed = PagesPerSegment;
PagesFreed = min(PagesPerSegment, Target);
Target -= PagesFreed;
- (*NrFreed) += PagesFreed;
+ (*NrFreed) += PagesFreed;
}
-
+
KeReleaseSpinLock(¤t->Bcb->BcbLock, oldIrql);
}
-
- current_entry = CacheSegmentLRUListHead.Flink;
- while (current_entry != &CacheSegmentLRUListHead)
+
+ KeReleaseGuardedMutex(&ViewLock);
+
+ /* Try flushing pages if we haven't met our target */
+ if (Target > 0 && !FlushedPages)
{
- current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
- CacheSegmentLRUListEntry);
- current->PageOut = FALSE;
- current_entry = current_entry->Flink;
-
- KeAcquireSpinLock(¤t->Bcb->BcbLock, &oldIrql);
- if (current->ReferenceCount == 0)
- {
- RemoveEntryList(¤t->BcbSegmentListEntry);
- KeReleaseSpinLock(¤t->Bcb->BcbLock, oldIrql);
- RemoveEntryList(¤t->CacheSegmentListEntry);
- RemoveEntryList(¤t->CacheSegmentLRUListEntry);
- InsertHeadList(&FreeList, ¤t->BcbSegmentListEntry);
- }
- else
+ /* Flush dirty pages to disk */
+ CcRosFlushDirtyPages(Target, &PagesFreed);
+ FlushedPages = TRUE;
+
+ /* We can only swap as many pages as we flushed */
+ if (PagesFreed < Target) Target = PagesFreed;
+
+ /* Check if we flushed anything */
+ if (PagesFreed != 0)
{
- KeReleaseSpinLock(¤t->Bcb->BcbLock, oldIrql);
+ /* Try again after flushing dirty pages */
+ DPRINT("Flushed %d dirty cache pages to disk\n", PagesFreed);
+ goto retry;
}
}
-
- KeReleaseGuardedMutex(&ViewLock);
-
+
while (!IsListEmpty(&FreeList))
{
current_entry = RemoveHeadList(&FreeList);
BcbSegmentListEntry);
CcRosInternalFreeCacheSegment(current);
}
-
+
+ DPRINT("Evicted %d cache pages\n", (*NrFreed));
+
return(STATUS_SUCCESS);
}
InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
}
- RemoveEntryList(&CacheSeg->CacheSegmentLRUListEntry);
- InsertTailList(&CacheSegmentLRUListHead, &CacheSeg->CacheSegmentLRUListEntry);
if (Mapped)
{
}
KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
KeReleaseGuardedMutex(&ViewLock);
- ExReleasePushLock(&CacheSeg->Lock);
+ KeReleaseMutex(&CacheSeg->Mutex, 0);
return(STATUS_SUCCESS);
}
{
CcRosCacheSegmentIncRefCount(current);
KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
- ExAcquirePushLockExclusive(¤t->Lock);
+ KeWaitForSingleObject(¤t->Mutex,
+ Executive,
+ KernelMode,
+ FALSE,
+ NULL);
return(current);
}
current_entry = current_entry->Flink;
KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
}
+ KeAcquireGuardedMutex(&ViewLock);
+
+ /* Move to the tail of the LRU list */
+ RemoveEntryList(&CacheSeg->CacheSegmentLRUListEntry);
+ InsertTailList(&CacheSegmentLRUListHead, &CacheSeg->CacheSegmentLRUListEntry);
+
+ KeReleaseGuardedMutex(&ViewLock);
CacheSeg->Dirty = TRUE;
- ExReleasePushLock(&CacheSeg->Lock);
+ KeReleaseMutex(&CacheSeg->Mutex, 0);
return(STATUS_SUCCESS);
}
}
KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
- ExReleasePushLock(&CacheSeg->Lock);
+ KeReleaseMutex(&CacheSeg->Mutex, 0);
+
return(STATUS_SUCCESS);
}
current->DirtySegmentListEntry.Flink = NULL;
current->DirtySegmentListEntry.Blink = NULL;
current->ReferenceCount = 1;
- ExInitializePushLock((PULONG_PTR)¤t->Lock);
- ExAcquirePushLockExclusive(¤t->Lock);
+ KeInitializeMutex(¤t->Mutex, 0);
+ KeWaitForSingleObject(¤t->Mutex,
+ Executive,
+ KernelMode,
+ FALSE,
+ NULL);
KeAcquireGuardedMutex(&ViewLock);
*CacheSeg = current;
current );
}
#endif
- ExReleasePushLock(&(*CacheSeg)->Lock);
+ KeReleaseMutex(&(*CacheSeg)->Mutex, 0);
KeReleaseGuardedMutex(&ViewLock);
ExFreeToNPagedLookasideList(&CacheSegLookasideList, *CacheSeg);
*CacheSeg = current;
- ExAcquirePushLockExclusive(¤t->Lock);
+ KeWaitForSingleObject(¤t->Mutex,
+ Executive,
+ KernelMode,
+ FALSE,
+ NULL);
return STATUS_SUCCESS;
}
if (current->FileOffset < FileOffset)
MmLockAddressSpace(MmGetKernelAddressSpace());
current->BaseAddress = NULL;
Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
- MEMORY_AREA_CACHE_SEGMENT,
+ 0, // nothing checks for cache_segment mareas, so set to 0
¤t->BaseAddress,
Bcb->CacheSegmentSize,
PAGE_READWRITE,
#endif
/* Create a virtual mapping for this memory area */
+ MI_SET_USAGE(MI_USAGE_CACHE);
+#if MI_TRACE_PFNS
+ PWCHAR pos = NULL;
+ ULONG len = 0;
+ if ((Bcb->FileObject) && (Bcb->FileObject->FileName.Buffer))
+ {
+ pos = wcsrchr(Bcb->FileObject->FileName.Buffer, '\\');
+ len = wcslen(pos) * sizeof(WCHAR);
+ if (pos) snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
+ }
+#endif
+
MmMapMemoryArea(current->BaseAddress, Bcb->CacheSegmentSize,
MC_CACHE, PAGE_READWRITE);
current = CcRosLookupCacheSegment(Bcb, CurrentOffset);
if (current != NULL)
{
+ KeAcquireGuardedMutex(&ViewLock);
+
+ /* Move to tail of LRU list */
+ RemoveEntryList(¤t->CacheSegmentLRUListEntry);
+ InsertTailList(&CacheSegmentLRUListHead, ¤t->CacheSegmentLRUListEntry);
+
+ KeReleaseGuardedMutex(&ViewLock);
+
CacheSegList[i] = current;
}
else
return Status;
}
}
+
+ KeAcquireGuardedMutex(&ViewLock);
+
+ /* Move to the tail of the LRU list */
+ RemoveEntryList(¤t->CacheSegmentLRUListEntry);
+ InsertTailList(&CacheSegmentLRUListHead, ¤t->CacheSegmentLRUListEntry);
+
+ KeReleaseGuardedMutex(&ViewLock);
+
/*
* Return information about the segment to the caller.
*/
ASSERT(SwapEntry == 0);
if (Page != 0)
{
+ ASSERT(MmGetReferenceCountPage(Page) == 1);
MmReleasePageMemoryConsumer(MC_CACHE, Page);
}
}
*/
/*
 * @implemented
 *
 * Flushes the dirty cache segments backing the given section to disk.
 *
 * SectionObjectPointers - section object pointers of the file to flush;
 *                         must have a valid SharedCacheMap (BCB) or the
 *                         call fails with STATUS_INVALID_PARAMETER.
 * FileOffset            - optional starting offset; when NULL the whole
 *                         cached file (Bcb->FileSize.u.LowPart bytes from
 *                         offset 0) is flushed and Length is ignored.
 * Length                - number of bytes to flush starting at FileOffset.
 * IoStatus              - optional; receives STATUS_SUCCESS or the first
 *                         flush failure status. Information is set to 0.
 */
VOID NTAPI
CcFlushCache(IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
             IN PLARGE_INTEGER FileOffset OPTIONAL,
             IN ULONG Length,
             OUT PIO_STATUS_BLOCK IoStatus)
{
    PBCB Bcb;
    LARGE_INTEGER Offset;
    PCACHE_SEGMENT current;
    NTSTATUS Status;
    KIRQL oldIrql;

    DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %d, IoStatus 0x%p)\n",
           SectionObjectPointers, FileOffset, Length, IoStatus);

    if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
    {
        Bcb = (PBCB)SectionObjectPointers->SharedCacheMap;
        ASSERT(Bcb);
        if (FileOffset)
        {
            Offset = *FileOffset;
        }
        else
        {
            /* No offset given: flush the entire cached range of the file */
            Offset.QuadPart = (LONGLONG)0;
            Length = Bcb->FileSize.u.LowPart;
        }

        if (IoStatus)
        {
            IoStatus->Status = STATUS_SUCCESS;
            IoStatus->Information = 0;
        }

        /* Walk the requested range one cache segment at a time */
        while (Length > 0)
        {
            /* Lookup returns the segment referenced and with its mutex held */
            current = CcRosLookupCacheSegment (Bcb, Offset.u.LowPart);
            if (current != NULL)
            {
                if (current->Dirty)
                {
                    Status = CcRosFlushCacheSegment(current);
                    if (!NT_SUCCESS(Status) && IoStatus != NULL)
                    {
                        /* Report the failure but keep flushing the rest */
                        IoStatus->Status = Status;
                    }
                }

                /* Release the segment mutex before taking the BCB spinlock
                 * to respect the lock ordering used elsewhere in this file */
                KeReleaseMutex(&current->Mutex, 0);
                KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
                CcRosCacheSegmentDecRefCount(current);
                KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
            }

            /* Advance to the next segment, clamping at the end of the range */
            Offset.QuadPart += Bcb->CacheSegmentSize;
            if (Length > Bcb->CacheSegmentSize)
            {
                Length -= Bcb->CacheSegmentSize;
            }
            else
            {
                Length = 0;
            }
        }
    }
    else
    {
        /* No shared cache map: nothing to flush */
        if (IoStatus)
        {
            IoStatus->Status = STATUS_INVALID_PARAMETER;
        }
    }
}
NTSTATUS
* FUNCTION: Releases the BCB associated with a file object
*/
{
- PLIST_ENTRY current_entry;
- PCACHE_SEGMENT current;
- LIST_ENTRY FreeList;
- KIRQL oldIrql;
+ PLIST_ENTRY current_entry;
+ PCACHE_SEGMENT current;
+ LIST_ENTRY FreeList;
+ KIRQL oldIrql;
- ASSERT(Bcb);
+ ASSERT(Bcb);
- Bcb->RefCount++;
- KeReleaseGuardedMutex(&ViewLock);
+ Bcb->RefCount++;
+ KeReleaseGuardedMutex(&ViewLock);
- CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);
+ CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);
- KeAcquireGuardedMutex(&ViewLock);
- Bcb->RefCount--;
- if (Bcb->RefCount == 0)
- {
- if (Bcb->BcbRemoveListEntry.Flink != NULL)
- {
- RemoveEntryList(&Bcb->BcbRemoveListEntry);
- Bcb->BcbRemoveListEntry.Flink = NULL;
- }
+ KeAcquireGuardedMutex(&ViewLock);
+ Bcb->RefCount--;
+ if (Bcb->RefCount == 0)
+ {
+ if (Bcb->BcbRemoveListEntry.Flink != NULL)
+ {
+ RemoveEntryList(&Bcb->BcbRemoveListEntry);
+ Bcb->BcbRemoveListEntry.Flink = NULL;
+ }
- FileObject->SectionObjectPointer->SharedCacheMap = NULL;
+ FileObject->SectionObjectPointer->SharedCacheMap = NULL;
- /*
- * Release all cache segments.
- */
- InitializeListHead(&FreeList);
- KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
- current_entry = Bcb->BcbSegmentListHead.Flink;
- while (!IsListEmpty(&Bcb->BcbSegmentListHead))
- {
- current_entry = RemoveTailList(&Bcb->BcbSegmentListHead);
- current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
- RemoveEntryList(¤t->CacheSegmentListEntry);
- RemoveEntryList(¤t->CacheSegmentLRUListEntry);
- if (current->Dirty)
- {
- RemoveEntryList(¤t->DirtySegmentListEntry);
- DirtyPageCount -= Bcb->CacheSegmentSize / PAGE_SIZE;
- DPRINT1("Freeing dirty segment\n");
- }
- InsertHeadList(&FreeList, ¤t->BcbSegmentListEntry);
- }
+ /*
+ * Release all cache segments.
+ */
+ InitializeListHead(&FreeList);
+ KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
+ current_entry = Bcb->BcbSegmentListHead.Flink;
+ while (!IsListEmpty(&Bcb->BcbSegmentListHead))
+ {
+ current_entry = RemoveTailList(&Bcb->BcbSegmentListHead);
+ current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
+ RemoveEntryList(¤t->CacheSegmentListEntry);
+ RemoveEntryList(¤t->CacheSegmentLRUListEntry);
+ if (current->Dirty)
+ {
+ RemoveEntryList(¤t->DirtySegmentListEntry);
+ DirtyPageCount -= Bcb->CacheSegmentSize / PAGE_SIZE;
+ DPRINT1("Freeing dirty segment\n");
+ }
+ InsertHeadList(&FreeList, ¤t->BcbSegmentListEntry);
+ }
#if DBG
- Bcb->Trace = FALSE;
+ Bcb->Trace = FALSE;
#endif
- KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
+ KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
- KeReleaseGuardedMutex(&ViewLock);
- ObDereferenceObject (Bcb->FileObject);
+ KeReleaseGuardedMutex(&ViewLock);
+ ObDereferenceObject (Bcb->FileObject);
- while (!IsListEmpty(&FreeList))
- {
- current_entry = RemoveTailList(&FreeList);
- current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
- CcRosInternalFreeCacheSegment(current);
- }
- ExFreeToNPagedLookasideList(&BcbLookasideList, Bcb);
- KeAcquireGuardedMutex(&ViewLock);
- }
- return(STATUS_SUCCESS);
+ while (!IsListEmpty(&FreeList))
+ {
+ current_entry = RemoveTailList(&FreeList);
+ current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
+ CcRosInternalFreeCacheSegment(current);
+ }
+ ExFreeToNPagedLookasideList(&BcbLookasideList, Bcb);
+ KeAcquireGuardedMutex(&ViewLock);
+ }
+ return(STATUS_SUCCESS);
}
VOID