ULONG PagesFreed;
KIRQL oldIrql;
LIST_ENTRY FreeList;
-
+ PFN_NUMBER Page;
+ ULONG i;
+ BOOLEAN FlushedPages = FALSE;
+
DPRINT("CcRosTrimCache(Target %d)\n", Target);
-
- *NrFreed = 0;
-
+
InitializeListHead(&FreeList);
+ *NrFreed = 0;
+
+retry:
KeAcquireGuardedMutex(&ViewLock);
+
current_entry = CacheSegmentLRUListHead.Flink;
- while (current_entry != &CacheSegmentLRUListHead && Target > 0)
+ while (current_entry != &CacheSegmentLRUListHead)
{
current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
CacheSegmentLRUListEntry);
current_entry = current_entry->Flink;
-
+
KeAcquireSpinLock(&current->Bcb->BcbLock, &oldIrql);
- if (current->MappedCount > 0 && !current->Dirty && !current->PageOut)
+ /* Reference the cache segment */
+ CcRosCacheSegmentIncRefCount(current);
+
+ /* Check if it's mapped and not dirty */
+ if (current->MappedCount > 0 && !current->Dirty)
{
- ULONG i;
-
- CcRosCacheSegmentIncRefCount(current);
- current->PageOut = TRUE;
+ /* We have to break these locks because Cc sucks */
KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
KeReleaseGuardedMutex(&ViewLock);
+
+ /* Page out the segment */
for (i = 0; i < current->Bcb->CacheSegmentSize / PAGE_SIZE; i++)
{
- PFN_NUMBER Page;
- Page = (PFN_NUMBER)(MmGetPhysicalAddress((char*)current->BaseAddress + i * PAGE_SIZE).QuadPart >> PAGE_SHIFT);
+ Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);
+
MmPageOutPhysicalAddress(Page);
}
+
+ /* Reacquire the locks */
KeAcquireGuardedMutex(&ViewLock);
+ KeAcquireSpinLock(&current->Bcb->BcbLock, &oldIrql);
- CcRosCacheSegmentDecRefCount(current);
}
-
+
+ /* Dereference the cache segment */
+ CcRosCacheSegmentDecRefCount(current);
+
+ /* Check if we can free this entry now */
if (current->ReferenceCount == 0)
{
+ ASSERT(!current->Dirty);
+ ASSERT(!current->MappedCount);
+
+ RemoveEntryList(&current->BcbSegmentListEntry);
+ RemoveEntryList(&current->CacheSegmentListEntry);
+ RemoveEntryList(&current->CacheSegmentLRUListEntry);
+ InsertHeadList(&FreeList, &current->BcbSegmentListEntry);
+
+ /* Calculate how many pages we freed for Mm */
PagesPerSegment = current->Bcb->CacheSegmentSize / PAGE_SIZE;
- // PagesFreed = PagesPerSegment;
PagesFreed = min(PagesPerSegment, Target);
Target -= PagesFreed;
- (*NrFreed) += PagesFreed;
+ (*NrFreed) += PagesFreed;
}
-
+
KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
}
-
- current_entry = CacheSegmentLRUListHead.Flink;
- while (current_entry != &CacheSegmentLRUListHead)
+
+ KeReleaseGuardedMutex(&ViewLock);
+
+ /* Try flushing pages if we haven't met our target */
+ if (Target > 0 && !FlushedPages)
{
- current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
- CacheSegmentLRUListEntry);
- current->PageOut = FALSE;
- current_entry = current_entry->Flink;
-
- KeAcquireSpinLock(&current->Bcb->BcbLock, &oldIrql);
- if (current->ReferenceCount == 0)
- {
- RemoveEntryList(&current->BcbSegmentListEntry);
- KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
- RemoveEntryList(&current->CacheSegmentListEntry);
- RemoveEntryList(&current->CacheSegmentLRUListEntry);
- InsertHeadList(&FreeList, &current->BcbSegmentListEntry);
- }
- else
+ /* Flush dirty pages to disk */
+ CcRosFlushDirtyPages(Target, &PagesFreed);
+ FlushedPages = TRUE;
+
+ /* We can only swap as many pages as we flushed */
+ if (PagesFreed < Target) Target = PagesFreed;
+
+ /* Check if we flushed anything */
+ if (PagesFreed != 0)
{
- KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
+ /* Try again after flushing dirty pages */
+ DPRINT1("Flushed %d dirty cache pages to disk\n", PagesFreed);
+ goto retry;
}
}
-
- KeReleaseGuardedMutex(&ViewLock);
-
+
while (!IsListEmpty(&FreeList))
{
current_entry = RemoveHeadList(&FreeList);
current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
CcRosInternalFreeCacheSegment(current);
}
-
+
+ DPRINT1("Evicted %d cache pages\n", (*NrFreed));
+
return(STATUS_SUCCESS);
}
InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
}
- RemoveEntryList(&CacheSeg->CacheSegmentLRUListEntry);
- InsertTailList(&CacheSegmentLRUListHead, &CacheSeg->CacheSegmentLRUListEntry);
if (Mapped)
{
KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
}
+ KeAcquireGuardedMutex(&ViewLock);
+
+ /* Move to the tail of the LRU list */
+ RemoveEntryList(&CacheSeg->CacheSegmentLRUListEntry);
+ InsertTailList(&CacheSegmentLRUListHead, &CacheSeg->CacheSegmentLRUListEntry);
+
+ KeReleaseGuardedMutex(&ViewLock);
CacheSeg->Dirty = TRUE;
ExReleasePushLock(&CacheSeg->Lock);
current->DirtySegmentListEntry.Flink = NULL;
current->DirtySegmentListEntry.Blink = NULL;
current->ReferenceCount = 1;
- ExInitializePushLock((PULONG_PTR)&current->Lock);
+ ExInitializePushLock(&current->Lock);
ExAcquirePushLockExclusive(&current->Lock);
KeAcquireGuardedMutex(&ViewLock);
current = CcRosLookupCacheSegment(Bcb, CurrentOffset);
if (current != NULL)
{
+ KeAcquireGuardedMutex(&ViewLock);
+
+ /* Move to tail of LRU list */
+ RemoveEntryList(&current->CacheSegmentLRUListEntry);
+ InsertTailList(&CacheSegmentLRUListHead, &current->CacheSegmentLRUListEntry);
+
+ KeReleaseGuardedMutex(&ViewLock);
+
CacheSegList[i] = current;
}
else
return Status;
}
}
+
+ KeAcquireGuardedMutex(&ViewLock);
+
+ /* Move to the tail of the LRU list */
+ RemoveEntryList(&current->CacheSegmentLRUListEntry);
+ InsertTailList(&CacheSegmentLRUListHead, &current->CacheSegmentLRUListEntry);
+
+ KeReleaseGuardedMutex(&ViewLock);
+
/*
* Return information about the segment to the caller.
*/