(*Count) = 0;
+ KeEnterCriticalRegion();
KeAcquireGuardedMutex(&ViewLock);
WriteCount[0] = WriteCount[1];
}
KeReleaseGuardedMutex(&ViewLock);
+ KeLeaveCriticalRegion();
DPRINT("CcRosFlushDirtyPages() finished\n");
return(STATUS_SUCCESS);
ULONG PagesFreed;
KIRQL oldIrql;
LIST_ENTRY FreeList;
-
+ PFN_NUMBER Page;
+ ULONG i;
+
DPRINT("CcRosTrimCache(Target %d)\n", Target);
+
+ InitializeListHead(&FreeList);
+
+ /* Flush dirty pages to disk */
+ CcRosFlushDirtyPages(Target, NrFreed);
+ if ((*NrFreed) != 0) DPRINT1("Flushed %d dirty cache pages to disk\n", (*NrFreed));
+
*NrFreed = 0;
-
- InitializeListHead(&FreeList);
KeAcquireGuardedMutex(&ViewLock);
+
current_entry = CacheSegmentLRUListHead.Flink;
- while (current_entry != &CacheSegmentLRUListHead && Target > 0)
+ while (current_entry != &CacheSegmentLRUListHead)
{
- NTSTATUS Status;
-
- Status = STATUS_SUCCESS;
current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
CacheSegmentLRUListEntry);
current_entry = current_entry->Flink;
-
+
KeAcquireSpinLock(&current->Bcb->BcbLock, &oldIrql);
- if (current->MappedCount > 0 && !current->Dirty && !current->PageOut)
+ /* Reference the cache segment */
+ CcRosCacheSegmentIncRefCount(current);
+
+ /* Check if it's mapped and not dirty */
+ if (current->MappedCount > 0 && !current->Dirty)
{
- ULONG i;
-
- CcRosCacheSegmentIncRefCount(current);
- current->PageOut = TRUE;
+ /* We have to break these locks because Cc sucks */
- KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
KeReleaseGuardedMutex(&ViewLock);
+
+ /* Page out the segment */
for (i = 0; i < current->Bcb->CacheSegmentSize / PAGE_SIZE; i++)
{
- PFN_TYPE Page;
- Page = (PFN_TYPE)(MmGetPhysicalAddress((char*)current->BaseAddress + i * PAGE_SIZE).QuadPart >> PAGE_SHIFT);
- Status = MmPageOutPhysicalAddress(Page);
+ Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);
+
+ MmPageOutPhysicalAddress(Page);
}
+
+ /* Reacquire the locks */
KeAcquireGuardedMutex(&ViewLock);
KeAcquireSpinLock(&current->Bcb->BcbLock, &oldIrql);
- CcRosCacheSegmentDecRefCount(current);
- }
-
- if (current->ReferenceCount == 0)
- {
- PagesPerSegment = current->Bcb->CacheSegmentSize / PAGE_SIZE;
- // PagesFreed = PagesPerSegment;
- PagesFreed = min(PagesPerSegment, Target);
- Target -= PagesFreed;
- (*NrFreed) += PagesFreed;
}
-
- KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
- }
-
- current_entry = CacheSegmentLRUListHead.Flink;
- while (current_entry != &CacheSegmentLRUListHead)
- {
- current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
- CacheSegmentLRUListEntry);
- current->PageOut = FALSE;
- current_entry = current_entry->Flink;
-
- KeAcquireSpinLock(&current->Bcb->BcbLock, &oldIrql);
+
+ /* Dereference the cache segment */
+ CcRosCacheSegmentDecRefCount(current);
+
+ /* Check if we can free this entry now */
if (current->ReferenceCount == 0)
{
+ ASSERT(!current->Dirty);
+ ASSERT(!current->MappedCount);
+
RemoveEntryList(&current->BcbSegmentListEntry);
- KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
RemoveEntryList(&current->CacheSegmentListEntry);
RemoveEntryList(&current->CacheSegmentLRUListEntry);
InsertHeadList(&FreeList, &current->BcbSegmentListEntry);
+
+ /* Calculate how many pages we freed for Mm */
+ PagesPerSegment = current->Bcb->CacheSegmentSize / PAGE_SIZE;
+ PagesFreed = min(PagesPerSegment, Target);
+ Target -= PagesFreed;
+ (*NrFreed) += PagesFreed;
}
- else
- {
- KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
- }
+
+ KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
}
-
+
KeReleaseGuardedMutex(&ViewLock);
-
+
while (!IsListEmpty(&FreeList))
{
current_entry = RemoveHeadList(&FreeList);
current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
BcbSegmentListEntry);
CcRosInternalFreeCacheSegment(current);
}
-
+
+ DPRINT1("Evicted %d cache pages\n", (*NrFreed));
+
return(STATUS_SUCCESS);
}
current->DirtySegmentListEntry.Flink = NULL;
current->DirtySegmentListEntry.Blink = NULL;
current->ReferenceCount = 1;
- ExInitializePushLock((PULONG_PTR)&current->Lock);
+ ExInitializePushLock(&current->Lock);
ExAcquirePushLockExclusive(&current->Lock);
KeAcquireGuardedMutex(&ViewLock);
MmLockAddressSpace(MmGetKernelAddressSpace());
current->BaseAddress = NULL;
Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
- MEMORY_AREA_CACHE_SEGMENT,
+ 0, // nothing checks for cache_segment mareas, so set to 0
&current->BaseAddress,
Bcb->CacheSegmentSize,
PAGE_READWRITE,
#endif
/* Create a virtual mapping for this memory area */
+ MI_SET_USAGE(MI_USAGE_CACHE);
+#if MI_TRACE_PFNS
+ PWCHAR pos = NULL;
+ ULONG len = 0;
+ if ((Bcb->FileObject) && (Bcb->FileObject->FileName.Buffer))
+ {
+ pos = wcsrchr(Bcb->FileObject->FileName.Buffer, '\\');
+ len = wcslen(pos) * sizeof(WCHAR);
+ if (pos) snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
+ }
+#endif
+
MmMapMemoryArea(current->BaseAddress, Bcb->CacheSegmentSize,
MC_CACHE, PAGE_READWRITE);
#else
static VOID
CcFreeCachePage(PVOID Context, MEMORY_AREA* MemoryArea, PVOID Address,
- PFN_TYPE Page, SWAPENTRY SwapEntry, BOOLEAN Dirty)
+ PFN_NUMBER Page, SWAPENTRY SwapEntry, BOOLEAN Dirty)
{
ASSERT(SwapEntry == 0);
if (Page != 0)
ULONG i;
ULONG RegionSize;
ULONG Base;
- PFN_TYPE Page;
+ PFN_NUMBER Page;
KIRQL oldIrql;
#endif
DPRINT("Freeing cache segment 0x%p\n", CacheSeg);
{
PLIST_ENTRY current_entry;
PCACHE_SEGMENT current;
- NTSTATUS Status;
LIST_ENTRY FreeList;
KIRQL oldIrql;
{
current_entry = RemoveTailList(&FreeList);
current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
- Status = CcRosInternalFreeCacheSegment(current);
+ CcRosInternalFreeCacheSegment(current);
}
ExFreeToNPagedLookasideList(&BcbLookasideList, Bcb);
KeAcquireGuardedMutex(&ViewLock);