-/*
- * ReactOS kernel
- * Copyright (C) 1998, 1999, 2000, 2001 ReactOS Team
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-/* $Id: view.c,v 1.43 2002/06/10 21:11:56 hbirr Exp $
+/* $Id$
*
* COPYRIGHT: See COPYING in the top level directory
* PROJECT: ReactOS kernel
* FILE: ntoskrnl/cc/view.c
* PURPOSE: Cache manager
- * PROGRAMMER: David Welch (welch@mcmail.com)
- * PORTABILITY: Checked
- * UPDATE HISTORY:
- * Created 22/05/98
+ *
+ * PROGRAMMERS: David Welch (welch@mcmail.com)
*/
/* NOTES **********************************************************************
*
- * This is not the NT implementation of a file cache nor anything much like
- * it.
+ * This is not the NT implementation of a file cache nor anything much like
+ * it.
*
- * The general procedure for a filesystem to implement a read or write
+ * The general procedure for a filesystem to implement a read or write
* dispatch routine is as follows
- *
+ *
* (1) If caching for the FCB hasn't been initiated then so do by calling
* CcInitializeFileCache.
- *
+ *
* (2) For each 4k region which is being read or written obtain a cache page
- * by calling CcRequestCachePage.
+ * by calling CcRequestCachePage.
*
- * (3) If either the page is being read or not completely written, and it is
+ * (3) If either the page is being read or not completely written, and it is
* not up to date then read its data from the underlying medium. If the read
- * fails then call CcReleaseCachePage with VALID as FALSE and return a error.
- *
+ * fails then call CcReleaseCachePage with VALID as FALSE and return an error.
+ *
* (4) Copy the data into or out of the page as necessary.
- *
+ *
* (5) Release the cache page
*/
/* INCLUDES ******************************************************************/
-#include <ddk/ntddk.h>
-#include <ddk/ntifs.h>
-#include <internal/mm.h>
-#include <internal/cc.h>
-#include <internal/pool.h>
-#include <ntos/minmax.h>
-
+#include <ntoskrnl.h>
#define NDEBUG
#include <internal/debug.h>
-extern void * alloca(size_t);
+#if defined (ALLOC_PRAGMA)
+#pragma alloc_text(INIT, CcInitView)
+#endif
/* GLOBALS *******************************************************************/
-#define ROUND_UP(N, S) ((((N) + (S) - 1) / (S)) * (S))
-#define ROUND_DOWN(N, S) (((N) % (S)) ? ROUND_UP(N, S) - S : N)
-
-#define TAG_CSEG TAG('C', 'S', 'E', 'G')
-#define TAG_BCB TAG('B', 'C', 'B', ' ')
+/*
+ * If CACHE_BITMAP is defined, the cache manager uses one large memory region
+ * within the kernel address space and allocates/deallocates space from this block
+ * over a bitmap. If CACHE_BITMAP is used, the size of the mdl mapping region
+ * must be reduced (ntoskrnl\mm\mdl.c, MI_MDLMAPPING_REGION_SIZE).
+ */
+//#define CACHE_BITMAP
static LIST_ENTRY DirtySegmentListHead;
static LIST_ENTRY CacheSegmentListHead;
static LIST_ENTRY CacheSegmentLRUListHead;
+static LIST_ENTRY ClosedListHead;
+ULONG DirtyPageCount=0;
-static FAST_MUTEX ViewLock;
+FAST_MUTEX ViewLock;
-NTSTATUS STDCALL
-CcRosInternalFreeCacheSegment(PBCB Bcb, PCACHE_SEGMENT CacheSeg);
+#ifdef CACHE_BITMAP
+#define CI_CACHESEG_MAPPING_REGION_SIZE (128*1024*1024)
+
+static PVOID CiCacheSegMappingRegionBase = NULL;
+static RTL_BITMAP CiCacheSegMappingRegionAllocMap;
+static ULONG CiCacheSegMappingRegionHint;
+static KSPIN_LOCK CiCacheSegMappingRegionLock;
+#endif
+
+NPAGED_LOOKASIDE_LIST iBcbLookasideList;
+static NPAGED_LOOKASIDE_LIST BcbLookasideList;
+static NPAGED_LOOKASIDE_LIST CacheSegLookasideList;
+
+static ULONG CcTimeStamp;
+static KEVENT LazyCloseThreadEvent;
+static HANDLE LazyCloseThreadHandle;
+static CLIENT_ID LazyCloseThreadId;
+static volatile BOOLEAN LazyCloseThreadShouldTerminate;
+
+#if defined(__GNUC__)
+/* void * alloca(size_t size); */
+#elif defined(_MSC_VER)
+void* _alloca(size_t size);
+#else
+#error Unknown compiler for alloca intrinsic stack allocation "function"
+#endif
+
+#if defined(DBG) || defined(KDBG)
+static void CcRosCacheSegmentIncRefCount_ ( PCACHE_SEGMENT cs, const char* file, int line )
+{
+ ++cs->ReferenceCount;
+ if ( cs->Bcb->Trace )
+ {
+ DbgPrint("(%s:%i) CacheSegment %p ++RefCount=%d, Dirty %d, PageOut %d\n",
+ file, line, cs, cs->ReferenceCount, cs->Dirty, cs->PageOut );
+ }
+}
+static void CcRosCacheSegmentDecRefCount_ ( PCACHE_SEGMENT cs, const char* file, int line )
+{
+ --cs->ReferenceCount;
+ if ( cs->Bcb->Trace )
+ {
+ DbgPrint("(%s:%i) CacheSegment %p --RefCount=%d, Dirty %d, PageOut %d\n",
+ file, line, cs, cs->ReferenceCount, cs->Dirty, cs->PageOut );
+ }
+}
+#define CcRosCacheSegmentIncRefCount(cs) CcRosCacheSegmentIncRefCount_(cs,__FILE__,__LINE__)
+#define CcRosCacheSegmentDecRefCount(cs) CcRosCacheSegmentDecRefCount_(cs,__FILE__,__LINE__)
+#else
+#define CcRosCacheSegmentIncRefCount(cs) (++((cs)->ReferenceCount))
+#define CcRosCacheSegmentDecRefCount(cs) (--((cs)->ReferenceCount))
+#endif
+
+NTSTATUS
+CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg);
+
+BOOLEAN
+FASTCALL
+CcTryToAcquireBrokenMutex(PFAST_MUTEX FastMutex)
+{
+ KeEnterCriticalRegion();
+ if (InterlockedCompareExchange(&FastMutex->Count, 0, 1) == 1)
+ {
+ FastMutex->Owner = KeGetCurrentThread();
+ return(TRUE);
+ }
+ else
+ {
+ KeLeaveCriticalRegion();
+ return(FALSE);
+ }
+}
/* FUNCTIONS *****************************************************************/
+VOID
+STDCALL
+CcRosTraceCacheMap (
+ PBCB Bcb,
+ BOOLEAN Trace )
+{
+#if defined(DBG) || defined(KDBG)
+ KIRQL oldirql;
+ PLIST_ENTRY current_entry;
+ PCACHE_SEGMENT current;
+
+ if ( !Bcb )
+ return;
+
+ Bcb->Trace = Trace;
+
+ if ( Trace )
+ {
+ DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", Bcb );
+
+ ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
+ KeAcquireSpinLock(&Bcb->BcbLock, &oldirql);
+
+ current_entry = Bcb->BcbSegmentListHead.Flink;
+ while (current_entry != &Bcb->BcbSegmentListHead)
+ {
+ current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
+ current_entry = current_entry->Flink;
+
+ DPRINT1(" CacheSegment 0x%p enabled, RefCount %d, Dirty %d, PageOut %d\n",
+ current, current->ReferenceCount, current->Dirty, current->PageOut );
+ }
+ KeReleaseSpinLock(&Bcb->BcbLock, oldirql);
+ ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
+ }
+ else
+ {
+ DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", Bcb );
+ }
+
+#else
+ Bcb = Bcb;
+ Trace = Trace;
+#endif
+}
+
+NTSTATUS
+NTAPI
+CcRosFlushCacheSegment(PCACHE_SEGMENT CacheSegment)
+{
+ NTSTATUS Status;
+ KIRQL oldIrql;
+ Status = WriteCacheSegment(CacheSegment);
+ if (NT_SUCCESS(Status))
+ {
+ ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
+ KeAcquireSpinLock(&CacheSegment->Bcb->BcbLock, &oldIrql);
+ CacheSegment->Dirty = FALSE;
+ RemoveEntryList(&CacheSegment->DirtySegmentListEntry);
+ DirtyPageCount -= CacheSegment->Bcb->CacheSegmentSize / PAGE_SIZE;
+ CcRosCacheSegmentDecRefCount ( CacheSegment );
+ KeReleaseSpinLock(&CacheSegment->Bcb->BcbLock, oldIrql);
+ ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
+ }
+ return(Status);
+}
+
+NTSTATUS
+NTAPI
+CcRosFlushDirtyPages(ULONG Target, PULONG Count)
+{
+ PLIST_ENTRY current_entry;
+ PCACHE_SEGMENT current;
+ ULONG PagesPerSegment;
+ BOOLEAN Locked;
+ NTSTATUS Status;
+ static ULONG WriteCount[4] = {0, 0, 0, 0};
+ ULONG NewTarget;
+
+ DPRINT("CcRosFlushDirtyPages(Target %d)\n", Target);
+
+ (*Count) = 0;
+
+ ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
+
+ WriteCount[0] = WriteCount[1];
+ WriteCount[1] = WriteCount[2];
+ WriteCount[2] = WriteCount[3];
+ WriteCount[3] = 0;
+
+ NewTarget = WriteCount[0] + WriteCount[1] + WriteCount[2];
+
+ if (NewTarget < DirtyPageCount)
+ {
+ NewTarget = (DirtyPageCount - NewTarget + 3) / 4;
+ WriteCount[0] += NewTarget;
+ WriteCount[1] += NewTarget;
+ WriteCount[2] += NewTarget;
+ WriteCount[3] += NewTarget;
+ }
+
+ NewTarget = WriteCount[0];
+
+ Target = max(NewTarget, Target);
+
+ current_entry = DirtySegmentListHead.Flink;
+ if (current_entry == &DirtySegmentListHead)
+ {
+ DPRINT("No Dirty pages\n");
+ }
+ while (current_entry != &DirtySegmentListHead && Target > 0)
+ {
+ current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
+ DirtySegmentListEntry);
+ current_entry = current_entry->Flink;
+
+// Locked = current->Bcb->Callbacks.AcquireForLazyWrite(current->Bcb->Context, FALSE);
+ Locked = ExTryToAcquireResourceExclusiveLite(((FSRTL_COMMON_FCB_HEADER*)(current->Bcb->FileObject->FsContext))->Resource);
+ if (!Locked)
+ {
+ continue;
+ }
+ Locked = CcTryToAcquireBrokenMutex(¤t->Lock);
+ if (!Locked)
+ {
+// current->Bcb->Callbacks.ReleaseFromLazyWrite(current->Bcb->Context);
+ ExReleaseResourceLite(((FSRTL_COMMON_FCB_HEADER*)(current->Bcb->FileObject->FsContext))->Resource);
+ continue;
+ }
+ ASSERT(current->Dirty);
+ if (current->ReferenceCount > 1)
+ {
+ ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(¤t->Lock);
+// current->Bcb->Callbacks.ReleaseFromLazyWrite(current->Bcb->Context);
+ ExReleaseResourceLite(((FSRTL_COMMON_FCB_HEADER*)(current->Bcb->FileObject->FsContext))->Resource);
+ continue;
+ }
+ ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
+ PagesPerSegment = current->Bcb->CacheSegmentSize / PAGE_SIZE;
+ Status = CcRosFlushCacheSegment(current);
+ ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(¤t->Lock);
+// current->Bcb->Callbacks.ReleaseFromLazyWrite(current->Bcb->Context);
+ ExReleaseResourceLite(((FSRTL_COMMON_FCB_HEADER*)(current->Bcb->FileObject->FsContext))->Resource);
+ if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
+ {
+ DPRINT1("CC: Failed to flush cache segment.\n");
+ }
+ else
+ {
+ (*Count) += PagesPerSegment;
+ Target -= PagesPerSegment;
+ }
+ ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
+ current_entry = DirtySegmentListHead.Flink;
+ }
+ if (*Count < NewTarget)
+ {
+ WriteCount[1] += (NewTarget - *Count);
+ }
+ ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
+ DPRINT("CcRosFlushDirtyPages() finished\n");
+
+ return(STATUS_SUCCESS);
+}
+
NTSTATUS
CcRosTrimCache(ULONG Target, ULONG Priority, PULONG NrFreed)
/*
* ARGUMENTS:
* Target - The number of pages to be freed.
* Priority - The priority of free (currently unused).
- * NrFreed - Points to a variable where the number of pages
+ * NrFreed - Points to a variable where the number of pages
* actually freed is returned.
*/
{
PLIST_ENTRY current_entry;
- PCACHE_SEGMENT current;
+ PCACHE_SEGMENT current, last = NULL;
ULONG PagesPerSegment;
ULONG PagesFreed;
- BOOLEAN Locked;
+ KIRQL oldIrql;
+ LIST_ENTRY FreeList;
DPRINT("CcRosTrimCache(Target %d)\n", Target);
*NrFreed = 0;
- ExAcquireFastMutex(&ViewLock);
+ InitializeListHead(&FreeList);
+
+ ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
current_entry = CacheSegmentLRUListHead.Flink;
while (current_entry != &CacheSegmentLRUListHead && Target > 0)
{
- current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
+ current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
CacheSegmentLRUListEntry);
current_entry = current_entry->Flink;
- Locked = ExTryToAcquireFastMutex(¤t->Lock);
- if (!Locked)
- {
- continue;
- }
- if (current->MappedCount > 0 || current->Dirty ||
- current->ReferenceCount > 0)
- {
- ExReleaseFastMutex(¤t->Lock);
- continue;
- }
- ExReleaseFastMutex(¤t->Lock);
- DPRINT("current->Bcb->CacheSegmentSize %d\n",
- current->Bcb->CacheSegmentSize);
- PagesPerSegment = current->Bcb->CacheSegmentSize / PAGESIZE;
- CcRosInternalFreeCacheSegment(current->Bcb, current);
- DPRINT("CcRosTrimCache(): Freed %d\n", PagesPerSegment);
- PagesFreed = min(PagesPerSegment, Target);
- Target = Target - PagesFreed;
- (*NrFreed) = (*NrFreed) + PagesFreed;
- }
- ExReleaseFastMutex(&ViewLock);
+
+ KeAcquireSpinLock(¤t->Bcb->BcbLock, &oldIrql);
+ if (current->ReferenceCount == 0)
+ {
+ RemoveEntryList(¤t->BcbSegmentListEntry);
+ KeReleaseSpinLock(¤t->Bcb->BcbLock, oldIrql);
+ RemoveEntryList(¤t->CacheSegmentListEntry);
+ RemoveEntryList(¤t->CacheSegmentLRUListEntry);
+ InsertHeadList(&FreeList, ¤t->BcbSegmentListEntry);
+ PagesPerSegment = current->Bcb->CacheSegmentSize / PAGE_SIZE;
+ PagesFreed = min(PagesPerSegment, Target);
+ Target -= PagesFreed;
+ (*NrFreed) += PagesFreed;
+ }
+ else
+ {
+ if (last != current && current->MappedCount > 0 && !current->Dirty && !current->PageOut)
+ {
+ ULONG i;
+ NTSTATUS Status;
+
+ CcRosCacheSegmentIncRefCount(current);
+ last = current;
+ current->PageOut = TRUE;
+ KeReleaseSpinLock(¤t->Bcb->BcbLock, oldIrql);
+ ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
+ for (i = 0; i < current->Bcb->CacheSegmentSize / PAGE_SIZE; i++)
+ {
+ PFN_TYPE Page;
+ Page = MmGetPhysicalAddress((char*)current->BaseAddress + i * PAGE_SIZE).QuadPart >> PAGE_SHIFT;
+ Status = MmPageOutPhysicalAddress(Page);
+ if (!NT_SUCCESS(Status))
+ {
+ break;
+ }
+ }
+ ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
+ KeAcquireSpinLock(¤t->Bcb->BcbLock, &oldIrql);
+ CcRosCacheSegmentDecRefCount(current);
+ current->PageOut = FALSE;
+ KeReleaseSpinLock(¤t->Bcb->BcbLock, oldIrql);
+ current_entry = ¤t->CacheSegmentLRUListEntry;
+ continue;
+ }
+ KeReleaseSpinLock(¤t->Bcb->BcbLock, oldIrql);
+ }
+ }
+ ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
+
+ while (!IsListEmpty(&FreeList))
+ {
+ current_entry = RemoveHeadList(&FreeList);
+ current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
+ BcbSegmentListEntry);
+ CcRosInternalFreeCacheSegment(current);
+ }
+
DPRINT("CcRosTrimCache() finished\n");
return(STATUS_SUCCESS);
}
-NTSTATUS STDCALL
+NTSTATUS
+NTAPI
CcRosReleaseCacheSegment(PBCB Bcb,
PCACHE_SEGMENT CacheSeg,
BOOLEAN Valid,
BOOLEAN Dirty,
BOOLEAN Mapped)
{
- DPRINT("CcReleaseCachePage(Bcb %x, CacheSeg %x, Valid %d)\n",
- Bcb, CacheSeg, Valid);
-
- CacheSeg->Valid = Valid;
- CacheSeg->Dirty = CacheSeg->Dirty || Dirty;
- if (Mapped)
- {
- CacheSeg->MappedCount++;
- }
- ExReleaseFastMutex(&CacheSeg->Lock);
- ExAcquireFastMutex(&ViewLock);
- RemoveEntryList(&CacheSeg->CacheSegmentLRUListEntry);
- InsertTailList(&CacheSegmentLRUListHead,
- &CacheSeg->CacheSegmentLRUListEntry);
- ExReleaseFastMutex(&ViewLock);
- InterlockedDecrement(&CacheSeg->ReferenceCount);
-
- DPRINT("CcReleaseCachePage() finished\n");
-
- return(STATUS_SUCCESS);
+ BOOLEAN WasDirty = CacheSeg->Dirty;
+ KIRQL oldIrql;
+
+ ASSERT(Bcb);
+
+ DPRINT("CcReleaseCacheSegment(Bcb 0x%p, CacheSeg 0x%p, Valid %d)\n",
+ Bcb, CacheSeg, Valid);
+
+ CacheSeg->Valid = Valid;
+ CacheSeg->Dirty = CacheSeg->Dirty || Dirty;
+
+ ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
+ if (!WasDirty && CacheSeg->Dirty)
+ {
+ InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
+ DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
+ }
+ RemoveEntryList(&CacheSeg->CacheSegmentLRUListEntry);
+ InsertTailList(&CacheSegmentLRUListHead, &CacheSeg->CacheSegmentLRUListEntry);
+
+ if (Mapped)
+ {
+ CacheSeg->MappedCount++;
+ }
+ KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
+ CcRosCacheSegmentDecRefCount(CacheSeg);
+ if (Mapped && CacheSeg->MappedCount == 1)
+ {
+ CcRosCacheSegmentIncRefCount(CacheSeg);
+ }
+ if (!WasDirty && CacheSeg->Dirty)
+ {
+ CcRosCacheSegmentIncRefCount(CacheSeg);
+ }
+ KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
+ ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
+ ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&CacheSeg->Lock);
+
+ return(STATUS_SUCCESS);
}
-PCACHE_SEGMENT CcRosLookupCacheSegment(PBCB Bcb, ULONG FileOffset)
+PCACHE_SEGMENT
+NTAPI
+CcRosLookupCacheSegment(PBCB Bcb, ULONG FileOffset)
{
PLIST_ENTRY current_entry;
PCACHE_SEGMENT current;
KIRQL oldIrql;
+ ASSERT(Bcb);
+
+ DPRINT("CcRosLookupCacheSegment(Bcb -x%p, FileOffset %d)\n", Bcb, FileOffset);
+
KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
current_entry = Bcb->BcbSegmentListHead.Flink;
while (current_entry != &Bcb->BcbSegmentListHead)
{
- current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
+ current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
BcbSegmentListEntry);
if (current->FileOffset <= FileOffset &&
(current->FileOffset + Bcb->CacheSegmentSize) > FileOffset)
{
+ CcRosCacheSegmentIncRefCount(current);
KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
+ ExEnterCriticalRegionAndAcquireFastMutexUnsafe(¤t->Lock);
return(current);
}
current_entry = current_entry->Flink;
}
NTSTATUS
-CcRosSuggestFreeCacheSegment(PBCB Bcb, ULONG FileOffset, BOOLEAN NowDirty)
+NTAPI
+CcRosMarkDirtyCacheSegment(PBCB Bcb, ULONG FileOffset)
{
PCACHE_SEGMENT CacheSeg;
+ KIRQL oldIrql;
+
+ ASSERT(Bcb);
+
+ DPRINT("CcRosMarkDirtyCacheSegment(Bcb 0x%p, FileOffset %d)\n", Bcb, FileOffset);
- ExAcquireFastMutex(&ViewLock);
CacheSeg = CcRosLookupCacheSegment(Bcb, FileOffset);
if (CacheSeg == NULL)
{
- KeBugCheck(0);
+ KEBUGCHECKCC;
}
- ExAcquireFastMutex(&CacheSeg->Lock);
- if (CacheSeg->MappedCount > 0)
+ if (!CacheSeg->Dirty)
{
- KeBugCheck(0);
+ ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
+ InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
+ DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
+ ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
}
- CacheSeg->Dirty = CacheSeg->Dirty || NowDirty;
- if (CacheSeg->Dirty || CacheSeg->ReferenceCount > 0)
- {
- ExReleaseFastMutex(&CacheSeg->Lock);
- ExReleaseFastMutex(&ViewLock);
- return(STATUS_UNSUCCESSFUL);
- }
- ExReleaseFastMutex(&CacheSeg->Lock);
- CcRosInternalFreeCacheSegment(CacheSeg->Bcb, CacheSeg);
- ExReleaseFastMutex(&ViewLock);
+ else
+ {
+ KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
+ CcRosCacheSegmentDecRefCount(CacheSeg);
+ KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
+ }
+
+
+ CacheSeg->Dirty = TRUE;
+ ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&CacheSeg->Lock);
+
return(STATUS_SUCCESS);
}
NTSTATUS
+NTAPI
CcRosUnmapCacheSegment(PBCB Bcb, ULONG FileOffset, BOOLEAN NowDirty)
{
PCACHE_SEGMENT CacheSeg;
+ BOOLEAN WasDirty;
+ KIRQL oldIrql;
+
+ ASSERT(Bcb);
+
+ DPRINT("CcRosUnmapCacheSegment(Bcb 0x%p, FileOffset %d, NowDirty %d)\n",
+ Bcb, FileOffset, NowDirty);
- ExAcquireFastMutex(&ViewLock);
CacheSeg = CcRosLookupCacheSegment(Bcb, FileOffset);
if (CacheSeg == NULL)
{
- ExReleaseFastMutex(&ViewLock);
return(STATUS_UNSUCCESSFUL);
}
- CacheSeg->ReferenceCount++;
- ExReleaseFastMutex(&ViewLock);
- ExAcquireFastMutex(&CacheSeg->Lock);
- CacheSeg->MappedCount--;
+
+ WasDirty = CacheSeg->Dirty;
CacheSeg->Dirty = CacheSeg->Dirty || NowDirty;
- ExReleaseFastMutex(&CacheSeg->Lock);
+
+ CacheSeg->MappedCount--;
+
+ if (!WasDirty && NowDirty)
+ {
+ ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
+ InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
+ DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
+ ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
+ }
+
+ KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
+ CcRosCacheSegmentDecRefCount(CacheSeg);
+ if (!WasDirty && NowDirty)
+ {
+ CcRosCacheSegmentIncRefCount(CacheSeg);
+ }
+ if (CacheSeg->MappedCount == 0)
+ {
+ CcRosCacheSegmentDecRefCount(CacheSeg);
+ }
+ KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
+
+ ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&CacheSeg->Lock);
return(STATUS_SUCCESS);
}
NTSTATUS STATIC
CcRosCreateCacheSegment(PBCB Bcb,
ULONG FileOffset,
- PCACHE_SEGMENT* CacheSeg,
- BOOLEAN Lock)
+ PCACHE_SEGMENT* CacheSeg)
{
ULONG i;
PCACHE_SEGMENT current;
+ PCACHE_SEGMENT previous;
+ PLIST_ENTRY current_entry;
NTSTATUS Status;
KIRQL oldIrql;
+ PPFN_TYPE Pfn;
+#ifdef CACHE_BITMAP
+ ULONG StartingOffset;
+#else
+#endif
+ PHYSICAL_ADDRESS BoundaryAddressMultiple;
- current = ExAllocatePoolWithTag(NonPagedPool, sizeof(CACHE_SEGMENT),
- TAG_CSEG);
+ ASSERT(Bcb);
- MmLockAddressSpace(MmGetKernelAddressSpace());
- current->BaseAddress = NULL;
- Status = MmCreateMemoryArea(KernelMode,
- MmGetKernelAddressSpace(),
- MEMORY_AREA_CACHE_SEGMENT,
- ¤t->BaseAddress,
- Bcb->CacheSegmentSize,
- PAGE_READWRITE,
- (PMEMORY_AREA*)¤t->MemoryArea,
- FALSE);
- if (!NT_SUCCESS(Status))
- {
- MmUnlockAddressSpace(MmGetKernelAddressSpace());
- KeBugCheck(0);
- }
- MmUnlockAddressSpace(MmGetKernelAddressSpace());
+ DPRINT("CcRosCreateCacheSegment()\n");
+
+ BoundaryAddressMultiple.QuadPart = 0;
+ if (FileOffset >= Bcb->FileSize.u.LowPart)
+ {
+ CacheSeg = NULL;
+ return STATUS_INVALID_PARAMETER;
+ }
+
+ current = ExAllocateFromNPagedLookasideList(&CacheSegLookasideList);
current->Valid = FALSE;
current->Dirty = FALSE;
+ current->PageOut = FALSE;
current->FileOffset = ROUND_DOWN(FileOffset, Bcb->CacheSegmentSize);
current->Bcb = Bcb;
+#if defined(DBG) || defined(KDBG)
+ if ( Bcb->Trace )
+ {
+ DPRINT1("CacheMap 0x%p: new Cache Segment: 0x%p\n", Bcb, current );
+ }
+#endif
current->MappedCount = 0;
- ExInitializeFastMutex(¤t->Lock);
+ current->DirtySegmentListEntry.Flink = NULL;
+ current->DirtySegmentListEntry.Blink = NULL;
current->ReferenceCount = 1;
+ ExInitializeFastMutex(¤t->Lock);
+ ExEnterCriticalRegionAndAcquireFastMutexUnsafe(¤t->Lock);
+ ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
+
+ *CacheSeg = current;
+ /* There is a window between the call to CcRosLookupCacheSegment
+ * and CcRosCreateCacheSegment. We must check whether a segment for
+ * this file offset already exists. If one does, we release our
+ * newly created segment and return the existing one.
+ */
KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
- InsertTailList(&Bcb->BcbSegmentListHead, ¤t->BcbSegmentListEntry);
- KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
- InsertTailList(&CacheSegmentListHead, ¤t->CacheSegmentListEntry);
- InsertTailList(&CacheSegmentLRUListHead,
- ¤t->CacheSegmentLRUListEntry);
- current->DirtySegmentListEntry.Flink =
- current->DirtySegmentListEntry.Blink = NULL;
- if (Lock)
- {
- ExAcquireFastMutex(¤t->Lock);
- }
- ExReleaseFastMutex(&ViewLock);
- for (i = 0; i < (Bcb->CacheSegmentSize / PAGESIZE); i++)
- {
- PHYSICAL_ADDRESS Page;
-
- Status = MmRequestPageMemoryConsumer(MC_CACHE, TRUE, &Page);
- if (!NT_SUCCESS(Status))
+ current_entry = Bcb->BcbSegmentListHead.Flink;
+ previous = NULL;
+ while (current_entry != &Bcb->BcbSegmentListHead)
+ {
+ current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
+ BcbSegmentListEntry);
+ if (current->FileOffset <= FileOffset &&
+ (current->FileOffset + Bcb->CacheSegmentSize) > FileOffset)
+ {
+ CcRosCacheSegmentIncRefCount(current);
+ KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
+#if defined(DBG) || defined(KDBG)
+ if ( Bcb->Trace )
{
- KeBugCheck(0);
+ DPRINT1("CacheMap 0x%p: deleting newly created Cache Segment 0x%p ( found existing one 0x%p )\n",
+ Bcb,
+ (*CacheSeg),
+ current );
}
-
- Status = MmCreateVirtualMapping(NULL,
- current->BaseAddress + (i * PAGESIZE),
- PAGE_READWRITE,
- Page,
- TRUE);
- if (!NT_SUCCESS(Status))
+#endif
+ ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&(*CacheSeg)->Lock);
+ ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
+ ExFreeToNPagedLookasideList(&CacheSegLookasideList, *CacheSeg);
+ *CacheSeg = current;
+ ExEnterCriticalRegionAndAcquireFastMutexUnsafe(¤t->Lock);
+ return STATUS_SUCCESS;
+ }
+ if (current->FileOffset < FileOffset)
+ {
+ if (previous == NULL)
{
- KeBugCheck(0);
+ previous = current;
}
- }
- *CacheSeg = current;
+ else
+ {
+ if (previous->FileOffset < current->FileOffset)
+ {
+ previous = current;
+ }
+ }
+ }
+ current_entry = current_entry->Flink;
+ }
+ /* There was no existing segment. */
+ current = *CacheSeg;
+ if (previous)
+ {
+ InsertHeadList(&previous->BcbSegmentListEntry, ¤t->BcbSegmentListEntry);
+ }
+ else
+ {
+ InsertHeadList(&Bcb->BcbSegmentListHead, ¤t->BcbSegmentListEntry);
+ }
+ KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
+ InsertTailList(&CacheSegmentListHead, ¤t->CacheSegmentListEntry);
+ InsertTailList(&CacheSegmentLRUListHead, ¤t->CacheSegmentLRUListEntry);
+ ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
+#ifdef CACHE_BITMAP
+ KeAcquireSpinLock(&CiCacheSegMappingRegionLock, &oldIrql);
+
+ StartingOffset = RtlFindClearBitsAndSet(&CiCacheSegMappingRegionAllocMap, Bcb->CacheSegmentSize / PAGE_SIZE, CiCacheSegMappingRegionHint);
+
+ if (StartingOffset == 0xffffffff)
+ {
+ DPRINT1("Out of CacheSeg mapping space\n");
+ KEBUGCHECKCC;
+ }
+
+ current->BaseAddress = CiCacheSegMappingRegionBase + StartingOffset * PAGE_SIZE;
+
+ if (CiCacheSegMappingRegionHint == StartingOffset)
+ {
+ CiCacheSegMappingRegionHint += Bcb->CacheSegmentSize / PAGE_SIZE;
+ }
+
+ KeReleaseSpinLock(&CiCacheSegMappingRegionLock, oldIrql);
+#else
+ MmLockAddressSpace(MmGetKernelAddressSpace());
+ current->BaseAddress = NULL;
+ Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
+ MEMORY_AREA_CACHE_SEGMENT,
+ ¤t->BaseAddress,
+ Bcb->CacheSegmentSize,
+ PAGE_READWRITE,
+ (PMEMORY_AREA*)¤t->MemoryArea,
+ FALSE,
+ 0,
+ BoundaryAddressMultiple);
+ MmUnlockAddressSpace(MmGetKernelAddressSpace());
+ if (!NT_SUCCESS(Status))
+ {
+ KEBUGCHECKCC;
+ }
+#endif
+ Pfn = alloca(sizeof(PFN_TYPE) * (Bcb->CacheSegmentSize / PAGE_SIZE));
+ for (i = 0; i < (Bcb->CacheSegmentSize / PAGE_SIZE); i++)
+ {
+ Status = MmRequestPageMemoryConsumer(MC_CACHE, TRUE, &Pfn[i]);
+ if (!NT_SUCCESS(Status))
+ {
+ KEBUGCHECKCC;
+ }
+ }
+ Status = MmCreateVirtualMapping(NULL,
+ current->BaseAddress,
+ PAGE_READWRITE,
+ Pfn,
+ Bcb->CacheSegmentSize / PAGE_SIZE);
+ if (!NT_SUCCESS(Status))
+ {
+ KEBUGCHECKCC;
+ }
return(STATUS_SUCCESS);
}
NTSTATUS
+NTAPI
CcRosGetCacheSegmentChain(PBCB Bcb,
ULONG FileOffset,
ULONG Length,
PCACHE_SEGMENT current;
ULONG i;
PCACHE_SEGMENT* CacheSegList;
- PCACHE_SEGMENT Previous;
+ PCACHE_SEGMENT Previous = NULL;
+
+ ASSERT(Bcb);
+
+ DPRINT("CcRosGetCacheSegmentChain()\n");
Length = ROUND_UP(Length, Bcb->CacheSegmentSize);
- CacheSegList = alloca(sizeof(PCACHE_SEGMENT) *
+#if defined(__GNUC__)
+ CacheSegList = alloca(sizeof(PCACHE_SEGMENT) *
+ (Length / Bcb->CacheSegmentSize));
+#elif defined(_MSC_VER)
+ CacheSegList = _alloca(sizeof(PCACHE_SEGMENT) *
(Length / Bcb->CacheSegmentSize));
+#else
+#error Unknown compiler for alloca intrinsic stack allocation "function"
+#endif
- /*
- * Acquire the global lock.
- */
- ExAcquireFastMutex(&ViewLock);
-
/*
* Look for a cache segment already mapping the same data.
*/
current = CcRosLookupCacheSegment(Bcb, CurrentOffset);
if (current != NULL)
{
- /*
- * Make sure the cache segment can't go away outside of our control.
- */
- current->ReferenceCount++;
CacheSegList[i] = current;
}
else
{
- CcRosCreateCacheSegment(Bcb, CurrentOffset, ¤t, FALSE);
+ CcRosCreateCacheSegment(Bcb, CurrentOffset, ¤t);
CacheSegList[i] = current;
- ExAcquireFastMutex(&ViewLock);
}
}
- ExReleaseFastMutex(&ViewLock);
for (i = 0; i < (Length / Bcb->CacheSegmentSize); i++)
{
- ExAcquireFastMutex(&CacheSegList[i]->Lock);
if (i == 0)
{
*CacheSeg = CacheSegList[i];
}
}
Previous->NextInChain = NULL;
-
+
return(STATUS_SUCCESS);
}
NTSTATUS
+NTAPI
CcRosGetCacheSegment(PBCB Bcb,
ULONG FileOffset,
PULONG BaseOffset,
PCACHE_SEGMENT current;
NTSTATUS Status;
- /*
- * Acquire the global lock.
- */
- ExAcquireFastMutex(&ViewLock);
+ ASSERT(Bcb);
+
+ DPRINT("CcRosGetCacheSegment()\n");
/*
* Look for a cache segment already mapping the same data.
*/
current = CcRosLookupCacheSegment(Bcb, FileOffset);
- if (current != NULL)
- {
- /*
- * Make sure the cache segment can't go away outside of our control.
- */
- current->ReferenceCount++;
- /*
- * Release the global lock and lock the cache segment.
- */
- ExReleaseFastMutex(&ViewLock);
- ExAcquireFastMutex(¤t->Lock);
- /*
- * Return information about the segment to the caller.
- */
- *UptoDate = current->Valid;
- *BaseAddress = current->BaseAddress;
- DPRINT("*BaseAddress 0x%.8X\n", *BaseAddress);
- *CacheSeg = current;
- *BaseOffset = current->FileOffset;
- return(STATUS_SUCCESS);
- }
-
+ if (current == NULL)
+ {
+ /*
+ * Otherwise create a new segment.
+ */
+ Status = CcRosCreateCacheSegment(Bcb, FileOffset, ¤t);
+ if (!NT_SUCCESS(Status))
+ {
+ return Status;
+ }
+ }
/*
- * Otherwise create a new segment.
+ * Return information about the segment to the caller.
*/
- Status = CcRosCreateCacheSegment(Bcb, FileOffset, ¤t, TRUE);
*UptoDate = current->Valid;
*BaseAddress = current->BaseAddress;
DPRINT("*BaseAddress 0x%.8X\n", *BaseAddress);
*CacheSeg = current;
*BaseOffset = current->FileOffset;
-
return(STATUS_SUCCESS);
}
-NTSTATUS STDCALL
+NTSTATUS STDCALL
CcRosRequestCacheSegment(PBCB Bcb,
ULONG FileOffset,
PVOID* BaseAddress,
*/
{
ULONG BaseOffset;
-
+
+ ASSERT(Bcb);
+
if ((FileOffset % Bcb->CacheSegmentSize) != 0)
{
CPRINT("Bad fileoffset %x should be multiple of %x",
FileOffset, Bcb->CacheSegmentSize);
- KeBugCheck(0);
+ KEBUGCHECKCC;
}
return(CcRosGetCacheSegment(Bcb,
UptoDate,
CacheSeg));
}
-
-STATIC VOID
-CcFreeCachePage(PVOID Context, MEMORY_AREA* MemoryArea, PVOID Address,
- PHYSICAL_ADDRESS PhysAddr, SWAPENTRY SwapEntry, BOOLEAN Dirty)
+#ifdef CACHE_BITMAP
+#else
+STATIC VOID
+CcFreeCachePage(PVOID Context, MEMORY_AREA* MemoryArea, PVOID Address,
+ PFN_TYPE Page, SWAPENTRY SwapEntry, BOOLEAN Dirty)
{
- assert(SwapEntry == 0);
- if (PhysAddr.QuadPart != 0)
+ ASSERT(SwapEntry == 0);
+ if (Page != 0)
{
- MmReleasePageMemoryConsumer(MC_CACHE, PhysAddr);
+ MmReleasePageMemoryConsumer(MC_CACHE, Page);
}
}
-
-NTSTATUS STDCALL
-CcRosInternalFreeCacheSegment(PBCB Bcb, PCACHE_SEGMENT CacheSeg)
+#endif
+NTSTATUS
+CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg)
/*
* FUNCTION: Releases a cache segment associated with a BCB
*/
{
- DPRINT("Freeing cache segment %x\n", CacheSeg);
- RemoveEntryList(&CacheSeg->CacheSegmentListEntry);
- RemoveEntryList(&CacheSeg->CacheSegmentLRUListEntry);
- RemoveEntryList(&CacheSeg->BcbSegmentListEntry);
+#ifdef CACHE_BITMAP
+ ULONG i;
+ ULONG RegionSize;
+ ULONG Base;
+ PFN_TYPE Page;
+ KIRQL oldIrql;
+#endif
+ DPRINT("Freeing cache segment 0x%p\n", CacheSeg);
+#if defined(DBG) || defined(KDBG)
+ if ( CacheSeg->Bcb->Trace )
+ {
+ DPRINT1("CacheMap 0x%p: deleting Cache Segment: 0x%p\n", CacheSeg->Bcb, CacheSeg );
+ }
+#endif
+#ifdef CACHE_BITMAP
+ RegionSize = CacheSeg->Bcb->CacheSegmentSize / PAGE_SIZE;
+
+ /* Unmap all the pages. */
+ for (i = 0; i < RegionSize; i++)
+ {
+ MmDeleteVirtualMapping(NULL,
+ CacheSeg->BaseAddress + (i * PAGE_SIZE),
+ FALSE,
+ NULL,
+ &Page);
+ MmReleasePageMemoryConsumer(MC_CACHE, Page);
+ }
+
+ KeAcquireSpinLock(&CiCacheSegMappingRegionLock, &oldIrql);
+ /* Deallocate all the pages used. */
+ Base = (ULONG)(CacheSeg->BaseAddress - CiCacheSegMappingRegionBase) / PAGE_SIZE;
+
+ RtlClearBits(&CiCacheSegMappingRegionAllocMap, Base, RegionSize);
+
+ CiCacheSegMappingRegionHint = min (CiCacheSegMappingRegionHint, Base);
+
+ KeReleaseSpinLock(&CiCacheSegMappingRegionLock, oldIrql);
+#else
MmLockAddressSpace(MmGetKernelAddressSpace());
MmFreeMemoryArea(MmGetKernelAddressSpace(),
- CacheSeg->BaseAddress,
- Bcb->CacheSegmentSize,
+ CacheSeg->MemoryArea,
CcFreeCachePage,
NULL);
MmUnlockAddressSpace(MmGetKernelAddressSpace());
- ExFreePool(CacheSeg);
+#endif
+ ExFreeToNPagedLookasideList(&CacheSegLookasideList, CacheSeg);
return(STATUS_SUCCESS);
}
-NTSTATUS STDCALL
+NTSTATUS
+NTAPI
CcRosFreeCacheSegment(PBCB Bcb, PCACHE_SEGMENT CacheSeg)
{
NTSTATUS Status;
- ExAcquireFastMutex(&ViewLock);
- Status = CcRosInternalFreeCacheSegment(Bcb, CacheSeg);
- ExReleaseFastMutex(&ViewLock);
+ KIRQL oldIrql;
+
+ ASSERT(Bcb);
+
+ DPRINT("CcRosFreeCacheSegment(Bcb 0x%p, CacheSeg 0x%p)\n",
+ Bcb, CacheSeg);
+
+ ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
+ KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
+ RemoveEntryList(&CacheSeg->BcbSegmentListEntry);
+ RemoveEntryList(&CacheSeg->CacheSegmentListEntry);
+ RemoveEntryList(&CacheSeg->CacheSegmentLRUListEntry);
+ if (CacheSeg->Dirty)
+ {
+ RemoveEntryList(&CacheSeg->DirtySegmentListEntry);
+ DirtyPageCount -= Bcb->CacheSegmentSize / PAGE_SIZE;
+
+ }
+ KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
+ ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
+
+ Status = CcRosInternalFreeCacheSegment(CacheSeg);
return(Status);
}
-NTSTATUS STDCALL
-CcRosReleaseFileCache(PFILE_OBJECT FileObject, PBCB Bcb)
+/*
+ * @implemented
+ */
+VOID STDCALL
+CcFlushCache(IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
+ IN PLARGE_INTEGER FileOffset OPTIONAL,
+ IN ULONG Length,
+ OUT PIO_STATUS_BLOCK IoStatus)
+{
+ PBCB Bcb;
+ LARGE_INTEGER Offset;
+ PCACHE_SEGMENT current;
+ NTSTATUS Status;
+ KIRQL oldIrql;
+
+ DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %d, IoStatus 0x%p)\n",
+ SectionObjectPointers, FileOffset, Length, IoStatus);
+
+ if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
+ {
+ Bcb = (PBCB)SectionObjectPointers->SharedCacheMap;
+ ASSERT(Bcb);
+ if (FileOffset)
+ {
+ Offset = *FileOffset;
+ }
+ else
+ {
+ Offset.QuadPart = (LONGLONG)0;
+ Length = Bcb->FileSize.u.LowPart;
+ }
+
+ if (IoStatus)
+ {
+ IoStatus->Status = STATUS_SUCCESS;
+ IoStatus->Information = 0;
+ }
+
+ while (Length > 0)
+ {
+ current = CcRosLookupCacheSegment (Bcb, Offset.u.LowPart);
+ if (current != NULL)
+ {
+ if (current->Dirty)
+ {
+ Status = CcRosFlushCacheSegment(current);
+ if (!NT_SUCCESS(Status) && IoStatus != NULL)
+ {
+ IoStatus->Status = Status;
+ }
+ }
+ KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
+            ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&current->Lock);
+ CcRosCacheSegmentDecRefCount(current);
+ KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
+ }
+
+ Offset.QuadPart += Bcb->CacheSegmentSize;
+ if (Length > Bcb->CacheSegmentSize)
+ {
+ Length -= Bcb->CacheSegmentSize;
+ }
+ else
+ {
+ Length = 0;
+ }
+ }
+ }
+ else
+ {
+ if (IoStatus)
+ {
+ IoStatus->Status = STATUS_INVALID_PARAMETER;
+ }
+ }
+}
+
+NTSTATUS
+NTAPI
+CcRosDeleteFileCache(PFILE_OBJECT FileObject, PBCB Bcb)
/*
* FUNCTION: Releases the BCB associated with a file object
*/
{
PLIST_ENTRY current_entry;
PCACHE_SEGMENT current;
-
- DPRINT("CcRosReleaseFileCache(FileObject %x, Bcb %x)\n", Bcb->FileObject,
- Bcb);
+ NTSTATUS Status;
+ LIST_ENTRY FreeList;
+ KIRQL oldIrql;
- MmFreeSectionSegments(Bcb->FileObject);
-
- /*
- * Release all cache segments.
- */
- current_entry = Bcb->BcbSegmentListHead.Flink;
- while (current_entry != &Bcb->BcbSegmentListHead)
- {
- current =
- CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
- current_entry = current_entry->Flink;
- CcRosFreeCacheSegment(Bcb, current);
- }
+ ASSERT(Bcb);
+
+ Bcb->RefCount++;
+ ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
+
+ CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);
+
+ ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
+ Bcb->RefCount--;
+ if (Bcb->RefCount == 0)
+ {
+ if (Bcb->BcbRemoveListEntry.Flink != NULL)
+ {
+ RemoveEntryList(&Bcb->BcbRemoveListEntry);
+ Bcb->BcbRemoveListEntry.Flink = NULL;
+ }
- ObDereferenceObject (Bcb->FileObject);
- ExFreePool(Bcb);
+ FileObject->SectionObjectPointer->SharedCacheMap = NULL;
+ /*
+ * Release all cache segments.
+ */
+ InitializeListHead(&FreeList);
+ KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
+ current_entry = Bcb->BcbSegmentListHead.Flink;
+ while (!IsListEmpty(&Bcb->BcbSegmentListHead))
+ {
+ current_entry = RemoveTailList(&Bcb->BcbSegmentListHead);
+ current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
+         RemoveEntryList(&current->CacheSegmentListEntry);
+         RemoveEntryList(&current->CacheSegmentLRUListEntry);
+ if (current->Dirty)
+ {
+            RemoveEntryList(&current->DirtySegmentListEntry);
+ DirtyPageCount -= Bcb->CacheSegmentSize / PAGE_SIZE;
+ DPRINT1("Freeing dirty segment\n");
+ }
+         InsertHeadList(&FreeList, &current->BcbSegmentListEntry);
+ }
+#if defined(DBG) || defined(KDBG)
+ Bcb->Trace = FALSE;
+#endif
+ KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
+
+ ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
+ ObDereferenceObject (Bcb->FileObject);
+
+ while (!IsListEmpty(&FreeList))
+ {
+ current_entry = RemoveTailList(&FreeList);
+ current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
+ Status = CcRosInternalFreeCacheSegment(current);
+ }
+ ExFreeToNPagedLookasideList(&BcbLookasideList, Bcb);
+ ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
+ }
return(STATUS_SUCCESS);
}
-NTSTATUS STDCALL
+VOID
+NTAPI
+CcRosReferenceCache(PFILE_OBJECT FileObject)
+{
+ PBCB Bcb;
+ ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
+ Bcb = (PBCB)FileObject->SectionObjectPointer->SharedCacheMap;
+ ASSERT(Bcb);
+ if (Bcb->RefCount == 0)
+ {
+ ASSERT(Bcb->BcbRemoveListEntry.Flink != NULL);
+ RemoveEntryList(&Bcb->BcbRemoveListEntry);
+ Bcb->BcbRemoveListEntry.Flink = NULL;
+
+ }
+ else
+ {
+ ASSERT(Bcb->BcbRemoveListEntry.Flink == NULL);
+ }
+ Bcb->RefCount++;
+ ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
+}
+
+VOID
+NTAPI
+CcRosSetRemoveOnClose(PSECTION_OBJECT_POINTERS SectionObjectPointer)
+{
+ PBCB Bcb;
+ DPRINT("CcRosSetRemoveOnClose()\n");
+ ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
+ Bcb = (PBCB)SectionObjectPointer->SharedCacheMap;
+ if (Bcb)
+ {
+ Bcb->RemoveOnClose = TRUE;
+ if (Bcb->RefCount == 0)
+ {
+ CcRosDeleteFileCache(Bcb->FileObject, Bcb);
+ }
+ }
+ ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
+}
+
+
+VOID
+NTAPI
+CcRosDereferenceCache(PFILE_OBJECT FileObject)
+{
+ PBCB Bcb;
+ ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
+ Bcb = (PBCB)FileObject->SectionObjectPointer->SharedCacheMap;
+ ASSERT(Bcb);
+ if (Bcb->RefCount > 0)
+ {
+ Bcb->RefCount--;
+ if (Bcb->RefCount == 0)
+ {
+ MmFreeSectionSegments(Bcb->FileObject);
+ if (Bcb->RemoveOnClose)
+ {
+ CcRosDeleteFileCache(FileObject, Bcb);
+ }
+ else
+ {
+ Bcb->TimeStamp = CcTimeStamp;
+ InsertHeadList(&ClosedListHead, &Bcb->BcbRemoveListEntry);
+ }
+ }
+ }
+ ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
+}
+
+NTSTATUS STDCALL
+CcRosReleaseFileCache(PFILE_OBJECT FileObject)
+/*
+ * FUNCTION: Called by the file system when a handle to a file object
+ * has been closed.
+ */
+{
+ PBCB Bcb;
+
+ ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
+
+ if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
+ {
+ Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
+ if (FileObject->PrivateCacheMap != NULL)
+ {
+ FileObject->PrivateCacheMap = NULL;
+ if (Bcb->RefCount > 0)
+ {
+ Bcb->RefCount--;
+ if (Bcb->RefCount == 0)
+ {
+ MmFreeSectionSegments(Bcb->FileObject);
+ if (Bcb->RemoveOnClose)
+ {
+ CcRosDeleteFileCache(FileObject, Bcb);
+ }
+ else
+ {
+ Bcb->TimeStamp = CcTimeStamp;
+ InsertHeadList(&ClosedListHead, &Bcb->BcbRemoveListEntry);
+ }
+ }
+ }
+ }
+ }
+ ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
+ return(STATUS_SUCCESS);
+}
+
+NTSTATUS
+NTAPI
+CcTryToInitializeFileCache(PFILE_OBJECT FileObject)
+{
+ PBCB Bcb;
+ NTSTATUS Status;
+
+ ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
+
+ Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
+ if (Bcb == NULL)
+ {
+ Status = STATUS_UNSUCCESSFUL;
+ }
+ else
+ {
+ if (FileObject->PrivateCacheMap == NULL)
+ {
+ FileObject->PrivateCacheMap = Bcb;
+ Bcb->RefCount++;
+ }
+ if (Bcb->BcbRemoveListEntry.Flink != NULL)
+ {
+ RemoveEntryList(&Bcb->BcbRemoveListEntry);
+ Bcb->BcbRemoveListEntry.Flink = NULL;
+ }
+ Status = STATUS_SUCCESS;
+ }
+ ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
+
+ return Status;
+}
+
+
+NTSTATUS STDCALL
CcRosInitializeFileCache(PFILE_OBJECT FileObject,
- PBCB* Bcb,
- ULONG CacheSegmentSize)
+ ULONG CacheSegmentSize)
/*
* FUNCTION: Initializes a BCB for a file object
*/
-{
- (*Bcb) = ExAllocatePoolWithTag(NonPagedPool, sizeof(BCB), TAG_BCB);
- if ((*Bcb) == NULL)
- {
+{
+ PBCB Bcb;
+
+ Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
+ DPRINT("CcRosInitializeFileCache(FileObject 0x%p, Bcb 0x%p, CacheSegmentSize %d)\n",
+ FileObject, Bcb, CacheSegmentSize);
+
+ ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
+ if (Bcb == NULL)
+ {
+ Bcb = ExAllocateFromNPagedLookasideList(&BcbLookasideList);
+ if (Bcb == NULL)
+ {
+ ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
return(STATUS_UNSUCCESSFUL);
- }
-
- ObReferenceObjectByPointer(FileObject,
- FILE_ALL_ACCESS,
- NULL,
- KernelMode);
- (*Bcb)->FileObject = FileObject;
- (*Bcb)->CacheSegmentSize = CacheSegmentSize;
- if (FileObject->FsContext)
- {
- (*Bcb)->AllocationSize =
- ((REACTOS_COMMON_FCB_HEADER*)FileObject->FsContext)->AllocationSize;
- (*Bcb)->FileSize =
- ((REACTOS_COMMON_FCB_HEADER*)FileObject->FsContext)->FileSize;
- }
- KeInitializeSpinLock(&(*Bcb)->BcbLock);
- InitializeListHead(&(*Bcb)->BcbSegmentListHead);
-
+ }
+ memset(Bcb, 0, sizeof(BCB));
+ ObReferenceObjectByPointer(FileObject,
+ FILE_ALL_ACCESS,
+ NULL,
+ KernelMode);
+ Bcb->FileObject = FileObject;
+ Bcb->CacheSegmentSize = CacheSegmentSize;
+ if (FileObject->FsContext)
+ {
+ Bcb->AllocationSize =
+ ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->AllocationSize;
+ Bcb->FileSize =
+ ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->FileSize;
+ }
+ KeInitializeSpinLock(&Bcb->BcbLock);
+ InitializeListHead(&Bcb->BcbSegmentListHead);
+ FileObject->SectionObjectPointer->SharedCacheMap = Bcb;
+ }
+ if (FileObject->PrivateCacheMap == NULL)
+ {
+ FileObject->PrivateCacheMap = Bcb;
+ Bcb->RefCount++;
+ }
+ if (Bcb->BcbRemoveListEntry.Flink != NULL)
+ {
+ RemoveEntryList(&Bcb->BcbRemoveListEntry);
+ Bcb->BcbRemoveListEntry.Flink = NULL;
+ }
+ ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
+
return(STATUS_SUCCESS);
}
+/*
+ * @implemented
+ */
+PFILE_OBJECT STDCALL
+CcGetFileObjectFromSectionPtrs(IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
+{
+ PBCB Bcb;
+ if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
+ {
+ Bcb = (PBCB)SectionObjectPointers->SharedCacheMap;
+ ASSERT(Bcb);
+ return Bcb->FileObject;
+ }
+ return NULL;
+}
+
+VOID STDCALL
+CmLazyCloseThreadMain(PVOID Ignored)
+{
+ LARGE_INTEGER Timeout;
+ PLIST_ENTRY current_entry;
+ PBCB current;
+ ULONG RemoveTimeStamp;
+ NTSTATUS Status;
+
+ KeQuerySystemTime (&Timeout);
+
+ while (1)
+ {
+ Timeout.QuadPart += (LONGLONG)100000000; // 10sec
+ Status = KeWaitForSingleObject(&LazyCloseThreadEvent,
+ 0,
+ KernelMode,
+ FALSE,
+ &Timeout);
+
+ DPRINT("LazyCloseThreadMain %d\n", CcTimeStamp);
+
+ if (!NT_SUCCESS(Status))
+ {
+ DbgPrint("LazyCloseThread: Wait failed\n");
+ KEBUGCHECKCC;
+ break;
+ }
+ if (LazyCloseThreadShouldTerminate)
+ {
+ DbgPrint("LazyCloseThread: Terminating\n");
+ break;
+ }
+
+ ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
+ CcTimeStamp++;
+ if (CcTimeStamp >= 30)
+ {
+ RemoveTimeStamp = CcTimeStamp - 30; /* 5min = 10sec * 30 */
+ while (!IsListEmpty(&ClosedListHead))
+ {
+ current_entry = ClosedListHead.Blink;
+ current = CONTAINING_RECORD(current_entry, BCB, BcbRemoveListEntry);
+ if (current->TimeStamp >= RemoveTimeStamp)
+ {
+ break;
+ }
+ CcRosDeleteFileCache(current->FileObject, current);
+ }
+ }
+ ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
+ }
+}
+
VOID
+INIT_FUNCTION
+NTAPI
CcInitView(VOID)
{
+#ifdef CACHE_BITMAP
+ PMEMORY_AREA marea;
+ PVOID Buffer;
+ PHYSICAL_ADDRESS BoundaryAddressMultiple;
+#endif
+ NTSTATUS Status;
+ KPRIORITY Priority;
+
DPRINT("CcInitView()\n");
+#ifdef CACHE_BITMAP
+ BoundaryAddressMultiple.QuadPart = 0;
+ CiCacheSegMappingRegionHint = 0;
+ CiCacheSegMappingRegionBase = NULL;
+
+ MmLockAddressSpace(MmGetKernelAddressSpace());
+
+ Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
+ MEMORY_AREA_CACHE_SEGMENT,
+ &CiCacheSegMappingRegionBase,
+ CI_CACHESEG_MAPPING_REGION_SIZE,
+ PAGE_READWRITE,
+ &marea,
+ FALSE,
+ 0,
+ BoundaryAddressMultiple);
+ MmUnlockAddressSpace(MmGetKernelAddressSpace());
+ if (!NT_SUCCESS(Status))
+ {
+ KEBUGCHECKCC;
+ }
+
+ Buffer = ExAllocatePool(NonPagedPool, CI_CACHESEG_MAPPING_REGION_SIZE / (PAGE_SIZE * 8));
+
+ RtlInitializeBitMap(&CiCacheSegMappingRegionAllocMap, Buffer, CI_CACHESEG_MAPPING_REGION_SIZE / PAGE_SIZE);
+ RtlClearAllBits(&CiCacheSegMappingRegionAllocMap);
+
+ KeInitializeSpinLock(&CiCacheSegMappingRegionLock);
+#endif
InitializeListHead(&CacheSegmentListHead);
InitializeListHead(&DirtySegmentListHead);
InitializeListHead(&CacheSegmentLRUListHead);
+ InitializeListHead(&ClosedListHead);
ExInitializeFastMutex(&ViewLock);
+ ExInitializeNPagedLookasideList (&iBcbLookasideList,
+ NULL,
+ NULL,
+ 0,
+ sizeof(INTERNAL_BCB),
+ TAG_IBCB,
+ 20);
+ ExInitializeNPagedLookasideList (&BcbLookasideList,
+ NULL,
+ NULL,
+ 0,
+ sizeof(BCB),
+ TAG_BCB,
+ 20);
+ ExInitializeNPagedLookasideList (&CacheSegLookasideList,
+ NULL,
+ NULL,
+ 0,
+ sizeof(CACHE_SEGMENT),
+ TAG_CSEG,
+ 20);
+
MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);
- InitCacheZeroPage();
+
+ CcInitCacheZeroPage();
+
+ CcTimeStamp = 0;
+ LazyCloseThreadShouldTerminate = FALSE;
+ KeInitializeEvent (&LazyCloseThreadEvent, SynchronizationEvent, FALSE);
+ Status = PsCreateSystemThread(&LazyCloseThreadHandle,
+ THREAD_ALL_ACCESS,
+ NULL,
+ NULL,
+ &LazyCloseThreadId,
+ (PKSTART_ROUTINE)CmLazyCloseThreadMain,
+ NULL);
+ if (NT_SUCCESS(Status))
+ {
+ Priority = LOW_REALTIME_PRIORITY;
+ NtSetInformationThread(LazyCloseThreadHandle,
+ ThreadPriority,
+ &Priority,
+ sizeof(Priority));
+ }
+
}
/* EOF */