-/*
- * ReactOS kernel
- * Copyright (C) 1998, 1999, 2000, 2001 ReactOS Team
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-/* $Id: view.c,v 1.65 2003/07/10 06:27:13 royce Exp $
+/* $Id$
*
+ * COPYRIGHT: See COPYING in the top level directory
* PROJECT: ReactOS kernel
* FILE: ntoskrnl/cc/view.c
* PURPOSE: Cache manager
- * PROGRAMMER: David Welch (welch@mcmail.com)
- * PORTABILITY: Checked
- * UPDATE HISTORY:
- * Created 22/05/98
+ *
+ * PROGRAMMERS: David Welch (welch@mcmail.com)
*/
/* NOTES **********************************************************************
*
- * This is not the NT implementation of a file cache nor anything much like
- * it.
+ * This is not the NT implementation of a file cache nor anything much like
+ * it.
*
- * The general procedure for a filesystem to implement a read or write
+ * The general procedure for a filesystem to implement a read or write
* dispatch routine is as follows
- *
+ *
* (1) If caching for the FCB hasn't been initiated then so do by calling
* CcInitializeFileCache.
- *
+ *
* (2) For each 4k region which is being read or written obtain a cache page
- * by calling CcRequestCachePage.
+ * by calling CcRequestCachePage.
*
- * (3) If either the page is being read or not completely written, and it is
+ * (3) If either the page is being read or not completely written, and it is
* not up to date then read its data from the underlying medium. If the read
- * fails then call CcReleaseCachePage with VALID as FALSE and return a error.
- *
+ * fails then call CcReleaseCachePage with VALID as FALSE and return a error.
+ *
* (4) Copy the data into or out of the page as necessary.
- *
+ *
* (5) Release the cache page
*/
/* INCLUDES ******************************************************************/
-#include <ddk/ntddk.h>
-#include <ddk/ntifs.h>
-#include <internal/mm.h>
-#include <internal/cc.h>
-#include <internal/pool.h>
-#include <ntos/minmax.h>
-
+#include <ntoskrnl.h>
#define NDEBUG
#include <internal/debug.h>
+#if defined (ALLOC_PRAGMA)
+#pragma alloc_text(INIT, CcInitView)
+#endif
+
/* GLOBALS *******************************************************************/
/*
- * If CACHE_BITMAP is defined, the cache manager uses one large memory region
- * within the kernel address space and allocate/deallocate space from this block
- * over a bitmap. If CACHE_BITMAP is used, the size of the mdl mapping region
+ * If CACHE_BITMAP is defined, the cache manager uses one large memory region
+ * within the kernel address space and allocate/deallocate space from this block
+ * over a bitmap. If CACHE_BITMAP is used, the size of the mdl mapping region
* must be reduced (ntoskrnl\mm\mdl.c, MI_MDLMAPPING_REGION_SIZE).
*/
//#define CACHE_BITMAP
-#define ROUND_UP(N, S) ((((N) + (S) - 1) / (S)) * (S))
-#define ROUND_DOWN(N, S) (((N) % (S)) ? ROUND_UP(N, S) - S : N)
-
-#define TAG_CSEG TAG('C', 'S', 'E', 'G')
-#define TAG_BCB TAG('B', 'C', 'B', ' ')
-#define TAG_IBCB TAG('i', 'B', 'C', 'B')
-
static LIST_ENTRY DirtySegmentListHead;
static LIST_ENTRY CacheSegmentListHead;
static LIST_ENTRY CacheSegmentLRUListHead;
static CLIENT_ID LazyCloseThreadId;
static volatile BOOLEAN LazyCloseThreadShouldTerminate;
-void * alloca(size_t size);
+#if defined(__GNUC__)
+/* void * alloca(size_t size); */
+#elif defined(_MSC_VER)
+void* _alloca(size_t size);
+#else
+#error Unknown compiler for alloca intrinsic stack allocation "function"
+#endif
+
+#if defined(DBG) || defined(KDBG)
+/* Debug builds: reference-count changes go through out-of-line helpers so
+ * that, when per-cache-map tracing is enabled (cs->Bcb->Trace, toggled by
+ * CcRosTraceCacheMap), every increment/decrement is logged together with
+ * the call site passed in via the wrapper macros below. */
+static void CcRosCacheSegmentIncRefCount_ ( PCACHE_SEGMENT cs, const char* file, int line )
+{
+ ++cs->ReferenceCount;
+ if ( cs->Bcb->Trace )
+ {
+ DbgPrint("(%s:%i) CacheSegment %p ++RefCount=%d, Dirty %d, PageOut %d\n",
+ file, line, cs, cs->ReferenceCount, cs->Dirty, cs->PageOut );
+ }
+}
+static void CcRosCacheSegmentDecRefCount_ ( PCACHE_SEGMENT cs, const char* file, int line )
+{
+ --cs->ReferenceCount;
+ if ( cs->Bcb->Trace )
+ {
+ DbgPrint("(%s:%i) CacheSegment %p --RefCount=%d, Dirty %d, PageOut %d\n",
+ file, line, cs, cs->ReferenceCount, cs->Dirty, cs->PageOut );
+ }
+}
+/* __FILE__/__LINE__ are captured at the caller so the trace output points
+ * at the site that changed the refcount, not at this helper. */
+#define CcRosCacheSegmentIncRefCount(cs) CcRosCacheSegmentIncRefCount_(cs,__FILE__,__LINE__)
+#define CcRosCacheSegmentDecRefCount(cs) CcRosCacheSegmentDecRefCount_(cs,__FILE__,__LINE__)
+#else
+/* Free builds: plain pre-increment/decrement with no tracing overhead.
+ * NOTE(review): callers appear to rely on the BCB spinlock for atomicity of
+ * ReferenceCount updates — confirm; these macros are not interlocked. */
+#define CcRosCacheSegmentIncRefCount(cs) (++((cs)->ReferenceCount))
+#define CcRosCacheSegmentDecRefCount(cs) (--((cs)->ReferenceCount))
+#endif
NTSTATUS
CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg);
+/* Non-blocking acquire of a FAST_MUTEX: enters a critical region (disabling
+ * normal kernel APC delivery) and then attempts to take the mutex with a
+ * single interlocked Count transition 1 -> 0.
+ * Returns TRUE with the critical region held on success; on failure the
+ * critical region is exited again and FALSE is returned, so the caller's
+ * APC state is unchanged either way.
+ * "Broken" refers to pairing with the Unsafe release path used elsewhere in
+ * this file (ExReleaseFastMutexUnsafeAndLeaveCriticalRegion). */
+BOOLEAN
+FASTCALL
+CcTryToAcquireBrokenMutex(PFAST_MUTEX FastMutex)
+{
+ KeEnterCriticalRegion();
+ if (InterlockedCompareExchange(&FastMutex->Count, 0, 1) == 1)
+ {
+ /* Acquired: record ownership for debugging/contract checks. */
+ FastMutex->Owner = KeGetCurrentThread();
+ return(TRUE);
+ }
+ else
+ {
+ /* Mutex was already held: undo the critical region entry. */
+ KeLeaveCriticalRegion();
+ return(FALSE);
+ }
+}
+
/* FUNCTIONS *****************************************************************/
-NTSTATUS STATIC
+/* Enables or disables debug tracing for one cache map (BCB).
+ * Bcb   - cache map to trace; a NULL Bcb is ignored.
+ * Trace - TRUE to enable, FALSE to disable.
+ * When enabling, dumps the current state (refcount/dirty/pageout) of every
+ * cache segment on the BCB's segment list under ViewLock + BcbLock.
+ * Compiled to a no-op (bar the parameter self-assignments that silence
+ * unused-parameter warnings) on non-debug builds. */
+VOID
+STDCALL
+CcRosTraceCacheMap (
+	PBCB Bcb,
+	BOOLEAN Trace )
+{
+#if defined(DBG) || defined(KDBG)
+	KIRQL oldirql;
+	PLIST_ENTRY current_entry;
+	PCACHE_SEGMENT current;
+
+	if ( !Bcb )
+		return;
+
+	Bcb->Trace = Trace;
+
+	if ( Trace )
+	{
+		DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", Bcb );
+
+		/* Lock order: ViewLock first, then the per-BCB spinlock. */
+		ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
+		KeAcquireSpinLock(&Bcb->BcbLock, &oldirql);
+
+		current_entry = Bcb->BcbSegmentListHead.Flink;
+		while (current_entry != &Bcb->BcbSegmentListHead)
+		{
+			current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
+			current_entry = current_entry->Flink;
+
+			DPRINT1("  CacheSegment 0x%p enabled, RefCount %d, Dirty %d, PageOut %d\n",
+				current, current->ReferenceCount, current->Dirty, current->PageOut );
+		}
+		KeReleaseSpinLock(&Bcb->BcbLock, oldirql);
+		ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
+	}
+	else
+	{
+		DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", Bcb );
+	}
+
+#else
+	/* Reference the parameters to avoid unused-parameter warnings. */
+	Bcb = Bcb;
+	Trace = Trace;
+#endif
+}
+
+NTSTATUS
+NTAPI
CcRosFlushCacheSegment(PCACHE_SEGMENT CacheSegment)
{
NTSTATUS Status;
Status = WriteCacheSegment(CacheSegment);
if (NT_SUCCESS(Status))
{
- ExAcquireFastMutex(&ViewLock);
+ ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
KeAcquireSpinLock(&CacheSegment->Bcb->BcbLock, &oldIrql);
CacheSegment->Dirty = FALSE;
RemoveEntryList(&CacheSegment->DirtySegmentListEntry);
DirtyPageCount -= CacheSegment->Bcb->CacheSegmentSize / PAGE_SIZE;
- CacheSegment->ReferenceCount--;
+ CcRosCacheSegmentDecRefCount ( CacheSegment );
KeReleaseSpinLock(&CacheSegment->Bcb->BcbLock, oldIrql);
- ExReleaseFastMutex(&ViewLock);
+ ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
}
return(Status);
}
-VOID CcRosRemoveUnusedFiles(VOID);
-
-
NTSTATUS
+NTAPI
CcRosFlushDirtyPages(ULONG Target, PULONG Count)
{
PLIST_ENTRY current_entry;
(*Count) = 0;
- ExAcquireFastMutex(&ViewLock);
+ ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
WriteCount[0] = WriteCount[1];
WriteCount[1] = WriteCount[2];
}
NewTarget = WriteCount[0];
-
+
Target = max(NewTarget, Target);
current_entry = DirtySegmentListHead.Flink;
}
while (current_entry != &DirtySegmentListHead && Target > 0)
{
- current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
+ current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
DirtySegmentListEntry);
current_entry = current_entry->Flink;
- Locked = ExTryToAcquireFastMutex(¤t->Lock);
+
+// Locked = current->Bcb->Callbacks.AcquireForLazyWrite(current->Bcb->Context, FALSE);
+ Locked = ExTryToAcquireResourceExclusiveLite(((FSRTL_COMMON_FCB_HEADER*)(current->Bcb->FileObject->FsContext))->Resource);
+ if (!Locked)
+ {
+ continue;
+ }
+ Locked = CcTryToAcquireBrokenMutex(¤t->Lock);
if (!Locked)
{
+// current->Bcb->Callbacks.ReleaseFromLazyWrite(current->Bcb->Context);
+ ExReleaseResourceLite(((FSRTL_COMMON_FCB_HEADER*)(current->Bcb->FileObject->FsContext))->Resource);
continue;
}
- assert(current->Dirty);
+ ASSERT(current->Dirty);
if (current->ReferenceCount > 1)
{
- ExReleaseFastMutex(¤t->Lock);
+ ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(¤t->Lock);
+// current->Bcb->Callbacks.ReleaseFromLazyWrite(current->Bcb->Context);
+ ExReleaseResourceLite(((FSRTL_COMMON_FCB_HEADER*)(current->Bcb->FileObject->FsContext))->Resource);
continue;
}
- ExReleaseFastMutex(&ViewLock);
+ ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
PagesPerSegment = current->Bcb->CacheSegmentSize / PAGE_SIZE;
- Status = CcRosFlushCacheSegment(current);
- ExReleaseFastMutex(¤t->Lock);
+ Status = CcRosFlushCacheSegment(current);
+ ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(¤t->Lock);
+// current->Bcb->Callbacks.ReleaseFromLazyWrite(current->Bcb->Context);
+ ExReleaseResourceLite(((FSRTL_COMMON_FCB_HEADER*)(current->Bcb->FileObject->FsContext))->Resource);
if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
{
DPRINT1("CC: Failed to flush cache segment.\n");
else
{
(*Count) += PagesPerSegment;
- Target -= PagesPerSegment;
+ Target -= PagesPerSegment;
}
- ExAcquireFastMutex(&ViewLock);
+ ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
current_entry = DirtySegmentListHead.Flink;
}
if (*Count < NewTarget)
{
WriteCount[1] += (NewTarget - *Count);
}
- ExReleaseFastMutex(&ViewLock);
+ ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
DPRINT("CcRosFlushDirtyPages() finished\n");
return(STATUS_SUCCESS);
* ARGUMENTS:
* Target - The number of pages to be freed.
* Priority - The priority of free (currently unused).
- * NrFreed - Points to a variable where the number of pages
+ * NrFreed - Points to a variable where the number of pages
* actually freed is returned.
*/
{
PLIST_ENTRY current_entry;
- PCACHE_SEGMENT current;
+ PCACHE_SEGMENT current, last = NULL;
ULONG PagesPerSegment;
ULONG PagesFreed;
KIRQL oldIrql;
DPRINT("CcRosTrimCache(Target %d)\n", Target);
*NrFreed = 0;
-
+
InitializeListHead(&FreeList);
-
- ExAcquireFastMutex(&ViewLock);
+
+ ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
current_entry = CacheSegmentLRUListHead.Flink;
while (current_entry != &CacheSegmentLRUListHead && Target > 0)
{
- current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
+ current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
CacheSegmentLRUListEntry);
current_entry = current_entry->Flink;
-
+
KeAcquireSpinLock(¤t->Bcb->BcbLock, &oldIrql);
if (current->ReferenceCount == 0)
{
}
else
{
- KeReleaseSpinLock(¤t->Bcb->BcbLock, oldIrql);
+ if (last != current && current->MappedCount > 0 && !current->Dirty && !current->PageOut)
+ {
+ ULONG i;
+ NTSTATUS Status;
+
+ CcRosCacheSegmentIncRefCount(current);
+ last = current;
+ current->PageOut = TRUE;
+ KeReleaseSpinLock(¤t->Bcb->BcbLock, oldIrql);
+ ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
+ for (i = 0; i < current->Bcb->CacheSegmentSize / PAGE_SIZE; i++)
+ {
+ PFN_TYPE Page;
+ Page = MmGetPhysicalAddress((char*)current->BaseAddress + i * PAGE_SIZE).QuadPart >> PAGE_SHIFT;
+ Status = MmPageOutPhysicalAddress(Page);
+ if (!NT_SUCCESS(Status))
+ {
+ break;
+ }
+ }
+ ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
+ KeAcquireSpinLock(¤t->Bcb->BcbLock, &oldIrql);
+ CcRosCacheSegmentDecRefCount(current);
+ current->PageOut = FALSE;
+ KeReleaseSpinLock(¤t->Bcb->BcbLock, oldIrql);
+ current_entry = ¤t->CacheSegmentLRUListEntry;
+ continue;
+ }
+ KeReleaseSpinLock(¤t->Bcb->BcbLock, oldIrql);
}
}
- ExReleaseFastMutex(&ViewLock);
+ ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
while (!IsListEmpty(&FreeList))
{
current_entry = RemoveHeadList(&FreeList);
- current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
+ current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
BcbSegmentListEntry);
CcRosInternalFreeCacheSegment(current);
}
return(STATUS_SUCCESS);
}
-NTSTATUS
+NTSTATUS
+NTAPI
CcRosReleaseCacheSegment(PBCB Bcb,
PCACHE_SEGMENT CacheSeg,
BOOLEAN Valid,
BOOLEAN WasDirty = CacheSeg->Dirty;
KIRQL oldIrql;
- assert(Bcb);
+ ASSERT(Bcb);
- DPRINT("CcReleaseCacheSegment(Bcb %x, CacheSeg %x, Valid %d)\n",
+ DPRINT("CcReleaseCacheSegment(Bcb 0x%p, CacheSeg 0x%p, Valid %d)\n",
Bcb, CacheSeg, Valid);
CacheSeg->Valid = Valid;
CacheSeg->Dirty = CacheSeg->Dirty || Dirty;
- ExAcquireFastMutex(&ViewLock);
+ ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
if (!WasDirty && CacheSeg->Dirty)
{
InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
CacheSeg->MappedCount++;
}
KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
- CacheSeg->ReferenceCount--;
+ CcRosCacheSegmentDecRefCount(CacheSeg);
if (Mapped && CacheSeg->MappedCount == 1)
{
- CacheSeg->ReferenceCount++;
+ CcRosCacheSegmentIncRefCount(CacheSeg);
}
if (!WasDirty && CacheSeg->Dirty)
{
- CacheSeg->ReferenceCount++;
+ CcRosCacheSegmentIncRefCount(CacheSeg);
}
KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
- ExReleaseFastMutex(&ViewLock);
- ExReleaseFastMutex(&CacheSeg->Lock);
-
+ ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
+ ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&CacheSeg->Lock);
+
return(STATUS_SUCCESS);
}
-PCACHE_SEGMENT
+PCACHE_SEGMENT
+NTAPI
CcRosLookupCacheSegment(PBCB Bcb, ULONG FileOffset)
{
PLIST_ENTRY current_entry;
PCACHE_SEGMENT current;
KIRQL oldIrql;
- assert(Bcb);
+ ASSERT(Bcb);
- DPRINT("CcRosLookupCacheSegment(Bcb %x, FileOffset %d)\n", Bcb, FileOffset);
+ DPRINT("CcRosLookupCacheSegment(Bcb -x%p, FileOffset %d)\n", Bcb, FileOffset);
KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
current_entry = Bcb->BcbSegmentListHead.Flink;
while (current_entry != &Bcb->BcbSegmentListHead)
{
- current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
+ current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
BcbSegmentListEntry);
if (current->FileOffset <= FileOffset &&
(current->FileOffset + Bcb->CacheSegmentSize) > FileOffset)
{
- current->ReferenceCount++;
+ CcRosCacheSegmentIncRefCount(current);
KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
+ ExEnterCriticalRegionAndAcquireFastMutexUnsafe(¤t->Lock);
return(current);
}
current_entry = current_entry->Flink;
}
NTSTATUS
+NTAPI
CcRosMarkDirtyCacheSegment(PBCB Bcb, ULONG FileOffset)
{
PCACHE_SEGMENT CacheSeg;
KIRQL oldIrql;
- assert(Bcb);
+ ASSERT(Bcb);
- DPRINT("CcRosMarkDirtyCacheSegment(Bcb %x, FileOffset %d)\n", Bcb, FileOffset);
+ DPRINT("CcRosMarkDirtyCacheSegment(Bcb 0x%p, FileOffset %d)\n", Bcb, FileOffset);
CacheSeg = CcRosLookupCacheSegment(Bcb, FileOffset);
if (CacheSeg == NULL)
{
- KeBugCheck(0);
+ KEBUGCHECKCC;
}
- ExAcquireFastMutex(&CacheSeg->Lock);
if (!CacheSeg->Dirty)
{
- ExAcquireFastMutex(&ViewLock);
+ ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
- ExReleaseFastMutex(&ViewLock);
+ ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
}
else
{
KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
- CacheSeg->ReferenceCount--;
+ CcRosCacheSegmentDecRefCount(CacheSeg);
KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
}
CacheSeg->Dirty = TRUE;
- ExReleaseFastMutex(&CacheSeg->Lock);
+ ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&CacheSeg->Lock);
return(STATUS_SUCCESS);
}
NTSTATUS
+NTAPI
CcRosUnmapCacheSegment(PBCB Bcb, ULONG FileOffset, BOOLEAN NowDirty)
{
PCACHE_SEGMENT CacheSeg;
BOOLEAN WasDirty;
KIRQL oldIrql;
- assert(Bcb);
+ ASSERT(Bcb);
- DPRINT("CcRosUnmapCacheSegment(Bcb %x, FileOffset %d, NowDirty %d)\n",
+ DPRINT("CcRosUnmapCacheSegment(Bcb 0x%p, FileOffset %d, NowDirty %d)\n",
Bcb, FileOffset, NowDirty);
CacheSeg = CcRosLookupCacheSegment(Bcb, FileOffset);
{
return(STATUS_UNSUCCESSFUL);
}
- ExAcquireFastMutex(&CacheSeg->Lock);
WasDirty = CacheSeg->Dirty;
CacheSeg->Dirty = CacheSeg->Dirty || NowDirty;
if (!WasDirty && NowDirty)
{
- ExAcquireFastMutex(&ViewLock);
+ ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
- ExReleaseFastMutex(&ViewLock);
+ ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
}
KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
- CacheSeg->ReferenceCount--;
+ CcRosCacheSegmentDecRefCount(CacheSeg);
if (!WasDirty && NowDirty)
{
- CacheSeg->ReferenceCount++;
+ CcRosCacheSegmentIncRefCount(CacheSeg);
}
if (CacheSeg->MappedCount == 0)
{
- CacheSeg->ReferenceCount--;
+ CcRosCacheSegmentDecRefCount(CacheSeg);
}
KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
- ExReleaseFastMutex(&CacheSeg->Lock);
+ ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&CacheSeg->Lock);
return(STATUS_SUCCESS);
}
NTSTATUS STATIC
CcRosCreateCacheSegment(PBCB Bcb,
ULONG FileOffset,
- PCACHE_SEGMENT* CacheSeg,
- BOOLEAN Lock)
+ PCACHE_SEGMENT* CacheSeg)
{
ULONG i;
PCACHE_SEGMENT current;
PLIST_ENTRY current_entry;
NTSTATUS Status;
KIRQL oldIrql;
+ PPFN_TYPE Pfn;
#ifdef CACHE_BITMAP
ULONG StartingOffset;
+#else
#endif
+ PHYSICAL_ADDRESS BoundaryAddressMultiple;
- assert(Bcb);
+ ASSERT(Bcb);
DPRINT("CcRosCreateCacheSegment()\n");
+ BoundaryAddressMultiple.QuadPart = 0;
if (FileOffset >= Bcb->FileSize.u.LowPart)
{
CacheSeg = NULL;
current = ExAllocateFromNPagedLookasideList(&CacheSegLookasideList);
current->Valid = FALSE;
current->Dirty = FALSE;
+ current->PageOut = FALSE;
current->FileOffset = ROUND_DOWN(FileOffset, Bcb->CacheSegmentSize);
current->Bcb = Bcb;
+#if defined(DBG) || defined(KDBG)
+ if ( Bcb->Trace )
+ {
+ DPRINT1("CacheMap 0x%p: new Cache Segment: 0x%p\n", Bcb, current );
+ }
+#endif
current->MappedCount = 0;
current->DirtySegmentListEntry.Flink = NULL;
current->DirtySegmentListEntry.Blink = NULL;
current->ReferenceCount = 1;
ExInitializeFastMutex(¤t->Lock);
- ExAcquireFastMutex(¤t->Lock);
- ExAcquireFastMutex(&ViewLock);
+ ExEnterCriticalRegionAndAcquireFastMutexUnsafe(¤t->Lock);
+ ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
*CacheSeg = current;
/* There is window between the call to CcRosLookupCacheSegment
* and CcRosCreateCacheSegment. We must check if a segment on
* the fileoffset exist. If there exist a segment, we release
- * our new created segment and return the existing one.
+ * our new created segment and return the existing one.
*/
KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
current_entry = Bcb->BcbSegmentListHead.Flink;
previous = NULL;
while (current_entry != &Bcb->BcbSegmentListHead)
{
- current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
+ current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
BcbSegmentListEntry);
if (current->FileOffset <= FileOffset &&
(current->FileOffset + Bcb->CacheSegmentSize) > FileOffset)
{
- current->ReferenceCount++;
+ CcRosCacheSegmentIncRefCount(current);
KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
- ExReleaseFastMutex(&(*CacheSeg)->Lock);
- ExReleaseFastMutex(&ViewLock);
- ExFreeToNPagedLookasideList(&CacheSegLookasideList, *CacheSeg);
- *CacheSeg = current;
- if (Lock)
+#if defined(DBG) || defined(KDBG)
+ if ( Bcb->Trace )
{
- ExAcquireFastMutex(¤t->Lock);
+ DPRINT1("CacheMap 0x%p: deleting newly created Cache Segment 0x%p ( found existing one 0x%p )\n",
+ Bcb,
+ (*CacheSeg),
+ current );
}
+#endif
+ ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&(*CacheSeg)->Lock);
+ ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
+ ExFreeToNPagedLookasideList(&CacheSegLookasideList, *CacheSeg);
+ *CacheSeg = current;
+ ExEnterCriticalRegionAndAcquireFastMutexUnsafe(¤t->Lock);
return STATUS_SUCCESS;
}
if (current->FileOffset < FileOffset)
KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
InsertTailList(&CacheSegmentListHead, ¤t->CacheSegmentListEntry);
InsertTailList(&CacheSegmentLRUListHead, ¤t->CacheSegmentLRUListEntry);
- ExReleaseFastMutex(&ViewLock);
+ ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
#ifdef CACHE_BITMAP
KeAcquireSpinLock(&CiCacheSegMappingRegionLock, &oldIrql);
StartingOffset = RtlFindClearBitsAndSet(&CiCacheSegMappingRegionAllocMap, Bcb->CacheSegmentSize / PAGE_SIZE, CiCacheSegMappingRegionHint);
-
+
if (StartingOffset == 0xffffffff)
{
DPRINT1("Out of CacheSeg mapping space\n");
- KeBugCheck(0);
+ KEBUGCHECKCC;
}
current->BaseAddress = CiCacheSegMappingRegionBase + StartingOffset * PAGE_SIZE;
if (CiCacheSegMappingRegionHint == StartingOffset)
{
- CiCacheSegMappingRegionHint += Bcb->CacheSegmentSize / PAGE_SIZE;
+ CiCacheSegMappingRegionHint += Bcb->CacheSegmentSize / PAGE_SIZE;
}
KeReleaseSpinLock(&CiCacheSegMappingRegionLock, oldIrql);
#else
MmLockAddressSpace(MmGetKernelAddressSpace());
current->BaseAddress = NULL;
- Status = MmCreateMemoryArea(NULL,
- MmGetKernelAddressSpace(),
+ Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
MEMORY_AREA_CACHE_SEGMENT,
¤t->BaseAddress,
Bcb->CacheSegmentSize,
PAGE_READWRITE,
(PMEMORY_AREA*)¤t->MemoryArea,
FALSE,
- FALSE);
+ 0,
+ BoundaryAddressMultiple);
MmUnlockAddressSpace(MmGetKernelAddressSpace());
if (!NT_SUCCESS(Status))
{
- KeBugCheck(0);
+ KEBUGCHECKCC;
}
#endif
+ Pfn = alloca(sizeof(PFN_TYPE) * (Bcb->CacheSegmentSize / PAGE_SIZE));
for (i = 0; i < (Bcb->CacheSegmentSize / PAGE_SIZE); i++)
{
- PHYSICAL_ADDRESS Page;
-
- Status = MmRequestPageMemoryConsumer(MC_CACHE, TRUE, &Page);
- if (!NT_SUCCESS(Status))
- {
- KeBugCheck(0);
- }
-
- Status = MmCreateVirtualMapping(NULL,
- current->BaseAddress + (i * PAGE_SIZE),
- PAGE_READWRITE,
- Page,
- TRUE);
+ Status = MmRequestPageMemoryConsumer(MC_CACHE, TRUE, &Pfn[i]);
if (!NT_SUCCESS(Status))
{
- KeBugCheck(0);
+ KEBUGCHECKCC;
}
}
- if (!Lock)
+ Status = MmCreateVirtualMapping(NULL,
+ current->BaseAddress,
+ PAGE_READWRITE,
+ Pfn,
+ Bcb->CacheSegmentSize / PAGE_SIZE);
+ if (!NT_SUCCESS(Status))
{
- ExReleaseFastMutex(¤t->Lock);
+ KEBUGCHECKCC;
}
-
return(STATUS_SUCCESS);
}
NTSTATUS
+NTAPI
CcRosGetCacheSegmentChain(PBCB Bcb,
ULONG FileOffset,
ULONG Length,
PCACHE_SEGMENT* CacheSegList;
PCACHE_SEGMENT Previous = NULL;
- assert(Bcb);
+ ASSERT(Bcb);
DPRINT("CcRosGetCacheSegmentChain()\n");
Length = ROUND_UP(Length, Bcb->CacheSegmentSize);
- CacheSegList = alloca(sizeof(PCACHE_SEGMENT) *
+#if defined(__GNUC__)
+ CacheSegList = alloca(sizeof(PCACHE_SEGMENT) *
(Length / Bcb->CacheSegmentSize));
+#elif defined(_MSC_VER)
+ CacheSegList = _alloca(sizeof(PCACHE_SEGMENT) *
+ (Length / Bcb->CacheSegmentSize));
+#else
+#error Unknown compiler for alloca intrinsic stack allocation "function"
+#endif
/*
* Look for a cache segment already mapping the same data.
}
else
{
- CcRosCreateCacheSegment(Bcb, CurrentOffset, ¤t, FALSE);
+ CcRosCreateCacheSegment(Bcb, CurrentOffset, ¤t);
CacheSegList[i] = current;
}
}
for (i = 0; i < (Length / Bcb->CacheSegmentSize); i++)
{
- ExAcquireFastMutex(&CacheSegList[i]->Lock);
if (i == 0)
{
*CacheSeg = CacheSegList[i];
}
}
Previous->NextInChain = NULL;
-
+
return(STATUS_SUCCESS);
}
NTSTATUS
+NTAPI
CcRosGetCacheSegment(PBCB Bcb,
ULONG FileOffset,
PULONG BaseOffset,
PCACHE_SEGMENT current;
NTSTATUS Status;
- assert(Bcb);
+ ASSERT(Bcb);
DPRINT("CcRosGetCacheSegment()\n");
* Look for a cache segment already mapping the same data.
*/
current = CcRosLookupCacheSegment(Bcb, FileOffset);
- if (current != NULL)
- {
- ExAcquireFastMutex(¤t->Lock);
- }
- else
+ if (current == NULL)
{
/*
* Otherwise create a new segment.
*/
- Status = CcRosCreateCacheSegment(Bcb, FileOffset, ¤t, TRUE);
+ Status = CcRosCreateCacheSegment(Bcb, FileOffset, ¤t);
if (!NT_SUCCESS(Status))
{
return Status;
return(STATUS_SUCCESS);
}
-NTSTATUS STDCALL
+NTSTATUS STDCALL
CcRosRequestCacheSegment(PBCB Bcb,
ULONG FileOffset,
PVOID* BaseAddress,
{
ULONG BaseOffset;
- assert(Bcb);
+ ASSERT(Bcb);
if ((FileOffset % Bcb->CacheSegmentSize) != 0)
{
CPRINT("Bad fileoffset %x should be multiple of %x",
FileOffset, Bcb->CacheSegmentSize);
- KeBugCheck(0);
+ KEBUGCHECKCC;
}
return(CcRosGetCacheSegment(Bcb,
}
#ifdef CACHE_BITMAP
#else
-STATIC VOID
-CcFreeCachePage(PVOID Context, MEMORY_AREA* MemoryArea, PVOID Address,
- PHYSICAL_ADDRESS PhysAddr, SWAPENTRY SwapEntry, BOOLEAN Dirty)
+STATIC VOID
+CcFreeCachePage(PVOID Context, MEMORY_AREA* MemoryArea, PVOID Address,
+ PFN_TYPE Page, SWAPENTRY SwapEntry, BOOLEAN Dirty)
{
- assert(SwapEntry == 0);
- if (PhysAddr.QuadPart != 0)
+ ASSERT(SwapEntry == 0);
+ if (Page != 0)
{
- MmReleasePageMemoryConsumer(MC_CACHE, PhysAddr);
+ MmReleasePageMemoryConsumer(MC_CACHE, Page);
}
}
#endif
-NTSTATUS
+NTSTATUS
CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg)
/*
* FUNCTION: Releases a cache segment associated with a BCB
ULONG i;
ULONG RegionSize;
ULONG Base;
- PHYSICAL_ADDRESS PhysicalAddr;
+ PFN_TYPE Page;
KIRQL oldIrql;
#endif
- DPRINT("Freeing cache segment %x\n", CacheSeg);
+ DPRINT("Freeing cache segment 0x%p\n", CacheSeg);
+#if defined(DBG) || defined(KDBG)
+ if ( CacheSeg->Bcb->Trace )
+ {
+ DPRINT1("CacheMap 0x%p: deleting Cache Segment: 0x%p\n", CacheSeg->Bcb, CacheSeg );
+ }
+#endif
#ifdef CACHE_BITMAP
RegionSize = CacheSeg->Bcb->CacheSegmentSize / PAGE_SIZE;
/* Unmap all the pages. */
for (i = 0; i < RegionSize; i++)
{
- MmDeleteVirtualMapping(NULL,
+ MmDeleteVirtualMapping(NULL,
CacheSeg->BaseAddress + (i * PAGE_SIZE),
FALSE,
NULL,
- &PhysicalAddr);
- MmReleasePageMemoryConsumer(MC_CACHE, PhysicalAddr);
+ &Page);
+ MmReleasePageMemoryConsumer(MC_CACHE, Page);
}
KeAcquireSpinLock(&CiCacheSegMappingRegionLock, &oldIrql);
/* Deallocate all the pages used. */
Base = (ULONG)(CacheSeg->BaseAddress - CiCacheSegMappingRegionBase) / PAGE_SIZE;
-
+
RtlClearBits(&CiCacheSegMappingRegionAllocMap, Base, RegionSize);
CiCacheSegMappingRegionHint = min (CiCacheSegMappingRegionHint, Base);
#else
MmLockAddressSpace(MmGetKernelAddressSpace());
MmFreeMemoryArea(MmGetKernelAddressSpace(),
- CacheSeg->BaseAddress,
- CacheSeg->Bcb->CacheSegmentSize,
+ CacheSeg->MemoryArea,
CcFreeCachePage,
NULL);
MmUnlockAddressSpace(MmGetKernelAddressSpace());
}
NTSTATUS
+NTAPI
CcRosFreeCacheSegment(PBCB Bcb, PCACHE_SEGMENT CacheSeg)
{
NTSTATUS Status;
KIRQL oldIrql;
- assert(Bcb);
+ ASSERT(Bcb);
- DPRINT("CcRosFreeCacheSegment(Bcb %x, CacheSeg %x)\n",
+ DPRINT("CcRosFreeCacheSegment(Bcb 0x%p, CacheSeg 0x%p)\n",
Bcb, CacheSeg);
- ExAcquireFastMutex(&ViewLock);
+ ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
RemoveEntryList(&CacheSeg->BcbSegmentListEntry);
RemoveEntryList(&CacheSeg->CacheSegmentListEntry);
}
KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
- ExReleaseFastMutex(&ViewLock);
+ ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
Status = CcRosInternalFreeCacheSegment(CacheSeg);
return(Status);
NTSTATUS Status;
KIRQL oldIrql;
- DPRINT("CcFlushCache(SectionObjectPointers %x, FileOffset %x, Length %d, IoStatus %x)\n",
+ DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %d, IoStatus 0x%p)\n",
SectionObjectPointers, FileOffset, Length, IoStatus);
if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
{
Bcb = (PBCB)SectionObjectPointers->SharedCacheMap;
- assert(Bcb);
+ ASSERT(Bcb);
if (FileOffset)
{
Offset = *FileOffset;
}
- else
+ else
{
- Offset.QuadPart = 0LL;
+ Offset.QuadPart = (LONGLONG)0;
Length = Bcb->FileSize.u.LowPart;
}
-
+
if (IoStatus)
{
IoStatus->Status = STATUS_SUCCESS;
current = CcRosLookupCacheSegment (Bcb, Offset.u.LowPart);
if (current != NULL)
{
- ExAcquireFastMutex(¤t->Lock);
if (current->Dirty)
{
Status = CcRosFlushCacheSegment(current);
}
}
KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
- ExReleaseFastMutex(¤t->Lock);
- current->ReferenceCount--;
+ ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(¤t->Lock);
+ CcRosCacheSegmentDecRefCount(current);
KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
}
}
}
-NTSTATUS
+NTSTATUS
+NTAPI
CcRosDeleteFileCache(PFILE_OBJECT FileObject, PBCB Bcb)
/*
* FUNCTION: Releases the BCB associated with a file object
LIST_ENTRY FreeList;
KIRQL oldIrql;
- assert(Bcb);
-
+ ASSERT(Bcb);
+
Bcb->RefCount++;
- ExReleaseFastMutex(&ViewLock);
+ ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);
- ExAcquireFastMutex(&ViewLock);
+ ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
Bcb->RefCount--;
if (Bcb->RefCount == 0)
{
Bcb->BcbRemoveListEntry.Flink = NULL;
}
- FileObject->SectionObjectPointer->SharedCacheMap = NULL;
+ FileObject->SectionObjectPointer->SharedCacheMap = NULL;
/*
* Release all cache segments.
}
InsertHeadList(&FreeList, ¤t->BcbSegmentListEntry);
}
- KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
+#if defined(DBG) || defined(KDBG)
+ Bcb->Trace = FALSE;
+#endif
+ KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
- ExReleaseFastMutex(&ViewLock);
+ ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
ObDereferenceObject (Bcb->FileObject);
while (!IsListEmpty(&FreeList))
current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
Status = CcRosInternalFreeCacheSegment(current);
}
- ExFreeToNPagedLookasideList(&BcbLookasideList, Bcb);
- ExAcquireFastMutex(&ViewLock);
+ ExFreeToNPagedLookasideList(&BcbLookasideList, Bcb);
+ ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
}
return(STATUS_SUCCESS);
}
-VOID CcRosReferenceCache(PFILE_OBJECT FileObject)
+VOID
+NTAPI
+CcRosReferenceCache(PFILE_OBJECT FileObject)
{
PBCB Bcb;
- ExAcquireFastMutex(&ViewLock);
+ ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
Bcb = (PBCB)FileObject->SectionObjectPointer->SharedCacheMap;
- assert(Bcb);
+ ASSERT(Bcb);
if (Bcb->RefCount == 0)
{
- assert(Bcb->BcbRemoveListEntry.Flink != NULL);
+ ASSERT(Bcb->BcbRemoveListEntry.Flink != NULL);
RemoveEntryList(&Bcb->BcbRemoveListEntry);
Bcb->BcbRemoveListEntry.Flink = NULL;
}
else
{
- assert(Bcb->BcbRemoveListEntry.Flink == NULL);
+ ASSERT(Bcb->BcbRemoveListEntry.Flink == NULL);
}
Bcb->RefCount++;
- ExReleaseFastMutex(&ViewLock);
+ ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
}
-VOID CcRosSetRemoveOnClose(PSECTION_OBJECT_POINTERS SectionObjectPointer)
+VOID
+NTAPI
+CcRosSetRemoveOnClose(PSECTION_OBJECT_POINTERS SectionObjectPointer)
{
PBCB Bcb;
-// DPRINT1("CcRosSetRemoveOnClose()\n");
- ExAcquireFastMutex(&ViewLock);
+ DPRINT("CcRosSetRemoveOnClose()\n");
+ ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
Bcb = (PBCB)SectionObjectPointer->SharedCacheMap;
if (Bcb)
{
CcRosDeleteFileCache(Bcb->FileObject, Bcb);
}
}
- ExReleaseFastMutex(&ViewLock);
+ ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
}
-VOID CcRosDereferenceCache(PFILE_OBJECT FileObject)
+VOID
+NTAPI
+CcRosDereferenceCache(PFILE_OBJECT FileObject)
{
PBCB Bcb;
- ExAcquireFastMutex(&ViewLock);
+ ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
Bcb = (PBCB)FileObject->SectionObjectPointer->SharedCacheMap;
- assert(Bcb);
+ ASSERT(Bcb);
if (Bcb->RefCount > 0)
{
Bcb->RefCount--;
}
}
}
- ExReleaseFastMutex(&ViewLock);
+ ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
}
-/*
- * @implemented
- */
-NTSTATUS STDCALL
+NTSTATUS STDCALL
CcRosReleaseFileCache(PFILE_OBJECT FileObject)
/*
* FUNCTION: Called by the file system when a handle to a file object
{
PBCB Bcb;
- ExAcquireFastMutex(&ViewLock);
+ ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
{
Bcb->RefCount--;
if (Bcb->RefCount == 0)
{
+ MmFreeSectionSegments(Bcb->FileObject);
if (Bcb->RemoveOnClose)
{
CcRosDeleteFileCache(FileObject, Bcb);
}
}
}
- ExReleaseFastMutex(&ViewLock);
+ ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
return(STATUS_SUCCESS);
}
-NTSTATUS
+NTSTATUS
+NTAPI
CcTryToInitializeFileCache(PFILE_OBJECT FileObject)
{
PBCB Bcb;
NTSTATUS Status;
- ExAcquireFastMutex(&ViewLock);
+ ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
if (Bcb == NULL)
}
Status = STATUS_SUCCESS;
}
- ExReleaseFastMutex(&ViewLock);
+ ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
return Status;
}
-/*
- * @implemented
- */
-NTSTATUS STDCALL
+NTSTATUS STDCALL
CcRosInitializeFileCache(PFILE_OBJECT FileObject,
ULONG CacheSegmentSize)
/*
*/
{
PBCB Bcb;
- DPRINT("CcRosInitializeFileCache(FileObject %x, *Bcb %x, CacheSegmentSize %d)\n",
- FileObject, Bcb, CacheSegmentSize);
-
- ExAcquireFastMutex(&ViewLock);
Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
+ DPRINT("CcRosInitializeFileCache(FileObject 0x%p, Bcb 0x%p, CacheSegmentSize %d)\n",
+ FileObject, Bcb, CacheSegmentSize);
+
+ ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
if (Bcb == NULL)
{
- Bcb = ExAllocateFromNPagedLookasideList(&BcbLookasideList);
+ Bcb = ExAllocateFromNPagedLookasideList(&BcbLookasideList);
if (Bcb == NULL)
{
- ExReleaseFastMutex(&ViewLock);
+ ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
return(STATUS_UNSUCCESSFUL);
}
- memset(Bcb, 0, sizeof(BCB));
+ memset(Bcb, 0, sizeof(BCB));
ObReferenceObjectByPointer(FileObject,
FILE_ALL_ACCESS,
NULL,
Bcb->CacheSegmentSize = CacheSegmentSize;
if (FileObject->FsContext)
{
- Bcb->AllocationSize =
+ Bcb->AllocationSize =
((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->AllocationSize;
- Bcb->FileSize =
+ Bcb->FileSize =
((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->FileSize;
}
KeInitializeSpinLock(&Bcb->BcbLock);
RemoveEntryList(&Bcb->BcbRemoveListEntry);
Bcb->BcbRemoveListEntry.Flink = NULL;
}
- ExReleaseFastMutex(&ViewLock);
+ ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
return(STATUS_SUCCESS);
}
if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
{
Bcb = (PBCB)SectionObjectPointers->SharedCacheMap;
- assert(Bcb);
+ ASSERT(Bcb);
return Bcb->FileObject;
}
return NULL;
while (1)
{
- Timeout.QuadPart += 100000000LL; // 10sec
+ Timeout.QuadPart += (LONGLONG)100000000; // 10sec
Status = KeWaitForSingleObject(&LazyCloseThreadEvent,
0,
KernelMode,
if (!NT_SUCCESS(Status))
{
DbgPrint("LazyCloseThread: Wait failed\n");
- KeBugCheck(0);
+ KEBUGCHECKCC;
break;
}
if (LazyCloseThreadShouldTerminate)
DbgPrint("LazyCloseThread: Terminating\n");
break;
}
-
- ExAcquireFastMutex(&ViewLock);
+
+ ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
CcTimeStamp++;
if (CcTimeStamp >= 30)
{
CcRosDeleteFileCache(current->FileObject, current);
}
}
- ExReleaseFastMutex(&ViewLock);
+ ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
}
}
VOID
+INIT_FUNCTION
+NTAPI
CcInitView(VOID)
{
#ifdef CACHE_BITMAP
PMEMORY_AREA marea;
PVOID Buffer;
+ PHYSICAL_ADDRESS BoundaryAddressMultiple;
#endif
NTSTATUS Status;
KPRIORITY Priority;
DPRINT("CcInitView()\n");
#ifdef CACHE_BITMAP
+ BoundaryAddressMultiple.QuadPart = 0;
CiCacheSegMappingRegionHint = 0;
CiCacheSegMappingRegionBase = NULL;
MmLockAddressSpace(MmGetKernelAddressSpace());
- Status = MmCreateMemoryArea(NULL,
- MmGetKernelAddressSpace(),
+ Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
MEMORY_AREA_CACHE_SEGMENT,
&CiCacheSegMappingRegionBase,
CI_CACHESEG_MAPPING_REGION_SIZE,
- 0,
+ PAGE_READWRITE,
&marea,
FALSE,
- FALSE);
+ 0,
+ BoundaryAddressMultiple);
MmUnlockAddressSpace(MmGetKernelAddressSpace());
if (!NT_SUCCESS(Status))
{
- KeBugCheck(0);
+ KEBUGCHECKCC;
}
Buffer = ExAllocatePool(NonPagedPool, CI_CACHESEG_MAPPING_REGION_SIZE / (PAGE_SIZE * 8));
RtlClearAllBits(&CiCacheSegMappingRegionAllocMap);
KeInitializeSpinLock(&CiCacheSegMappingRegionLock);
-#endif
+#endif
InitializeListHead(&CacheSegmentListHead);
InitializeListHead(&DirtySegmentListHead);
InitializeListHead(&CacheSegmentLRUListHead);
20);
MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);
-
+
CcInitCacheZeroPage();
- CcTimeStamp = 0;
+ CcTimeStamp = 0;
LazyCloseThreadShouldTerminate = FALSE;
KeInitializeEvent (&LazyCloseThreadEvent, SynchronizationEvent, FALSE);
Status = PsCreateSystemThread(&LazyCloseThreadHandle,