[NTOSKRNL] Fix CcIdleDelay initializer for old msvc versions (#339)
[reactos.git] / ntoskrnl / cc / view.c
index 46c14a5..003c5d8 100644 (file)
@@ -5,6 +5,7 @@
  * PURPOSE:         Cache manager
  *
  * PROGRAMMERS:     David Welch (welch@mcmail.com)
+ *                  Pierre Schweitzer (pierre@reactos.org)
  */
 
 /* NOTES **********************************************************************
 
 /* GLOBALS *******************************************************************/
 
-/*
- * If CACHE_BITMAP is defined, the cache manager uses one large memory region
- * within the kernel address space and allocate/deallocate space from this block
- * over a bitmap. If CACHE_BITMAP is used, the size of the mdl mapping region
- * must be reduced (ntoskrnl\mm\mdl.c, MI_MDLMAPPING_REGION_SIZE).
- */
-//#define CACHE_BITMAP
-
-static LIST_ENTRY DirtySegmentListHead;
-static LIST_ENTRY CacheSegmentListHead;
-static LIST_ENTRY CacheSegmentLRUListHead;
-static LIST_ENTRY ClosedListHead;
-ULONG DirtyPageCount = 0;
+LIST_ENTRY DirtyVacbListHead;
+static LIST_ENTRY VacbLruListHead;
 
 KGUARDED_MUTEX ViewLock;
 
-#ifdef CACHE_BITMAP
-#define    CI_CACHESEG_MAPPING_REGION_SIZE    (128*1024*1024)
-
-static PVOID CiCacheSegMappingRegionBase = NULL;
-static RTL_BITMAP CiCacheSegMappingRegionAllocMap;
-static ULONG CiCacheSegMappingRegionHint;
-static KSPIN_LOCK CiCacheSegMappingRegionLock;
-#endif
-
 NPAGED_LOOKASIDE_LIST iBcbLookasideList;
-static NPAGED_LOOKASIDE_LIST BcbLookasideList;
-static NPAGED_LOOKASIDE_LIST CacheSegLookasideList;
+static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
+static NPAGED_LOOKASIDE_LIST VacbLookasideList;
+
+/* Counters:
+ * - Amount of pages flushed by lazy writer
+ * - Number of times lazy writer ran
+ */
+ULONG CcLazyWritePages = 0;
+ULONG CcLazyWriteIos = 0;
+
+/* Internal vars (MS):
+ * - Threshold above which lazy writer will start action
+ * - Amount of dirty pages
+ * - List for deferred writes
+ * - Spinlock when dealing with the deferred list
+ * - List for "clean" shared cache maps
+ * - One second delay for lazy writer
+ */
+ULONG CcDirtyPageThreshold = 0;
+ULONG CcTotalDirtyPages = 0;
+LIST_ENTRY CcDeferredWrites;
+KSPIN_LOCK CcDeferredWriteSpinLock;
+LIST_ENTRY CcCleanSharedCacheMapList;
+LARGE_INTEGER CcIdleDelay = RTL_CONSTANT_LARGE_INTEGER((LONGLONG)-1*1000*1000*10);
+
+/* Internal vars (ROS):
+ * - Event to notify lazy writer to shutdown
+ * - Event to inform watchers lazy writer is done for this loop
+ * - Lock for the CcCleanSharedCacheMapList list
+ */
+KEVENT iLazyWriterShutdown;
+KEVENT iLazyWriterNotify;
+KSPIN_LOCK iSharedCacheMapLock;
 
 #if DBG
-static void CcRosCacheSegmentIncRefCount_ ( PCACHE_SEGMENT cs, const char* file, int line )
+static void CcRosVacbIncRefCount_(PROS_VACB vacb, const char* file, int line)
 {
-    ++cs->ReferenceCount;
-    if ( cs->Bcb->Trace )
+    ++vacb->ReferenceCount;
+    if (vacb->SharedCacheMap->Trace)
     {
-        DbgPrint("(%s:%i) CacheSegment %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
-                 file, line, cs, cs->ReferenceCount, cs->Dirty, cs->PageOut );
+        DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
+                 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
     }
 }
-static void CcRosCacheSegmentDecRefCount_ ( PCACHE_SEGMENT cs, const char* file, int line )
+static void CcRosVacbDecRefCount_(PROS_VACB vacb, const char* file, int line)
 {
-    --cs->ReferenceCount;
-    if ( cs->Bcb->Trace )
+    --vacb->ReferenceCount;
+    ASSERT(!(vacb->ReferenceCount == 0 && vacb->Dirty));
+    if (vacb->SharedCacheMap->Trace)
     {
-        DbgPrint("(%s:%i) CacheSegment %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
-                 file, line, cs, cs->ReferenceCount, cs->Dirty, cs->PageOut );
+        DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
+                 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
     }
 }
-#define CcRosCacheSegmentIncRefCount(cs) CcRosCacheSegmentIncRefCount_(cs,__FILE__,__LINE__)
-#define CcRosCacheSegmentDecRefCount(cs) CcRosCacheSegmentDecRefCount_(cs,__FILE__,__LINE__)
+#define CcRosVacbIncRefCount(vacb) CcRosVacbIncRefCount_(vacb,__FILE__,__LINE__)
+#define CcRosVacbDecRefCount(vacb) CcRosVacbDecRefCount_(vacb,__FILE__,__LINE__)
 #else
-#define CcRosCacheSegmentIncRefCount(cs) (++((cs)->ReferenceCount))
-#define CcRosCacheSegmentDecRefCount(cs) (--((cs)->ReferenceCount))
+#define CcRosVacbIncRefCount(vacb) (++((vacb)->ReferenceCount))
+#define CcRosVacbDecRefCount(vacb) (--((vacb)->ReferenceCount))
 #endif
 
 NTSTATUS
-CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg);
+CcRosInternalFreeVacb(PROS_VACB Vacb);
 
 
 /* FUNCTIONS *****************************************************************/
@@ -105,69 +118,70 @@ CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg);
 VOID
 NTAPI
 CcRosTraceCacheMap (
-    PBCB Bcb,
+    PROS_SHARED_CACHE_MAP SharedCacheMap,
     BOOLEAN Trace )
 {
 #if DBG
     KIRQL oldirql;
     PLIST_ENTRY current_entry;
-    PCACHE_SEGMENT current;
+    PROS_VACB current;
 
-    if ( !Bcb )
+    if (!SharedCacheMap)
         return;
 
-    Bcb->Trace = Trace;
+    SharedCacheMap->Trace = Trace;
 
-    if ( Trace )
+    if (Trace)
     {
-        DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", Bcb );
+        DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
 
         KeAcquireGuardedMutex(&ViewLock);
-        KeAcquireSpinLock(&Bcb->BcbLock, &oldirql);
+        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldirql);
 
-        current_entry = Bcb->BcbSegmentListHead.Flink;
-        while (current_entry != &Bcb->BcbSegmentListHead)
+        current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
+        while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
         {
-            current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
+            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
             current_entry = current_entry->Flink;
 
-            DPRINT1("  CacheSegment 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
+            DPRINT1("  VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
                     current, current->ReferenceCount, current->Dirty, current->PageOut );
         }
-        KeReleaseSpinLock(&Bcb->BcbLock, oldirql);
+        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldirql);
         KeReleaseGuardedMutex(&ViewLock);
     }
     else
     {
-        DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", Bcb );
+        DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
     }
 
 #else
-    Bcb = Bcb;
-    Trace = Trace;
+    UNREFERENCED_PARAMETER(SharedCacheMap);
+    UNREFERENCED_PARAMETER(Trace);
 #endif
 }
 
 NTSTATUS
 NTAPI
-CcRosFlushCacheSegment (
-    PCACHE_SEGMENT CacheSegment)
+CcRosFlushVacb (
+    PROS_VACB Vacb)
 {
     NTSTATUS Status;
     KIRQL oldIrql;
 
-    Status = WriteCacheSegment(CacheSegment);
+    Status = CcWriteVirtualAddress(Vacb);
     if (NT_SUCCESS(Status))
     {
         KeAcquireGuardedMutex(&ViewLock);
-        KeAcquireSpinLock(&CacheSegment->Bcb->BcbLock, &oldIrql);
+        KeAcquireSpinLock(&Vacb->SharedCacheMap->CacheMapLock, &oldIrql);
 
-        CacheSegment->Dirty = FALSE;
-        RemoveEntryList(&CacheSegment->DirtySegmentListEntry);
-        DirtyPageCount -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
-        CcRosCacheSegmentDecRefCount(CacheSegment);
+        Vacb->Dirty = FALSE;
+        RemoveEntryList(&Vacb->DirtyVacbListEntry);
+        CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+        Vacb->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+        CcRosVacbDecRefCount(Vacb);
 
-        KeReleaseSpinLock(&CacheSegment->Bcb->BcbLock, oldIrql);
+        KeReleaseSpinLock(&Vacb->SharedCacheMap->CacheMapLock, oldIrql);
         KeReleaseGuardedMutex(&ViewLock);
     }
 
@@ -179,11 +193,11 @@ NTAPI
 CcRosFlushDirtyPages (
     ULONG Target,
     PULONG Count,
-    BOOLEAN Wait)
+    BOOLEAN Wait,
+    BOOLEAN CalledFromLazy)
 {
     PLIST_ENTRY current_entry;
-    PCACHE_SEGMENT current;
-    ULONG PagesPerSegment;
+    PROS_VACB current;
     BOOLEAN Locked;
     NTSTATUS Status;
     LARGE_INTEGER ZeroTimeout;
@@ -196,77 +210,97 @@ CcRosFlushDirtyPages (
     KeEnterCriticalRegion();
     KeAcquireGuardedMutex(&ViewLock);
 
-    current_entry = DirtySegmentListHead.Flink;
-    if (current_entry == &DirtySegmentListHead)
+    current_entry = DirtyVacbListHead.Flink;
+    if (current_entry == &DirtyVacbListHead)
     {
         DPRINT("No Dirty pages\n");
     }
 
-    while ((current_entry != &DirtySegmentListHead) && (Target > 0))
+    while ((current_entry != &DirtyVacbListHead) && (Target > 0))
     {
-        current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
-                                    DirtySegmentListEntry);
+        current = CONTAINING_RECORD(current_entry,
+                                    ROS_VACB,
+                                    DirtyVacbListEntry);
         current_entry = current_entry->Flink;
 
-        CcRosCacheSegmentIncRefCount(current);
+        CcRosVacbIncRefCount(current);
 
-        Locked = current->Bcb->Callbacks->AcquireForLazyWrite(
-                     current->Bcb->LazyWriteContext, Wait);
+        /* When performing lazy write, don't handle temporary files */
+        if (CalledFromLazy &&
+            BooleanFlagOn(current->SharedCacheMap->FileObject->Flags, FO_TEMPORARY_FILE))
+        {
+            CcRosVacbDecRefCount(current);
+            continue;
+        }
+
+        Locked = current->SharedCacheMap->Callbacks->AcquireForLazyWrite(
+                     current->SharedCacheMap->LazyWriteContext, Wait);
         if (!Locked)
         {
-            CcRosCacheSegmentDecRefCount(current);
+            CcRosVacbDecRefCount(current);
             continue;
         }
 
-        Status = KeWaitForSingleObject(&current->Mutex,
-                                       Executive,
-                                       KernelMode,
-                                       FALSE,
-                                       Wait ? NULL : &ZeroTimeout);
+        Status = CcRosAcquireVacbLock(current,
+                                      Wait ? NULL : &ZeroTimeout);
         if (Status != STATUS_SUCCESS)
         {
-            current->Bcb->Callbacks->ReleaseFromLazyWrite(
-                current->Bcb->LazyWriteContext);
-            CcRosCacheSegmentDecRefCount(current);
+            current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
+                current->SharedCacheMap->LazyWriteContext);
+            CcRosVacbDecRefCount(current);
             continue;
         }
 
         ASSERT(current->Dirty);
 
         /* One reference is added above */
-        if (current->ReferenceCount > 2)
+        if ((current->ReferenceCount > 2 && current->PinCount == 0) ||
+            (current->ReferenceCount > 3 && current->PinCount > 1))
         {
-            KeReleaseMutex(&current->Mutex, FALSE);
-            current->Bcb->Callbacks->ReleaseFromLazyWrite(
-                current->Bcb->LazyWriteContext);
-            CcRosCacheSegmentDecRefCount(current);
+            CcRosReleaseVacbLock(current);
+            current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
+                current->SharedCacheMap->LazyWriteContext);
+            CcRosVacbDecRefCount(current);
             continue;
         }
 
-        PagesPerSegment = VACB_MAPPING_GRANULARITY / PAGE_SIZE;
-
         KeReleaseGuardedMutex(&ViewLock);
 
-        Status = CcRosFlushCacheSegment(current);
+        Status = CcRosFlushVacb(current);
 
-        KeReleaseMutex(&current->Mutex, FALSE);
-        current->Bcb->Callbacks->ReleaseFromLazyWrite(
-            current->Bcb->LazyWriteContext);
+        CcRosReleaseVacbLock(current);
+        current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
+            current->SharedCacheMap->LazyWriteContext);
 
         KeAcquireGuardedMutex(&ViewLock);
-        CcRosCacheSegmentDecRefCount(current);
+        CcRosVacbDecRefCount(current);
 
-        if (!NT_SUCCESS(Status) &&  (Status != STATUS_END_OF_FILE))
+        if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE) &&
+            (Status != STATUS_MEDIA_WRITE_PROTECTED))
         {
-            DPRINT1("CC: Failed to flush cache segment.\n");
+            DPRINT1("CC: Failed to flush VACB.\n");
         }
         else
         {
-            (*Count) += PagesPerSegment;
-            Target -= PagesPerSegment;
+            ULONG PagesFreed;
+
+            /* How many pages did we free? */
+            PagesFreed = VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+            (*Count) += PagesFreed;
+
+            /* Make sure we don't overflow target! */
+            if (Target < PagesFreed)
+            {
+                /* If we would have, jump to zero directly */
+                Target = 0;
+            }
+            else
+            {
+                Target -= PagesFreed;
+            }
         }
 
-        current_entry = DirtySegmentListHead.Flink;
+        current_entry = DirtyVacbListHead.Flink;
     }
 
     KeReleaseGuardedMutex(&ViewLock);
@@ -276,6 +310,81 @@ CcRosFlushDirtyPages (
     return STATUS_SUCCESS;
 }
 
+/* FIXME: Someday this could somewhat implement write-behind/read-ahead */
+VOID
+NTAPI
+CciLazyWriter(PVOID Unused)
+{
+    while (TRUE)
+    {
+        NTSTATUS Status;
+        PLIST_ENTRY ListEntry;
+        ULONG Target, Count = 0;
+
+        /* One per second or until we have to stop */
+        Status = KeWaitForSingleObject(&iLazyWriterShutdown,
+                                       Executive,
+                                       KernelMode,
+                                       FALSE,
+                                       &CcIdleDelay);
+
+        /* If we succeed, we have to stop running! */
+        if (Status == STATUS_SUCCESS)
+        {
+            break;
+        }
+
+        /* We're not sleeping anymore */
+        KeClearEvent(&iLazyWriterNotify);
+
+        /* Our target is one-eighth of the dirty pages */
+        Target = CcTotalDirtyPages / 8;
+        if (Target != 0)
+        {
+            /* Flush! */
+            DPRINT("Lazy writer starting (%d)\n", Target);
+            CcRosFlushDirtyPages(Target, &Count, FALSE, TRUE);
+
+            /* And update stats */
+            CcLazyWritePages += Count;
+            ++CcLazyWriteIos;
+            DPRINT("Lazy writer done (%d)\n", Count);
+        }
+
+        /* Inform people waiting on us that we're done */
+        KeSetEvent(&iLazyWriterNotify, IO_DISK_INCREMENT, FALSE);
+
+        /* Likely not optimal, but let's handle one deferred write now! */
+        ListEntry = ExInterlockedRemoveHeadList(&CcDeferredWrites, &CcDeferredWriteSpinLock);
+        if (ListEntry != NULL)
+        {
+            PDEFERRED_WRITE Context;
+
+            /* Extract the context */
+            Context = CONTAINING_RECORD(ListEntry, DEFERRED_WRITE, DeferredWriteLinks);
+            ASSERT(Context->NodeTypeCode == NODE_TYPE_DEFERRED_WRITE);
+
+            /* Can we write now? */
+            if (CcCanIWrite(Context->FileObject, Context->BytesToWrite, FALSE, TRUE))
+            {
+                /* Yes! Do it, and destroy the associated context */
+                Context->PostRoutine(Context->Context1, Context->Context2);
+                ExFreePoolWithTag(Context, 'CcDw');
+            }
+            else
+            {
+                /* Otherwise, requeue it at the tail so that it doesn't block others.
+                 * This is clearly something to improve, but given the poor algorithm
+                 * used now, it's better than nothing!
+                 */
+                ExInterlockedInsertTailList(&CcDeferredWrites,
+                                            &Context->DeferredWriteLinks,
+                                            &CcDeferredWriteSpinLock);
+            }
+        }
+    }
+}
+
 NTSTATUS
 CcRosTrimCache (
     ULONG Target,
@@ -291,8 +400,7 @@ CcRosTrimCache (
  */
 {
     PLIST_ENTRY current_entry;
-    PCACHE_SEGMENT current;
-    ULONG PagesPerSegment;
+    PROS_VACB current;
     ULONG PagesFreed;
     KIRQL oldIrql;
     LIST_ENTRY FreeList;
@@ -309,26 +417,27 @@ CcRosTrimCache (
 retry:
     KeAcquireGuardedMutex(&ViewLock);
 
-    current_entry = CacheSegmentLRUListHead.Flink;
-    while (current_entry != &CacheSegmentLRUListHead)
+    current_entry = VacbLruListHead.Flink;
+    while (current_entry != &VacbLruListHead)
     {
-        current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
-                                    CacheSegmentLRUListEntry);
+        current = CONTAINING_RECORD(current_entry,
+                                    ROS_VACB,
+                                    VacbLruListEntry);
         current_entry = current_entry->Flink;
 
-        KeAcquireSpinLock(&current->Bcb->BcbLock, &oldIrql);
+        KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
 
-        /* Reference the cache segment */
-        CcRosCacheSegmentIncRefCount(current);
+        /* Reference the VACB */
+        CcRosVacbIncRefCount(current);
 
         /* Check if it's mapped and not dirty */
         if (current->MappedCount > 0 && !current->Dirty)
         {
             /* We have to break these locks because Cc sucks */
-            KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
+            KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
             KeReleaseGuardedMutex(&ViewLock);
 
-            /* Page out the segment */
+            /* Page out the VACB */
             for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++)
             {
                 Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);
@@ -338,11 +447,11 @@ retry:
 
             /* Reacquire the locks */
             KeAcquireGuardedMutex(&ViewLock);
-            KeAcquireSpinLock(&current->Bcb->BcbLock, &oldIrql);
+            KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
         }
 
-        /* Dereference the cache segment */
-        CcRosCacheSegmentDecRefCount(current);
+        /* Dereference the VACB */
+        CcRosVacbDecRefCount(current);
 
         /* Check if we can free this entry now */
         if (current->ReferenceCount == 0)
@@ -350,19 +459,17 @@ retry:
             ASSERT(!current->Dirty);
             ASSERT(!current->MappedCount);
 
-            RemoveEntryList(&current->BcbSegmentListEntry);
-            RemoveEntryList(&current->CacheSegmentListEntry);
-            RemoveEntryList(&current->CacheSegmentLRUListEntry);
-            InsertHeadList(&FreeList, &current->BcbSegmentListEntry);
+            RemoveEntryList(&current->CacheMapVacbListEntry);
+            RemoveEntryList(&current->VacbLruListEntry);
+            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
 
             /* Calculate how many pages we freed for Mm */
-            PagesPerSegment = VACB_MAPPING_GRANULARITY / PAGE_SIZE;
-            PagesFreed = min(PagesPerSegment, Target);
+            PagesFreed = min(VACB_MAPPING_GRANULARITY / PAGE_SIZE, Target);
             Target -= PagesFreed;
             (*NrFreed) += PagesFreed;
         }
 
-        KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
+        KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
     }
 
     KeReleaseGuardedMutex(&ViewLock);
@@ -371,7 +478,7 @@ retry:
     if ((Target > 0) && !FlushedPages)
     {
         /* Flush dirty pages to disk */
-        CcRosFlushDirtyPages(Target, &PagesFreed, FALSE);
+        CcRosFlushDirtyPages(Target, &PagesFreed, FALSE, FALSE);
         FlushedPages = TRUE;
 
         /* We can only swap as many pages as we flushed */
@@ -389,9 +496,10 @@ retry:
     while (!IsListEmpty(&FreeList))
     {
         current_entry = RemoveHeadList(&FreeList);
-        current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
-                                    BcbSegmentListEntry);
-        CcRosInternalFreeCacheSegment(current);
+        current = CONTAINING_RECORD(current_entry,
+                                    ROS_VACB,
+                                    CacheMapVacbListEntry);
+        CcRosInternalFreeVacb(current);
     }
 
     DPRINT("Evicted %lu cache pages\n", (*NrFreed));
@@ -401,466 +509,404 @@ retry:
 
 NTSTATUS
 NTAPI
-CcRosReleaseCacheSegment (
-    PBCB Bcb,
-    PCACHE_SEGMENT CacheSeg,
+CcRosReleaseVacb (
+    PROS_SHARED_CACHE_MAP SharedCacheMap,
+    PROS_VACB Vacb,
     BOOLEAN Valid,
     BOOLEAN Dirty,
     BOOLEAN Mapped)
 {
-    BOOLEAN WasDirty;
-    KIRQL oldIrql;
-
-    ASSERT(Bcb);
-
-    DPRINT("CcReleaseCacheSegment(Bcb 0x%p, CacheSeg 0x%p, Valid %u)\n",
-           Bcb, CacheSeg, Valid);
-
-    KeAcquireGuardedMutex(&ViewLock);
-    KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
+    ASSERT(SharedCacheMap);
 
-    CacheSeg->Valid = Valid;
+    DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
+           SharedCacheMap, Vacb, Valid);
 
-    WasDirty = CacheSeg->Dirty;
-    CacheSeg->Dirty = CacheSeg->Dirty || Dirty;
+    Vacb->Valid = Valid;
 
-    if (!WasDirty && CacheSeg->Dirty)
+    if (Dirty && !Vacb->Dirty)
     {
-        InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
-        DirtyPageCount += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+        CcRosMarkDirtyVacb(Vacb);
     }
 
     if (Mapped)
     {
-        CacheSeg->MappedCount++;
+        Vacb->MappedCount++;
     }
-    CcRosCacheSegmentDecRefCount(CacheSeg);
-    if (Mapped && (CacheSeg->MappedCount == 1))
+    CcRosVacbDecRefCount(Vacb);
+    if (Mapped && (Vacb->MappedCount == 1))
     {
-        CcRosCacheSegmentIncRefCount(CacheSeg);
-    }
-    if (!WasDirty && CacheSeg->Dirty)
-    {
-        CcRosCacheSegmentIncRefCount(CacheSeg);
+        CcRosVacbIncRefCount(Vacb);
     }
 
-    KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
-    KeReleaseGuardedMutex(&ViewLock);
-    KeReleaseMutex(&CacheSeg->Mutex, FALSE);
+    CcRosReleaseVacbLock(Vacb);
 
     return STATUS_SUCCESS;
 }
 
-/* Returns with Cache Segment Lock Held! */
-PCACHE_SEGMENT
+/* Returns with VACB Lock Held! */
+PROS_VACB
 NTAPI
-CcRosLookupCacheSegment (
-    PBCB Bcb,
-    ULONG FileOffset)
+CcRosLookupVacb (
+    PROS_SHARED_CACHE_MAP SharedCacheMap,
+    LONGLONG FileOffset)
 {
     PLIST_ENTRY current_entry;
-    PCACHE_SEGMENT current;
+    PROS_VACB current;
     KIRQL oldIrql;
 
-    ASSERT(Bcb);
+    ASSERT(SharedCacheMap);
 
-    DPRINT("CcRosLookupCacheSegment(Bcb -x%p, FileOffset %lu)\n", Bcb, FileOffset);
+    DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
+           SharedCacheMap, FileOffset);
 
     KeAcquireGuardedMutex(&ViewLock);
-    KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
+    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
 
-    current_entry = Bcb->BcbSegmentListHead.Flink;
-    while (current_entry != &Bcb->BcbSegmentListHead)
+    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
+    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
     {
-        current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
-                                    BcbSegmentListEntry);
-        if (IsPointInSegment(current->FileOffset, VACB_MAPPING_GRANULARITY,
-                             FileOffset))
+        current = CONTAINING_RECORD(current_entry,
+                                    ROS_VACB,
+                                    CacheMapVacbListEntry);
+        if (IsPointInRange(current->FileOffset.QuadPart,
+                           VACB_MAPPING_GRANULARITY,
+                           FileOffset))
         {
-            CcRosCacheSegmentIncRefCount(current);
-            KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
+            CcRosVacbIncRefCount(current);
+            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
             KeReleaseGuardedMutex(&ViewLock);
-            KeWaitForSingleObject(&current->Mutex,
-                                  Executive,
-                                  KernelMode,
-                                  FALSE,
-                                  NULL);
+            CcRosAcquireVacbLock(current, NULL);
             return current;
         }
-        if (current->FileOffset > FileOffset)
+        if (current->FileOffset.QuadPart > FileOffset)
             break;
         current_entry = current_entry->Flink;
     }
 
-    KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
+    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
     KeReleaseGuardedMutex(&ViewLock);
 
     return NULL;
 }
 
-NTSTATUS
+VOID
 NTAPI
-CcRosMarkDirtyCacheSegment (
-    PBCB Bcb,
-    ULONG FileOffset)
+CcRosMarkDirtyVacb (
+    PROS_VACB Vacb)
 {
-    PCACHE_SEGMENT CacheSeg;
     KIRQL oldIrql;
+    PROS_SHARED_CACHE_MAP SharedCacheMap;
 
-    ASSERT(Bcb);
+    SharedCacheMap = Vacb->SharedCacheMap;
 
-    DPRINT("CcRosMarkDirtyCacheSegment(Bcb 0x%p, FileOffset %lu)\n", Bcb, FileOffset);
+    KeAcquireGuardedMutex(&ViewLock);
+    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
 
-    CacheSeg = CcRosLookupCacheSegment(Bcb, FileOffset);
-    if (CacheSeg == NULL)
-    {
-        KeBugCheck(CACHE_MANAGER);
-    }
+    ASSERT(!Vacb->Dirty);
 
-    KeAcquireGuardedMutex(&ViewLock);
-    KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
+    InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
+    CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+    Vacb->SharedCacheMap->DirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+    CcRosVacbIncRefCount(Vacb);
+
+    /* Move to the tail of the LRU list */
+    RemoveEntryList(&Vacb->VacbLruListEntry);
+    InsertTailList(&VacbLruListHead, &Vacb->VacbLruListEntry);
 
-    if (!CacheSeg->Dirty)
+    Vacb->Dirty = TRUE;
+
+    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
+    KeReleaseGuardedMutex(&ViewLock);
+}
+
+NTSTATUS
+NTAPI
+CcRosMarkDirtyFile (
+    PROS_SHARED_CACHE_MAP SharedCacheMap,
+    LONGLONG FileOffset)
+{
+    PROS_VACB Vacb;
+
+    ASSERT(SharedCacheMap);
+
+    DPRINT("CcRosMarkDirtyVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
+           SharedCacheMap, FileOffset);
+
+    Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
+    if (Vacb == NULL)
     {
-        InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
-        DirtyPageCount += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+        KeBugCheck(CACHE_MANAGER);
     }
-    else
+
+    if (!Vacb->Dirty)
     {
-        CcRosCacheSegmentDecRefCount(CacheSeg);
+        CcRosMarkDirtyVacb(Vacb);
     }
 
-    /* Move to the tail of the LRU list */
-    RemoveEntryList(&CacheSeg->CacheSegmentLRUListEntry);
-    InsertTailList(&CacheSegmentLRUListHead, &CacheSeg->CacheSegmentLRUListEntry);
-
-    CacheSeg->Dirty = TRUE;
-
-    KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
-    KeReleaseGuardedMutex(&ViewLock);
-    KeReleaseMutex(&CacheSeg->Mutex, FALSE);
+    CcRosReleaseVacbLock(Vacb);
 
     return STATUS_SUCCESS;
 }
 
 NTSTATUS
 NTAPI
-CcRosUnmapCacheSegment (
-    PBCB Bcb,
-    ULONG FileOffset,
+CcRosUnmapVacb (
+    PROS_SHARED_CACHE_MAP SharedCacheMap,
+    LONGLONG FileOffset,
     BOOLEAN NowDirty)
 {
-    PCACHE_SEGMENT CacheSeg;
-    BOOLEAN WasDirty;
-    KIRQL oldIrql;
+    PROS_VACB Vacb;
 
-    ASSERT(Bcb);
+    ASSERT(SharedCacheMap);
 
-    DPRINT("CcRosUnmapCacheSegment(Bcb 0x%p, FileOffset %lu, NowDirty %u)\n",
-           Bcb, FileOffset, NowDirty);
+    DPRINT("CcRosUnmapVacb(SharedCacheMap 0x%p, FileOffset %I64u, NowDirty %u)\n",
+           SharedCacheMap, FileOffset, NowDirty);
 
-    CacheSeg = CcRosLookupCacheSegment(Bcb, FileOffset);
-    if (CacheSeg == NULL)
+    Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
+    if (Vacb == NULL)
     {
         return STATUS_UNSUCCESSFUL;
     }
 
-    KeAcquireGuardedMutex(&ViewLock);
-    KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
-
-    WasDirty = CacheSeg->Dirty;
-    CacheSeg->Dirty = CacheSeg->Dirty || NowDirty;
+    if (NowDirty && !Vacb->Dirty)
+    {
+        CcRosMarkDirtyVacb(Vacb);
+    }
 
-    CacheSeg->MappedCount--;
+    Vacb->MappedCount--;
 
-    if (!WasDirty && NowDirty)
+    CcRosVacbDecRefCount(Vacb);
+    if (Vacb->MappedCount == 0)
     {
-        InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
-        DirtyPageCount += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+        CcRosVacbDecRefCount(Vacb);
     }
 
-    CcRosCacheSegmentDecRefCount(CacheSeg);
-    if (!WasDirty && NowDirty)
+    CcRosReleaseVacbLock(Vacb);
+
+    return STATUS_SUCCESS;
+}
+
+static
+NTSTATUS
+CcRosMapVacb(
+    PROS_VACB Vacb)
+{
+    ULONG i;
+    NTSTATUS Status;
+    ULONG_PTR NumberOfPages;
+
+    /* Create a memory area. */
+    MmLockAddressSpace(MmGetKernelAddressSpace());
+    Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
+                                0, // nothing checks for VACB mareas, so set to 0
+                                &Vacb->BaseAddress,
+                                VACB_MAPPING_GRANULARITY,
+                                PAGE_READWRITE,
+                                (PMEMORY_AREA*)&Vacb->MemoryArea,
+                                0,
+                                PAGE_SIZE);
+    MmUnlockAddressSpace(MmGetKernelAddressSpace());
+    if (!NT_SUCCESS(Status))
     {
-        CcRosCacheSegmentIncRefCount(CacheSeg);
+        DPRINT1("MmCreateMemoryArea failed with %lx for VACB %p\n", Status, Vacb);
+        return Status;
     }
-    if (CacheSeg->MappedCount == 0)
+
+    ASSERT(((ULONG_PTR)Vacb->BaseAddress % PAGE_SIZE) == 0);
+    ASSERT((ULONG_PTR)Vacb->BaseAddress > (ULONG_PTR)MmSystemRangeStart);
+
+    /* Create a virtual mapping for this memory area */
+    NumberOfPages = BYTES_TO_PAGES(VACB_MAPPING_GRANULARITY);
+    for (i = 0; i < NumberOfPages; i++)
     {
-        CcRosCacheSegmentDecRefCount(CacheSeg);
-    }
+        PFN_NUMBER PageFrameNumber;
 
-    KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
-    KeReleaseGuardedMutex(&ViewLock);
-    KeReleaseMutex(&CacheSeg->Mutex, FALSE);
+        MI_SET_USAGE(MI_USAGE_CACHE);
+        Status = MmRequestPageMemoryConsumer(MC_CACHE, TRUE, &PageFrameNumber);
+        if (PageFrameNumber == 0)
+        {
+            DPRINT1("Unable to allocate page\n");
+            KeBugCheck(MEMORY_MANAGEMENT);
+        }
+
+        Status = MmCreateVirtualMapping(NULL,
+                                        (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE)),
+                                        PAGE_READWRITE,
+                                        &PageFrameNumber,
+                                        1);
+        if (!NT_SUCCESS(Status))
+        {
+            DPRINT1("Unable to create virtual mapping\n");
+            KeBugCheck(MEMORY_MANAGEMENT);
+        }
+    }
 
     return STATUS_SUCCESS;
 }
 
 static
 NTSTATUS
-CcRosCreateCacheSegment (
-    PBCB Bcb,
-    ULONG FileOffset,
-    PCACHE_SEGMENT* CacheSeg)
+CcRosCreateVacb (
+    PROS_SHARED_CACHE_MAP SharedCacheMap,
+    LONGLONG FileOffset,
+    PROS_VACB *Vacb)
 {
-    PCACHE_SEGMENT current;
-    PCACHE_SEGMENT previous;
+    PROS_VACB current;
+    PROS_VACB previous;
     PLIST_ENTRY current_entry;
     NTSTATUS Status;
     KIRQL oldIrql;
-#ifdef CACHE_BITMAP
-    ULONG StartingOffset;
-#endif
 
-    ASSERT(Bcb);
+    ASSERT(SharedCacheMap);
 
-    DPRINT("CcRosCreateCacheSegment()\n");
+    DPRINT("CcRosCreateVacb()\n");
 
-    if (FileOffset >= Bcb->FileSize.u.LowPart)
+    if (FileOffset >= SharedCacheMap->SectionSize.QuadPart)
     {
-        CacheSeg = NULL;
+        *Vacb = NULL;
         return STATUS_INVALID_PARAMETER;
     }
 
-    current = ExAllocateFromNPagedLookasideList(&CacheSegLookasideList);
+    current = ExAllocateFromNPagedLookasideList(&VacbLookasideList);
+    current->BaseAddress = NULL;
     current->Valid = FALSE;
     current->Dirty = FALSE;
     current->PageOut = FALSE;
-    current->FileOffset = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
-    current->Bcb = Bcb;
+    current->FileOffset.QuadPart = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
+    current->SharedCacheMap = SharedCacheMap;
 #if DBG
-    if ( Bcb->Trace )
+    if (SharedCacheMap->Trace)
     {
-        DPRINT1("CacheMap 0x%p: new Cache Segment: 0x%p\n", Bcb, current );
+        DPRINT1("CacheMap 0x%p: new VACB: 0x%p\n", SharedCacheMap, current);
     }
 #endif
     current->MappedCount = 0;
-    current->DirtySegmentListEntry.Flink = NULL;
-    current->DirtySegmentListEntry.Blink = NULL;
+    current->DirtyVacbListEntry.Flink = NULL;
+    current->DirtyVacbListEntry.Blink = NULL;
     current->ReferenceCount = 1;
+    current->PinCount = 0;
     KeInitializeMutex(&current->Mutex, 0);
-    KeWaitForSingleObject(&current->Mutex,
-                          Executive,
-                          KernelMode,
-                          FALSE,
-                          NULL);
+    CcRosAcquireVacbLock(current, NULL);
     KeAcquireGuardedMutex(&ViewLock);
 
-    *CacheSeg = current;
-    /* There is window between the call to CcRosLookupCacheSegment
-     * and CcRosCreateCacheSegment. We must check if a segment on
-     * the fileoffset exist. If there exist a segment, we release
-     * our new created segment and return the existing one.
+    *Vacb = current;
+    /* There is a window between the call to CcRosLookupVacb
+     * and CcRosCreateVacb. We must check if a VACB for the
+     * file offset exists. If there is a VACB, we release
+     * our newly created VACB and return the existing one.
      */
-    KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
-    current_entry = Bcb->BcbSegmentListHead.Flink;
+    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
+    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
     previous = NULL;
-    while (current_entry != &Bcb->BcbSegmentListHead)
+    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
     {
-        current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
-                                    BcbSegmentListEntry);
-        if (IsPointInSegment(current->FileOffset, VACB_MAPPING_GRANULARITY,
-                             FileOffset))
+        current = CONTAINING_RECORD(current_entry,
+                                    ROS_VACB,
+                                    CacheMapVacbListEntry);
+        if (IsPointInRange(current->FileOffset.QuadPart,
+                           VACB_MAPPING_GRANULARITY,
+                           FileOffset))
         {
-            CcRosCacheSegmentIncRefCount(current);
-            KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
+            CcRosVacbIncRefCount(current);
+            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
 #if DBG
-            if ( Bcb->Trace )
+            if (SharedCacheMap->Trace)
             {
-                DPRINT1("CacheMap 0x%p: deleting newly created Cache Segment 0x%p ( found existing one 0x%p )\n",
-                        Bcb,
-                        (*CacheSeg),
-                        current );
+                DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
+                        SharedCacheMap,
+                        (*Vacb),
+                        current);
             }
 #endif
-            KeReleaseMutex(&(*CacheSeg)->Mutex, FALSE);
+            CcRosReleaseVacbLock(*Vacb);
             KeReleaseGuardedMutex(&ViewLock);
-            ExFreeToNPagedLookasideList(&CacheSegLookasideList, *CacheSeg);
-            *CacheSeg = current;
-            KeWaitForSingleObject(&current->Mutex,
-                                  Executive,
-                                  KernelMode,
-                                  FALSE,
-                                  NULL);
+            ExFreeToNPagedLookasideList(&VacbLookasideList, *Vacb);
+            *Vacb = current;
+            CcRosAcquireVacbLock(current, NULL);
             return STATUS_SUCCESS;
         }
-        if (current->FileOffset < FileOffset)
+        if (current->FileOffset.QuadPart < FileOffset)
         {
             ASSERT(previous == NULL ||
-                   previous->FileOffset < current->FileOffset);
+                   previous->FileOffset.QuadPart < current->FileOffset.QuadPart);
             previous = current;
         }
-        if (current->FileOffset > FileOffset)
+        if (current->FileOffset.QuadPart > FileOffset)
             break;
         current_entry = current_entry->Flink;
     }
-    /* There was no existing segment. */
-    current = *CacheSeg;
+    /* There was no existing VACB. */
+    current = *Vacb;
     if (previous)
     {
-        InsertHeadList(&previous->BcbSegmentListEntry, &current->BcbSegmentListEntry);
+        InsertHeadList(&previous->CacheMapVacbListEntry, &current->CacheMapVacbListEntry);
     }
     else
     {
-        InsertHeadList(&Bcb->BcbSegmentListHead, &current->BcbSegmentListEntry);
+        InsertHeadList(&SharedCacheMap->CacheMapVacbListHead, &current->CacheMapVacbListEntry);
     }
-    KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
-    InsertTailList(&CacheSegmentListHead, &current->CacheSegmentListEntry);
-    InsertTailList(&CacheSegmentLRUListHead, &current->CacheSegmentLRUListEntry);
+    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
+    InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
     KeReleaseGuardedMutex(&ViewLock);
-#ifdef CACHE_BITMAP
-    KeAcquireSpinLock(&CiCacheSegMappingRegionLock, &oldIrql);
-
-    StartingOffset = RtlFindClearBitsAndSet(&CiCacheSegMappingRegionAllocMap,
-                                            VACB_MAPPING_GRANULARITY / PAGE_SIZE,
-                                            CiCacheSegMappingRegionHint);
-
-    if (StartingOffset == 0xffffffff)
-    {
-        DPRINT1("Out of CacheSeg mapping space\n");
-        KeBugCheck(CACHE_MANAGER);
-    }
-
-    current->BaseAddress = CiCacheSegMappingRegionBase + StartingOffset * PAGE_SIZE;
-
-    if (CiCacheSegMappingRegionHint == StartingOffset)
-    {
-        CiCacheSegMappingRegionHint += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
-    }
-
-    KeReleaseSpinLock(&CiCacheSegMappingRegionLock, oldIrql);
-#else
-    MmLockAddressSpace(MmGetKernelAddressSpace());
-    current->BaseAddress = NULL;
-    Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
-                                0, // nothing checks for cache_segment mareas, so set to 0
-                                &current->BaseAddress,
-                                VACB_MAPPING_GRANULARITY,
-                                PAGE_READWRITE,
-                                (PMEMORY_AREA*)&current->MemoryArea,
-                                FALSE,
-                                0,
-                                PAGE_SIZE);
-    MmUnlockAddressSpace(MmGetKernelAddressSpace());
-    if (!NT_SUCCESS(Status))
-    {
-        KeBugCheck(CACHE_MANAGER);
-    }
-#endif
 
-    /* Create a virtual mapping for this memory area */
     MI_SET_USAGE(MI_USAGE_CACHE);
 #if MI_TRACE_PFNS
-    PWCHAR pos = NULL;
-    ULONG len = 0;
-    if ((Bcb->FileObject) && (Bcb->FileObject->FileName.Buffer))
+    if ((SharedCacheMap->FileObject) && (SharedCacheMap->FileObject->FileName.Buffer))
     {
-        pos = wcsrchr(Bcb->FileObject->FileName.Buffer, '\\');
-        len = wcslen(pos) * sizeof(WCHAR);
-        if (pos) snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
-    }
-#endif
-
-    MmMapMemoryArea(current->BaseAddress, VACB_MAPPING_GRANULARITY,
-                    MC_CACHE, PAGE_READWRITE);
-
-    return STATUS_SUCCESS;
-}
-
-NTSTATUS
-NTAPI
-CcRosGetCacheSegmentChain (
-    PBCB Bcb,
-    ULONG FileOffset,
-    ULONG Length,
-    PCACHE_SEGMENT* CacheSeg)
-{
-    PCACHE_SEGMENT current;
-    ULONG i;
-    PCACHE_SEGMENT* CacheSegList;
-    PCACHE_SEGMENT Previous = NULL;
-
-    ASSERT(Bcb);
-
-    DPRINT("CcRosGetCacheSegmentChain()\n");
-
-    Length = ROUND_UP(Length, VACB_MAPPING_GRANULARITY);
-
-    CacheSegList = _alloca(sizeof(PCACHE_SEGMENT) *
-                           (Length / VACB_MAPPING_GRANULARITY));
-
-    /*
-     * Look for a cache segment already mapping the same data.
-     */
-    for (i = 0; i < (Length / VACB_MAPPING_GRANULARITY); i++)
-    {
-        ULONG CurrentOffset = FileOffset + (i * VACB_MAPPING_GRANULARITY);
-        current = CcRosLookupCacheSegment(Bcb, CurrentOffset);
-        if (current != NULL)
+        PWCHAR pos;
+        ULONG len = 0;
+        pos = wcsrchr(SharedCacheMap->FileObject->FileName.Buffer, '\\');
+        if (pos)
         {
-            KeAcquireGuardedMutex(&ViewLock);
-
-            /* Move to tail of LRU list */
-            RemoveEntryList(&current->CacheSegmentLRUListEntry);
-            InsertTailList(&CacheSegmentLRUListHead, &current->CacheSegmentLRUListEntry);
-
-            KeReleaseGuardedMutex(&ViewLock);
-
-            CacheSegList[i] = current;
+            len = wcslen(pos) * sizeof(WCHAR);
+            snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
         }
         else
         {
-            CcRosCreateCacheSegment(Bcb, CurrentOffset, &current);
-            CacheSegList[i] = current;
+            snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%wZ", &SharedCacheMap->FileObject->FileName);
         }
     }
+#endif
 
-    for (i = 0; i < Length / VACB_MAPPING_GRANULARITY; i++)
+    Status = CcRosMapVacb(current);
+    if (!NT_SUCCESS(Status))
     {
-        if (i == 0)
-        {
-            *CacheSeg = CacheSegList[i];
-            Previous = CacheSegList[i];
-        }
-        else
-        {
-            Previous->NextInChain = CacheSegList[i];
-            Previous = CacheSegList[i];
-        }
+        RemoveEntryList(&current->CacheMapVacbListEntry);
+        RemoveEntryList(&current->VacbLruListEntry);
+        CcRosReleaseVacbLock(current);
+        ExFreeToNPagedLookasideList(&VacbLookasideList, current);
     }
-    ASSERT(Previous);
-    Previous->NextInChain = NULL;
 
-    return STATUS_SUCCESS;
+    return Status;
 }
 
 NTSTATUS
 NTAPI
-CcRosGetCacheSegment (
-    PBCB Bcb,
-    ULONG FileOffset,
-    PULONG BaseOffset,
+CcRosGetVacb (
+    PROS_SHARED_CACHE_MAP SharedCacheMap,
+    LONGLONG FileOffset,
+    PLONGLONG BaseOffset,
     PVOID* BaseAddress,
     PBOOLEAN UptoDate,
-    PCACHE_SEGMENT* CacheSeg)
+    PROS_VACB *Vacb)
 {
-    PCACHE_SEGMENT current;
+    PROS_VACB current;
     NTSTATUS Status;
 
-    ASSERT(Bcb);
+    ASSERT(SharedCacheMap);
 
-    DPRINT("CcRosGetCacheSegment()\n");
+    DPRINT("CcRosGetVacb()\n");
 
     /*
-     * Look for a cache segment already mapping the same data.
+     * Look for a VACB already mapping the same data.
      */
-    current = CcRosLookupCacheSegment(Bcb, FileOffset);
+    current = CcRosLookupVacb(SharedCacheMap, FileOffset);
     if (current == NULL)
     {
         /*
-         * Otherwise create a new segment.
+         * Otherwise create a new VACB.
          */
-        Status = CcRosCreateCacheSegment(Bcb, FileOffset, &current);
+        Status = CcRosCreateVacb(SharedCacheMap, FileOffset, &current);
         if (!NT_SUCCESS(Status))
         {
             return Status;
@@ -870,54 +916,53 @@ CcRosGetCacheSegment (
     KeAcquireGuardedMutex(&ViewLock);
 
     /* Move to the tail of the LRU list */
-    RemoveEntryList(&current->CacheSegmentLRUListEntry);
-    InsertTailList(&CacheSegmentLRUListHead, &current->CacheSegmentLRUListEntry);
+    RemoveEntryList(&current->VacbLruListEntry);
+    InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
 
     KeReleaseGuardedMutex(&ViewLock);
 
     /*
-     * Return information about the segment to the caller.
+     * Return information about the VACB to the caller.
      */
     *UptoDate = current->Valid;
     *BaseAddress = current->BaseAddress;
     DPRINT("*BaseAddress %p\n", *BaseAddress);
-    *CacheSeg = current;
-    *BaseOffset = current->FileOffset;
+    *Vacb = current;
+    *BaseOffset = current->FileOffset.QuadPart;
     return STATUS_SUCCESS;
 }
 
 NTSTATUS
 NTAPI
-CcRosRequestCacheSegment (
-    PBCB Bcb,
-    ULONG FileOffset,
+CcRosRequestVacb (
+    PROS_SHARED_CACHE_MAP SharedCacheMap,
+    LONGLONG FileOffset,
     PVOID* BaseAddress,
     PBOOLEAN UptoDate,
-    PCACHE_SEGMENT* CacheSeg)
+    PROS_VACB *Vacb)
 /*
- * FUNCTION: Request a page mapping for a BCB
+ * FUNCTION: Request a page mapping for a shared cache map
  */
 {
-    ULONG BaseOffset;
+    LONGLONG BaseOffset;
 
-    ASSERT(Bcb);
+    ASSERT(SharedCacheMap);
 
     if (FileOffset % VACB_MAPPING_GRANULARITY != 0)
     {
-        DPRINT1("Bad fileoffset %x should be multiple of %x",
+        DPRINT1("Bad fileoffset %I64x should be multiple of %x",
                 FileOffset, VACB_MAPPING_GRANULARITY);
         KeBugCheck(CACHE_MANAGER);
     }
 
-    return CcRosGetCacheSegment(Bcb,
-                                FileOffset,
-                                &BaseOffset,
-                                BaseAddress,
-                                UptoDate,
-                                CacheSeg);
+    return CcRosGetVacb(SharedCacheMap,
+                        FileOffset,
+                        &BaseOffset,
+                        BaseAddress,
+                        UptoDate,
+                        Vacb);
 }
-#ifdef CACHE_BITMAP
-#else
+
 static
 VOID
 CcFreeCachePage (
@@ -935,60 +980,30 @@ CcFreeCachePage (
         MmReleasePageMemoryConsumer(MC_CACHE, Page);
     }
 }
-#endif
+
 NTSTATUS
-CcRosInternalFreeCacheSegment (
-    PCACHE_SEGMENT CacheSeg)
+CcRosInternalFreeVacb (
+    PROS_VACB Vacb)
 /*
- * FUNCTION: Releases a cache segment associated with a BCB
+ * FUNCTION: Releases a VACB associated with a shared cache map
  */
 {
-#ifdef CACHE_BITMAP
-    ULONG i;
-    ULONG RegionSize;
-    ULONG Base;
-    PFN_NUMBER Page;
-    KIRQL oldIrql;
-#endif
-    DPRINT("Freeing cache segment 0x%p\n", CacheSeg);
+    DPRINT("Freeing VACB 0x%p\n", Vacb);
 #if DBG
-    if ( CacheSeg->Bcb->Trace )
+    if (Vacb->SharedCacheMap->Trace)
     {
-        DPRINT1("CacheMap 0x%p: deleting Cache Segment: 0x%p\n", CacheSeg->Bcb, CacheSeg );
+        DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb->SharedCacheMap, Vacb);
     }
 #endif
-#ifdef CACHE_BITMAP
-    RegionSize = VACB_MAPPING_GRANULARITY / PAGE_SIZE;
-
-    /* Unmap all the pages. */
-    for (i = 0; i < RegionSize; i++)
-    {
-        MmDeleteVirtualMapping(NULL,
-                               CacheSeg->BaseAddress + (i * PAGE_SIZE),
-                               FALSE,
-                               NULL,
-                               &Page);
-        MmReleasePageMemoryConsumer(MC_CACHE, Page);
-    }
-
-    KeAcquireSpinLock(&CiCacheSegMappingRegionLock, &oldIrql);
-    /* Deallocate all the pages used. */
-    Base = (ULONG)(CacheSeg->BaseAddress - CiCacheSegMappingRegionBase) / PAGE_SIZE;
 
-    RtlClearBits(&CiCacheSegMappingRegionAllocMap, Base, RegionSize);
-
-    CiCacheSegMappingRegionHint = min(CiCacheSegMappingRegionHint, Base);
-
-    KeReleaseSpinLock(&CiCacheSegMappingRegionLock, oldIrql);
-#else
     MmLockAddressSpace(MmGetKernelAddressSpace());
     MmFreeMemoryArea(MmGetKernelAddressSpace(),
-                     CacheSeg->MemoryArea,
+                     Vacb->MemoryArea,
                      CcFreeCachePage,
                      NULL);
     MmUnlockAddressSpace(MmGetKernelAddressSpace());
-#endif
-    ExFreeToNPagedLookasideList(&CacheSegLookasideList, CacheSeg);
+
+    ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
     return STATUS_SUCCESS;
 }
 
@@ -1003,27 +1018,32 @@ CcFlushCache (
     IN ULONG Length,
     OUT PIO_STATUS_BLOCK IoStatus)
 {
-    PBCB Bcb;
+    PROS_SHARED_CACHE_MAP SharedCacheMap;
     LARGE_INTEGER Offset;
-    PCACHE_SEGMENT current;
+    LONGLONG RemainingLength;
+    PROS_VACB current;
     NTSTATUS Status;
     KIRQL oldIrql;
 
+    CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
+        SectionObjectPointers, FileOffset, Length);
+
     DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
            SectionObjectPointers, FileOffset, Length, IoStatus);
 
     if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
     {
-        Bcb = (PBCB)SectionObjectPointers->SharedCacheMap;
-        ASSERT(Bcb);
+        SharedCacheMap = SectionObjectPointers->SharedCacheMap;
+        ASSERT(SharedCacheMap);
         if (FileOffset)
         {
             Offset = *FileOffset;
+            RemainingLength = Length;
         }
         else
         {
-            Offset.QuadPart = (LONGLONG)0;
-            Length = Bcb->FileSize.u.LowPart;
+            Offset.QuadPart = 0;
+            RemainingLength = SharedCacheMap->FileSize.QuadPart;
         }
 
         if (IoStatus)
@@ -1032,37 +1052,31 @@ CcFlushCache (
             IoStatus->Information = 0;
         }
 
-        while (Length > 0)
+        while (RemainingLength > 0)
         {
-            current = CcRosLookupCacheSegment (Bcb, Offset.u.LowPart);
+            current = CcRosLookupVacb(SharedCacheMap, Offset.QuadPart);
             if (current != NULL)
             {
                 if (current->Dirty)
                 {
-                    Status = CcRosFlushCacheSegment(current);
+                    Status = CcRosFlushVacb(current);
                     if (!NT_SUCCESS(Status) && IoStatus != NULL)
                     {
                         IoStatus->Status = Status;
                     }
                 }
-                KeReleaseMutex(&current->Mutex, FALSE);
+
+                CcRosReleaseVacbLock(current);
 
                 KeAcquireGuardedMutex(&ViewLock);
-                KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
-                CcRosCacheSegmentDecRefCount(current);
-                KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
+                KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
+                CcRosVacbDecRefCount(current);
+                KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
                 KeReleaseGuardedMutex(&ViewLock);
             }
 
             Offset.QuadPart += VACB_MAPPING_GRANULARITY;
-            if (Length > VACB_MAPPING_GRANULARITY)
-            {
-                Length -= VACB_MAPPING_GRANULARITY;
-            }
-            else
-            {
-                Length = 0;
-            }
+            RemainingLength -= min(RemainingLength, VACB_MAPPING_GRANULARITY);
         }
     }
     else
@@ -1078,69 +1092,76 @@ NTSTATUS
 NTAPI
 CcRosDeleteFileCache (
     PFILE_OBJECT FileObject,
-    PBCB Bcb)
+    PROS_SHARED_CACHE_MAP SharedCacheMap)
 /*
- * FUNCTION: Releases the BCB associated with a file object
+ * FUNCTION: Releases the shared cache map associated with a file object
  */
 {
     PLIST_ENTRY current_entry;
-    PCACHE_SEGMENT current;
+    PROS_VACB current;
     LIST_ENTRY FreeList;
     KIRQL oldIrql;
 
-    ASSERT(Bcb);
+    ASSERT(SharedCacheMap);
 
-    Bcb->RefCount++;
+    SharedCacheMap->OpenCount++;
     KeReleaseGuardedMutex(&ViewLock);
 
     CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);
 
     KeAcquireGuardedMutex(&ViewLock);
-    Bcb->RefCount--;
-    if (Bcb->RefCount == 0)
+    SharedCacheMap->OpenCount--;
+    if (SharedCacheMap->OpenCount == 0)
     {
-        if (Bcb->BcbRemoveListEntry.Flink != NULL)
-        {
-            RemoveEntryList(&Bcb->BcbRemoveListEntry);
-            Bcb->BcbRemoveListEntry.Flink = NULL;
-        }
+        KIRQL OldIrql;
 
         FileObject->SectionObjectPointer->SharedCacheMap = NULL;
 
         /*
-         * Release all cache segments.
+         * Release all VACBs
          */
         InitializeListHead(&FreeList);
-        KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
-        while (!IsListEmpty(&Bcb->BcbSegmentListHead))
+        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
+        while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
         {
-            current_entry = RemoveTailList(&Bcb->BcbSegmentListHead);
-            current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
-            RemoveEntryList(&current->CacheSegmentListEntry);
-            RemoveEntryList(&current->CacheSegmentLRUListEntry);
+            current_entry = RemoveTailList(&SharedCacheMap->CacheMapVacbListHead);
+            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
+
+            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
+            CcRosAcquireVacbLock(current, NULL);
+            RemoveEntryList(&current->VacbLruListEntry);
             if (current->Dirty)
             {
-                RemoveEntryList(&current->DirtySegmentListEntry);
-                DirtyPageCount -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
-                DPRINT1("Freeing dirty segment\n");
+                RemoveEntryList(&current->DirtyVacbListEntry);
+                CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+                current->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+                DPRINT1("Freeing dirty VACB\n");
             }
-            InsertHeadList(&FreeList, &current->BcbSegmentListEntry);
+            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
+            CcRosReleaseVacbLock(current);
+
+            KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
         }
 #if DBG
-        Bcb->Trace = FALSE;
+        SharedCacheMap->Trace = FALSE;
 #endif
-        KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
+        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
 
         KeReleaseGuardedMutex(&ViewLock);
-        ObDereferenceObject (Bcb->FileObject);
+        ObDereferenceObject(SharedCacheMap->FileObject);
 
         while (!IsListEmpty(&FreeList))
         {
             current_entry = RemoveTailList(&FreeList);
-            current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
-            CcRosInternalFreeCacheSegment(current);
+            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
+            CcRosInternalFreeVacb(current);
         }
-        ExFreeToNPagedLookasideList(&BcbLookasideList, Bcb);
+
+        KeAcquireSpinLock(&iSharedCacheMapLock, &OldIrql);
+        RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
+        KeReleaseSpinLock(&iSharedCacheMapLock, OldIrql);
+
+        ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
         KeAcquireGuardedMutex(&ViewLock);
     }
     return STATUS_SUCCESS;
@@ -1151,41 +1172,27 @@ NTAPI
 CcRosReferenceCache (
     PFILE_OBJECT FileObject)
 {
-    PBCB Bcb;
+    PROS_SHARED_CACHE_MAP SharedCacheMap;
     KeAcquireGuardedMutex(&ViewLock);
-    Bcb = (PBCB)FileObject->SectionObjectPointer->SharedCacheMap;
-    ASSERT(Bcb);
-    if (Bcb->RefCount == 0)
-    {
-        ASSERT(Bcb->BcbRemoveListEntry.Flink != NULL);
-        RemoveEntryList(&Bcb->BcbRemoveListEntry);
-        Bcb->BcbRemoveListEntry.Flink = NULL;
-
-    }
-    else
-    {
-        ASSERT(Bcb->BcbRemoveListEntry.Flink == NULL);
-    }
-    Bcb->RefCount++;
+    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
+    ASSERT(SharedCacheMap);
+    ASSERT(SharedCacheMap->OpenCount != 0);
+    SharedCacheMap->OpenCount++;
     KeReleaseGuardedMutex(&ViewLock);
 }
 
 VOID
 NTAPI
-CcRosSetRemoveOnClose (
+CcRosRemoveIfClosed (
     PSECTION_OBJECT_POINTERS SectionObjectPointer)
 {
-    PBCB Bcb;
-    DPRINT("CcRosSetRemoveOnClose()\n");
+    PROS_SHARED_CACHE_MAP SharedCacheMap;
+    DPRINT("CcRosRemoveIfClosed()\n");
     KeAcquireGuardedMutex(&ViewLock);
-    Bcb = (PBCB)SectionObjectPointer->SharedCacheMap;
-    if (Bcb)
+    SharedCacheMap = SectionObjectPointer->SharedCacheMap;
+    if (SharedCacheMap && SharedCacheMap->OpenCount == 0)
     {
-        Bcb->RemoveOnClose = TRUE;
-        if (Bcb->RefCount == 0)
-        {
-            CcRosDeleteFileCache(Bcb->FileObject, Bcb);
-        }
+        CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap);
     }
     KeReleaseGuardedMutex(&ViewLock);
 }
@@ -1196,17 +1203,17 @@ NTAPI
 CcRosDereferenceCache (
     PFILE_OBJECT FileObject)
 {
-    PBCB Bcb;
+    PROS_SHARED_CACHE_MAP SharedCacheMap;
     KeAcquireGuardedMutex(&ViewLock);
-    Bcb = (PBCB)FileObject->SectionObjectPointer->SharedCacheMap;
-    ASSERT(Bcb);
-    if (Bcb->RefCount > 0)
+    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
+    ASSERT(SharedCacheMap);
+    if (SharedCacheMap->OpenCount > 0)
     {
-        Bcb->RefCount--;
-        if (Bcb->RefCount == 0)
+        SharedCacheMap->OpenCount--;
+        if (SharedCacheMap->OpenCount == 0)
         {
-            MmFreeSectionSegments(Bcb->FileObject);
-            CcRosDeleteFileCache(FileObject, Bcb);
+            MmFreeSectionSegments(SharedCacheMap->FileObject);
+            CcRosDeleteFileCache(FileObject, SharedCacheMap);
         }
     }
     KeReleaseGuardedMutex(&ViewLock);
@@ -1221,23 +1228,23 @@ CcRosReleaseFileCache (
  * has been closed.
  */
 {
-    PBCB Bcb;
+    PROS_SHARED_CACHE_MAP SharedCacheMap;
 
     KeAcquireGuardedMutex(&ViewLock);
 
     if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
     {
-        Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
+        SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
         if (FileObject->PrivateCacheMap != NULL)
         {
             FileObject->PrivateCacheMap = NULL;
-            if (Bcb->RefCount > 0)
+            if (SharedCacheMap->OpenCount > 0)
             {
-                Bcb->RefCount--;
-                if (Bcb->RefCount == 0)
+                SharedCacheMap->OpenCount--;
+                if (SharedCacheMap->OpenCount == 0)
                 {
-                    MmFreeSectionSegments(Bcb->FileObject);
-                    CcRosDeleteFileCache(FileObject, Bcb);
+                    MmFreeSectionSegments(SharedCacheMap->FileObject);
+                    CcRosDeleteFileCache(FileObject, SharedCacheMap);
                 }
             }
         }
@@ -1251,14 +1258,14 @@ NTAPI
 CcTryToInitializeFileCache (
     PFILE_OBJECT FileObject)
 {
-    PBCB Bcb;
+    PROS_SHARED_CACHE_MAP SharedCacheMap;
     NTSTATUS Status;
 
     KeAcquireGuardedMutex(&ViewLock);
 
     ASSERT(FileObject->SectionObjectPointer);
-    Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
-    if (Bcb == NULL)
+    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
+    if (SharedCacheMap == NULL)
     {
         Status = STATUS_UNSUCCESSFUL;
     }
@@ -1266,13 +1273,8 @@ CcTryToInitializeFileCache (
     {
         if (FileObject->PrivateCacheMap == NULL)
         {
-            FileObject->PrivateCacheMap = Bcb;
-            Bcb->RefCount++;
-        }
-        if (Bcb->BcbRemoveListEntry.Flink != NULL)
-        {
-            RemoveEntryList(&Bcb->BcbRemoveListEntry);
-            Bcb->BcbRemoveListEntry.Flink = NULL;
+            FileObject->PrivateCacheMap = SharedCacheMap;
+            SharedCacheMap->OpenCount++;
         }
         Status = STATUS_SUCCESS;
     }
@@ -1286,55 +1288,56 @@ NTSTATUS
 NTAPI
 CcRosInitializeFileCache (
     PFILE_OBJECT FileObject,
+    PCC_FILE_SIZES FileSizes,
+    BOOLEAN PinAccess,
     PCACHE_MANAGER_CALLBACKS CallBacks,
     PVOID LazyWriterContext)
 /*
- * FUNCTION: Initializes a BCB for a file object
+ * FUNCTION: Initializes a shared cache map for a file object
  */
 {
-    PBCB Bcb;
+    PROS_SHARED_CACHE_MAP SharedCacheMap;
 
-    Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
-    DPRINT("CcRosInitializeFileCache(FileObject 0x%p, Bcb 0x%p)\n",
-           FileObject, Bcb);
+    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
+    DPRINT("CcRosInitializeFileCache(FileObject 0x%p, SharedCacheMap 0x%p)\n",
+           FileObject, SharedCacheMap);
 
     KeAcquireGuardedMutex(&ViewLock);
-    if (Bcb == NULL)
+    if (SharedCacheMap == NULL)
     {
-        Bcb = ExAllocateFromNPagedLookasideList(&BcbLookasideList);
-        if (Bcb == NULL)
+        KIRQL OldIrql;
+
+        SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
+        if (SharedCacheMap == NULL)
         {
             KeReleaseGuardedMutex(&ViewLock);
-            return STATUS_UNSUCCESSFUL;
+            return STATUS_INSUFFICIENT_RESOURCES;
         }
-        RtlZeroMemory(Bcb, sizeof(*Bcb));
+        RtlZeroMemory(SharedCacheMap, sizeof(*SharedCacheMap));
         ObReferenceObjectByPointer(FileObject,
                                    FILE_ALL_ACCESS,
                                    NULL,
                                    KernelMode);
-        Bcb->FileObject = FileObject;
-        Bcb->Callbacks = CallBacks;
-        Bcb->LazyWriteContext = LazyWriterContext;
-        if (FileObject->FsContext)
-        {
-            Bcb->AllocationSize =
-                ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->AllocationSize;
-            Bcb->FileSize =
-                ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->FileSize;
-        }
-        KeInitializeSpinLock(&Bcb->BcbLock);
-        InitializeListHead(&Bcb->BcbSegmentListHead);
-        FileObject->SectionObjectPointer->SharedCacheMap = Bcb;
+        SharedCacheMap->FileObject = FileObject;
+        SharedCacheMap->Callbacks = CallBacks;
+        SharedCacheMap->LazyWriteContext = LazyWriterContext;
+        SharedCacheMap->SectionSize = FileSizes->AllocationSize;
+        SharedCacheMap->FileSize = FileSizes->FileSize;
+        SharedCacheMap->PinAccess = PinAccess;
+        SharedCacheMap->DirtyPageThreshold = 0;
+        SharedCacheMap->DirtyPages = 0;
+        KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
+        InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
+        FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;
+
+        KeAcquireSpinLock(&iSharedCacheMapLock, &OldIrql);
+        InsertTailList(&CcCleanSharedCacheMapList, &SharedCacheMap->SharedCacheMapLinks);
+        KeReleaseSpinLock(&iSharedCacheMapLock, OldIrql);
     }
     if (FileObject->PrivateCacheMap == NULL)
     {
-        FileObject->PrivateCacheMap = Bcb;
-        Bcb->RefCount++;
-    }
-    if (Bcb->BcbRemoveListEntry.Flink != NULL)
-    {
-        RemoveEntryList(&Bcb->BcbRemoveListEntry);
-        Bcb->BcbRemoveListEntry.Flink = NULL;
+        FileObject->PrivateCacheMap = SharedCacheMap;
+        SharedCacheMap->OpenCount++;
     }
     KeReleaseGuardedMutex(&ViewLock);
 
@@ -1349,95 +1352,180 @@ NTAPI
 CcGetFileObjectFromSectionPtrs (
     IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
 {
-    PBCB Bcb;
+    PROS_SHARED_CACHE_MAP SharedCacheMap;
+
+    CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p\n", SectionObjectPointers);
+
     if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
     {
-        Bcb = (PBCB)SectionObjectPointers->SharedCacheMap;
-        ASSERT(Bcb);
-        return Bcb->FileObject;
+        SharedCacheMap = SectionObjectPointers->SharedCacheMap;
+        ASSERT(SharedCacheMap);
+        return SharedCacheMap->FileObject;
     }
     return NULL;
 }
 
 VOID
+NTAPI
+CcShutdownLazyWriter (
+    VOID)
+{
+    /* Simply set the event, lazy writer will stop when it's done */
+    KeSetEvent(&iLazyWriterShutdown, IO_DISK_INCREMENT, FALSE);
+}
+
+BOOLEAN
 INIT_FUNCTION
 NTAPI
 CcInitView (
     VOID)
 {
-#ifdef CACHE_BITMAP
-    PMEMORY_AREA marea;
-    PVOID Buffer;
-#endif
+    HANDLE LazyWriter;
+    NTSTATUS Status;
+    KPRIORITY Priority;
+    OBJECT_ATTRIBUTES ObjectAttributes;
 
     DPRINT("CcInitView()\n");
-#ifdef CACHE_BITMAP
-    CiCacheSegMappingRegionHint = 0;
-    CiCacheSegMappingRegionBase = NULL;
 
-    MmLockAddressSpace(MmGetKernelAddressSpace());
+    InitializeListHead(&DirtyVacbListHead);
+    InitializeListHead(&VacbLruListHead);
+    InitializeListHead(&CcDeferredWrites);
+    InitializeListHead(&CcCleanSharedCacheMapList);
+    KeInitializeSpinLock(&CcDeferredWriteSpinLock);
+    KeInitializeSpinLock(&iSharedCacheMapLock);
+    KeInitializeGuardedMutex(&ViewLock);
+    ExInitializeNPagedLookasideList(&iBcbLookasideList,
+                                    NULL,
+                                    NULL,
+                                    0,
+                                    sizeof(INTERNAL_BCB),
+                                    TAG_BCB,
+                                    20);
+    ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList,
+                                    NULL,
+                                    NULL,
+                                    0,
+                                    sizeof(ROS_SHARED_CACHE_MAP),
+                                    TAG_SHARED_CACHE_MAP,
+                                    20);
+    ExInitializeNPagedLookasideList(&VacbLookasideList,
+                                    NULL,
+                                    NULL,
+                                    0,
+                                    sizeof(ROS_VACB),
+                                    TAG_VACB,
+                                    20);
 
-    Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
-                                MEMORY_AREA_CACHE_SEGMENT,
-                                &CiCacheSegMappingRegionBase,
-                                CI_CACHESEG_MAPPING_REGION_SIZE,
-                                PAGE_READWRITE,
-                                &marea,
-                                FALSE,
-                                0,
-                                PAGE_SIZE);
-    MmUnlockAddressSpace(MmGetKernelAddressSpace());
-    if (!NT_SUCCESS(Status))
+    MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);
+
+    /* Initialize lazy writer events */
+    KeInitializeEvent(&iLazyWriterShutdown, SynchronizationEvent, FALSE);
+    KeInitializeEvent(&iLazyWriterNotify, NotificationEvent, FALSE);
+
+    /* Define lazy writer threshold, depending on system type */
+    switch (MmQuerySystemSize())
     {
-        KeBugCheck(CACHE_MANAGER);
+        case MmSmallSystem:
+            CcDirtyPageThreshold = MmNumberOfPhysicalPages / 8;
+            break;
+
+        case MmMediumSystem:
+            CcDirtyPageThreshold = MmNumberOfPhysicalPages / 4;
+            break;
+
+        case MmLargeSystem:
+            CcDirtyPageThreshold = MmNumberOfPhysicalPages / 8 + MmNumberOfPhysicalPages / 4;
+            break;
     }
 
-    Buffer = ExAllocatePoolWithTag(NonPagedPool,
-                                   CI_CACHESEG_MAPPING_REGION_SIZE / (PAGE_SIZE * 8),
-                                   TAG_CC);
-    if (!Buffer)
+    /* Start the lazy writer thread */
+    InitializeObjectAttributes(&ObjectAttributes,
+                               NULL,
+                               OBJ_KERNEL_HANDLE,
+                               NULL,
+                               NULL);
+    Status = PsCreateSystemThread(&LazyWriter,
+                                  THREAD_ALL_ACCESS,
+                                  &ObjectAttributes,
+                                  NULL,
+                                  NULL,
+                                  CciLazyWriter,
+                                  NULL);
+    if (!NT_SUCCESS(Status))
     {
-        KeBugCheck(CACHE_MANAGER);
+        return FALSE;
     }
 
-    RtlInitializeBitMap(&CiCacheSegMappingRegionAllocMap,
-                        Buffer,
-                        CI_CACHESEG_MAPPING_REGION_SIZE / PAGE_SIZE);
-    RtlClearAllBits(&CiCacheSegMappingRegionAllocMap);
+    Priority = 27;
+    Status = NtSetInformationThread(LazyWriter,
+                                   ThreadPriority,
+                                   &Priority,
+                                   sizeof(Priority));
+    ASSERT(NT_SUCCESS(Status));
 
-    KeInitializeSpinLock(&CiCacheSegMappingRegionLock);
-#endif
-    InitializeListHead(&CacheSegmentListHead);
-    InitializeListHead(&DirtySegmentListHead);
-    InitializeListHead(&CacheSegmentLRUListHead);
-    InitializeListHead(&ClosedListHead);
-    KeInitializeGuardedMutex(&ViewLock);
-    ExInitializeNPagedLookasideList (&iBcbLookasideList,
-                                     NULL,
-                                     NULL,
-                                     0,
-                                     sizeof(INTERNAL_BCB),
-                                     TAG_IBCB,
-                                     20);
-    ExInitializeNPagedLookasideList (&BcbLookasideList,
-                                     NULL,
-                                     NULL,
-                                     0,
-                                     sizeof(BCB),
-                                     TAG_BCB,
-                                     20);
-    ExInitializeNPagedLookasideList (&CacheSegLookasideList,
-                                     NULL,
-                                     NULL,
-                                     0,
-                                     sizeof(CACHE_SEGMENT),
-                                     TAG_CSEG,
-                                     20);
-
-    MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);
+    /* Handle is not needed */
+    ObCloseHandle(LazyWriter, KernelMode);
 
     CcInitCacheZeroPage();
 
+    return TRUE;
+}
+
#if DBG && defined(KDBG)
/* Kdbg extension: dump per-file cache usage — valid and dirty sizes in
 * kilobytes — for every shared cache map on the clean list.
 * Argc/Argv are required by the extension callback interface but are
 * not used here. Always returns TRUE. */
BOOLEAN
ExpKdbgExtFileCache(ULONG Argc, PCHAR Argv[])
{
    PLIST_ENTRY ListEntry;
    UNICODE_STRING NoName = RTL_CONSTANT_STRING(L"No name for File");

    KdbpPrint("  Usage Summary (in kb)\n");
    KdbpPrint("Shared\t\tValid\tDirty\tName\n");
    /* No need to lock the spin lock here, we're in DBG */
    for (ListEntry = CcCleanSharedCacheMapList.Flink;
         ListEntry != &CcCleanSharedCacheMapList;
         ListEntry = ListEntry->Flink)
    {
        PLIST_ENTRY Vacbs;
        ULONG Valid = 0, Dirty = 0;
        PROS_SHARED_CACHE_MAP SharedCacheMap;
        PUNICODE_STRING FileName;

        SharedCacheMap = CONTAINING_RECORD(ListEntry, ROS_SHARED_CACHE_MAP, SharedCacheMapLinks);

        /* Dirty size */
        Dirty = (SharedCacheMap->DirtyPages * PAGE_SIZE) / 1024;

        /* First, count for all the associated VACB */
        for (Vacbs = SharedCacheMap->CacheMapVacbListHead.Flink;
             Vacbs != &SharedCacheMap->CacheMapVacbListHead;
             Vacbs = Vacbs->Flink)
        {
            PROS_VACB Vacb;

            Vacb = CONTAINING_RECORD(Vacbs, ROS_VACB, CacheMapVacbListEntry);
            if (Vacb->Valid)
            {
                Valid += VACB_MAPPING_GRANULARITY / 1024;
            }
        }

        /* Setup name */
        if (SharedCacheMap->FileObject != NULL &&
            SharedCacheMap->FileObject->FileName.Length != 0)
        {
            FileName = &SharedCacheMap->FileObject->FileName;
        }
        else
        {
            FileName = &NoName;
        }

        /* And print.
         * Valid and Dirty are ULONG (unsigned long): use %lu, not %d,
         * to avoid a mismatched format specifier. */
        KdbpPrint("%p\t%lu\t%lu\t%wZ\n", SharedCacheMap, Valid, Dirty, FileName);
    }

    return TRUE;
}
#endif
 
 /* EOF */