[NTOSKRNL] Add an assert against VACB reference count overflow.
[reactos.git] / ntoskrnl / cc / view.c
index e9964d4..2e570a1 100644 (file)
@@ -5,6 +5,7 @@
  * PURPOSE:         Cache manager
  *
  * PROGRAMMERS:     David Welch (welch@mcmail.com)
+ *                  Pierre Schweitzer (pierre@reactos.org)
  */
 
 /* NOTES **********************************************************************
@@ -41,9 +42,8 @@
 
 /* GLOBALS *******************************************************************/
 
-static LIST_ENTRY DirtyVacbListHead;
+LIST_ENTRY DirtyVacbListHead;
 static LIST_ENTRY VacbLruListHead;
-ULONG DirtyPageCount = 0;
 
 KGUARDED_MUTEX ViewLock;
 
@@ -51,6 +51,37 @@ NPAGED_LOOKASIDE_LIST iBcbLookasideList;
 static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
 static NPAGED_LOOKASIDE_LIST VacbLookasideList;
 
+/* Counters:
+ * - Number of pages flushed by the lazy writer
+ * - Number of times the lazy writer ran
+ */
+ULONG CcLazyWritePages = 0;
+ULONG CcLazyWriteIos = 0;
+
+/* Internal vars (MS):
+ * - Threshold above which the lazy writer will take action
+ * - Number of dirty pages in the system
+ * - List for deferred writes
+ * - Spinlock protecting the deferred write list
+ * - List for "clean" shared cache maps
+ * - One-second delay for the lazy writer
+ */
+ULONG CcDirtyPageThreshold = 0;
+ULONG CcTotalDirtyPages = 0;
+LIST_ENTRY CcDeferredWrites;
+KSPIN_LOCK CcDeferredWriteSpinLock;
+LIST_ENTRY CcCleanSharedCacheMapList;
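+/* Negative = relative timeout, in 100ns units: -10,000,000 is one second */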
+LARGE_INTEGER CcIdleDelay = RTL_CONSTANT_LARGE_INTEGER((LONGLONG)-1*1000*1000*10);
+
+/* Internal vars (ROS):
+ * - Event to tell the lazy writer to shut down
+ * - Event to inform watchers that the lazy writer is done for this loop
+ * - Lock protecting the CcCleanSharedCacheMapList list
+ */
+KEVENT iLazyWriterShutdown;
+KEVENT iLazyWriterNotify;
+KSPIN_LOCK iSharedCacheMapLock;
+
 #if DBG
 static void CcRosVacbIncRefCount_(PROS_VACB vacb, const char* file, int line)
 {
@@ -63,7 +94,9 @@ static void CcRosVacbIncRefCount_(PROS_VACB vacb, const char* file, int line)
 }
 static void CcRosVacbDecRefCount_(PROS_VACB vacb, const char* file, int line)
 {
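+    /* Catch reference count underflow, and dropping the last reference on a still-dirty VACB */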
+    ASSERT(vacb->ReferenceCount != 0);
     --vacb->ReferenceCount;
+    ASSERT(!(vacb->ReferenceCount == 0 && vacb->Dirty));
     if (vacb->SharedCacheMap->Trace)
     {
         DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
@@ -135,21 +168,11 @@ CcRosFlushVacb (
     PROS_VACB Vacb)
 {
     NTSTATUS Status;
-    KIRQL oldIrql;
 
     Status = CcWriteVirtualAddress(Vacb);
     if (NT_SUCCESS(Status))
     {
-        KeAcquireGuardedMutex(&ViewLock);
-        KeAcquireSpinLock(&Vacb->SharedCacheMap->CacheMapLock, &oldIrql);
-
-        Vacb->Dirty = FALSE;
-        RemoveEntryList(&Vacb->DirtyVacbListEntry);
-        DirtyPageCount -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
-        CcRosVacbDecRefCount(Vacb);
-
-        KeReleaseSpinLock(&Vacb->SharedCacheMap->CacheMapLock, oldIrql);
-        KeReleaseGuardedMutex(&ViewLock);
+        CcRosUnmarkDirtyVacb(Vacb, TRUE);
     }
 
     return Status;
@@ -160,7 +183,8 @@ NTAPI
 CcRosFlushDirtyPages (
     ULONG Target,
     PULONG Count,
-    BOOLEAN Wait)
+    BOOLEAN Wait,
+    BOOLEAN CalledFromLazy)
 {
     PLIST_ENTRY current_entry;
     PROS_VACB current;
@@ -191,6 +215,14 @@ CcRosFlushDirtyPages (
 
         CcRosVacbIncRefCount(current);
 
+        /* When performing lazy write, don't handle temporary files */
+        if (CalledFromLazy &&
+            BooleanFlagOn(current->SharedCacheMap->FileObject->Flags, FO_TEMPORARY_FILE))
+        {
+            CcRosVacbDecRefCount(current);
+            continue;
+        }
+
         Locked = current->SharedCacheMap->Callbacks->AcquireForLazyWrite(
                      current->SharedCacheMap->LazyWriteContext, Wait);
         if (!Locked)
@@ -199,11 +231,8 @@ CcRosFlushDirtyPages (
             continue;
         }
 
-        Status = KeWaitForSingleObject(&current->Mutex,
-                                       Executive,
-                                       KernelMode,
-                                       FALSE,
-                                       Wait ? NULL : &ZeroTimeout);
+        Status = CcRosAcquireVacbLock(current,
+                                      Wait ? NULL : &ZeroTimeout);
         if (Status != STATUS_SUCCESS)
         {
             current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
@@ -215,9 +244,10 @@ CcRosFlushDirtyPages (
         ASSERT(current->Dirty);
 
         /* One reference is added above */
-        if (current->ReferenceCount > 2)
+        if ((current->ReferenceCount > 2 && current->PinCount == 0) ||
+            (current->ReferenceCount > 3 && current->PinCount > 1))
         {
-            KeReleaseMutex(&current->Mutex, FALSE);
+            CcRosReleaseVacbLock(current);
             current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
                 current->SharedCacheMap->LazyWriteContext);
             CcRosVacbDecRefCount(current);
@@ -228,21 +258,36 @@ CcRosFlushDirtyPages (
 
         Status = CcRosFlushVacb(current);
 
-        KeReleaseMutex(&current->Mutex, FALSE);
+        CcRosReleaseVacbLock(current);
         current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
             current->SharedCacheMap->LazyWriteContext);
 
         KeAcquireGuardedMutex(&ViewLock);
         CcRosVacbDecRefCount(current);
 
-        if (!NT_SUCCESS(Status) &&  (Status != STATUS_END_OF_FILE))
+        if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE) &&
+            (Status != STATUS_MEDIA_WRITE_PROTECTED))
         {
             DPRINT1("CC: Failed to flush VACB.\n");
         }
         else
         {
-            (*Count) += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
-            Target -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+            ULONG PagesFreed;
+
+            /* How many pages did we free? */
+            PagesFreed = VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+            (*Count) += PagesFreed;
+
+            /* Make sure we don't underflow Target! */
+            if (Target < PagesFreed)
+            {
+                /* If we would have, clamp it to zero */
+                Target = 0;
+            }
+            else
+            {
+                Target -= PagesFreed;
+            }
         }
 
         current_entry = DirtyVacbListHead.Flink;
@@ -255,6 +300,81 @@ CcRosFlushDirtyPages (
     return STATUS_SUCCESS;
 }
 
+/* FIXME: Someday this could also implement proper write-behind/read-ahead */
+VOID
+NTAPI
+CciLazyWriter(PVOID Unused)
+{
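+    /* Loop until CcShutdownLazyWriter signals iLazyWriterShutdown */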
+    while (TRUE)
+    {
+        NTSTATUS Status;
+        PLIST_ENTRY ListEntry;
+        ULONG Target, Count = 0;
+
+        /* Wake up once per second, or earlier if we're told to stop */
+        Status = KeWaitForSingleObject(&iLazyWriterShutdown,
+                                       Executive,
+                                       KernelMode,
+                                       FALSE,
+                                       &CcIdleDelay);
+
+        /* If we succeed, we have to stop running! */
+        if (Status == STATUS_SUCCESS)
+        {
+            break;
+        }
+
+        /* We're not sleeping anymore */
+        KeClearEvent(&iLazyWriterNotify);
+
+        /* Our target is one-eighth of the dirty pages */
+        Target = CcTotalDirtyPages / 8;
+        if (Target != 0)
+        {
+            /* Flush! */
+            DPRINT("Lazy writer starting (%d)\n", Target);
+            CcRosFlushDirtyPages(Target, &Count, FALSE, TRUE);
+
+            /* And update stats */
+            CcLazyWritePages += Count;
+            ++CcLazyWriteIos;
+            DPRINT("Lazy writer done (%d)\n", Count);
+        }
+
+        /* Inform people waiting on us that we're done */
+        KeSetEvent(&iLazyWriterNotify, IO_DISK_INCREMENT, FALSE);
+
+        /* Likely not optimal, but let's handle one deferred write now! */
+        ListEntry = ExInterlockedRemoveHeadList(&CcDeferredWrites, &CcDeferredWriteSpinLock);
+        if (ListEntry != NULL)
+        {
+            PDEFERRED_WRITE Context;
+
+            /* Extract the context */
+            Context = CONTAINING_RECORD(ListEntry, DEFERRED_WRITE, DeferredWriteLinks);
+            ASSERT(Context->NodeTypeCode == NODE_TYPE_DEFERRED_WRITE);
+
+            /* Can we write now? */
+            if (CcCanIWrite(Context->FileObject, Context->BytesToWrite, FALSE, TRUE))
+            {
+                /* Yes! Do it, and destroy the associated context */
+                Context->PostRoutine(Context->Context1, Context->Context2);
+                ExFreePoolWithTag(Context, 'CcDw');
+            }
+            else
+            {
+                /* Otherwise, requeue it at the tail so that it doesn't block the others.
+                 * This clearly leaves room for improvement, but given the simplistic
+                 * algorithm used for now, it's better than nothing!
+                 */
+                ExInterlockedInsertTailList(&CcDeferredWrites,
+                                            &Context->DeferredWriteLinks,
+                                            &CcDeferredWriteSpinLock);
+            }
+        }
+    }
+}
+
 NTSTATUS
 CcRosTrimCache (
     ULONG Target,
@@ -348,7 +468,7 @@ retry:
     if ((Target > 0) && !FlushedPages)
     {
         /* Flush dirty pages to disk */
-        CcRosFlushDirtyPages(Target, &PagesFreed, FALSE);
+        CcRosFlushDirtyPages(Target, &PagesFreed, FALSE, FALSE);
         FlushedPages = TRUE;
 
         /* We can only swap as many pages as we flushed */
@@ -386,26 +506,16 @@ CcRosReleaseVacb (
     BOOLEAN Dirty,
     BOOLEAN Mapped)
 {
-    BOOLEAN WasDirty;
-    KIRQL oldIrql;
-
     ASSERT(SharedCacheMap);
 
     DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
            SharedCacheMap, Vacb, Valid);
 
-    KeAcquireGuardedMutex(&ViewLock);
-    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
-
     Vacb->Valid = Valid;
 
-    WasDirty = Vacb->Dirty;
-    Vacb->Dirty = Vacb->Dirty || Dirty;
-
-    if (!WasDirty && Vacb->Dirty)
+    if (Dirty && !Vacb->Dirty)
     {
-        InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
-        DirtyPageCount += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+        CcRosMarkDirtyVacb(Vacb);
     }
 
     if (Mapped)
@@ -417,14 +527,8 @@ CcRosReleaseVacb (
     {
         CcRosVacbIncRefCount(Vacb);
     }
-    if (!WasDirty && Vacb->Dirty)
-    {
-        CcRosVacbIncRefCount(Vacb);
-    }
 
-    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
-    KeReleaseGuardedMutex(&ViewLock);
-    KeReleaseMutex(&Vacb->Mutex, FALSE);
+    CcRosReleaseVacbLock(Vacb);
 
     return STATUS_SUCCESS;
 }
@@ -461,11 +565,7 @@ CcRosLookupVacb (
             CcRosVacbIncRefCount(current);
             KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
             KeReleaseGuardedMutex(&ViewLock);
-            KeWaitForSingleObject(&current->Mutex,
-                                  Executive,
-                                  KernelMode,
-                                  FALSE,
-                                  NULL);
+            CcRosAcquireVacbLock(current, NULL);
             return current;
         }
         if (current->FileOffset.QuadPart > FileOffset)
@@ -479,14 +579,76 @@ CcRosLookupVacb (
     return NULL;
 }
 
-NTSTATUS
+VOID
 NTAPI
 CcRosMarkDirtyVacb (
+    PROS_VACB Vacb)
+{
+    KIRQL oldIrql;
+    PROS_SHARED_CACHE_MAP SharedCacheMap;
+
+    SharedCacheMap = Vacb->SharedCacheMap;
+
+    KeAcquireGuardedMutex(&ViewLock);
+    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
+
+    ASSERT(!Vacb->Dirty);
+
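+    /* Account for the dirty pages and take an extra reference: a dirty VACB must not go away */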
+    InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
+    CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+    Vacb->SharedCacheMap->DirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+    CcRosVacbIncRefCount(Vacb);
+
+    /* Move to the tail of the LRU list */
+    RemoveEntryList(&Vacb->VacbLruListEntry);
+    InsertTailList(&VacbLruListHead, &Vacb->VacbLruListEntry);
+
+    Vacb->Dirty = TRUE;
+
+    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
+    KeReleaseGuardedMutex(&ViewLock);
+}
+
+VOID
+NTAPI
+CcRosUnmarkDirtyVacb (
+    PROS_VACB Vacb,
+    BOOLEAN LockViews)
+{
+    KIRQL oldIrql;
+    PROS_SHARED_CACHE_MAP SharedCacheMap;
+
+    SharedCacheMap = Vacb->SharedCacheMap;
+
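+    /* Callers that already hold ViewLock and the cache map lock pass LockViews == FALSE */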
+    if (LockViews)
+    {
+        KeAcquireGuardedMutex(&ViewLock);
+        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
+    }
+
+    ASSERT(Vacb->Dirty);
+
+    Vacb->Dirty = FALSE;
+
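+    /* Undo the dirty accounting and drop the reference taken by CcRosMarkDirtyVacb */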
+    RemoveEntryList(&Vacb->DirtyVacbListEntry);
+    CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+    Vacb->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+    CcRosVacbDecRefCount(Vacb);
+
+    if (LockViews)
+    {
+        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
+        KeReleaseGuardedMutex(&ViewLock);
+    }
+}
+
+NTSTATUS
+NTAPI
+CcRosMarkDirtyFile (
     PROS_SHARED_CACHE_MAP SharedCacheMap,
     LONGLONG FileOffset)
 {
     PROS_VACB Vacb;
-    KIRQL oldIrql;
 
     ASSERT(SharedCacheMap);
 
@@ -499,28 +661,12 @@ CcRosMarkDirtyVacb (
         KeBugCheck(CACHE_MANAGER);
     }
 
-    KeAcquireGuardedMutex(&ViewLock);
-    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
-
     if (!Vacb->Dirty)
     {
-        InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
-        DirtyPageCount += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
-    }
-    else
-    {
-        CcRosVacbDecRefCount(Vacb);
+        CcRosMarkDirtyVacb(Vacb);
     }
 
-    /* Move to the tail of the LRU list */
-    RemoveEntryList(&Vacb->VacbLruListEntry);
-    InsertTailList(&VacbLruListHead, &Vacb->VacbLruListEntry);
-
-    Vacb->Dirty = TRUE;
-
-    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
-    KeReleaseGuardedMutex(&ViewLock);
-    KeReleaseMutex(&Vacb->Mutex, FALSE);
+    CcRosReleaseVacbLock(Vacb);
 
     return STATUS_SUCCESS;
 }
@@ -533,8 +679,6 @@ CcRosUnmapVacb (
     BOOLEAN NowDirty)
 {
     PROS_VACB Vacb;
-    BOOLEAN WasDirty;
-    KIRQL oldIrql;
 
     ASSERT(SharedCacheMap);
 
@@ -547,33 +691,20 @@ CcRosUnmapVacb (
         return STATUS_UNSUCCESSFUL;
     }
 
-    KeAcquireGuardedMutex(&ViewLock);
-    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
-
-    WasDirty = Vacb->Dirty;
-    Vacb->Dirty = Vacb->Dirty || NowDirty;
-
-    Vacb->MappedCount--;
-
-    if (!WasDirty && NowDirty)
+    if (NowDirty && !Vacb->Dirty)
     {
-        InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
-        DirtyPageCount += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+        CcRosMarkDirtyVacb(Vacb);
     }
 
+    Vacb->MappedCount--;
+
     CcRosVacbDecRefCount(Vacb);
-    if (!WasDirty && NowDirty)
-    {
-        CcRosVacbIncRefCount(Vacb);
-    }
     if (Vacb->MappedCount == 0)
     {
         CcRosVacbDecRefCount(Vacb);
     }
 
-    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
-    KeReleaseGuardedMutex(&ViewLock);
-    KeReleaseMutex(&Vacb->Mutex, FALSE);
+    CcRosReleaseVacbLock(Vacb);
 
     return STATUS_SUCCESS;
 }
@@ -595,13 +726,13 @@ CcRosMapVacb(
                                 VACB_MAPPING_GRANULARITY,
                                 PAGE_READWRITE,
                                 (PMEMORY_AREA*)&Vacb->MemoryArea,
-                                FALSE,
                                 0,
                                 PAGE_SIZE);
     MmUnlockAddressSpace(MmGetKernelAddressSpace());
     if (!NT_SUCCESS(Status))
     {
-        KeBugCheck(CACHE_MANAGER);
+        DPRINT1("MmCreateMemoryArea failed with %lx for VACB %p\n", Status, Vacb);
+        return Status;
     }
 
     ASSERT(((ULONG_PTR)Vacb->BaseAddress % PAGE_SIZE) == 0);
@@ -613,6 +744,7 @@ CcRosMapVacb(
     {
         PFN_NUMBER PageFrameNumber;
 
+        MI_SET_USAGE(MI_USAGE_CACHE);
         Status = MmRequestPageMemoryConsumer(MC_CACHE, TRUE, &PageFrameNumber);
         if (PageFrameNumber == 0)
         {
@@ -652,7 +784,7 @@ CcRosCreateVacb (
 
     DPRINT("CcRosCreateVacb()\n");
 
-    if (FileOffset >= SharedCacheMap->FileSize.QuadPart)
+    if (FileOffset >= SharedCacheMap->SectionSize.QuadPart)
     {
         *Vacb = NULL;
         return STATUS_INVALID_PARAMETER;
@@ -675,12 +807,9 @@ CcRosCreateVacb (
     current->DirtyVacbListEntry.Flink = NULL;
     current->DirtyVacbListEntry.Blink = NULL;
     current->ReferenceCount = 1;
+    current->PinCount = 0;
     KeInitializeMutex(&current->Mutex, 0);
-    KeWaitForSingleObject(&current->Mutex,
-                          Executive,
-                          KernelMode,
-                          FALSE,
-                          NULL);
+    CcRosAcquireVacbLock(current, NULL);
     KeAcquireGuardedMutex(&ViewLock);
 
     *Vacb = current;
@@ -712,15 +841,11 @@ CcRosCreateVacb (
                         current);
             }
 #endif
-            KeReleaseMutex(&(*Vacb)->Mutex, FALSE);
+            CcRosReleaseVacbLock(*Vacb);
             KeReleaseGuardedMutex(&ViewLock);
             ExFreeToNPagedLookasideList(&VacbLookasideList, *Vacb);
             *Vacb = current;
-            KeWaitForSingleObject(&current->Mutex,
-                                  Executive,
-                                  KernelMode,
-                                  FALSE,
-                                  NULL);
+            CcRosAcquireVacbLock(current, NULL);
             return STATUS_SUCCESS;
         }
         if (current->FileOffset.QuadPart < FileOffset)
@@ -751,15 +876,29 @@ CcRosCreateVacb (
 #if MI_TRACE_PFNS
     if ((SharedCacheMap->FileObject) && (SharedCacheMap->FileObject->FileName.Buffer))
     {
-        PWCHAR pos = NULL;
+        PWCHAR pos;
         ULONG len = 0;
         pos = wcsrchr(SharedCacheMap->FileObject->FileName.Buffer, '\\');
-        len = wcslen(pos) * sizeof(WCHAR);
-        if (pos) snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
+        if (pos)
+        {
+            len = wcslen(pos) * sizeof(WCHAR);
+            snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
+        }
+        else
+        {
+            snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, SharedCacheMap->FileObject->FileName.Length), "%wZ", &SharedCacheMap->FileObject->FileName);
+        }
     }
 #endif
 
     Status = CcRosMapVacb(current);
+    if (!NT_SUCCESS(Status))
+    {
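+        /* Mapping failed: unlink the VACB we just inserted and free it */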
+        RemoveEntryList(&current->CacheMapVacbListEntry);
+        RemoveEntryList(&current->VacbLruListEntry);
+        CcRosReleaseVacbLock(current);
+        ExFreeToNPagedLookasideList(&VacbLookasideList, current);
+    }
 
     return Status;
 }
@@ -909,6 +1048,9 @@ CcFlushCache (
     NTSTATUS Status;
     KIRQL oldIrql;
 
+    CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
+        SectionObjectPointers, FileOffset, Length);
+
     DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
            SectionObjectPointers, FileOffset, Length, IoStatus);
 
@@ -946,7 +1088,8 @@ CcFlushCache (
                         IoStatus->Status = Status;
                     }
                 }
-                KeReleaseMutex(&current->Mutex, FALSE);
+
+                CcRosReleaseVacbLock(current);
 
                 KeAcquireGuardedMutex(&ViewLock);
                 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
@@ -984,15 +1127,17 @@ CcRosDeleteFileCache (
 
     ASSERT(SharedCacheMap);
 
-    SharedCacheMap->RefCount++;
+    SharedCacheMap->OpenCount++;
     KeReleaseGuardedMutex(&ViewLock);
 
     CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);
 
     KeAcquireGuardedMutex(&ViewLock);
-    SharedCacheMap->RefCount--;
-    if (SharedCacheMap->RefCount == 0)
+    SharedCacheMap->OpenCount--;
+    if (SharedCacheMap->OpenCount == 0)
     {
+        KIRQL OldIrql;
+
         FileObject->SectionObjectPointer->SharedCacheMap = NULL;
 
         /*
@@ -1003,15 +1148,22 @@ CcRosDeleteFileCache (
         while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
         {
             current_entry = RemoveTailList(&SharedCacheMap->CacheMapVacbListHead);
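+            /* We cannot wait on the VACB lock while holding the spin lock (raised IRQL), release it first */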
+            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
+
             current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
+            CcRosAcquireVacbLock(current, NULL);
             RemoveEntryList(&current->VacbLruListEntry);
             if (current->Dirty)
             {
-                RemoveEntryList(&current->DirtyVacbListEntry);
-                DirtyPageCount -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
+                KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
+                CcRosUnmarkDirtyVacb(current, FALSE);
+                KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
                 DPRINT1("Freeing dirty VACB\n");
             }
             InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
+            CcRosReleaseVacbLock(current);
+
+            KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
         }
 #if DBG
         SharedCacheMap->Trace = FALSE;
@@ -1027,6 +1179,11 @@ CcRosDeleteFileCache (
             current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
             CcRosInternalFreeVacb(current);
         }
+
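+        /* Unlink the shared cache map from the clean list before freeing it */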
+        KeAcquireSpinLock(&iSharedCacheMapLock, &OldIrql);
+        RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
+        KeReleaseSpinLock(&iSharedCacheMapLock, OldIrql);
+
         ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
         KeAcquireGuardedMutex(&ViewLock);
     }
@@ -1042,8 +1199,8 @@ CcRosReferenceCache (
     KeAcquireGuardedMutex(&ViewLock);
     SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
     ASSERT(SharedCacheMap);
-    ASSERT(SharedCacheMap->RefCount != 0);
-    SharedCacheMap->RefCount++;
+    ASSERT(SharedCacheMap->OpenCount != 0);
+    SharedCacheMap->OpenCount++;
     KeReleaseGuardedMutex(&ViewLock);
 }
 
@@ -1056,7 +1213,7 @@ CcRosRemoveIfClosed (
     DPRINT("CcRosRemoveIfClosed()\n");
     KeAcquireGuardedMutex(&ViewLock);
     SharedCacheMap = SectionObjectPointer->SharedCacheMap;
-    if (SharedCacheMap && SharedCacheMap->RefCount == 0)
+    if (SharedCacheMap && SharedCacheMap->OpenCount == 0)
     {
         CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap);
     }
@@ -1073,10 +1230,10 @@ CcRosDereferenceCache (
     KeAcquireGuardedMutex(&ViewLock);
     SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
     ASSERT(SharedCacheMap);
-    if (SharedCacheMap->RefCount > 0)
+    if (SharedCacheMap->OpenCount > 0)
     {
-        SharedCacheMap->RefCount--;
-        if (SharedCacheMap->RefCount == 0)
+        SharedCacheMap->OpenCount--;
+        if (SharedCacheMap->OpenCount == 0)
         {
             MmFreeSectionSegments(SharedCacheMap->FileObject);
             CcRosDeleteFileCache(FileObject, SharedCacheMap);
@@ -1104,10 +1261,10 @@ CcRosReleaseFileCache (
         if (FileObject->PrivateCacheMap != NULL)
         {
             FileObject->PrivateCacheMap = NULL;
-            if (SharedCacheMap->RefCount > 0)
+            if (SharedCacheMap->OpenCount > 0)
             {
-                SharedCacheMap->RefCount--;
-                if (SharedCacheMap->RefCount == 0)
+                SharedCacheMap->OpenCount--;
+                if (SharedCacheMap->OpenCount == 0)
                 {
                     MmFreeSectionSegments(SharedCacheMap->FileObject);
                     CcRosDeleteFileCache(FileObject, SharedCacheMap);
@@ -1140,7 +1297,7 @@ CcTryToInitializeFileCache (
         if (FileObject->PrivateCacheMap == NULL)
         {
             FileObject->PrivateCacheMap = SharedCacheMap;
-            SharedCacheMap->RefCount++;
+            SharedCacheMap->OpenCount++;
         }
         Status = STATUS_SUCCESS;
     }
@@ -1155,6 +1312,7 @@ NTAPI
 CcRosInitializeFileCache (
     PFILE_OBJECT FileObject,
     PCC_FILE_SIZES FileSizes,
+    BOOLEAN PinAccess,
     PCACHE_MANAGER_CALLBACKS CallBacks,
     PVOID LazyWriterContext)
 /*
@@ -1170,11 +1328,13 @@ CcRosInitializeFileCache (
     KeAcquireGuardedMutex(&ViewLock);
     if (SharedCacheMap == NULL)
     {
+        KIRQL OldIrql;
+
         SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
         if (SharedCacheMap == NULL)
         {
             KeReleaseGuardedMutex(&ViewLock);
-            return STATUS_UNSUCCESSFUL;
+            return STATUS_INSUFFICIENT_RESOURCES;
         }
         RtlZeroMemory(SharedCacheMap, sizeof(*SharedCacheMap));
         ObReferenceObjectByPointer(FileObject,
@@ -1186,14 +1346,21 @@ CcRosInitializeFileCache (
         SharedCacheMap->LazyWriteContext = LazyWriterContext;
         SharedCacheMap->SectionSize = FileSizes->AllocationSize;
         SharedCacheMap->FileSize = FileSizes->FileSize;
+        SharedCacheMap->PinAccess = PinAccess;
+        SharedCacheMap->DirtyPageThreshold = 0;
+        SharedCacheMap->DirtyPages = 0;
         KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
         InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
         FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;
+
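+        /* Track the new shared cache map in the clean list */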
+        KeAcquireSpinLock(&iSharedCacheMapLock, &OldIrql);
+        InsertTailList(&CcCleanSharedCacheMapList, &SharedCacheMap->SharedCacheMapLinks);
+        KeReleaseSpinLock(&iSharedCacheMapLock, OldIrql);
     }
     if (FileObject->PrivateCacheMap == NULL)
     {
         FileObject->PrivateCacheMap = SharedCacheMap;
-        SharedCacheMap->RefCount++;
+        SharedCacheMap->OpenCount++;
     }
     KeReleaseGuardedMutex(&ViewLock);
 
@@ -1209,6 +1376,9 @@ CcGetFileObjectFromSectionPtrs (
     IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
 {
     PROS_SHARED_CACHE_MAP SharedCacheMap;
+
+    CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p\n", SectionObjectPointers);
+
     if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
     {
         SharedCacheMap = SectionObjectPointers->SharedCacheMap;
@@ -1219,15 +1389,33 @@ CcGetFileObjectFromSectionPtrs (
 }
 
 VOID
+NTAPI
+CcShutdownLazyWriter (
+    VOID)
+{
+    /* Simply set the event; the lazy writer will stop once it finishes its current loop */
+    KeSetEvent(&iLazyWriterShutdown, IO_DISK_INCREMENT, FALSE);
+}
+
+BOOLEAN
 INIT_FUNCTION
 NTAPI
 CcInitView (
     VOID)
 {
+    HANDLE LazyWriter;
+    NTSTATUS Status;
+    KPRIORITY Priority;
+    OBJECT_ATTRIBUTES ObjectAttributes;
+
     DPRINT("CcInitView()\n");
 
     InitializeListHead(&DirtyVacbListHead);
     InitializeListHead(&VacbLruListHead);
+    InitializeListHead(&CcDeferredWrites);
+    InitializeListHead(&CcCleanSharedCacheMapList);
+    KeInitializeSpinLock(&CcDeferredWriteSpinLock);
+    KeInitializeSpinLock(&iSharedCacheMapLock);
     KeInitializeGuardedMutex(&ViewLock);
     ExInitializeNPagedLookasideList(&iBcbLookasideList,
                                     NULL,
@@ -1253,7 +1441,114 @@ CcInitView (
 
     MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);
 
+    /* Initialize lazy writer events */
+    KeInitializeEvent(&iLazyWriterShutdown, SynchronizationEvent, FALSE);
+    KeInitializeEvent(&iLazyWriterNotify, NotificationEvent, FALSE);
+
+    /* Define lazy writer threshold, depending on system type */
+    switch (MmQuerySystemSize())
+    {
+        case MmSmallSystem:
+            CcDirtyPageThreshold = MmNumberOfPhysicalPages / 8;
+            break;
+
+        case MmMediumSystem:
+            CcDirtyPageThreshold = MmNumberOfPhysicalPages / 4;
+            break;
+
+        case MmLargeSystem:
+            CcDirtyPageThreshold = MmNumberOfPhysicalPages / 8 + MmNumberOfPhysicalPages / 4;
+            break;
+    }
+
+    /* Start the lazy writer thread */
+    InitializeObjectAttributes(&ObjectAttributes,
+                               NULL,
+                               OBJ_KERNEL_HANDLE,
+                               NULL,
+                               NULL);
+    Status = PsCreateSystemThread(&LazyWriter,
+                                  THREAD_ALL_ACCESS,
+                                  &ObjectAttributes,
+                                  NULL,
+                                  NULL,
+                                  CciLazyWriter,
+                                  NULL);
+    if (!NT_SUCCESS(Status))
+    {
+        return FALSE;
+    }
+
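+    /* Boost the lazy writer into the realtime priority range */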
+    Priority = 27;
+    Status = NtSetInformationThread(LazyWriter,
+                                   ThreadPriority,
+                                   &Priority,
+                                   sizeof(Priority));
+    ASSERT(NT_SUCCESS(Status));
+
+    /* Handle is not needed */
+    ObCloseHandle(LazyWriter, KernelMode);
+
     CcInitCacheZeroPage();
+
+    return TRUE;
+}
+
+#if DBG && defined(KDBG)
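+/* KDBG extension: dump valid/dirty cache usage (in kb) for every shared cache map */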
+BOOLEAN
+ExpKdbgExtFileCache(ULONG Argc, PCHAR Argv[])
+{
+    PLIST_ENTRY ListEntry;
+    UNICODE_STRING NoName = RTL_CONSTANT_STRING(L"No name for File");
+
+    KdbpPrint("  Usage Summary (in kb)\n");
+    KdbpPrint("Shared\t\tValid\tDirty\tName\n");
+    /* No need to hold the spin lock here, we're running inside the kernel debugger */
+    for (ListEntry = CcCleanSharedCacheMapList.Flink;
+         ListEntry != &CcCleanSharedCacheMapList;
+         ListEntry = ListEntry->Flink)
+    {
+        PLIST_ENTRY Vacbs;
+        ULONG Valid = 0, Dirty = 0;
+        PROS_SHARED_CACHE_MAP SharedCacheMap;
+        PUNICODE_STRING FileName;
+
+        SharedCacheMap = CONTAINING_RECORD(ListEntry, ROS_SHARED_CACHE_MAP, SharedCacheMapLinks);
+
+        /* Dirty size */
+        Dirty = (SharedCacheMap->DirtyPages * PAGE_SIZE) / 1024;
+
+        /* Then, sum up the valid data across all the associated VACBs */
+        for (Vacbs = SharedCacheMap->CacheMapVacbListHead.Flink;
+             Vacbs != &SharedCacheMap->CacheMapVacbListHead;
+             Vacbs = Vacbs->Flink)
+        {
+            PROS_VACB Vacb;
+
+            Vacb = CONTAINING_RECORD(Vacbs, ROS_VACB, CacheMapVacbListEntry);
+            if (Vacb->Valid)
+            {
+                Valid += VACB_MAPPING_GRANULARITY / 1024;
+            }
+        }
+
+        /* Setup name */
+        if (SharedCacheMap->FileObject != NULL &&
+            SharedCacheMap->FileObject->FileName.Length != 0)
+        {
+            FileName = &SharedCacheMap->FileObject->FileName;
+        }
+        else
+        {
+            FileName = &NoName;
+        }
+
+        /* And print */
+        KdbpPrint("%p\t%d\t%d\t%wZ\n", SharedCacheMap, Valid, Dirty, FileName);
+    }
+
+    return TRUE;
 }
+#endif
 
 /* EOF */