[NTOSKRNL] Add a raw implementation of !irpfind in kdbg
[reactos.git] / ntoskrnl / cc / view.c
index cf26455..890c75e 100644
@@ -45,8 +45,6 @@
 LIST_ENTRY DirtyVacbListHead;
 static LIST_ENTRY VacbLruListHead;
 
-KGUARDED_MUTEX ViewLock;
-
 NPAGED_LOOKASIDE_LIST iBcbLookasideList;
 static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
 static NPAGED_LOOKASIDE_LIST VacbLookasideList;
@@ -65,35 +63,52 @@ KSPIN_LOCK CcDeferredWriteSpinLock;
 LIST_ENTRY CcCleanSharedCacheMapList;
 
 #if DBG
-static void CcRosVacbIncRefCount_(PROS_VACB vacb, const char* file, int line)
+ULONG CcRosVacbIncRefCount_(PROS_VACB vacb, PCSTR file, INT line)
 {
-    ++vacb->ReferenceCount;
+    ULONG Refs;
+
+    Refs = InterlockedIncrement((PLONG)&vacb->ReferenceCount);
     if (vacb->SharedCacheMap->Trace)
     {
         DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
-                 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
+                 file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
     }
+
+    return Refs;
 }
-static void CcRosVacbDecRefCount_(PROS_VACB vacb, const char* file, int line)
+ULONG CcRosVacbDecRefCount_(PROS_VACB vacb, PCSTR file, INT line)
 {
-    ASSERT(vacb->ReferenceCount != 0);
-    --vacb->ReferenceCount;
-    ASSERT(!(vacb->ReferenceCount == 0 && vacb->Dirty));
+    ULONG Refs;
+
+    Refs = InterlockedDecrement((PLONG)&vacb->ReferenceCount);
+    ASSERT(!(Refs == 0 && vacb->Dirty));
     if (vacb->SharedCacheMap->Trace)
     {
         DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
-                 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
+                 file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
+    }
+
+    if (Refs == 0)
+    {
+        CcRosInternalFreeVacb(vacb);
     }
+
+    return Refs;
 }
-#define CcRosVacbIncRefCount(vacb) CcRosVacbIncRefCount_(vacb,__FILE__,__LINE__)
-#define CcRosVacbDecRefCount(vacb) CcRosVacbDecRefCount_(vacb,__FILE__,__LINE__)
-#else
-#define CcRosVacbIncRefCount(vacb) (++((vacb)->ReferenceCount))
-#define CcRosVacbDecRefCount(vacb) (--((vacb)->ReferenceCount))
-#endif
+ULONG CcRosVacbGetRefCount_(PROS_VACB vacb, PCSTR file, INT line)
+{
+    ULONG Refs;
 
-NTSTATUS
-CcRosInternalFreeVacb(PROS_VACB Vacb);
+    Refs = InterlockedCompareExchange((PLONG)&vacb->ReferenceCount, 0, 0);
+    if (vacb->SharedCacheMap->Trace)
+    {
+        DbgPrint("(%s:%i) VACB %p ==RefCount=%lu, Dirty %u, PageOut %lu\n",
+                 file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
+    }
+
+    return Refs;
+}
+#endif
 
 
 /* FUNCTIONS *****************************************************************/
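
The DBG helpers above now manipulate the reference count with Interlocked operations, return the post-operation value, and free the VACB as soon as the count drops to zero (the old non-DBG ++/-- macros are gone). A minimal kernel-mode sketch of that free-on-last-reference pattern, detached from the Cc internals (all names below are illustrative, not part of the patch):

    typedef struct _REF_OBJECT
    {
        volatile LONG ReferenceCount;
        /* ... payload ... */
    } REF_OBJECT, *PREF_OBJECT;

    static VOID FreeRefObject(PREF_OBJECT Object);   /* hypothetical destructor, runs exactly once */

    static LONG RefObject(PREF_OBJECT Object)
    {
        /* Return the post-increment value so callers can ASSERT on it */
        return InterlockedIncrement(&Object->ReferenceCount);
    }

    static LONG DerefObject(PREF_OBJECT Object)
    {
        LONG Refs = InterlockedDecrement(&Object->ReferenceCount);
        if (Refs == 0)
        {
            /* Last reference is gone: no other thread can reach the object */
            FreeRefObject(Object);
        }
        return Refs;
    }

CcRosVacbGetRefCount_() reads the counter with InterlockedCompareExchange(ptr, 0, 0), which is simply an atomic read expressed with the interlocked primitives.
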
@@ -118,8 +133,8 @@ CcRosTraceCacheMap (
     {
         DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
 
-        KeAcquireGuardedMutex(&ViewLock);
-        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldirql);
+        oldirql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
+        KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
 
         current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
         while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
@@ -130,8 +145,9 @@ CcRosTraceCacheMap (
             DPRINT1("  VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
                     current, current->ReferenceCount, current->Dirty, current->PageOut );
         }
-        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldirql);
-        KeReleaseGuardedMutex(&ViewLock);
+
+        KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
+        KeReleaseQueuedSpinLock(LockQueueMasterLock, oldirql);
     }
     else
     {
@@ -151,10 +167,12 @@ CcRosFlushVacb (
 {
     NTSTATUS Status;
 
+    CcRosUnmarkDirtyVacb(Vacb, TRUE);
+
     Status = CcWriteVirtualAddress(Vacb);
-    if (NT_SUCCESS(Status))
+    if (!NT_SUCCESS(Status))
     {
-        CcRosUnmarkDirtyVacb(Vacb, TRUE);
+        CcRosMarkDirtyVacb(Vacb);
     }
 
     return Status;
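
CcRosFlushVacb() now takes the VACB off the dirty list before issuing the write-back and only re-marks it dirty if the write fails, rather than unmarking it after a successful write as before. A hedged sketch of that ordering, with placeholder names:

    NTSTATUS FlushView(PVIEW View)
    {
        NTSTATUS Status;

        UnmarkDirty(View);            /* optimistic: assume the write-back will succeed */

        Status = WriteBack(View);
        if (!NT_SUCCESS(Status))
        {
            MarkDirty(View);          /* it did not: the data is still dirty, requeue it */
        }

        return Status;
    }
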
@@ -172,15 +190,14 @@ CcRosFlushDirtyPages (
     PROS_VACB current;
     BOOLEAN Locked;
     NTSTATUS Status;
-    LARGE_INTEGER ZeroTimeout;
+    KIRQL OldIrql;
 
     DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target);
 
     (*Count) = 0;
-    ZeroTimeout.QuadPart = 0;
 
     KeEnterCriticalRegion();
-    KeAcquireGuardedMutex(&ViewLock);
+    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
 
     current_entry = DirtyVacbListHead.Flink;
     if (current_entry == &DirtyVacbListHead)
@@ -205,45 +222,33 @@ CcRosFlushDirtyPages (
             continue;
         }
 
-        Locked = current->SharedCacheMap->Callbacks->AcquireForLazyWrite(
-                     current->SharedCacheMap->LazyWriteContext, Wait);
-        if (!Locked)
-        {
-            CcRosVacbDecRefCount(current);
-            continue;
-        }
-
-        Status = CcRosAcquireVacbLock(current,
-                                      Wait ? NULL : &ZeroTimeout);
-        if (Status != STATUS_SUCCESS)
+        /* Don't attempt to lazy write files that have disabled write behind */
+        if (CalledFromLazy &&
+            BooleanFlagOn(current->SharedCacheMap->Flags, WRITEBEHIND_DISABLED))
         {
-            current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
-                current->SharedCacheMap->LazyWriteContext);
             CcRosVacbDecRefCount(current);
             continue;
         }
 
         ASSERT(current->Dirty);
 
-        /* One reference is added above */
-        if (current->ReferenceCount > 2)
+        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
+
+        Locked = current->SharedCacheMap->Callbacks->AcquireForLazyWrite(
+                     current->SharedCacheMap->LazyWriteContext, Wait);
+        if (!Locked)
         {
-            CcRosReleaseVacbLock(current);
-            current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
-                current->SharedCacheMap->LazyWriteContext);
+            OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
             CcRosVacbDecRefCount(current);
             continue;
         }
 
-        KeReleaseGuardedMutex(&ViewLock);
-
         Status = CcRosFlushVacb(current);
 
-        CcRosReleaseVacbLock(current);
         current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
             current->SharedCacheMap->LazyWriteContext);
 
-        KeAcquireGuardedMutex(&ViewLock);
+        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
         CcRosVacbDecRefCount(current);
 
         if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE) &&
@@ -274,7 +279,7 @@ CcRosFlushDirtyPages (
         current_entry = DirtyVacbListHead.Flink;
     }
 
-    KeReleaseGuardedMutex(&ViewLock);
+    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
     KeLeaveCriticalRegion();
 
     DPRINT("CcRosFlushDirtyPages() finished\n");
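
LockQueueMasterLock is a queued spin lock held at DISPATCH_LEVEL, unlike the old ViewLock guarded mutex, so the loop above must drop it before calling AcquireForLazyWrite() and CcRosFlushVacb(), both of which may block; the reference taken on the VACB is what keeps the entry valid across that unlocked window. A reduced sketch of the pattern, assuming hypothetical names:

    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    Entry = ListHead.Flink;
    while (Entry != &ListHead)
    {
        Item = CONTAINING_RECORD(Entry, ITEM, ListEntry);
        ReferenceItem(Item);                       /* keep it alive while unlocked */

        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
        DoBlockingWork(Item);                      /* may wait: must run below DISPATCH_LEVEL */
        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

        DereferenceItem(Item);

        /* The list may have changed while unlocked; restart from the head,
         * assuming DoBlockingWork() removed Item from it */
        Entry = ListHead.Flink;
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
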
@@ -311,27 +316,29 @@ CcRosTrimCache (
     *NrFreed = 0;
 
 retry:
-    KeAcquireGuardedMutex(&ViewLock);
+    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
 
     current_entry = VacbLruListHead.Flink;
     while (current_entry != &VacbLruListHead)
     {
+        ULONG Refs;
+
         current = CONTAINING_RECORD(current_entry,
                                     ROS_VACB,
                                     VacbLruListEntry);
         current_entry = current_entry->Flink;
 
-        KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
+        KeAcquireSpinLockAtDpcLevel(&current->SharedCacheMap->CacheMapLock);
 
         /* Reference the VACB */
         CcRosVacbIncRefCount(current);
 
         /* Check if it's mapped and not dirty */
-        if (current->MappedCount > 0 && !current->Dirty)
+        if (InterlockedCompareExchange((PLONG)&current->MappedCount, 0, 0) > 0 && !current->Dirty)
         {
             /* We have to break these locks because Cc sucks */
-            KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
-            KeReleaseGuardedMutex(&ViewLock);
+            KeReleaseSpinLockFromDpcLevel(&current->SharedCacheMap->CacheMapLock);
+            KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
 
             /* Page out the VACB */
             for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++)
@@ -342,21 +349,23 @@ retry:
             }
 
             /* Reacquire the locks */
-            KeAcquireGuardedMutex(&ViewLock);
-            KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
+            oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
+            KeAcquireSpinLockAtDpcLevel(&current->SharedCacheMap->CacheMapLock);
         }
 
         /* Dereference the VACB */
-        CcRosVacbDecRefCount(current);
+        Refs = CcRosVacbDecRefCount(current);
 
         /* Check if we can free this entry now */
-        if (current->ReferenceCount == 0)
+        if (Refs < 2)
         {
             ASSERT(!current->Dirty);
             ASSERT(!current->MappedCount);
+            ASSERT(Refs == 1);
 
             RemoveEntryList(&current->CacheMapVacbListEntry);
             RemoveEntryList(&current->VacbLruListEntry);
+            InitializeListHead(&current->VacbLruListEntry);
             InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
 
             /* Calculate how many pages we freed for Mm */
@@ -365,10 +374,10 @@ retry:
             (*NrFreed) += PagesFreed;
         }
 
-        KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
+        KeReleaseSpinLockFromDpcLevel(&current->SharedCacheMap->CacheMapLock);
     }
 
-    KeReleaseGuardedMutex(&ViewLock);
+    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
 
     /* Try flushing pages if we haven't met our target */
     if ((Target > 0) && !FlushedPages)
@@ -391,11 +400,15 @@ retry:
 
     while (!IsListEmpty(&FreeList))
     {
+        ULONG Refs;
+
         current_entry = RemoveHeadList(&FreeList);
         current = CONTAINING_RECORD(current_entry,
                                     ROS_VACB,
                                     CacheMapVacbListEntry);
-        CcRosInternalFreeVacb(current);
+        InitializeListHead(&current->CacheMapVacbListEntry);
+        Refs = CcRosVacbDecRefCount(current);
+        ASSERT(Refs == 0);
     }
 
     DPRINT("Evicted %lu cache pages\n", (*NrFreed));
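
The trim path now counts the LRU list itself as holding one reference, so once the loop's transient reference is dropped a result below 2 (i.e. exactly 1) means the VACB is unused; it is unlinked while the locks are held and batched onto a local FreeList so that the final dereference and free happen with no spin lock held. A sketch of that unlink-under-lock / free-outside-lock idiom (hypothetical names):

    LIST_ENTRY FreeList;
    InitializeListHead(&FreeList);

    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    /* ... walk the LRU list, RemoveEntryList() each idle victim and
     *     InsertHeadList(&FreeList, &Victim->ListEntry) ... */
    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

    while (!IsListEmpty(&FreeList))
    {
        PLIST_ENTRY Entry = RemoveHeadList(&FreeList);
        PITEM Victim = CONTAINING_RECORD(Entry, ITEM, ListEntry);
        FreeItem(Victim);      /* no spin lock held: freeing can be arbitrarily expensive */
    }
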
@@ -412,6 +425,7 @@ CcRosReleaseVacb (
     BOOLEAN Dirty,
     BOOLEAN Mapped)
 {
+    ULONG Refs;
     ASSERT(SharedCacheMap);
 
     DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
@@ -426,15 +440,14 @@ CcRosReleaseVacb (
 
     if (Mapped)
     {
-        Vacb->MappedCount++;
-    }
-    CcRosVacbDecRefCount(Vacb);
-    if (Mapped && (Vacb->MappedCount == 1))
-    {
-        CcRosVacbIncRefCount(Vacb);
+        if (InterlockedIncrement((PLONG)&Vacb->MappedCount) == 1)
+        {
+            CcRosVacbIncRefCount(Vacb);
+        }
     }
 
-    CcRosReleaseVacbLock(Vacb);
+    Refs = CcRosVacbDecRefCount(Vacb);
+    ASSERT(Refs > 0);
 
     return STATUS_SUCCESS;
 }
@@ -455,8 +468,8 @@ CcRosLookupVacb (
     DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
            SharedCacheMap, FileOffset);
 
-    KeAcquireGuardedMutex(&ViewLock);
-    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
+    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
+    KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
 
     current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
     while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
@@ -469,9 +482,8 @@ CcRosLookupVacb (
                            FileOffset))
         {
             CcRosVacbIncRefCount(current);
-            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
-            KeReleaseGuardedMutex(&ViewLock);
-            CcRosAcquireVacbLock(current, NULL);
+            KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
+            KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
             return current;
         }
         if (current->FileOffset.QuadPart > FileOffset)
@@ -479,8 +491,8 @@ CcRosLookupVacb (
         current_entry = current_entry->Flink;
     }
 
-    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
-    KeReleaseGuardedMutex(&ViewLock);
+    KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
+    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
 
     return NULL;
 }
@@ -495,8 +507,8 @@ CcRosMarkDirtyVacb (
 
     SharedCacheMap = Vacb->SharedCacheMap;
 
-    KeAcquireGuardedMutex(&ViewLock);
-    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
+    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
+    KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
 
     ASSERT(!Vacb->Dirty);
 
@@ -511,11 +523,9 @@ CcRosMarkDirtyVacb (
 
     Vacb->Dirty = TRUE;
 
-    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
-    KeReleaseGuardedMutex(&ViewLock);
+    KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
 
     /* Schedule a lazy writer run to now that we have dirty VACB */
-    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
     if (!LazyWriter.ScanActive)
     {
         CcScheduleLazyWriteScan(FALSE);
@@ -536,8 +546,8 @@ CcRosUnmarkDirtyVacb (
 
     if (LockViews)
     {
-        KeAcquireGuardedMutex(&ViewLock);
-        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
+        oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
+        KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
     }
 
     ASSERT(Vacb->Dirty);
@@ -545,14 +555,15 @@ CcRosUnmarkDirtyVacb (
     Vacb->Dirty = FALSE;
 
     RemoveEntryList(&Vacb->DirtyVacbListEntry);
+    InitializeListHead(&Vacb->DirtyVacbListEntry);
     CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
     Vacb->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
     CcRosVacbDecRefCount(Vacb);
 
     if (LockViews)
     {
-        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
-        KeReleaseGuardedMutex(&ViewLock);
+        KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
+        KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
     }
 }
 
@@ -575,16 +586,15 @@ CcRosMarkDirtyFile (
         KeBugCheck(CACHE_MANAGER);
     }
 
-    if (!Vacb->Dirty)
-    {
-        CcRosMarkDirtyVacb(Vacb);
-    }
-
-    CcRosReleaseVacbLock(Vacb);
+    CcRosReleaseVacb(SharedCacheMap, Vacb, Vacb->Valid, TRUE, FALSE);
 
     return STATUS_SUCCESS;
 }
 
+/*
+ * Note: this is not the inverse of
+ * CcRosMapVacbInKernelSpace()
+ */
 NTSTATUS
 NTAPI
 CcRosUnmapVacb (
@@ -605,44 +615,39 @@ CcRosUnmapVacb (
         return STATUS_UNSUCCESSFUL;
     }
 
-    if (NowDirty && !Vacb->Dirty)
-    {
-        CcRosMarkDirtyVacb(Vacb);
-    }
-
     ASSERT(Vacb->MappedCount != 0);
-    Vacb->MappedCount--;
-
-    CcRosVacbDecRefCount(Vacb);
-    if (Vacb->MappedCount == 0)
+    if (InterlockedDecrement((PLONG)&Vacb->MappedCount) == 0)
     {
         CcRosVacbDecRefCount(Vacb);
     }
 
-    CcRosReleaseVacbLock(Vacb);
+    CcRosReleaseVacb(SharedCacheMap, Vacb, Vacb->Valid, NowDirty, FALSE);
 
     return STATUS_SUCCESS;
 }
 
 static
 NTSTATUS
-CcRosMapVacb(
+CcRosMapVacbInKernelSpace(
     PROS_VACB Vacb)
 {
     ULONG i;
     NTSTATUS Status;
     ULONG_PTR NumberOfPages;
+    PVOID BaseAddress = NULL;
 
     /* Create a memory area. */
     MmLockAddressSpace(MmGetKernelAddressSpace());
     Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
                                 0, // nothing checks for VACB mareas, so set to 0
-                                &Vacb->BaseAddress,
+                                &BaseAddress,
                                 VACB_MAPPING_GRANULARITY,
                                 PAGE_READWRITE,
                                 (PMEMORY_AREA*)&Vacb->MemoryArea,
                                 0,
                                 PAGE_SIZE);
+    ASSERT(Vacb->BaseAddress == NULL);
+    Vacb->BaseAddress = BaseAddress;
     MmUnlockAddressSpace(MmGetKernelAddressSpace());
     if (!NT_SUCCESS(Status))
     {
@@ -652,6 +657,7 @@ CcRosMapVacb(
 
     ASSERT(((ULONG_PTR)Vacb->BaseAddress % PAGE_SIZE) == 0);
     ASSERT((ULONG_PTR)Vacb->BaseAddress > (ULONG_PTR)MmSystemRangeStart);
+    ASSERT((ULONG_PTR)Vacb->BaseAddress + VACB_MAPPING_GRANULARITY - 1 > (ULONG_PTR)MmSystemRangeStart);
 
     /* Create a virtual mapping for this memory area */
     NumberOfPages = BYTES_TO_PAGES(VACB_MAPPING_GRANULARITY);
@@ -667,6 +673,11 @@ CcRosMapVacb(
             KeBugCheck(MEMORY_MANAGEMENT);
         }
 
+        ASSERT(BaseAddress == Vacb->BaseAddress);
+        ASSERT(i * PAGE_SIZE < VACB_MAPPING_GRANULARITY);
+        ASSERT((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE) >= (ULONG_PTR)BaseAddress);
+        ASSERT((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE) > (ULONG_PTR)MmSystemRangeStart);
+
         Status = MmCreateVirtualMapping(NULL,
                                         (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE)),
                                         PAGE_READWRITE,
@@ -682,6 +693,88 @@ CcRosMapVacb(
     return STATUS_SUCCESS;
 }
 
+static
+BOOLEAN
+CcRosFreeUnusedVacb (
+    PULONG Count)
+{
+    ULONG cFreed;
+    BOOLEAN Freed;
+    KIRQL oldIrql;
+    PROS_VACB current;
+    LIST_ENTRY FreeList;
+    PLIST_ENTRY current_entry;
+
+    cFreed = 0;
+    Freed = FALSE;
+    InitializeListHead(&FreeList);
+
+    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
+
+    /* Walk all the VACBs currently on the LRU list */
+    current_entry = VacbLruListHead.Flink;
+    while (current_entry != &VacbLruListHead)
+    {
+        ULONG Refs;
+
+        current = CONTAINING_RECORD(current_entry,
+                                    ROS_VACB,
+                                    VacbLruListEntry);
+        current_entry = current_entry->Flink;
+
+        KeAcquireSpinLockAtDpcLevel(&current->SharedCacheMap->CacheMapLock);
+
+        /* Only deal with unused VACBs: these are the ones we will free */
+        Refs = CcRosVacbGetRefCount(current);
+        if (Refs < 2)
+        {
+            ASSERT(!current->Dirty);
+            ASSERT(!current->MappedCount);
+            ASSERT(Refs == 1);
+
+            /* Reset and move to free list */
+            RemoveEntryList(&current->CacheMapVacbListEntry);
+            RemoveEntryList(&current->VacbLruListEntry);
+            InitializeListHead(&current->VacbLruListEntry);
+            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
+        }
+
+        KeReleaseSpinLockFromDpcLevel(&current->SharedCacheMap->CacheMapLock);
+    }
+
+    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
+
+    /* Now free every VACB we collected; this is what actually releases the memory */
+    while (!IsListEmpty(&FreeList))
+    {
+        ULONG Refs;
+
+        current_entry = RemoveHeadList(&FreeList);
+        current = CONTAINING_RECORD(current_entry,
+                                    ROS_VACB,
+                                    CacheMapVacbListEntry);
+        InitializeListHead(&current->CacheMapVacbListEntry);
+        Refs = CcRosVacbDecRefCount(current);
+        ASSERT(Refs == 0);
+        ++cFreed;
+    }
+
+    /* If we freed at least one VACB, return success */
+    if (cFreed != 0)
+    {
+        Freed = TRUE;
+    }
+
+    /* If the caller asked for the number of freed VACBs, return it */
+    if (Count != NULL)
+    {
+        *Count = cFreed;
+    }
+
+    return Freed;
+}
+
 static
 NTSTATUS
 CcRosCreateVacb (
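
Both CcRosTrimCache() and the new CcRosFreeUnusedVacb() re-initialize every LIST_ENTRY right after unlinking it, which makes a detached node point back to itself; the free path can then assert that the VACB is on no list at all with a plain IsListEmpty() check. A small sketch of the idiom:

    /* Unlink the node and make its entry self-referencing again */
    RemoveEntryList(&Node->ListEntry);
    InitializeListHead(&Node->ListEntry);

    /* ... later, on the free path ... */
    ASSERT(IsListEmpty(&Node->ListEntry));   /* TRUE only for a detached, re-initialized entry */
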
@@ -694,6 +787,8 @@ CcRosCreateVacb (
     PLIST_ENTRY current_entry;
     NTSTATUS Status;
     KIRQL oldIrql;
+    ULONG Refs;
+    BOOLEAN Retried;
 
     ASSERT(SharedCacheMap);
 
@@ -719,13 +814,37 @@ CcRosCreateVacb (
     }
 #endif
     current->MappedCount = 0;
-    current->DirtyVacbListEntry.Flink = NULL;
-    current->DirtyVacbListEntry.Blink = NULL;
-    current->ReferenceCount = 1;
-    current->PinCount = 0;
-    KeInitializeMutex(&current->Mutex, 0);
-    CcRosAcquireVacbLock(current, NULL);
-    KeAcquireGuardedMutex(&ViewLock);
+    current->ReferenceCount = 0;
+    InitializeListHead(&current->CacheMapVacbListEntry);
+    InitializeListHead(&current->DirtyVacbListEntry);
+    InitializeListHead(&current->VacbLruListEntry);
+
+    CcRosVacbIncRefCount(current);
+
+    Retried = FALSE;
+Retry:
+    /* Map VACB in kernel space */
+    Status = CcRosMapVacbInKernelSpace(current);
+    if (!NT_SUCCESS(Status))
+    {
+        ULONG Freed;
+        /* If no space is left, try to prune unused VACBs
+         * to recover room to map our VACB.
+         * If that succeeds, retry the mapping; otherwise
+         * just fail.
+         */
+        if (!Retried && CcRosFreeUnusedVacb(&Freed))
+        {
+            DPRINT("Pruned %lu VACBs, trying again\n", Freed);
+            Retried = TRUE;
+            goto Retry;
+        }
+
+        ExFreeToNPagedLookasideList(&VacbLookasideList, current);
+        return Status;
+    }
+
+    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
 
     *Vacb = current;
     /* There is window between the call to CcRosLookupVacb
@@ -733,7 +852,7 @@ CcRosCreateVacb (
      * file offset exist. If there is a VACB, we release
      * our newly created VACB and return the existing one.
      */
-    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
+    KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
     current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
     previous = NULL;
     while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
@@ -746,7 +865,7 @@ CcRosCreateVacb (
                            FileOffset))
         {
             CcRosVacbIncRefCount(current);
-            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
+            KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
 #if DBG
             if (SharedCacheMap->Trace)
             {
@@ -756,11 +875,12 @@ CcRosCreateVacb (
                         current);
             }
 #endif
-            CcRosReleaseVacbLock(*Vacb);
-            KeReleaseGuardedMutex(&ViewLock);
-            ExFreeToNPagedLookasideList(&VacbLookasideList, *Vacb);
+            KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
+
+            Refs = CcRosVacbDecRefCount(*Vacb);
+            ASSERT(Refs == 0);
+
             *Vacb = current;
-            CcRosAcquireVacbLock(current, NULL);
             return STATUS_SUCCESS;
         }
         if (current->FileOffset.QuadPart < FileOffset)
@@ -783,9 +903,9 @@ CcRosCreateVacb (
     {
         InsertHeadList(&SharedCacheMap->CacheMapVacbListHead, &current->CacheMapVacbListEntry);
     }
-    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
+    KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
     InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
-    KeReleaseGuardedMutex(&ViewLock);
+    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
 
     MI_SET_USAGE(MI_USAGE_CACHE);
 #if MI_TRACE_PFNS
@@ -806,14 +926,8 @@ CcRosCreateVacb (
     }
 #endif
 
-    Status = CcRosMapVacb(current);
-    if (!NT_SUCCESS(Status))
-    {
-        RemoveEntryList(&current->CacheMapVacbListEntry);
-        RemoveEntryList(&current->VacbLruListEntry);
-        CcRosReleaseVacbLock(current);
-        ExFreeToNPagedLookasideList(&VacbLookasideList, current);
-    }
+    /* Take the caller's reference: CcRosReleaseVacb() will drop it */
+    CcRosVacbIncRefCount(current);
 
     return Status;
 }
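
CcRosCreateVacb() now maps the view into kernel address space before linking it anywhere, and if the mapping fails for lack of space it prunes idle views with CcRosFreeUnusedVacb() and retries the mapping exactly once. A condensed sketch of that single-retry-after-reclaim pattern (placeholder names):

    BOOLEAN Retried = FALSE;

Retry:
    Status = MapResource(Object);
    if (!NT_SUCCESS(Status))
    {
        /* Reclaim once, retry once: never loop */
        if (!Retried && ReclaimUnusedResources())
        {
            Retried = TRUE;
            goto Retry;
        }

        FreeObject(Object);
        return Status;
    }
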
@@ -830,6 +944,8 @@ CcRosGetVacb (
 {
     PROS_VACB current;
     NTSTATUS Status;
+    ULONG Refs;
+    KIRQL OldIrql;
 
     ASSERT(SharedCacheMap);
 
@@ -851,13 +967,15 @@ CcRosGetVacb (
         }
     }
 
-    KeAcquireGuardedMutex(&ViewLock);
+    Refs = CcRosVacbGetRefCount(current);
+
+    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
 
     /* Move to the tail of the LRU list */
     RemoveEntryList(&current->VacbLruListEntry);
     InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
 
-    KeReleaseGuardedMutex(&ViewLock);
+    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
 
     /*
      * Return information about the VACB to the caller.
@@ -867,6 +985,9 @@ CcRosGetVacb (
     DPRINT("*BaseAddress %p\n", *BaseAddress);
     *Vacb = current;
     *BaseOffset = current->FileOffset.QuadPart;
+
+    ASSERT(Refs > 1);
+
     return STATUS_SUCCESS;
 }
 
@@ -941,6 +1062,20 @@ CcRosInternalFreeVacb (
                      NULL);
     MmUnlockAddressSpace(MmGetKernelAddressSpace());
 
+    if (Vacb->ReferenceCount != 0)
+    {
+        DPRINT1("Invalid free: %lu\n", Vacb->ReferenceCount);
+        if (Vacb->SharedCacheMap->FileObject && Vacb->SharedCacheMap->FileObject->FileName.Length)
+        {
+            DPRINT1("For file: %wZ\n", &Vacb->SharedCacheMap->FileObject->FileName);
+        }
+    }
+
+    ASSERT(Vacb->ReferenceCount == 0);
+    ASSERT(IsListEmpty(&Vacb->CacheMapVacbListEntry));
+    ASSERT(IsListEmpty(&Vacb->DirtyVacbListEntry));
+    ASSERT(IsListEmpty(&Vacb->VacbLruListEntry));
+    RtlFillMemory(Vacb, sizeof(*Vacb), 0xfd);
     ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
     return STATUS_SUCCESS;
 }
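
Before a VACB is returned to its lookaside list it is now filled with 0xfd, so a stale pointer into a freed view shows up as a recognizable use-after-free pattern in a crash dump instead of silently reading recycled memory. A minimal illustration of that poison-on-free debugging aid (the helper name is hypothetical):

    static VOID FreePoisoned(PNPAGED_LOOKASIDE_LIST List, PVOID Block, SIZE_T Size)
    {
        /* 0xfd-filled fields make a use-after-free easy to recognize in a dump */
        RtlFillMemory(Block, Size, 0xfd);
        ExFreeToNPagedLookasideList(List, Block);
    }
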
@@ -961,7 +1096,6 @@ CcFlushCache (
     LONGLONG RemainingLength;
     PROS_VACB current;
     NTSTATUS Status;
-    KIRQL oldIrql;
 
     CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
         SectionObjectPointers, FileOffset, Length);
@@ -1004,13 +1138,7 @@ CcFlushCache (
                     }
                 }
 
-                CcRosReleaseVacbLock(current);
-
-                KeAcquireGuardedMutex(&ViewLock);
-                KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
-                CcRosVacbDecRefCount(current);
-                KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
-                KeReleaseGuardedMutex(&ViewLock);
+                CcRosReleaseVacb(SharedCacheMap, current, current->Valid, current->Dirty, FALSE);
             }
 
             Offset.QuadPart += VACB_MAPPING_GRANULARITY;
@@ -1030,7 +1158,8 @@ NTSTATUS
 NTAPI
 CcRosDeleteFileCache (
     PFILE_OBJECT FileObject,
-    PROS_SHARED_CACHE_MAP SharedCacheMap)
+    PROS_SHARED_CACHE_MAP SharedCacheMap,
+    PKIRQL OldIrql)
 /*
  * FUNCTION: Releases the shared cache map associated with a file object
  */
@@ -1038,69 +1167,100 @@ CcRosDeleteFileCache (
     PLIST_ENTRY current_entry;
     PROS_VACB current;
     LIST_ENTRY FreeList;
-    KIRQL oldIrql;
 
     ASSERT(SharedCacheMap);
 
     SharedCacheMap->OpenCount++;
-    KeReleaseGuardedMutex(&ViewLock);
+    KeReleaseQueuedSpinLock(LockQueueMasterLock, *OldIrql);
 
     CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);
 
-    KeAcquireGuardedMutex(&ViewLock);
+    *OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
     SharedCacheMap->OpenCount--;
     if (SharedCacheMap->OpenCount == 0)
     {
-        KIRQL OldIrql;
-
         FileObject->SectionObjectPointer->SharedCacheMap = NULL;
 
         /*
          * Release all VACBs
          */
         InitializeListHead(&FreeList);
-        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
+        KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
         while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
         {
             current_entry = RemoveTailList(&SharedCacheMap->CacheMapVacbListHead);
-            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
+            KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
 
             current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
-            CcRosAcquireVacbLock(current, NULL);
             RemoveEntryList(&current->VacbLruListEntry);
+            InitializeListHead(&current->VacbLruListEntry);
             if (current->Dirty)
             {
-                KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
+                KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
                 CcRosUnmarkDirtyVacb(current, FALSE);
-                KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
+                KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
                 DPRINT1("Freeing dirty VACB\n");
             }
+            if (current->MappedCount != 0)
+            {
+                current->MappedCount = 0;
+                NT_VERIFY(CcRosVacbDecRefCount(current) > 0);
+                DPRINT1("Freeing mapped VACB\n");
+            }
             InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
-            CcRosReleaseVacbLock(current);
 
-            KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
+            KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
         }
 #if DBG
         SharedCacheMap->Trace = FALSE;
 #endif
-        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
+        KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
 
-        KeReleaseGuardedMutex(&ViewLock);
+        KeReleaseQueuedSpinLock(LockQueueMasterLock, *OldIrql);
         ObDereferenceObject(SharedCacheMap->FileObject);
 
         while (!IsListEmpty(&FreeList))
         {
+            ULONG Refs;
+
             current_entry = RemoveTailList(&FreeList);
             current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
-            CcRosInternalFreeVacb(current);
+            InitializeListHead(&current->CacheMapVacbListEntry);
+            Refs = CcRosVacbDecRefCount(current);
+#if DBG // CORE-14578
+            if (Refs != 0)
+            {
+                DPRINT1("Leaking VACB %p attached to %p (%I64d)\n", current, FileObject, current->FileOffset.QuadPart);
+                DPRINT1("There are: %lu references left\n", Refs);
+                DPRINT1("Map: %lu\n", current->MappedCount);
+                DPRINT1("Dirty: %d\n", current->Dirty);
+                if (FileObject->FileName.Length != 0)
+                {
+                    DPRINT1("File was: %wZ\n", &FileObject->FileName);
+                }
+                else if (FileObject->FsContext != NULL &&
+                         ((PFSRTL_COMMON_FCB_HEADER)(FileObject->FsContext))->NodeTypeCode == 0x0502 &&
+                         ((PFSRTL_COMMON_FCB_HEADER)(FileObject->FsContext))->NodeByteSize == 0x1F8 &&
+                         ((PUNICODE_STRING)(((PUCHAR)FileObject->FsContext) + 0x100))->Length != 0)
+                {
+                    DPRINT1("File was: %wZ (FastFAT)\n", (PUNICODE_STRING)(((PUCHAR)FileObject->FsContext) + 0x100));
+                }
+                else
+                {
+                    DPRINT1("No name for the file\n");
+                }
+            }
+#else
+            ASSERT(Refs == 0);
+#endif
         }
 
-        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
+        *OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
         RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
-        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
+        KeReleaseQueuedSpinLock(LockQueueMasterLock, *OldIrql);
 
         ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
-        KeAcquireGuardedMutex(&ViewLock);
+        *OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
     }
     return STATUS_SUCCESS;
 }
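
CcRosDeleteFileCache() is now entered with the master lock held and must drop it around CcFlushCache() and the object dereference, which may block; the saved IRQL therefore travels through the new PKIRQL parameter so the callee can release and retake the caller's lock and still return with it held. A reduced sketch of that calling convention (hypothetical names):

    /* Caller */
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    DeleteThing(Object, &OldIrql);            /* may drop and retake the lock internally */
    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

    /* Callee: entered and left with the lock held */
    VOID DeleteThing(POBJECT Object, PKIRQL OldIrql)
    {
        KeReleaseQueuedSpinLock(LockQueueMasterLock, *OldIrql);
        DoBlockingCleanup(Object);            /* needs to run below DISPATCH_LEVEL */
        *OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    }
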
@@ -1111,12 +1271,14 @@ CcRosReferenceCache (
     PFILE_OBJECT FileObject)
 {
     PROS_SHARED_CACHE_MAP SharedCacheMap;
-    KeAcquireGuardedMutex(&ViewLock);
+    KIRQL OldIrql;
+
+    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
     SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
     ASSERT(SharedCacheMap);
     ASSERT(SharedCacheMap->OpenCount != 0);
     SharedCacheMap->OpenCount++;
-    KeReleaseGuardedMutex(&ViewLock);
+    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
 }
 
 VOID
@@ -1125,14 +1287,16 @@ CcRosRemoveIfClosed (
     PSECTION_OBJECT_POINTERS SectionObjectPointer)
 {
     PROS_SHARED_CACHE_MAP SharedCacheMap;
+    KIRQL OldIrql;
+
     DPRINT("CcRosRemoveIfClosed()\n");
-    KeAcquireGuardedMutex(&ViewLock);
+    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
     SharedCacheMap = SectionObjectPointer->SharedCacheMap;
     if (SharedCacheMap && SharedCacheMap->OpenCount == 0)
     {
-        CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap);
+        CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap, &OldIrql);
     }
-    KeReleaseGuardedMutex(&ViewLock);
+    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
 }
 
 
@@ -1142,7 +1306,9 @@ CcRosDereferenceCache (
     PFILE_OBJECT FileObject)
 {
     PROS_SHARED_CACHE_MAP SharedCacheMap;
-    KeAcquireGuardedMutex(&ViewLock);
+    KIRQL OldIrql;
+
+    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
     SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
     ASSERT(SharedCacheMap);
     if (SharedCacheMap->OpenCount > 0)
@@ -1150,11 +1316,17 @@ CcRosDereferenceCache (
         SharedCacheMap->OpenCount--;
         if (SharedCacheMap->OpenCount == 0)
         {
+            KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
             MmFreeSectionSegments(SharedCacheMap->FileObject);
-            CcRosDeleteFileCache(FileObject, SharedCacheMap);
+
+            OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
+            CcRosDeleteFileCache(FileObject, SharedCacheMap, &OldIrql);
+            KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
+
+            return;
         }
     }
-    KeReleaseGuardedMutex(&ViewLock);
+    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
 }
 
 NTSTATUS
@@ -1170,7 +1342,7 @@ CcRosReleaseFileCache (
     PPRIVATE_CACHE_MAP PrivateMap;
     PROS_SHARED_CACHE_MAP SharedCacheMap;
 
-    KeAcquireGuardedMutex(&ViewLock);
+    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
 
     if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
     {
@@ -1181,33 +1353,44 @@ CcRosReleaseFileCache (
          * lock the master lock, to be sure not to race
          * with a potential read ahead ongoing!
          */
-        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
         PrivateMap = FileObject->PrivateCacheMap;
         FileObject->PrivateCacheMap = NULL;
-        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
 
         if (PrivateMap != NULL)
         {
             /* Remove it from the file */
-            KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
+            KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
             RemoveEntryList(&PrivateMap->PrivateLinks);
-            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);
+            KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
 
             /* And free it. */
-            ExFreePoolWithTag(PrivateMap, TAG_PRIVATE_CACHE_MAP);
+            if (PrivateMap != &SharedCacheMap->PrivateCacheMap)
+            {
+                ExFreePoolWithTag(PrivateMap, TAG_PRIVATE_CACHE_MAP);
+            }
+            else
+            {
+                PrivateMap->NodeTypeCode = 0;
+            }
 
             if (SharedCacheMap->OpenCount > 0)
             {
                 SharedCacheMap->OpenCount--;
                 if (SharedCacheMap->OpenCount == 0)
                 {
+                    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
                     MmFreeSectionSegments(SharedCacheMap->FileObject);
-                    CcRosDeleteFileCache(FileObject, SharedCacheMap);
+
+                    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
+                    CcRosDeleteFileCache(FileObject, SharedCacheMap, &OldIrql);
+                    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
+
+                    return STATUS_SUCCESS;
                 }
             }
         }
     }
-    KeReleaseGuardedMutex(&ViewLock);
+    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
     return STATUS_SUCCESS;
 }
 
@@ -1227,26 +1410,19 @@ CcRosInitializeFileCache (
     BOOLEAN Allocated;
     PROS_SHARED_CACHE_MAP SharedCacheMap;
 
-    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
-    DPRINT("CcRosInitializeFileCache(FileObject 0x%p, SharedCacheMap 0x%p)\n",
-           FileObject, SharedCacheMap);
+    DPRINT("CcRosInitializeFileCache(FileObject 0x%p)\n", FileObject);
 
     Allocated = FALSE;
-    KeAcquireGuardedMutex(&ViewLock);
+    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
     if (SharedCacheMap == NULL)
     {
         Allocated = TRUE;
         SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
         if (SharedCacheMap == NULL)
         {
-            KeReleaseGuardedMutex(&ViewLock);
             return STATUS_INSUFFICIENT_RESOURCES;
         }
         RtlZeroMemory(SharedCacheMap, sizeof(*SharedCacheMap));
-        ObReferenceObjectByPointer(FileObject,
-                                   FILE_ALL_ACCESS,
-                                   NULL,
-                                   KernelMode);
         SharedCacheMap->NodeTypeCode = NODE_TYPE_SHARED_MAP;
         SharedCacheMap->NodeByteSize = sizeof(*SharedCacheMap);
         SharedCacheMap->FileObject = FileObject;
@@ -1260,33 +1436,55 @@ CcRosInitializeFileCache (
         InitializeListHead(&SharedCacheMap->PrivateList);
         KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
         InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
-        FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;
+        InitializeListHead(&SharedCacheMap->BcbList);
+    }
 
-        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
-        InsertTailList(&CcCleanSharedCacheMapList, &SharedCacheMap->SharedCacheMapLinks);
-        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
+    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
+    if (Allocated)
+    {
+        if (FileObject->SectionObjectPointer->SharedCacheMap == NULL)
+        {
+            ObReferenceObjectByPointer(FileObject,
+                                       FILE_ALL_ACCESS,
+                                       NULL,
+                                       KernelMode);
+            FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;
+
+            InsertTailList(&CcCleanSharedCacheMapList, &SharedCacheMap->SharedCacheMapLinks);
+        }
+        else
+        {
+            ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
+            SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
+        }
     }
     if (FileObject->PrivateCacheMap == NULL)
     {
         PPRIVATE_CACHE_MAP PrivateMap;
 
         /* Allocate the private cache map for this handle */
-        PrivateMap = ExAllocatePoolWithTag(NonPagedPool, sizeof(PRIVATE_CACHE_MAP), TAG_PRIVATE_CACHE_MAP);
+        if (SharedCacheMap->PrivateCacheMap.NodeTypeCode != 0)
+        {
+            PrivateMap = ExAllocatePoolWithTag(NonPagedPool, sizeof(PRIVATE_CACHE_MAP), TAG_PRIVATE_CACHE_MAP);
+        }
+        else
+        {
+            PrivateMap = &SharedCacheMap->PrivateCacheMap;
+        }
+
         if (PrivateMap == NULL)
         {
             /* If we also allocated the shared cache map for this file, kill it */
             if (Allocated)
             {
-                OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
                 RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
-                KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
 
                 FileObject->SectionObjectPointer->SharedCacheMap = NULL;
                 ObDereferenceObject(FileObject);
                 ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
             }
 
-            KeReleaseGuardedMutex(&ViewLock);
+            KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
             return STATUS_INSUFFICIENT_RESOURCES;
         }
 
@@ -1298,14 +1496,14 @@ CcRosInitializeFileCache (
         KeInitializeSpinLock(&PrivateMap->ReadAheadSpinLock);
 
         /* Link it to the file */
-        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
+        KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
         InsertTailList(&SharedCacheMap->PrivateList, &PrivateMap->PrivateLinks);
-        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);
+        KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
 
         FileObject->PrivateCacheMap = PrivateMap;
         SharedCacheMap->OpenCount++;
     }
-    KeReleaseGuardedMutex(&ViewLock);
+    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
 
     return STATUS_SUCCESS;
 }
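
CcRosInitializeFileCache() now allocates the shared cache map before taking the master lock and re-checks SectionObjectPointer->SharedCacheMap once the lock is held, returning the local allocation to the lookaside list if another thread published one in the meantime; in addition, the first private cache map is embedded in the shared map, so the common single-handle case needs no extra pool allocation. A sketch of the allocate-then-recheck-under-lock pattern (placeholder names; FreeThing() is assumed safe at DISPATCH_LEVEL, e.g. a lookaside free):

    PTHING Thing = AllocateThing();            /* no lock held: allocation may block or fail */
    if (Thing == NULL)
        return STATUS_INSUFFICIENT_RESOURCES;

    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    if (Container->Thing == NULL)
    {
        Container->Thing = Thing;              /* we won the race: publish ours */
    }
    else
    {
        FreeThing(Thing);                      /* lost the race: keep the winner's copy */
        Thing = Container->Thing;
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
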
@@ -1344,7 +1542,6 @@ CcInitView (
     InitializeListHead(&CcDeferredWrites);
     InitializeListHead(&CcCleanSharedCacheMapList);
     KeInitializeSpinLock(&CcDeferredWriteSpinLock);
-    KeInitializeGuardedMutex(&ViewLock);
     ExInitializeNPagedLookasideList(&iBcbLookasideList,
                                     NULL,
                                     NULL,
@@ -1390,6 +1587,7 @@ ExpKdbgExtFileCache(ULONG Argc, PCHAR Argv[])
         ULONG Valid = 0, Dirty = 0;
         PROS_SHARED_CACHE_MAP SharedCacheMap;
         PUNICODE_STRING FileName;
+        PWSTR Extra = L"";
 
         SharedCacheMap = CONTAINING_RECORD(ListEntry, ROS_SHARED_CACHE_MAP, SharedCacheMapLinks);
 
@@ -1416,13 +1614,22 @@ ExpKdbgExtFileCache(ULONG Argc, PCHAR Argv[])
         {
             FileName = &SharedCacheMap->FileObject->FileName;
         }
+        else if (SharedCacheMap->FileObject != NULL &&
+                 SharedCacheMap->FileObject->FsContext != NULL &&
+                 ((PFSRTL_COMMON_FCB_HEADER)(SharedCacheMap->FileObject->FsContext))->NodeTypeCode == 0x0502 &&
+                 ((PFSRTL_COMMON_FCB_HEADER)(SharedCacheMap->FileObject->FsContext))->NodeByteSize == 0x1F8 &&
+                 ((PUNICODE_STRING)(((PUCHAR)SharedCacheMap->FileObject->FsContext) + 0x100))->Length != 0)
+        {
+            FileName = (PUNICODE_STRING)(((PUCHAR)SharedCacheMap->FileObject->FsContext) + 0x100);
+            Extra = L" (FastFAT)";
+        }
         else
         {
             FileName = &NoName;
         }
 
         /* And print */
-        KdbpPrint("%p\t%d\t%d\t%wZ\n", SharedCacheMap, Valid, Dirty, FileName);
+        KdbpPrint("%p\t%d\t%d\t%wZ%S\n", SharedCacheMap, Valid, Dirty, FileName, Extra);
     }
 
     return TRUE;
@@ -1437,6 +1644,12 @@ ExpKdbgExtDefWrites(ULONG Argc, PCHAR Argv[])
               (CcDirtyPageThreshold * PAGE_SIZE) / 1024);
     KdbpPrint("MmAvailablePages:\t%lu (%lu Kb)\n", MmAvailablePages,
               (MmAvailablePages * PAGE_SIZE) / 1024);
+    KdbpPrint("MmThrottleTop:\t\t%lu (%lu Kb)\n", MmThrottleTop,
+              (MmThrottleTop * PAGE_SIZE) / 1024);
+    KdbpPrint("MmThrottleBottom:\t%lu (%lu Kb)\n", MmThrottleBottom,
+              (MmThrottleBottom * PAGE_SIZE) / 1024);
+    KdbpPrint("MmModifiedPageListHead.Total:\t%lu (%lu Kb)\n", MmModifiedPageListHead.Total,
+              (MmModifiedPageListHead.Total * PAGE_SIZE) / 1024);
 
     if (CcTotalDirtyPages >= CcDirtyPageThreshold)
     {