[NTOSKRNL] Map the VACB in kernel space before inserting it in lists.
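
Before this change, CcRosCreateVacb linked a freshly allocated VACB into the shared cache map and LRU lists first and only mapped it into kernel space afterwards, so a mapping failure had to unlink it again. With the reordering below, CcRosMapVacbInKernelSpace runs before any list insertion: a VACB reachable from a list is always mapped, and a failed mapping reduces to freeing an object nobody else has seen. A minimal user-mode sketch of that ordering, with stand-ins for the kernel structures and calls (the vacb struct, create_vacb and the malloc-based "mapping" are illustrative only, not the ntoskrnl API):

    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative stand-ins only; the real code uses ROS_VACB,
     * CcRosMapVacbInKernelSpace() and the kernel list/refcount helpers. */
    struct vacb {
        void        *base_address;   /* the kernel-space mapping in the real code */
        unsigned     ref_count;
        struct vacb *next;           /* stands in for the cache-map/LRU links */
    };

    static struct vacb *vacb_list;   /* stands in for VacbLruListHead */

    /* Map first, publish second: a VACB reachable from a list is always mapped. */
    static struct vacb *create_vacb(size_t view_size)
    {
        struct vacb *v = calloc(1, sizeof(*v));
        if (v == NULL)
            return NULL;

        v->ref_count = 1;                    /* creation reference */

        v->base_address = malloc(view_size); /* stand-in for the kernel mapping */
        if (v->base_address == NULL) {
            /* Nothing is linked yet, so failure is a plain free. */
            free(v);
            return NULL;
        }

        v->next = vacb_list;                 /* only now publish it on the list */
        vacb_list = v;
        v->ref_count++;                      /* reference handed to the caller */
        return v;
    }

    int main(void)
    {
        struct vacb *v = create_vacb(256 * 1024); /* VACB_MAPPING_GRANULARITY is 256 KB */
        if (v != NULL)
            printf("created VACB %p, mapped at %p, refs %u\n",
                   (void *)v, v->base_address, v->ref_count);
        return 0;
    }
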
diff --git a/ntoskrnl/cc/view.c b/ntoskrnl/cc/view.c
index 0d6d51b..1ec4c35 100644
--- a/ntoskrnl/cc/view.c
+++ b/ntoskrnl/cc/view.c
@@ -64,37 +64,47 @@ LIST_ENTRY CcDeferredWrites;
 KSPIN_LOCK CcDeferredWriteSpinLock;
 LIST_ENTRY CcCleanSharedCacheMapList;
 
-/* Internal vars (ROS):
- * - Lock for the CcCleanSharedCacheMapList list
- */
-KSPIN_LOCK iSharedCacheMapLock;
-
 #if DBG
-static void CcRosVacbIncRefCount_(PROS_VACB vacb, const char* file, int line)
+ULONG CcRosVacbIncRefCount_(PROS_VACB vacb, PCSTR file, INT line)
 {
-    ++vacb->ReferenceCount;
+    ULONG Refs;
+
+    Refs = InterlockedIncrement((PLONG)&vacb->ReferenceCount);
     if (vacb->SharedCacheMap->Trace)
     {
         DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
-                 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
+                 file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
     }
+
+    return Refs;
 }
-static void CcRosVacbDecRefCount_(PROS_VACB vacb, const char* file, int line)
+ULONG CcRosVacbDecRefCount_(PROS_VACB vacb, PCSTR file, INT line)
 {
-    ASSERT(vacb->ReferenceCount != 0);
-    --vacb->ReferenceCount;
-    ASSERT(!(vacb->ReferenceCount == 0 && vacb->Dirty));
+    ULONG Refs;
+
+    Refs = InterlockedDecrement((PLONG)&vacb->ReferenceCount);
+    ASSERT(!(Refs == 0 && vacb->Dirty));
     if (vacb->SharedCacheMap->Trace)
     {
         DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
-                 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
+                 file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
     }
+
+    return Refs;
+}
+ULONG CcRosVacbGetRefCount_(PROS_VACB vacb, PCSTR file, INT line)
+{
+    ULONG Refs;
+
+    Refs = InterlockedCompareExchange((PLONG)&vacb->ReferenceCount, 0, 0);
+    if (vacb->SharedCacheMap->Trace)
+    {
+        DbgPrint("(%s:%i) VACB %p ==RefCount=%lu, Dirty %u, PageOut %lu\n",
+                 file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
+    }
+
+    return Refs;
 }
-#define CcRosVacbIncRefCount(vacb) CcRosVacbIncRefCount_(vacb,__FILE__,__LINE__)
-#define CcRosVacbDecRefCount(vacb) CcRosVacbDecRefCount_(vacb,__FILE__,__LINE__)
-#else
-#define CcRosVacbIncRefCount(vacb) (++((vacb)->ReferenceCount))
-#define CcRosVacbDecRefCount(vacb) (--((vacb)->ReferenceCount))
 #endif
 
 NTSTATUS
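
With the reference count now manipulated through InterlockedIncrement/InterlockedDecrement, the DBG helpers return the value produced by the atomic operation itself, and callers such as CcRosFlushDirtyPages and CcRosFreeUnusedVacb act on that snapshot instead of re-reading ReferenceCount, which another thread may already have changed. A minimal sketch of the same return-the-result pattern using C11 atomics (the names here are illustrative, not the kernel helpers):

    #include <stdatomic.h>
    #include <stdio.h>

    struct object {
        atomic_ulong ref_count;
    };

    /* Return the post-increment value, as CcRosVacbIncRefCount_ now does. */
    static unsigned long obj_ref(struct object *o)
    {
        return atomic_fetch_add(&o->ref_count, 1) + 1;
    }

    /* Return the post-decrement value, as CcRosVacbDecRefCount_ now does. */
    static unsigned long obj_deref(struct object *o)
    {
        return atomic_fetch_sub(&o->ref_count, 1) - 1;
    }

    int main(void)
    {
        struct object o;
        unsigned long refs;

        atomic_init(&o.ref_count, 1);        /* creation reference */

        refs = obj_ref(&o);                  /* refs == 2 */
        refs = obj_deref(&o);                /* refs == 1 */

        /* Decide on the value the atomic op returned, not on a fresh racy read. */
        if (refs == 1)
            printf("only the creation reference is left (refs=%lu)\n", refs);
        return 0;
    }
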
@@ -231,8 +241,7 @@ CcRosFlushDirtyPages (
         ASSERT(current->Dirty);
 
         /* One reference is added above */
-        if ((current->ReferenceCount > 2 && current->PinCount == 0) ||
-            (current->ReferenceCount > 3 && current->PinCount > 1))
+        if (CcRosVacbGetRefCount(current) > 2)
         {
             CcRosReleaseVacbLock(current);
             current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
@@ -322,6 +331,8 @@ retry:
     current_entry = VacbLruListHead.Flink;
     while (current_entry != &VacbLruListHead)
     {
+        ULONG Refs;
+
         current = CONTAINING_RECORD(current_entry,
                                     ROS_VACB,
                                     VacbLruListEntry);
@@ -353,16 +364,18 @@ retry:
         }
 
         /* Dereference the VACB */
-        CcRosVacbDecRefCount(current);
+        Refs = CcRosVacbDecRefCount(current);
 
         /* Check if we can free this entry now */
-        if (current->ReferenceCount == 0)
+        if (Refs < 2)
         {
             ASSERT(!current->Dirty);
             ASSERT(!current->MappedCount);
+            ASSERT(Refs == 1);
 
             RemoveEntryList(&current->CacheMapVacbListEntry);
             RemoveEntryList(&current->VacbLruListEntry);
+            InitializeListHead(&current->VacbLruListEntry);
             InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
 
             /* Calculate how many pages we freed for Mm */
@@ -401,6 +414,8 @@ retry:
         current = CONTAINING_RECORD(current_entry,
                                     ROS_VACB,
                                     CacheMapVacbListEntry);
+        InitializeListHead(&current->CacheMapVacbListEntry);
+        CcRosVacbDecRefCount(current);
         CcRosInternalFreeVacb(current);
     }
 
@@ -418,6 +433,7 @@ CcRosReleaseVacb (
     BOOLEAN Dirty,
     BOOLEAN Mapped)
 {
+    ULONG Refs;
     ASSERT(SharedCacheMap);
 
     DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
@@ -434,12 +450,14 @@ CcRosReleaseVacb (
     {
         Vacb->MappedCount++;
     }
-    CcRosVacbDecRefCount(Vacb);
+    Refs = CcRosVacbDecRefCount(Vacb);
     if (Mapped && (Vacb->MappedCount == 1))
     {
         CcRosVacbIncRefCount(Vacb);
     }
 
+    ASSERT(Refs > 0);
+
     CcRosReleaseVacbLock(Vacb);
 
     return STATUS_SUCCESS;
@@ -551,6 +569,7 @@ CcRosUnmarkDirtyVacb (
     Vacb->Dirty = FALSE;
 
     RemoveEntryList(&Vacb->DirtyVacbListEntry);
+    InitializeListHead(&Vacb->DirtyVacbListEntry);
     CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
     Vacb->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
     CcRosVacbDecRefCount(Vacb);
@@ -581,16 +600,15 @@ CcRosMarkDirtyFile (
         KeBugCheck(CACHE_MANAGER);
     }
 
-    if (!Vacb->Dirty)
-    {
-        CcRosMarkDirtyVacb(Vacb);
-    }
-
-    CcRosReleaseVacbLock(Vacb);
+    CcRosReleaseVacb(SharedCacheMap, Vacb, Vacb->Valid, TRUE, FALSE);
 
     return STATUS_SUCCESS;
 }
 
+/*
+ * Note: this is not the inverse of
+ * CcRosMapVacbInKernelSpace()
+ */
 NTSTATUS
 NTAPI
 CcRosUnmapVacb (
@@ -611,44 +629,41 @@ CcRosUnmapVacb (
         return STATUS_UNSUCCESSFUL;
     }
 
-    if (NowDirty && !Vacb->Dirty)
-    {
-        CcRosMarkDirtyVacb(Vacb);
-    }
-
     ASSERT(Vacb->MappedCount != 0);
     Vacb->MappedCount--;
 
-    CcRosVacbDecRefCount(Vacb);
     if (Vacb->MappedCount == 0)
     {
         CcRosVacbDecRefCount(Vacb);
     }
 
-    CcRosReleaseVacbLock(Vacb);
+    CcRosReleaseVacb(SharedCacheMap, Vacb, Vacb->Valid, NowDirty, FALSE);
 
     return STATUS_SUCCESS;
 }
 
 static
 NTSTATUS
-CcRosMapVacb(
+CcRosMapVacbInKernelSpace(
     PROS_VACB Vacb)
 {
     ULONG i;
     NTSTATUS Status;
     ULONG_PTR NumberOfPages;
+    PVOID BaseAddress = NULL;
 
     /* Create a memory area. */
     MmLockAddressSpace(MmGetKernelAddressSpace());
     Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
                                 0, // nothing checks for VACB mareas, so set to 0
-                                &Vacb->BaseAddress,
+                                &BaseAddress,
                                 VACB_MAPPING_GRANULARITY,
                                 PAGE_READWRITE,
                                 (PMEMORY_AREA*)&Vacb->MemoryArea,
                                 0,
                                 PAGE_SIZE);
+    ASSERT(Vacb->BaseAddress == NULL);
+    Vacb->BaseAddress = BaseAddress;
     MmUnlockAddressSpace(MmGetKernelAddressSpace());
     if (!NT_SUCCESS(Status))
     {
@@ -658,6 +673,7 @@ CcRosMapVacb(
 
     ASSERT(((ULONG_PTR)Vacb->BaseAddress % PAGE_SIZE) == 0);
     ASSERT((ULONG_PTR)Vacb->BaseAddress > (ULONG_PTR)MmSystemRangeStart);
+    ASSERT((ULONG_PTR)Vacb->BaseAddress + VACB_MAPPING_GRANULARITY - 1 > (ULONG_PTR)MmSystemRangeStart);
 
     /* Create a virtual mapping for this memory area */
     NumberOfPages = BYTES_TO_PAGES(VACB_MAPPING_GRANULARITY);
@@ -673,6 +689,11 @@ CcRosMapVacb(
             KeBugCheck(MEMORY_MANAGEMENT);
         }
 
+        ASSERT(BaseAddress == Vacb->BaseAddress);
+        ASSERT(i * PAGE_SIZE < VACB_MAPPING_GRANULARITY);
+        ASSERT((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE) >= (ULONG_PTR)BaseAddress);
+        ASSERT((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE) > (ULONG_PTR)MmSystemRangeStart);
+
         Status = MmCreateVirtualMapping(NULL,
                                         (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE)),
                                         PAGE_READWRITE,
@@ -725,11 +746,23 @@ CcRosCreateVacb (
     }
 #endif
     current->MappedCount = 0;
-    current->DirtyVacbListEntry.Flink = NULL;
-    current->DirtyVacbListEntry.Blink = NULL;
-    current->ReferenceCount = 1;
+    current->ReferenceCount = 0;
     current->PinCount = 0;
     KeInitializeMutex(&current->Mutex, 0);
+    InitializeListHead(&current->CacheMapVacbListEntry);
+    InitializeListHead(&current->DirtyVacbListEntry);
+    InitializeListHead(&current->VacbLruListEntry);
+
+    CcRosVacbIncRefCount(current);
+
+    Status = CcRosMapVacbInKernelSpace(current);
+    if (!NT_SUCCESS(Status))
+    {
+        CcRosVacbDecRefCount(current);
+        ExFreeToNPagedLookasideList(&VacbLookasideList, current);
+        return Status;
+    }
+
     CcRosAcquireVacbLock(current, NULL);
     KeAcquireGuardedMutex(&ViewLock);
 
@@ -762,9 +795,10 @@ CcRosCreateVacb (
                         current);
             }
 #endif
+            CcRosVacbDecRefCount(*Vacb);
             CcRosReleaseVacbLock(*Vacb);
             KeReleaseGuardedMutex(&ViewLock);
-            ExFreeToNPagedLookasideList(&VacbLookasideList, *Vacb);
+            CcRosInternalFreeVacb(*Vacb);
             *Vacb = current;
             CcRosAcquireVacbLock(current, NULL);
             return STATUS_SUCCESS;
@@ -812,14 +846,8 @@ CcRosCreateVacb (
     }
 #endif
 
-    Status = CcRosMapVacb(current);
-    if (!NT_SUCCESS(Status))
-    {
-        RemoveEntryList(&current->CacheMapVacbListEntry);
-        RemoveEntryList(&current->VacbLruListEntry);
-        CcRosReleaseVacbLock(current);
-        ExFreeToNPagedLookasideList(&VacbLookasideList, current);
-    }
+    /* Reference it to allow release */
+    CcRosVacbIncRefCount(current);
 
     return Status;
 }
@@ -836,6 +864,7 @@ CcRosGetVacb (
 {
     PROS_VACB current;
     NTSTATUS Status;
+    ULONG Refs;
 
     ASSERT(SharedCacheMap);
 
@@ -857,6 +886,8 @@ CcRosGetVacb (
         }
     }
 
+    Refs = CcRosVacbGetRefCount(current);
+
     KeAcquireGuardedMutex(&ViewLock);
 
     /* Move to the tail of the LRU list */
@@ -873,6 +904,9 @@ CcRosGetVacb (
     DPRINT("*BaseAddress %p\n", *BaseAddress);
     *Vacb = current;
     *BaseOffset = current->FileOffset.QuadPart;
+
+    ASSERT(Refs > 1);
+
     return STATUS_SUCCESS;
 }
 
@@ -947,6 +981,21 @@ CcRosInternalFreeVacb (
                      NULL);
     MmUnlockAddressSpace(MmGetKernelAddressSpace());
 
+    if (Vacb->PinCount != 0 || Vacb->ReferenceCount != 0)
+    {
+        DPRINT1("Invalid free: %ld, %ld\n", Vacb->ReferenceCount, Vacb->PinCount);
+        if (Vacb->SharedCacheMap->FileObject && Vacb->SharedCacheMap->FileObject->FileName.Length)
+        {
+            DPRINT1("For file: %wZ\n", &Vacb->SharedCacheMap->FileObject->FileName);
+        }
+    }
+
+    ASSERT(Vacb->PinCount == 0);
+    ASSERT(Vacb->ReferenceCount == 0);
+    ASSERT(IsListEmpty(&Vacb->CacheMapVacbListEntry));
+    ASSERT(IsListEmpty(&Vacb->DirtyVacbListEntry));
+    ASSERT(IsListEmpty(&Vacb->VacbLruListEntry));
+    RtlFillMemory(Vacb, sizeof(*Vacb), 0xfd);
     ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
     return STATUS_SUCCESS;
 }
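
Throughout the patch, RemoveEntryList is now followed by InitializeListHead on the removed entry (and CcRosCreateVacb initializes all three links up front). A freshly initialized LIST_ENTRY points at itself, which is exactly what the new IsListEmpty assertions in CcRosInternalFreeVacb test, so a VACB can only be freed once it is provably off every list. A small self-contained rendering of that idiom with a trimmed-down LIST_ENTRY (not the ntoskrnl definitions):

    #include <assert.h>
    #include <stdio.h>

    /* Trimmed-down doubly linked list in the LIST_ENTRY style. */
    typedef struct list_entry {
        struct list_entry *flink;
        struct list_entry *blink;
    } list_entry;

    static void initialize_list_head(list_entry *e) { e->flink = e->blink = e; }
    static int  is_list_empty(const list_entry *e)  { return e->flink == e; }

    static void insert_head_list(list_entry *head, list_entry *e)
    {
        e->flink = head->flink;
        e->blink = head;
        head->flink->blink = e;
        head->flink = e;
    }

    static void remove_entry_list(list_entry *e)
    {
        e->blink->flink = e->flink;
        e->flink->blink = e->blink;
        /* e->flink/e->blink still point into the list: stale, not "empty". */
    }

    int main(void)
    {
        list_entry head, node;

        initialize_list_head(&head);
        initialize_list_head(&node);    /* safe default, as in CcRosCreateVacb */

        insert_head_list(&head, &node);
        remove_entry_list(&node);
        initialize_list_head(&node);    /* the pattern added by this patch */

        assert(is_list_empty(&node));   /* the check CcRosInternalFreeVacb relies on */
        printf("node is unlinked and self-referencing\n");
        return 0;
    }
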
@@ -967,7 +1016,6 @@ CcFlushCache (
     LONGLONG RemainingLength;
     PROS_VACB current;
     NTSTATUS Status;
-    KIRQL oldIrql;
 
     CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
         SectionObjectPointers, FileOffset, Length);
@@ -1010,13 +1058,7 @@ CcFlushCache (
                     }
                 }
 
-                CcRosReleaseVacbLock(current);
-
-                KeAcquireGuardedMutex(&ViewLock);
-                KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
-                CcRosVacbDecRefCount(current);
-                KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
-                KeReleaseGuardedMutex(&ViewLock);
+                CcRosReleaseVacb(SharedCacheMap, current, current->Valid, current->Dirty, FALSE);
             }
 
             Offset.QuadPart += VACB_MAPPING_GRANULARITY;
@@ -1074,6 +1116,7 @@ CcRosDeleteFileCache (
             current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
             CcRosAcquireVacbLock(current, NULL);
             RemoveEntryList(&current->VacbLruListEntry);
+            InitializeListHead(&current->VacbLruListEntry);
             if (current->Dirty)
             {
                 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
@@ -1098,12 +1141,14 @@ CcRosDeleteFileCache (
         {
             current_entry = RemoveTailList(&FreeList);
             current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
+            InitializeListHead(&current->CacheMapVacbListEntry);
+            CcRosVacbDecRefCount(current);
             CcRosInternalFreeVacb(current);
         }
 
-        KeAcquireSpinLock(&iSharedCacheMapLock, &OldIrql);
+        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
         RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
-        KeReleaseSpinLock(&iSharedCacheMapLock, OldIrql);
+        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
 
         ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
         KeAcquireGuardedMutex(&ViewLock);
@@ -1172,6 +1217,8 @@ CcRosReleaseFileCache (
  * has been closed.
  */
 {
+    KIRQL OldIrql;
+    PPRIVATE_CACHE_MAP PrivateMap;
     PROS_SHARED_CACHE_MAP SharedCacheMap;
 
     KeAcquireGuardedMutex(&ViewLock);
@@ -1179,9 +1226,34 @@ CcRosReleaseFileCache (
     if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
     {
         SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
-        if (FileObject->PrivateCacheMap != NULL)
+
+        /* Closing the handle, so kill the private cache map.
+         * Before you even try to remove it from the FO, always
+         * lock the master lock, to be sure not to race
+         * with a read ahead that may still be in progress!
+         */
+        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
+        PrivateMap = FileObject->PrivateCacheMap;
+        FileObject->PrivateCacheMap = NULL;
+        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
+
+        if (PrivateMap != NULL)
         {
-            FileObject->PrivateCacheMap = NULL;
+            /* Remove it from the file */
+            KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
+            RemoveEntryList(&PrivateMap->PrivateLinks);
+            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);
+
+            /* And free it. */
+            if (PrivateMap != &SharedCacheMap->PrivateCacheMap)
+            {
+                ExFreePoolWithTag(PrivateMap, TAG_PRIVATE_CACHE_MAP);
+            }
+            else
+            {
+                PrivateMap->NodeTypeCode = 0;
+            }
+
             if (SharedCacheMap->OpenCount > 0)
             {
                 SharedCacheMap->OpenCount--;
@@ -1248,24 +1320,32 @@ CcRosInitializeFileCache (
         InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
         FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;
 
-        KeAcquireSpinLock(&iSharedCacheMapLock, &OldIrql);
+        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
         InsertTailList(&CcCleanSharedCacheMapList, &SharedCacheMap->SharedCacheMapLinks);
-        KeReleaseSpinLock(&iSharedCacheMapLock, OldIrql);
+        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
     }
     if (FileObject->PrivateCacheMap == NULL)
     {
         PPRIVATE_CACHE_MAP PrivateMap;
 
         /* Allocate the private cache map for this handle */
-        PrivateMap = ExAllocatePoolWithTag(NonPagedPool, sizeof(PRIVATE_CACHE_MAP), TAG_PRIVATE_CACHE_MAP);
+        if (SharedCacheMap->PrivateCacheMap.NodeTypeCode != 0)
+        {
+            PrivateMap = ExAllocatePoolWithTag(NonPagedPool, sizeof(PRIVATE_CACHE_MAP), TAG_PRIVATE_CACHE_MAP);
+        }
+        else
+        {
+            PrivateMap = &SharedCacheMap->PrivateCacheMap;
+        }
+
         if (PrivateMap == NULL)
         {
             /* If we also allocated the shared cache map for this file, kill it */
             if (Allocated)
             {
-                KeAcquireSpinLock(&iSharedCacheMapLock, &OldIrql);
+                OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
                 RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
-                KeReleaseSpinLock(&iSharedCacheMapLock, OldIrql);
+                KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
 
                 FileObject->SectionObjectPointer->SharedCacheMap = NULL;
                 ObDereferenceObject(FileObject);
@@ -1281,6 +1361,7 @@ CcRosInitializeFileCache (
         PrivateMap->NodeTypeCode = NODE_TYPE_PRIVATE_MAP;
         PrivateMap->ReadAheadMask = PAGE_SIZE - 1;
         PrivateMap->FileObject = FileObject;
+        KeInitializeSpinLock(&PrivateMap->ReadAheadSpinLock);
 
         /* Link it to the file */
         KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
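
The private cache map handling now prefers the PRIVATE_CACHE_MAP embedded in the shared cache map for the first handle, using a zero NodeTypeCode to mean "embedded slot free", and only falls back to pool allocation for further handles; CcRosReleaseFileCache mirrors this by freeing pool-allocated maps and merely clearing NodeTypeCode on the embedded one. A compact user-mode model of that choice (field names, the tag value and the helpers are stand-ins, not the ntoskrnl definitions):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Illustrative model of the embedded-vs-allocated private map choice. */
    struct private_map {
        unsigned short node_type_code;   /* 0 means "embedded slot is free" */
    };

    struct shared_map {
        struct private_map embedded;     /* like SharedCacheMap->PrivateCacheMap */
    };

    #define NODE_TYPE_PRIVATE_MAP 0x02ED /* hypothetical tag value */

    static struct private_map *acquire_private_map(struct shared_map *shared)
    {
        struct private_map *pm;

        if (shared->embedded.node_type_code != 0)
            pm = malloc(sizeof(*pm));    /* embedded slot busy: allocate */
        else
            pm = &shared->embedded;      /* first handle: use the embedded slot */

        if (pm != NULL)
            pm->node_type_code = NODE_TYPE_PRIVATE_MAP;
        return pm;
    }

    static void release_private_map(struct shared_map *shared, struct private_map *pm)
    {
        if (pm != &shared->embedded)
            free(pm);                    /* pool-allocated map */
        else
            pm->node_type_code = 0;      /* just mark the embedded slot free again */
    }

    int main(void)
    {
        struct shared_map shared;
        memset(&shared, 0, sizeof(shared));

        struct private_map *first  = acquire_private_map(&shared); /* embedded */
        struct private_map *second = acquire_private_map(&shared); /* heap */

        printf("first embedded: %d, second embedded: %d\n",
               first == &shared.embedded, second == &shared.embedded);

        release_private_map(&shared, second);
        release_private_map(&shared, first);
        return 0;
    }
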
@@ -1329,7 +1410,6 @@ CcInitView (
     InitializeListHead(&CcDeferredWrites);
     InitializeListHead(&CcCleanSharedCacheMapList);
     KeInitializeSpinLock(&CcDeferredWriteSpinLock);
-    KeInitializeSpinLock(&iSharedCacheMapLock);
     KeInitializeGuardedMutex(&ViewLock);
     ExInitializeNPagedLookasideList(&iBcbLookasideList,
                                     NULL,
@@ -1413,6 +1493,38 @@ ExpKdbgExtFileCache(ULONG Argc, PCHAR Argv[])
 
     return TRUE;
 }
+
+BOOLEAN
+ExpKdbgExtDefWrites(ULONG Argc, PCHAR Argv[])
+{
+    KdbpPrint("CcTotalDirtyPages:\t%lu (%lu Kb)\n", CcTotalDirtyPages,
+              (CcTotalDirtyPages * PAGE_SIZE) / 1024);
+    KdbpPrint("CcDirtyPageThreshold:\t%lu (%lu Kb)\n", CcDirtyPageThreshold,
+              (CcDirtyPageThreshold * PAGE_SIZE) / 1024);
+    KdbpPrint("MmAvailablePages:\t%lu (%lu Kb)\n", MmAvailablePages,
+              (MmAvailablePages * PAGE_SIZE) / 1024);
+    KdbpPrint("MmThrottleTop:\t\t%lu (%lu Kb)\n", MmThrottleTop,
+              (MmThrottleTop * PAGE_SIZE) / 1024);
+    KdbpPrint("MmThrottleBottom:\t%lu (%lu Kb)\n", MmThrottleBottom,
+              (MmThrottleBottom * PAGE_SIZE) / 1024);
+    KdbpPrint("MmModifiedPageListHead.Total:\t%lu (%lu Kb)\n", MmModifiedPageListHead.Total,
+              (MmModifiedPageListHead.Total * PAGE_SIZE) / 1024);
+
+    if (CcTotalDirtyPages >= CcDirtyPageThreshold)
+    {
+        KdbpPrint("CcTotalDirtyPages above the threshold, writes should be throttled\n");
+    }
+    else if (CcTotalDirtyPages + 64 >= CcDirtyPageThreshold)
+    {
+        KdbpPrint("CcTotalDirtyPages within 64 (max charge) pages of the threshold, writes may be throttled\n");
+    }
+    else
+    {
+        KdbpPrint("CcTotalDirtyPages below the threshold, writes should not be throttled\n");
+    }
+
+    return TRUE;
+}
 #endif
 
 /* EOF */
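
For reference, the three messages printed by the new ExpKdbgExtDefWrites extension correspond to three bands around CcDirtyPageThreshold: at or above it writes should be throttled, within 64 pages of it (the "max charge" named in the message) they may be, and further below it they should not be. The same classification as a tiny standalone check (the numbers in main are examples only):

    #include <stdio.h>

    /* Classify dirty pages against the threshold, mirroring the bands
     * printed by the debugger extension above. */
    static const char *throttle_state(unsigned long dirty_pages,
                                      unsigned long threshold)
    {
        if (dirty_pages >= threshold)
            return "writes should be throttled";
        if (dirty_pages + 64 >= threshold)   /* 64 pages: max charge of one write */
            return "writes may be throttled";
        return "writes should not be throttled";
    }

    int main(void)
    {
        /* Example page counts only (pages of PAGE_SIZE bytes). */
        unsigned long threshold = 1536;

        printf("%lu dirty: %s\n", 1600UL, throttle_state(1600, threshold));
        printf("%lu dirty: %s\n", 1500UL, throttle_state(1500, threshold));
        printf("%lu dirty: %s\n",  200UL, throttle_state(200, threshold));
        return 0;
    }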