- Okay so... listen up. First off: when you acquire a lock such as a fast mutex, you should never acquire it recursively.
author     Aleksey Bragin <aleksey@reactos.org>
Mon, 12 Nov 2007 19:00:26 +0000 (19:00 +0000)
committer  Aleksey Bragin <aleksey@reactos.org>
Mon, 12 Nov 2007 19:00:26 +0000 (19:00 +0000)
  For example, when you handle a page fault in a section, then page fault while handling that page fault (which is perfectly okay),
  you shouldn't be trying to re-acquire the address space lock that you're already holding. After this fix, this scenario works,
  and so do countless others. Apps like QTInfo now load and work, and PictureViewer doesn't BSOD the system anymore. I've fixed this by changing
  the lock to a pushlock. It not only increases speed inside the memory manager significantly (such as during page fault handling), but
  also allows recursive acquisition without any problems.
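
  (Not part of the patch -- just a minimal sketch of the push-lock pattern the memory manager locks are being switched to, using only the
  helpers that appear in the diff below: ExInitializePushLock, ExAcquirePushLockExclusive, ExTryToAcquirePushLockExclusive and
  ExReleasePushLock. ExampleLock and the Example* routines are made up for illustration. Per the internal ex.h notes, callers are expected
  to be at IRQL <= APC_LEVEL, usually inside a critical region.)

      EX_PUSH_LOCK ExampleLock;

      VOID ExampleInit(VOID)
      {
          /* A push lock is a single pointer-sized value; initialize it to the unlocked state. */
          ExInitializePushLock((PULONG_PTR)&ExampleLock);
      }

      VOID ExampleUse(VOID)
      {
          /* Exclusive acquire/release around the protected state. */
          ExAcquirePushLockExclusive(&ExampleLock);
          /* ... touch the protected data ... */
          ExReleasePushLock(&ExampleLock);

          /* Non-blocking variant added by this patch: just tests and sets the lock bit. */
          if (ExTryToAcquirePushLockExclusive(&ExampleLock))
          {
              /* ... got the lock without waiting ... */
              ExReleasePushLock(&ExampleLock);
          }
      }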
- Now if that wasn't bad enough, here's a couple more tips. Fast Mutexes actually require APC_LEVEL to be effective. If you're going
  to be using a Fast Mutex and calling it with the "Unsafe" version, then don't expect anything to work. Also, using functions like
  "CcTryToAcquireBrokenMutex" where correct code is duplicated then hacked to work isn't a big help either. And that's not all. Fast Mutex
  disables kernel APCs by setting the KernelApcDisable flag on, and it's expected that the count inside the fast mutex will match the count
  inside the thread. In other words, LOCK ACQUISITION AND RELEASE MUST BE ORDERED. You can't acquire LOCK A and B, and then release lock A
  and B, because that leads to deadlocks and other issues. So of course, the Cache Manager acquired a view lock, then acquired a segment lock,
  then released the view lock, then released the segment lock, then re-acquired the view lock. Uh, no, that won't work. You know what else
  doesn't work so well? Disabling APCs about 6-9 times to acquire a single lock, and using spinlocks in the same code path as well. Just how
  paranoid can you be about thread safety while still managing to get it wrong? Okay, so we've got recursion, out-of-order lock acquisition and
  release, made-up "broken" acquire functions, and using a lock that depends on APC_LEVEL at PASSIVE_LEVEL. The best part is when Cc builds
  an array of cache segments, and locks each of them... then during release, the list gets parsed head-first, so the first acquired locks
  get released first. So locks a, b, c, d get acquired, then a, b, c, d get released. Great! Sounds about right for ReactOS's Cache Manager
  design. I've changed the view lock to a guarded mutex, which actually disables APCs properly and works at PASSIVE_LEVEL, and changed the
  segment locks to push locks. For one, it'll be 10 times faster than acquiring a bazillion fast mutexes, especially since APCs have already
  been disabled at this point, and it also allows you to do most of the stupid things the Cache Manager does. Out-of-order release is still
  not going to work well, so eventually on a multi-processor machine the code will completely die -- but at least it'll work on UP for now.
  In the end, this makes things like the Inkscape installer and the QuickTime installer work, and probably countless other things that generated
  ASSERTS in the fast mutex code.
  -- Alex Ionescu
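
  (Again not part of the patch: a minimal sketch of the ordered acquire/release rule described above, using the same primitives the Cache
  Manager is being moved to -- a KGUARDED_MUTEX for the view lock and an EX_PUSH_LOCK for a segment lock. The Example* names are made up for
  illustration; this shows the pattern being advocated, not what every Cc path does yet.)

      KGUARDED_MUTEX ExampleViewLock;     /* "lock A" */
      EX_PUSH_LOCK   ExampleSegmentLock;  /* "lock B" */

      VOID ExampleOrderedLocking(VOID)
      {
          /* Acquire A, then B... */
          KeAcquireGuardedMutex(&ExampleViewLock);
          ExAcquirePushLockExclusive(&ExampleSegmentLock);

          /* ... work on the view and the segment ... */

          /* ...then release B, then A: strictly the reverse order of acquisition. */
          ExReleasePushLock(&ExampleSegmentLock);
          KeReleaseGuardedMutex(&ExampleViewLock);
      }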

svn path=/trunk/; revision=30401

reactos/ntoskrnl/cc/fs.c
reactos/ntoskrnl/cc/pin.c
reactos/ntoskrnl/cc/view.c
reactos/ntoskrnl/include/internal/cc.h
reactos/ntoskrnl/include/internal/ex.h
reactos/ntoskrnl/include/internal/ke.h
reactos/ntoskrnl/mm/aspace.c
reactos/ntoskrnl/mm/section.c

index 3e9bcd7..77d6bc4 100644 (file)
@@ -20,7 +20,7 @@
 
 /* GLOBALS   *****************************************************************/
 
-extern FAST_MUTEX ViewLock;
+extern KGUARDED_MUTEX ViewLock;
 extern ULONG DirtyPageCount;
 
 NTSTATUS CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg);
@@ -153,7 +153,7 @@ CcSetFileSizes (IN PFILE_OBJECT FileObject,
   if (FileSizes->AllocationSize.QuadPart < Bcb->AllocationSize.QuadPart)
   {
      InitializeListHead(&FreeListHead);
-     ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
+     KeAcquireGuardedMutex(&ViewLock);
      KeAcquireSpinLock(&Bcb->BcbLock, &oldirql);
 
      current_entry = Bcb->BcbSegmentListHead.Flink;
@@ -186,7 +186,7 @@ CcSetFileSizes (IN PFILE_OBJECT FileObject,
      Bcb->AllocationSize = FileSizes->AllocationSize;
      Bcb->FileSize = FileSizes->FileSize;
      KeReleaseSpinLock(&Bcb->BcbLock, oldirql);
-     ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
+     KeReleaseGuardedMutex(&ViewLock);
 
      current_entry = FreeListHead.Flink;
      while(current_entry != &FreeListHead)
index ec1b93b..d7b70fd 100644 (file)
@@ -236,7 +236,7 @@ CcUnpinRepinnedBcb (
       IoStatus->Information = 0;
       if (WriteThrough)
         {
-          ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&iBcb->CacheSegment->Lock);
+          ExAcquirePushLockExclusive(&iBcb->CacheSegment->Lock);
           if (iBcb->CacheSegment->Dirty)
             {
               IoStatus->Status = CcRosFlushCacheSegment(iBcb->CacheSegment);
@@ -245,7 +245,7 @@ CcUnpinRepinnedBcb (
             {
               IoStatus->Status = STATUS_SUCCESS;
             }
-          ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&iBcb->CacheSegment->Lock);
+          ExReleasePushLockExclusive(&iBcb->CacheSegment->Lock);
         }
       else
         {
index 838a46c..b208e7a 100644 (file)
@@ -56,7 +56,7 @@ static LIST_ENTRY CacheSegmentLRUListHead;
 static LIST_ENTRY ClosedListHead;
 ULONG DirtyPageCount=0;
 
-FAST_MUTEX ViewLock;
+KGUARDED_MUTEX ViewLock;
 
 #ifdef CACHE_BITMAP
 #define        CI_CACHESEG_MAPPING_REGION_SIZE (128*1024*1024)
@@ -114,22 +114,6 @@ static void CcRosCacheSegmentDecRefCount_ ( PCACHE_SEGMENT cs, const char* file,
 NTSTATUS
 CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg);
 
-BOOLEAN
-FASTCALL
-CcTryToAcquireBrokenMutex(PFAST_MUTEX FastMutex)
-{
-    KeEnterCriticalRegion();
-    if (InterlockedCompareExchange(&FastMutex->Count, 0, 1) == 1)
-    {
-        FastMutex->Owner = KeGetCurrentThread();
-        return(TRUE);
-    }
-    else
-    {
-        KeLeaveCriticalRegion();
-        return(FALSE);
-    }
-}
 
 /* FUNCTIONS *****************************************************************/
 
@@ -153,7 +137,7 @@ CcRosTraceCacheMap (
        {
                DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", Bcb );
 
-               ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
+               KeAcquireGuardedMutex(&ViewLock);
                KeAcquireSpinLock(&Bcb->BcbLock, &oldirql);
 
                current_entry = Bcb->BcbSegmentListHead.Flink;
@@ -166,7 +150,7 @@ CcRosTraceCacheMap (
                                current, current->ReferenceCount, current->Dirty, current->PageOut );
                }
                KeReleaseSpinLock(&Bcb->BcbLock, oldirql);
-               ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
+               KeReleaseGuardedMutex(&ViewLock);
        }
        else
        {
@@ -183,119 +167,130 @@ NTSTATUS
 NTAPI
 CcRosFlushCacheSegment(PCACHE_SEGMENT CacheSegment)
 {
-  NTSTATUS Status;
-  KIRQL oldIrql;
-  Status = WriteCacheSegment(CacheSegment);
-  if (NT_SUCCESS(Status))
+    NTSTATUS Status;
+    KIRQL oldIrql;
+    
+    Status = WriteCacheSegment(CacheSegment);
+    if (NT_SUCCESS(Status))
     {
-      ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
-      KeAcquireSpinLock(&CacheSegment->Bcb->BcbLock, &oldIrql);
-      CacheSegment->Dirty = FALSE;
-      RemoveEntryList(&CacheSegment->DirtySegmentListEntry);
-      DirtyPageCount -= CacheSegment->Bcb->CacheSegmentSize / PAGE_SIZE;
-      CcRosCacheSegmentDecRefCount ( CacheSegment );
-      KeReleaseSpinLock(&CacheSegment->Bcb->BcbLock, oldIrql);
-      ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
+        KeAcquireGuardedMutex(&ViewLock);
+        KeAcquireSpinLock(&CacheSegment->Bcb->BcbLock, &oldIrql);
+        
+        CacheSegment->Dirty = FALSE;
+        RemoveEntryList(&CacheSegment->DirtySegmentListEntry);
+        DirtyPageCount -= CacheSegment->Bcb->CacheSegmentSize / PAGE_SIZE;
+        CcRosCacheSegmentDecRefCount ( CacheSegment );
+        
+        KeReleaseSpinLock(&CacheSegment->Bcb->BcbLock, oldIrql);
+        KeReleaseGuardedMutex(&ViewLock);
     }
-  return(Status);
+    
+    return(Status);
 }
 
 NTSTATUS
 NTAPI
 CcRosFlushDirtyPages(ULONG Target, PULONG Count)
 {
-  PLIST_ENTRY current_entry;
-  PCACHE_SEGMENT current;
-  ULONG PagesPerSegment;
-  BOOLEAN Locked;
-  NTSTATUS Status;
-  static ULONG WriteCount[4] = {0, 0, 0, 0};
-  ULONG NewTarget;
-
-  DPRINT("CcRosFlushDirtyPages(Target %d)\n", Target);
-
-  (*Count) = 0;
-
-  ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
-
-  WriteCount[0] = WriteCount[1];
-  WriteCount[1] = WriteCount[2];
-  WriteCount[2] = WriteCount[3];
-  WriteCount[3] = 0;
-
-  NewTarget = WriteCount[0] + WriteCount[1] + WriteCount[2];
-
-  if (NewTarget < DirtyPageCount)
-  {
-     NewTarget = (DirtyPageCount - NewTarget + 3) / 4;
-     WriteCount[0] += NewTarget;
-     WriteCount[1] += NewTarget;
-     WriteCount[2] += NewTarget;
-     WriteCount[3] += NewTarget;
-  }
-
-  NewTarget = WriteCount[0];
+    PLIST_ENTRY current_entry;
+    PCACHE_SEGMENT current;
+    ULONG PagesPerSegment;
+    BOOLEAN Locked;
+    NTSTATUS Status;
+    static ULONG WriteCount[4] = {0, 0, 0, 0};
+    ULONG NewTarget;
+    
+    DPRINT("CcRosFlushDirtyPages(Target %d)\n", Target);
+    
+    (*Count) = 0;
+    
+    KeAcquireGuardedMutex(&ViewLock);
+    
+    WriteCount[0] = WriteCount[1];
+    WriteCount[1] = WriteCount[2];
+    WriteCount[2] = WriteCount[3];
+    WriteCount[3] = 0;
+    
+    NewTarget = WriteCount[0] + WriteCount[1] + WriteCount[2];
+    
+    if (NewTarget < DirtyPageCount)
+    {
+        NewTarget = (DirtyPageCount - NewTarget + 3) / 4;
+        WriteCount[0] += NewTarget;
+        WriteCount[1] += NewTarget;
+        WriteCount[2] += NewTarget;
+        WriteCount[3] += NewTarget;
+    }
+    
+    NewTarget = WriteCount[0];
+    
+    Target = max(NewTarget, Target);
+    
+    current_entry = DirtySegmentListHead.Flink;
+    if (current_entry == &DirtySegmentListHead)
+    {
+        DPRINT("No Dirty pages\n");
+    }
+    
+    while (current_entry != &DirtySegmentListHead && Target > 0)
+    {
+        current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
+                                    DirtySegmentListEntry);
+        current_entry = current_entry->Flink;
+        
+        Locked = ExTryToAcquireResourceExclusiveLite(((FSRTL_COMMON_FCB_HEADER*)(current->Bcb->FileObject->FsContext))->Resource);
+        if (!Locked)
+        {
+            continue;
+        }
+        
+        Locked = ExTryToAcquirePushLockExclusive(&current->Lock);
+        if (!Locked)
+        {
+            ExReleaseResourceLite(((FSRTL_COMMON_FCB_HEADER*)(current->Bcb->FileObject->FsContext))->Resource);
+            continue;
+        }
+        
+        ASSERT(current->Dirty);
+        if (current->ReferenceCount > 1)
+        {
+            ExReleasePushLock(&current->Lock);
+            ExReleaseResourceLite(((FSRTL_COMMON_FCB_HEADER*)(current->Bcb->FileObject->FsContext))->Resource);
+            continue;
+        }
+        
+        PagesPerSegment = current->Bcb->CacheSegmentSize / PAGE_SIZE;
 
-  Target = max(NewTarget, Target);
+        KeReleaseGuardedMutex(&ViewLock);
+        
+        Status = CcRosFlushCacheSegment(current);
 
-  current_entry = DirtySegmentListHead.Flink;
-  if (current_entry == &DirtySegmentListHead)
-  {
-     DPRINT("No Dirty pages\n");
-  }
-  while (current_entry != &DirtySegmentListHead && Target > 0)
-    {
-      current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
-                                 DirtySegmentListEntry);
-      current_entry = current_entry->Flink;
+        ExReleasePushLock(&current->Lock);
+        ExReleaseResourceLite(((FSRTL_COMMON_FCB_HEADER*)(current->Bcb->FileObject->FsContext))->Resource);
 
-//      Locked = current->Bcb->Callbacks.AcquireForLazyWrite(current->Bcb->Context, FALSE);
-      Locked = ExTryToAcquireResourceExclusiveLite(((FSRTL_COMMON_FCB_HEADER*)(current->Bcb->FileObject->FsContext))->Resource);
-      if (!Locked)
+        if (!NT_SUCCESS(Status) &&  (Status != STATUS_END_OF_FILE))
         {
-          continue;
+            DPRINT1("CC: Failed to flush cache segment.\n");
         }
-      Locked = CcTryToAcquireBrokenMutex(&current->Lock);
-      if (!Locked)
-       {
-//          current->Bcb->Callbacks.ReleaseFromLazyWrite(current->Bcb->Context);
-          ExReleaseResourceLite(((FSRTL_COMMON_FCB_HEADER*)(current->Bcb->FileObject->FsContext))->Resource);
-         continue;
-       }
-      ASSERT(current->Dirty);
-      if (current->ReferenceCount > 1)
-       {
-         ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&current->Lock);
-//          current->Bcb->Callbacks.ReleaseFromLazyWrite(current->Bcb->Context);
-          ExReleaseResourceLite(((FSRTL_COMMON_FCB_HEADER*)(current->Bcb->FileObject->FsContext))->Resource);
-         continue;
-       }
-      ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
-      PagesPerSegment = current->Bcb->CacheSegmentSize / PAGE_SIZE;
-      Status = CcRosFlushCacheSegment(current);
-      ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&current->Lock);
-//      current->Bcb->Callbacks.ReleaseFromLazyWrite(current->Bcb->Context);
-      ExReleaseResourceLite(((FSRTL_COMMON_FCB_HEADER*)(current->Bcb->FileObject->FsContext))->Resource);
-      if (!NT_SUCCESS(Status) &&  (Status != STATUS_END_OF_FILE))
-      {
-        DPRINT1("CC: Failed to flush cache segment.\n");
-      }
-      else
-      {
-         (*Count) += PagesPerSegment;
-         Target -= PagesPerSegment;
-      }
-      ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
-      current_entry = DirtySegmentListHead.Flink;
+        else
+        {
+            (*Count) += PagesPerSegment;
+            Target -= PagesPerSegment;
+        }
+        
+        KeAcquireGuardedMutex(&ViewLock);
+        current_entry = DirtySegmentListHead.Flink;
     }
-  if (*Count < NewTarget)
-  {
-     WriteCount[1] += (NewTarget - *Count);
-  }
-  ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
-  DPRINT("CcRosFlushDirtyPages() finished\n");
-
-  return(STATUS_SUCCESS);
+    
+    if (*Count < NewTarget)
+    {
+        WriteCount[1] += (NewTarget - *Count);
+    }
+    
+    KeReleaseGuardedMutex(&ViewLock);
+    
+    DPRINT("CcRosFlushDirtyPages() finished\n");
+    return(STATUS_SUCCESS);
 }
 
 NTSTATUS
@@ -322,7 +317,7 @@ CcRosTrimCache(ULONG Target, ULONG Priority, PULONG NrFreed)
 
   InitializeListHead(&FreeList);
 
-  ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
+  KeAcquireGuardedMutex(&ViewLock);
   current_entry = CacheSegmentLRUListHead.Flink;
   while (current_entry != &CacheSegmentLRUListHead && Target > 0)
     {
@@ -354,7 +349,7 @@ CcRosTrimCache(ULONG Target, ULONG Priority, PULONG NrFreed)
             last = current;
             current->PageOut = TRUE;
              KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
-            ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
+            KeReleaseGuardedMutex(&ViewLock);
             for (i = 0; i < current->Bcb->CacheSegmentSize / PAGE_SIZE; i++)
               {
                 PFN_TYPE Page;
@@ -365,7 +360,7 @@ CcRosTrimCache(ULONG Target, ULONG Priority, PULONG NrFreed)
                     break;
                   }
               }
-             ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
+             KeAcquireGuardedMutex(&ViewLock);
              KeAcquireSpinLock(&current->Bcb->BcbLock, &oldIrql);
              CcRosCacheSegmentDecRefCount(current);
              current->PageOut = FALSE;
@@ -376,7 +371,7 @@ CcRosTrimCache(ULONG Target, ULONG Priority, PULONG NrFreed)
         KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
       }
   }
-  ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
+  KeReleaseGuardedMutex(&ViewLock);
 
   while (!IsListEmpty(&FreeList))
   {
@@ -409,7 +404,7 @@ CcRosReleaseCacheSegment(PBCB Bcb,
   CacheSeg->Valid = Valid;
   CacheSeg->Dirty = CacheSeg->Dirty || Dirty;
 
-  ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
+  KeAcquireGuardedMutex(&ViewLock);
   if (!WasDirty && CacheSeg->Dirty)
     {
       InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
@@ -433,42 +428,43 @@ CcRosReleaseCacheSegment(PBCB Bcb,
       CcRosCacheSegmentIncRefCount(CacheSeg);
   }
   KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
-  ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
-  ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&CacheSeg->Lock);
+  KeReleaseGuardedMutex(&ViewLock);
+  ExReleasePushLock(&CacheSeg->Lock);
 
   return(STATUS_SUCCESS);
 }
 
+/* Returns with Cache Segment Lock Held! */
 PCACHE_SEGMENT
 NTAPI
 CcRosLookupCacheSegment(PBCB Bcb, ULONG FileOffset)
 {
-  PLIST_ENTRY current_entry;
-  PCACHE_SEGMENT current;
-  KIRQL oldIrql;
-
-  ASSERT(Bcb);
-
-  DPRINT("CcRosLookupCacheSegment(Bcb -x%p, FileOffset %d)\n", Bcb, FileOffset);
-
-  KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
-  current_entry = Bcb->BcbSegmentListHead.Flink;
-  while (current_entry != &Bcb->BcbSegmentListHead)
+    PLIST_ENTRY current_entry;
+    PCACHE_SEGMENT current;
+    KIRQL oldIrql;
+    
+    ASSERT(Bcb);
+    
+    DPRINT("CcRosLookupCacheSegment(Bcb -x%p, FileOffset %d)\n", Bcb, FileOffset);
+    
+    KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
+    current_entry = Bcb->BcbSegmentListHead.Flink;
+    while (current_entry != &Bcb->BcbSegmentListHead)
     {
-      current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
-                                 BcbSegmentListEntry);
-      if (current->FileOffset <= FileOffset &&
-         (current->FileOffset + Bcb->CacheSegmentSize) > FileOffset)
-       {
-          CcRosCacheSegmentIncRefCount(current);
-         KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
-          ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&current->Lock);
-         return(current);
-       }
-      current_entry = current_entry->Flink;
+        current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
+                                    BcbSegmentListEntry);
+        if (current->FileOffset <= FileOffset &&
+            (current->FileOffset + Bcb->CacheSegmentSize) > FileOffset)
+        {
+            CcRosCacheSegmentIncRefCount(current);
+            KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
+            ExAcquirePushLockExclusive(&current->Lock);
+            return(current);
+        }
+        current_entry = current_entry->Flink;
     }
-  KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
-  return(NULL);
+    KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
+    return(NULL);
 }
 
 NTSTATUS
@@ -489,10 +485,10 @@ CcRosMarkDirtyCacheSegment(PBCB Bcb, ULONG FileOffset)
     }
   if (!CacheSeg->Dirty)
     {
-      ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
+      KeAcquireGuardedMutex(&ViewLock);
       InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
       DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
-      ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
+      KeReleaseGuardedMutex(&ViewLock);
     }
   else
   {
@@ -503,7 +499,7 @@ CcRosMarkDirtyCacheSegment(PBCB Bcb, ULONG FileOffset)
 
 
   CacheSeg->Dirty = TRUE;
-  ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&CacheSeg->Lock);
+  ExReleasePushLock(&CacheSeg->Lock);
 
   return(STATUS_SUCCESS);
 }
@@ -534,10 +530,10 @@ CcRosUnmapCacheSegment(PBCB Bcb, ULONG FileOffset, BOOLEAN NowDirty)
 
   if (!WasDirty && NowDirty)
   {
-     ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
+     KeAcquireGuardedMutex(&ViewLock);
      InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
      DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
-     ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
+     KeReleaseGuardedMutex(&ViewLock);
   }
 
   KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
@@ -552,7 +548,7 @@ CcRosUnmapCacheSegment(PBCB Bcb, ULONG FileOffset, BOOLEAN NowDirty)
   }
   KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
 
-  ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&CacheSeg->Lock);
+  ExReleasePushLock(&CacheSeg->Lock);
   return(STATUS_SUCCESS);
 }
 
@@ -601,9 +597,9 @@ CcRosCreateCacheSegment(PBCB Bcb,
   current->DirtySegmentListEntry.Flink = NULL;
   current->DirtySegmentListEntry.Blink = NULL;
   current->ReferenceCount = 1;
-  ExInitializeFastMutex(&current->Lock);
-  ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&current->Lock);
-  ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
+  ExInitializePushLock((PULONG_PTR)&current->Lock);
+  ExAcquirePushLockExclusive(&current->Lock);
+  KeAcquireGuardedMutex(&ViewLock);
 
   *CacheSeg = current;
   /* There is window between the call to CcRosLookupCacheSegment
@@ -632,11 +628,11 @@ CcRosCreateCacheSegment(PBCB Bcb,
                        current );
        }
 #endif
-       ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&(*CacheSeg)->Lock);
-       ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
+       ExReleasePushLock(&(*CacheSeg)->Lock);
+       KeReleaseGuardedMutex(&ViewLock);
        ExFreeToNPagedLookasideList(&CacheSegLookasideList, *CacheSeg);
        *CacheSeg = current;
-        ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&current->Lock);
+        ExAcquirePushLockExclusive(&current->Lock);
        return STATUS_SUCCESS;
      }
      if (current->FileOffset < FileOffset)
@@ -668,7 +664,7 @@ CcRosCreateCacheSegment(PBCB Bcb,
   KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
   InsertTailList(&CacheSegmentListHead, &current->CacheSegmentListEntry);
   InsertTailList(&CacheSegmentLRUListHead, &current->CacheSegmentLRUListEntry);
-  ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
+  KeReleaseGuardedMutex(&ViewLock);
 #ifdef CACHE_BITMAP
   KeAcquireSpinLock(&CiCacheSegMappingRegionLock, &oldIrql);
 
@@ -942,7 +938,7 @@ CcRosFreeCacheSegment(PBCB Bcb, PCACHE_SEGMENT CacheSeg)
   DPRINT("CcRosFreeCacheSegment(Bcb 0x%p, CacheSeg 0x%p)\n",
          Bcb, CacheSeg);
 
-  ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
+  KeAcquireGuardedMutex(&ViewLock);
   KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
   RemoveEntryList(&CacheSeg->BcbSegmentListEntry);
   RemoveEntryList(&CacheSeg->CacheSegmentListEntry);
@@ -954,7 +950,7 @@ CcRosFreeCacheSegment(PBCB Bcb, PCACHE_SEGMENT CacheSeg)
 
   }
   KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
-  ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
+  KeReleaseGuardedMutex(&ViewLock);
 
   Status = CcRosInternalFreeCacheSegment(CacheSeg);
   return(Status);
@@ -1012,7 +1008,7 @@ CcFlushCache(IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
               }
            }
             KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
-           ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&current->Lock);
+           ExReleasePushLock(&current->Lock);
             CcRosCacheSegmentDecRefCount(current);
            KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
         }
@@ -1053,11 +1049,11 @@ CcRosDeleteFileCache(PFILE_OBJECT FileObject, PBCB Bcb)
    ASSERT(Bcb);
 
    Bcb->RefCount++;
-   ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
+   KeReleaseGuardedMutex(&ViewLock);
 
    CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);
 
-   ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
+   KeAcquireGuardedMutex(&ViewLock);
    Bcb->RefCount--;
    if (Bcb->RefCount == 0)
    {
@@ -1094,7 +1090,7 @@ CcRosDeleteFileCache(PFILE_OBJECT FileObject, PBCB Bcb)
 #endif
       KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
 
-      ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
+      KeReleaseGuardedMutex(&ViewLock);
       ObDereferenceObject (Bcb->FileObject);
 
       while (!IsListEmpty(&FreeList))
@@ -1104,7 +1100,7 @@ CcRosDeleteFileCache(PFILE_OBJECT FileObject, PBCB Bcb)
          Status = CcRosInternalFreeCacheSegment(current);
       }
       ExFreeToNPagedLookasideList(&BcbLookasideList, Bcb);
-      ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
+      KeAcquireGuardedMutex(&ViewLock);
    }
    return(STATUS_SUCCESS);
 }
@@ -1114,7 +1110,7 @@ NTAPI
 CcRosReferenceCache(PFILE_OBJECT FileObject)
 {
   PBCB Bcb;
-  ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
+  KeAcquireGuardedMutex(&ViewLock);
   Bcb = (PBCB)FileObject->SectionObjectPointer->SharedCacheMap;
   ASSERT(Bcb);
   if (Bcb->RefCount == 0)
@@ -1129,7 +1125,7 @@ CcRosReferenceCache(PFILE_OBJECT FileObject)
      ASSERT(Bcb->BcbRemoveListEntry.Flink == NULL);
   }
   Bcb->RefCount++;
-  ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
+  KeReleaseGuardedMutex(&ViewLock);
 }
 
 VOID
@@ -1138,7 +1134,7 @@ CcRosSetRemoveOnClose(PSECTION_OBJECT_POINTERS SectionObjectPointer)
 {
   PBCB Bcb;
   DPRINT("CcRosSetRemoveOnClose()\n");
-  ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
+  KeAcquireGuardedMutex(&ViewLock);
   Bcb = (PBCB)SectionObjectPointer->SharedCacheMap;
   if (Bcb)
   {
@@ -1148,7 +1144,7 @@ CcRosSetRemoveOnClose(PSECTION_OBJECT_POINTERS SectionObjectPointer)
       CcRosDeleteFileCache(Bcb->FileObject, Bcb);
     }
   }
-  ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
+  KeReleaseGuardedMutex(&ViewLock);
 }
 
 
@@ -1157,7 +1153,7 @@ NTAPI
 CcRosDereferenceCache(PFILE_OBJECT FileObject)
 {
   PBCB Bcb;
-  ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
+  KeAcquireGuardedMutex(&ViewLock);
   Bcb = (PBCB)FileObject->SectionObjectPointer->SharedCacheMap;
   ASSERT(Bcb);
   if (Bcb->RefCount > 0)
@@ -1177,7 +1173,7 @@ CcRosDereferenceCache(PFILE_OBJECT FileObject)
        }
     }
   }
-  ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
+  KeReleaseGuardedMutex(&ViewLock);
 }
 
 NTSTATUS STDCALL
@@ -1189,7 +1185,7 @@ CcRosReleaseFileCache(PFILE_OBJECT FileObject)
 {
   PBCB Bcb;
 
-  ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
+  KeAcquireGuardedMutex(&ViewLock);
 
   if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
   {
@@ -1216,7 +1212,7 @@ CcRosReleaseFileCache(PFILE_OBJECT FileObject)
       }
     }
   }
-  ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
+  KeReleaseGuardedMutex(&ViewLock);
   return(STATUS_SUCCESS);
 }
 
@@ -1227,7 +1223,7 @@ CcTryToInitializeFileCache(PFILE_OBJECT FileObject)
    PBCB Bcb;
    NTSTATUS Status;
 
-   ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
+   KeAcquireGuardedMutex(&ViewLock);
 
    Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
    if (Bcb == NULL)
@@ -1248,7 +1244,7 @@ CcTryToInitializeFileCache(PFILE_OBJECT FileObject)
       }
       Status = STATUS_SUCCESS;
    }
-   ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
+   KeReleaseGuardedMutex(&ViewLock);
 
    return Status;
 }
@@ -1267,13 +1263,13 @@ CcRosInitializeFileCache(PFILE_OBJECT FileObject,
    DPRINT("CcRosInitializeFileCache(FileObject 0x%p, Bcb 0x%p, CacheSegmentSize %d)\n",
            FileObject, Bcb, CacheSegmentSize);
 
-   ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
+   KeAcquireGuardedMutex(&ViewLock);
    if (Bcb == NULL)
    {
       Bcb = ExAllocateFromNPagedLookasideList(&BcbLookasideList);
       if (Bcb == NULL)
       {
-        ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
+        KeReleaseGuardedMutex(&ViewLock);
        return(STATUS_UNSUCCESSFUL);
       }
       memset(Bcb, 0, sizeof(BCB));
@@ -1304,7 +1300,7 @@ CcRosInitializeFileCache(PFILE_OBJECT FileObject,
       RemoveEntryList(&Bcb->BcbRemoveListEntry);
       Bcb->BcbRemoveListEntry.Flink = NULL;
    }
-   ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
+   KeReleaseGuardedMutex(&ViewLock);
 
    return(STATUS_SUCCESS);
 }
@@ -1359,7 +1355,7 @@ CmLazyCloseThreadMain(PVOID Ignored)
          break;
       }
 
-      ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
+      KeAcquireGuardedMutex(&ViewLock);
       CcTimeStamp++;
       if (CcTimeStamp >= 30)
       {
@@ -1375,7 +1371,7 @@ CmLazyCloseThreadMain(PVOID Ignored)
             CcRosDeleteFileCache(current->FileObject, current);
         }
       }
-      ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
+      KeReleaseGuardedMutex(&ViewLock);
    }
 }
 
@@ -1426,7 +1422,7 @@ CcInitView(VOID)
   InitializeListHead(&DirtySegmentListHead);
   InitializeListHead(&CacheSegmentLRUListHead);
   InitializeListHead(&ClosedListHead);
-  ExInitializeFastMutex(&ViewLock);
+  KeInitializeGuardedMutex(&ViewLock);
   ExInitializeNPagedLookasideList (&iBcbLookasideList,
                                   NULL,
                                   NULL,
@@ -1482,3 +1478,4 @@ CcInitView(VOID)
 
 
 
+
index 7e1ba8a..c5d74a6 100644 (file)
@@ -145,7 +145,7 @@ typedef struct _CACHE_SEGMENT
     /* Offset in the file which this cache segment maps. */
     ULONG FileOffset;
     /* Lock. */
-    FAST_MUTEX Lock;
+    EX_PUSH_LOCK Lock;
     /* Number of references. */
     ULONG ReferenceCount;
     /* Pointer to the BCB for the file which this cache segment maps data for. */
index 9f64e70..8779d30 100644 (file)
@@ -719,6 +719,40 @@ ExAcquirePushLockExclusive(PEX_PUSH_LOCK PushLock)
     ASSERT(PushLock->Locked);
 }
 
+/*++
+* @name ExTryToAcquirePushLockExclusive
+* INTERNAL MACRO
+*
+*     The ExTryToAcquirePushLockExclusive macro attempts to exclusively acquire a PushLock.
+*
+* @params PushLock
+*         Pointer to the pushlock which is to be acquired.
+*
+* @return TRUE if the PushLock was acquired, FALSE otherwise.
+*
+* @remarks The function attempts the quickest route to acquire the lock, which is
+*          to simply set the lock bit.
+*          However, if the pushlock is already held, no wait is attempted and FALSE is returned.
+*
+*          Callers of ExTryToAcquirePushLockExclusive must be running at IRQL <= APC_LEVEL.
+*          This macro should usually be paired up with KeEnterCriticalRegion.
+*
+*--*/
+BOOLEAN
+FORCEINLINE
+ExTryToAcquirePushLockExclusive(PEX_PUSH_LOCK PushLock)
+{
+    /* Try acquiring the lock */
+    if (InterlockedBitTestAndSet((PLONG)PushLock, EX_PUSH_LOCK_LOCK_V))
+    {
+        /* Can't acquire */
+        return FALSE;
+    }
+
+    /* Got acquired */
+    return TRUE;
+}
+
 /*++
  * @name ExAcquirePushLockShared
  * INTERNAL MACRO
index 0a6ac15..1d66c74 100644 (file)
@@ -118,7 +118,7 @@ extern KTSS KiBootTss;
 #endif
 extern UCHAR P0BootStack[];
 extern UCHAR KiDoubleFaultStack[];
-extern FAST_MUTEX KernelAddressSpaceLock;
+extern EX_PUSH_LOCK KernelAddressSpaceLock;
 extern ULONG KiMaximumDpcQueueDepth;
 extern ULONG KiMinimumDpcRate;
 extern ULONG KiAdjustDpcThreshold;
index 3d92d90..78c6883 100644 (file)
@@ -21,7 +21,7 @@
 /* GLOBALS ******************************************************************/
 
 static MADDRESS_SPACE KernelAddressSpace;
-FAST_MUTEX KernelAddressSpaceLock;
+EX_PUSH_LOCK KernelAddressSpaceLock;
 
 /* FUNCTIONS *****************************************************************/
 
@@ -29,21 +29,13 @@ VOID
 NTAPI
 MmLockAddressSpace(PMADDRESS_SPACE AddressSpace)
 {
-   /*
-    * Don't bother with locking if we are the first thread.
-    */
-   if (KeGetCurrentThread() == NULL)
-   {
-      return;
-   }
-
    if (AddressSpace->Process)
    {
-       ExEnterCriticalRegionAndAcquireFastMutexUnsafe((PFAST_MUTEX)&AddressSpace->Process->AddressCreationLock);
+       ExAcquirePushLockExclusive((PEX_PUSH_LOCK)&AddressSpace->Process->AddressCreationLock);
    }
    else
    {
-       ExEnterCriticalRegionAndAcquireFastMutexUnsafe((PFAST_MUTEX)&KernelAddressSpaceLock);
+       ExAcquirePushLockExclusive(&KernelAddressSpaceLock);
    }
 }
 
@@ -51,20 +43,13 @@ VOID
 NTAPI
 MmUnlockAddressSpace(PMADDRESS_SPACE AddressSpace)
 {
-   /*
-    * Don't bother locking if we are the first thread.
-    */
-   if (KeGetCurrentThread() == NULL)
-   {
-      return;
-   }
    if (AddressSpace->Process)
    {
-        ExReleaseFastMutexUnsafeAndLeaveCriticalRegion((PFAST_MUTEX)&AddressSpace->Process->AddressCreationLock);
+        ExReleasePushLock((PEX_PUSH_LOCK)&AddressSpace->Process->AddressCreationLock);
    }
    else
    {
-        ExReleaseFastMutexUnsafeAndLeaveCriticalRegion((PFAST_MUTEX)&KernelAddressSpaceLock);
+        ExReleasePushLock(&KernelAddressSpaceLock);
    }
 }
 
@@ -98,11 +83,11 @@ MmInitializeAddressSpace(PEPROCESS Process,
    AddressSpace->MemoryAreaRoot = NULL;
    if (Process)
    {
-       ExInitializeFastMutex((PFAST_MUTEX)&Process->AddressCreationLock);
+       ExInitializePushLock((PULONG_PTR)&Process->AddressCreationLock);
    }
    else
    {
-        ExInitializeFastMutex((PFAST_MUTEX)&KernelAddressSpaceLock);
+        ExInitializePushLock((PULONG_PTR)&KernelAddressSpaceLock);
    }
    if (Process != NULL)
    {
index 0e8419c..0728094 100644 (file)
 #pragma alloc_text(INIT, MmInitSectionImplementation)
 #endif
 
+FORCEINLINE
+VOID
+sprintf_nt(IN PCHAR Buffer,
+           IN PCHAR Format,
+           IN ...)
+{
+    va_list ap;
+    va_start(ap, Format);
+    vsprintf(Buffer, Format, ap);
+    va_end(ap);
+}
 
 /* TYPES *********************************************************************/
 
@@ -4506,6 +4517,9 @@ MmMapViewOfSection(IN PVOID SectionObject,
    PROS_SECTION_OBJECT Section;
    PMADDRESS_SPACE AddressSpace;
    ULONG ViewOffset;
+   //ANSI_STRING AnsiTemp;
+   //PCHAR Buffer;
+
    NTSTATUS Status = STATUS_SUCCESS;
 
    ASSERT(Process);
@@ -4608,6 +4622,41 @@ MmMapViewOfSection(IN PVOID SectionObject,
       }
 
       *BaseAddress = (PVOID)ImageBase;
+
+      /* Notify debugger about image being loaded */
+      if (NtGlobalFlag & FLG_ENABLE_KDEBUG_SYMBOL_LOAD)
+      {
+#ifdef KDBG
+          /* If KDBG is defined, then we always have symbols */
+          if (TRUE)
+#else
+          if (MiCacheImageSymbols(ImageBase))
+#endif
+          {
+#if 0
+              MmUnlockAddressSpace(AddressSpace);
+
+              /* Allocate a buffer we'll use for names */
+              Buffer = ExAllocatePoolWithTag(NonPagedPool, MAX_PATH, TAG_LDR_WSTR);
+              if (Buffer)
+              {
+                  /* Build the name */
+                  sprintf_nt(Buffer, "%wZ", &Section->FileObject->FileName);
+
+                  /* Setup the ansi string */
+                  RtlInitString(&AnsiTemp, Buffer);
+
+                  /* Notify the debugger */
+                  DbgLoadImageSymbols(&AnsiTemp, (PVOID)ImageBase, (ULONG_PTR)Process);
+
+                  /* Free allocated buffer */
+                  ExFreePool(Buffer);
+              }
+
+              MmLockAddressSpace(AddressSpace);
+#endif
+          }
+      }
    }
    else
    {