[NTOSKRNL] Implement ExfAcquireRundownProtectionCacheAwareEx(), ExfReleaseRundownProt...
[reactos.git] / ntoskrnl / ex / rundown.c
index fb0b82b..da96b08 100644 (file)
@@ -5,6 +5,7 @@
  * PURPOSE:         Rundown and Cache-Aware Rundown Protection
  * PROGRAMMERS:     Alex Ionescu (alex@relsoft.net)
  *                  Thomas Weidenmueller
+ *                  Pierre Schweitzer
  */
 
 /* INCLUDES *****************************************************************/
@@ -378,53 +379,57 @@ ExfWaitForRundownProtectionRelease(IN PEX_RUNDOWN_REF RunRef)
 /* FIXME: STUBS **************************************************************/
 
 /*
- * @unimplemented NT5.2
+ * @implemented NT5.2
  */
 BOOLEAN
 FASTCALL
 ExfAcquireRundownProtectionCacheAware(IN PEX_RUNDOWN_REF_CACHE_AWARE RunRefCacheAware)
 {
-    DBG_UNREFERENCED_PARAMETER(RunRefCacheAware);
-    UNIMPLEMENTED;
-    return FALSE;
+    PEX_RUNDOWN_REF RunRef;
+
+    RunRef = ExGetRunRefForCurrentProcessor(RunRefCacheAware);
+    return _ExAcquireRundownProtection(RunRef);
 }
 
 /*
- * @unimplemented NT5.2
+ * @implemented NT5.2
  */
 BOOLEAN
 FASTCALL
 ExfAcquireRundownProtectionCacheAwareEx(IN PEX_RUNDOWN_REF_CACHE_AWARE RunRefCacheAware,
                                         IN ULONG Count)
 {
-    DBG_UNREFERENCED_PARAMETER(RunRefCacheAware);
-    DBG_UNREFERENCED_PARAMETER(Count);
-    UNIMPLEMENTED;
-    return FALSE;
+    PEX_RUNDOWN_REF RunRef;
+
+    RunRef = ExGetRunRefForCurrentProcessor(RunRefCacheAware);
+    return ExfAcquireRundownProtectionEx(RunRef, Count);
 }
 
 /*
- * @unimplemented NT5.2
+ * @implemented NT5.2
  */
 VOID
 FASTCALL
 ExfReleaseRundownProtectionCacheAware(IN PEX_RUNDOWN_REF_CACHE_AWARE RunRefCacheAware)
 {
-    DBG_UNREFERENCED_PARAMETER(RunRefCacheAware);
-    UNIMPLEMENTED;
+    PEX_RUNDOWN_REF RunRef;
+
+    RunRef = ExGetRunRefForCurrentProcessor(RunRefCacheAware);
+    return _ExReleaseRundownProtection(RunRef);
 }
 
 /*
- * @unimplemented NT5.2
+ * @implemented NT5.2
  */
 VOID
 FASTCALL
 ExfReleaseRundownProtectionCacheAwareEx(IN PEX_RUNDOWN_REF_CACHE_AWARE RunRefCacheAware,
                                         IN ULONG Count)
 {
-    DBG_UNREFERENCED_PARAMETER(RunRefCacheAware);
-    DBG_UNREFERENCED_PARAMETER(Count);
-    UNIMPLEMENTED;
+    PEX_RUNDOWN_REF RunRef;
+
+    RunRef = ExGetRunRefForCurrentProcessor(RunRefCacheAware);
+    return ExfReleaseRundownProtectionEx(RunRef, Count);
 }
 
 /*
@@ -461,51 +466,173 @@ ExfReInitializeRundownProtectionCacheAware(IN PEX_RUNDOWN_REF_CACHE_AWARE RunRef
 }
 
 /*
- * @unimplemented NT5.2
+ * @implemented NT5.2
  */
 PEX_RUNDOWN_REF_CACHE_AWARE
 NTAPI
 ExAllocateCacheAwareRundownProtection(IN POOL_TYPE PoolType,
                                       IN ULONG Tag)
 {
-    DBG_UNREFERENCED_PARAMETER(PoolType);
-    DBG_UNREFERENCED_PARAMETER(Tag);
-    UNIMPLEMENTED;
-    return NULL;
+    PVOID PoolToFree;
+    PEX_RUNDOWN_REF RunRef;
+    ULONG RunRefSize, Count, Offset;
+    PEX_RUNDOWN_REF_CACHE_AWARE RunRefCacheAware;
+
+    PAGED_CODE();
+
+    /* Allocate the master structure */
+    RunRefCacheAware = ExAllocatePoolWithTag(PoolType, sizeof(EX_RUNDOWN_REF_CACHE_AWARE), Tag);
+    if (RunRefCacheAware == NULL)
+    {
+        return NULL;
+    }
+
+    /* Compute the size of each runref */
+    RunRefCacheAware->Number = KeNumberProcessors;
+    if (KeNumberProcessors <= 1)
+    {
+        RunRefSize = sizeof(EX_RUNDOWN_REF);
+    }
+    else
+    {
+        RunRefSize = KeGetRecommendedSharedDataAlignment();
+        ASSERT((RunRefSize & (RunRefSize - 1)) == 0);
+    }
+
+    /* It must at least hold an EX_RUNDOWN_REF structure */
+    ASSERT(sizeof(EX_RUNDOWN_REF) <= RunRefSize);
+    RunRefCacheAware->RunRefSize = RunRefSize;
+
+    /* Allocate our runref pool */
+    PoolToFree = ExAllocatePoolWithTag(PoolType, RunRefSize * RunRefCacheAware->Number, Tag);
+    if (PoolToFree == NULL)
+    {
+        ExFreePoolWithTag(RunRefCacheAware, Tag);
+        return NULL;
+    }
+
+    /* On SMP, check for alignment */
+    if (RunRefCacheAware->Number > 1)
+    {
+        /* FIXME: properly align run refs */
+        UNIMPLEMENTED;
+    }
+
+    RunRefCacheAware->RunRefs = PoolToFree;
+    RunRefCacheAware->PoolToFree = PoolToFree;
+
+    /* And initialize runref */
+    if (RunRefCacheAware->Number != 0)
+    {
+        for (Count = 0; Count < RunRefCacheAware->Number; ++Count)
+        {
+            Offset = RunRefCacheAware->RunRefSize * Count;
+            RunRef = (PEX_RUNDOWN_REF)((ULONG_PTR)RunRefCacheAware->RunRefs + Offset);
+            RunRef->Count = 0;
+        }
+    }
+
+    return RunRefCacheAware;
 }
 
 /*
- * @unimplemented NT5.2
+ * @implemented NT5.2
  */
 VOID
 NTAPI
 ExFreeCacheAwareRundownProtection(IN PEX_RUNDOWN_REF_CACHE_AWARE RunRefCacheAware)
 {
-    DBG_UNREFERENCED_PARAMETER(RunRefCacheAware);
-    UNIMPLEMENTED;
+    PAGED_CODE();
+
+    /*
+     * This is to be called for RunRefCacheAware that were allocated with
+     * ExAllocateCacheAwareRundownProtection and not for user-allocated
+     * ones
+     */
+    ASSERT(RunRefCacheAware->PoolToFree != (PVOID)0xBADCA11);
+
+    /* We don't know the tag that was used for allocation */
+    ExFreePoolWithTag(RunRefCacheAware->PoolToFree, 0);
+    ExFreePoolWithTag(RunRefCacheAware, 0);
 }
 
 /*
- * @unimplemented NT5.2
+ * @implemented NT5.2
  */
 VOID
 NTAPI
 ExInitializeRundownProtectionCacheAware(IN PEX_RUNDOWN_REF_CACHE_AWARE RunRefCacheAware,
-                                        IN SIZE_T Count)
+                                        IN SIZE_T Size)
 {
-    DBG_UNREFERENCED_PARAMETER(RunRefCacheAware);
-    DBG_UNREFERENCED_PARAMETER(Count);
-    UNIMPLEMENTED;
+    PVOID Pool;
+    PEX_RUNDOWN_REF RunRef;
+    ULONG Count, RunRefSize, Offset;
+
+    PAGED_CODE();
+
+    /* Get the user-allocated pool for runrefs */
+    Pool = (PVOID)((ULONG_PTR)RunRefCacheAware + sizeof(EX_RUNDOWN_REF_CACHE_AWARE));
+
+    /* By default a runref is structure-sized */
+    RunRefSize = sizeof(EX_RUNDOWN_REF);
+
+    /*
+     * If we just have enough room for a single runref, deduce we're on a single
+     * processor machine
+     */
+    if (Size == sizeof(EX_RUNDOWN_REF_CACHE_AWARE) + sizeof(EX_RUNDOWN_REF))
+    {
+        Count = 1;
+    }
+    else
+    {
+        /* FIXME: Properly align on SMP */
+        UNIMPLEMENTED;
+    }
+
+    /* Initialize the structure */
+    RunRefCacheAware->RunRefs = Pool;
+    RunRefCacheAware->RunRefSize = RunRefSize;
+    RunRefCacheAware->Number = Count;
+
+    /* There is no allocated pool! */
+    RunRefCacheAware->PoolToFree = (PVOID)0xBADCA11u;
+
+    /* Initialize runref */
+    if (RunRefCacheAware->Number != 0)
+    {
+        for (Count = 0; Count < RunRefCacheAware->Number; ++Count)
+        {
+            Offset = RunRefCacheAware->RunRefSize * Count;
+            RunRef = (PEX_RUNDOWN_REF)((ULONG_PTR)RunRefCacheAware->RunRefs + Offset);
+            RunRef->Count = 0;
+        }
+    }
 }
 
 /*
- * @unimplemented NT5.2
+ * @implemented NT5.2
  */
 SIZE_T
 NTAPI
 ExSizeOfRundownProtectionCacheAware(VOID)
 {
-    UNIMPLEMENTED;
-    return 0;
+    SIZE_T Size;
+
+    PAGED_CODE();
+
+    /* Compute the needed size for runrefs */
+    if (KeNumberProcessors <= 1)
+    {
+        Size = sizeof(EX_RUNDOWN_REF);
+    }
+    else
+    {
+        /* Add one extra slot so there is enough room for alignment */
+        Size = (KeNumberProcessors + 1) * KeGetRecommendedSharedDataAlignment();
+    }
+
+    /* Return total size (master structure and runrefs) */
+    return Size + sizeof(EX_RUNDOWN_REF_CACHE_AWARE);
 }