Merge from amd64-branch:
[reactos.git] / reactos / ntoskrnl / ke / spinlock.c
index 15c3b13..0a98b33 100644 (file)
-/* $Id$
- *
- * COPYRIGHT:       See COPYING in the top level directory
- * PROJECT:         ReactOS kernel
- * FILE:            ntoskrnl/ke/spinlock.c
- * PURPOSE:         Implements spinlocks
- * 
- * PROGRAMMERS:     David Welch (welch@cwcom.net)
- */
-
 /*
- * NOTE: On a uniprocessor machine spinlocks are implemented by raising
- * the irq level
+ * PROJECT:         ReactOS Kernel
+ * LICENSE:         GPL - See COPYING in the top level directory
+ * FILE:            ntoskrnl/ke/spinlock.c
+ * PURPOSE:         Spinlock and Queued Spinlock Support
+ * PROGRAMMERS:     Alex Ionescu (alex.ionescu@reactos.org)
  */
 
-/* INCLUDES ****************************************************************/
+/* INCLUDES ******************************************************************/
 
 #include <ntoskrnl.h>
 #define NDEBUG
-#include <internal/debug.h>
+#include <debug.h>
+
+#define LQ_WAIT     1
+#define LQ_OWN      2
+
+/* PRIVATE FUNCTIONS *********************************************************/
+
+#if 0
+//
+// FIXME: The queued spinlock routines are broken.
+//
+
+VOID
+FASTCALL
+KeAcquireQueuedSpinLockAtDpcLevel(IN PKSPIN_LOCK_QUEUE LockHandle)
+{
+#ifdef CONFIG_SMP
+    PKSPIN_LOCK_QUEUE Prev;
+
+    /* Set the new lock */
+    Prev = (PKSPIN_LOCK_QUEUE)
+           InterlockedExchange((PLONG)LockHandle->Next,
+                               (LONG)LockHandle);
+    if (!Prev)
+    {
+        /* There was nothing there before. We now own it */
+         *LockHandle->Lock |= LQ_OWN;
+        return;
+    }
+
+    /* Set the wait flag */
+     *LockHandle->Lock |= LQ_WAIT;
+
+    /* Link us */
+    Prev->Next = (PKSPIN_LOCK_QUEUE)LockHandle;
+
+    /* Loop and wait */
+    while (*LockHandle->Lock & LQ_WAIT)
+        YieldProcessor();
+#endif
+}
+
+VOID
+FASTCALL
+KeReleaseQueuedSpinLockFromDpcLevel(IN PKSPIN_LOCK_QUEUE LockHandle)
+{
+#ifdef CONFIG_SMP
+    KSPIN_LOCK LockVal;
+    PKSPIN_LOCK_QUEUE Waiter;
+
+    /* Remove own and wait flags */
+    *LockHandle->Lock &= ~(LQ_OWN | LQ_WAIT);
+    LockVal = *LockHandle->Lock;
+
+    /* Check if we already own it */
+    if (LockVal == (KSPIN_LOCK)LockHandle)
+    {
+        /* Disown it */
+        LockVal = (KSPIN_LOCK)
+                  InterlockedCompareExchangePointer(LockHandle->Lock,
+                                                    NULL,
+                                                    LockHandle);
+    }
+    if (LockVal == (KSPIN_LOCK)LockHandle) return;
+
+    /* Need to wait for it */
+    Waiter = LockHandle->Next;
+    while (!Waiter)
+    {
+        YieldProcessor();
+        Waiter = LockHandle->Next;
+    }
+
+    /* It's gone */
+    *(ULONG_PTR*)&Waiter->Lock ^= (LQ_OWN | LQ_WAIT);
+    LockHandle->Next = NULL;
+#endif
+}
+
+#else
+//
+// HACK: Hacked to work like normal spinlocks
+//
+
+VOID
+FASTCALL
+KeAcquireQueuedSpinLockAtDpcLevel(IN PKSPIN_LOCK_QUEUE LockHandle)
+{
+#ifdef CONFIG_SMP
+    /* Make sure we are at DPC or above! */
+    if (KeGetCurrentIrql() < DISPATCH_LEVEL)
+    {
+        /* We aren't -- bugcheck */
+        KeBugCheckEx(IRQL_NOT_GREATER_OR_EQUAL,
+                     (ULONG_PTR)LockHandle->Lock,
+                     KeGetCurrentIrql(),
+                     0,
+                     0);
+    }
+
+    /* Do the inlined function */
+    KxAcquireSpinLock(LockHandle->Lock);
+#endif
+}
+
+VOID
+FASTCALL
+KeReleaseQueuedSpinLockFromDpcLevel(IN PKSPIN_LOCK_QUEUE LockHandle)
+{
+#ifdef CONFIG_SMP
+    /* Make sure we are at DPC or above! */
+    if (KeGetCurrentIrql() < DISPATCH_LEVEL)
+    {
+        /* We aren't -- bugcheck */
+        KeBugCheckEx(IRQL_NOT_GREATER_OR_EQUAL,
+                     (ULONG_PTR)LockHandle->Lock,
+                     KeGetCurrentIrql(),
+                     0,
+                     0);
+    }
 
-/* FUNCTIONS ***************************************************************/
+    /* Do the inlined function */
+    KxReleaseSpinLock(LockHandle->Lock);
+#endif
+}
+
+#endif
+
+/* PUBLIC FUNCTIONS **********************************************************/
 
 /*
  * @implemented
  */
-BOOLEAN STDCALL
-KeSynchronizeExecution (PKINTERRUPT            Interrupt,
-                       PKSYNCHRONIZE_ROUTINE   SynchronizeRoutine,
-                       PVOID                   SynchronizeContext)
+KIRQL
+NTAPI
+KeAcquireInterruptSpinLock(IN PKINTERRUPT Interrupt)
+{
+    KIRQL OldIrql;
+
+    /* Raise IRQL */
+    KeRaiseIrql(Interrupt->SynchronizeIrql, &OldIrql);
+
+    /* Acquire spinlock on MP */
+    KeAcquireSpinLockAtDpcLevel(Interrupt->ActualLock);
+    return OldIrql;
+}
+
 /*
- * FUNCTION: Synchronizes the execution of a given routine with the ISR
- * of a given interrupt object
- * ARGUMENTS:
- *       Interrupt = Interrupt object to synchronize with
- *       SynchronizeRoutine = Routine to call whose execution is 
- *                            synchronized with the ISR
- *       SynchronizeContext = Parameter to pass to the synchronized routine
- * RETURNS: TRUE if the operation succeeded
+ * @implemented
  */
+VOID
+NTAPI
+KeReleaseInterruptSpinLock(IN PKINTERRUPT Interrupt,
+                           IN KIRQL OldIrql)
 {
-   KIRQL oldlvl;
-   BOOLEAN ret;
-   
-   oldlvl = KeAcquireInterruptSpinLock(Interrupt);
-   
-   ret = SynchronizeRoutine(SynchronizeContext);
-   
-   KeReleaseInterruptSpinLock(Interrupt, oldlvl);
-   
-   return(ret);
+    /* Release lock on MP */
+    KeReleaseSpinLockFromDpcLevel(Interrupt->ActualLock);
+
+    /* Lower IRQL */
+    KeLowerIrql(OldIrql);
 }
 
 /*
  * @implemented
  */
-KIRQL
-STDCALL
-KeAcquireInterruptSpinLock(
-    IN PKINTERRUPT Interrupt
-    )
+VOID
+NTAPI
+_KeInitializeSpinLock(IN PKSPIN_LOCK SpinLock)
 {
-   KIRQL oldIrql;
-        
-   KeRaiseIrql(Interrupt->SynchLevel, &oldIrql);
-   KiAcquireSpinLock(Interrupt->ActualLock);
-   return oldIrql;
+    /* Clear it */
+    *SpinLock = 0;
 }
 
 /*
  * @implemented
  */
-VOID STDCALL
-KeInitializeSpinLock (PKSPIN_LOCK      SpinLock)
+#undef KeAcquireSpinLockAtDpcLevel
+VOID
+NTAPI
+KeAcquireSpinLockAtDpcLevel(IN PKSPIN_LOCK SpinLock)
+{
+    /* Make sure we are at DPC or above! */
+    if (KeGetCurrentIrql() < DISPATCH_LEVEL)
+    {
+        /* We aren't -- bugcheck */
+        KeBugCheckEx(IRQL_NOT_GREATER_OR_EQUAL,
+                     (ULONG_PTR)SpinLock,
+                     KeGetCurrentIrql(),
+                     0,
+                     0);
+    }
+
+    /* Do the inlined function */
+    KxAcquireSpinLock(SpinLock);
+}
+
 /*
- * FUNCTION: Initalizes a spinlock
- * ARGUMENTS:
- *           SpinLock = Caller supplied storage for the spinlock
+ * @implemented
  */
+#undef KeReleaseSpinLockFromDpcLevel
+VOID
+NTAPI
+KeReleaseSpinLockFromDpcLevel(IN PKSPIN_LOCK SpinLock)
 {
-   *SpinLock = 0;
-}
+    /* Make sure we are at DPC or above! */
+    if (KeGetCurrentIrql() < DISPATCH_LEVEL)
+    {
+        /* We aren't -- bugcheck */
+        KeBugCheckEx(IRQL_NOT_GREATER_OR_EQUAL,
+                     (ULONG_PTR)SpinLock,
+                     KeGetCurrentIrql(),
+                     0,
+                     0);
+    }
 
-#undef KefAcquireSpinLockAtDpcLevel
+    /* Do the inlined function */
+    KxReleaseSpinLock(SpinLock);
+}
 
 /*
  * @implemented
  */
-VOID FASTCALL
-KefAcquireSpinLockAtDpcLevel(PKSPIN_LOCK SpinLock)
+VOID
+FASTCALL
+KefAcquireSpinLockAtDpcLevel(IN PKSPIN_LOCK SpinLock)
 {
-  ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
-  KiAcquireSpinLock(SpinLock);
-}
+    /* Make sure we are at DPC or above! */
+    if (KeGetCurrentIrql() < DISPATCH_LEVEL)
+    {
+        /* We aren't -- bugcheck */
+        KeBugCheckEx(IRQL_NOT_GREATER_OR_EQUAL,
+                     (ULONG_PTR)SpinLock,
+                     KeGetCurrentIrql(),
+                     0,
+                     0);
+    }
 
-#undef KeAcquireSpinLockAtDpcLevel
+    /* Do the inlined function */
+    KxAcquireSpinLock(SpinLock);
+}
 
 /*
  * @implemented
  */
-VOID STDCALL
-KeAcquireSpinLockAtDpcLevel (PKSPIN_LOCK       SpinLock)
+VOID
+FASTCALL
+KefReleaseSpinLockFromDpcLevel(IN PKSPIN_LOCK SpinLock)
+{
+    /* Make sure we are at DPC or above! */
+    if (KeGetCurrentIrql() < DISPATCH_LEVEL)
+    {
+        /* We aren't -- bugcheck */
+        KeBugCheckEx(IRQL_NOT_GREATER_OR_EQUAL,
+                     (ULONG_PTR)SpinLock,
+                     KeGetCurrentIrql(),
+                     0,
+                     0);
+    }
+
+    /* Do the inlined function */
+    KxReleaseSpinLock(SpinLock);
+}
+
 /*
- * FUNCTION: Acquires a spinlock when the caller is already running at 
- * dispatch level
- * ARGUMENTS:
- *        SpinLock = Spinlock to acquire
+ * @implemented
  */
+VOID
+FASTCALL
+KiAcquireSpinLock(IN PKSPIN_LOCK SpinLock)
 {
-  KefAcquireSpinLockAtDpcLevel(SpinLock);
+    /* Do the inlined function */
+    KxAcquireSpinLock(SpinLock);
 }
 
-
 /*
- * @unimplemented
+ * @implemented
  */
 VOID
 FASTCALL
-KeAcquireInStackQueuedSpinLockAtDpcLevel(
-    IN PKSPIN_LOCK SpinLock,
-    IN PKLOCK_QUEUE_HANDLE LockHandle
-    )
+KiReleaseSpinLock(IN PKSPIN_LOCK SpinLock)
 {
-       UNIMPLEMENTED;
+    /* Do the inlined function */
+    KxReleaseSpinLock(SpinLock);
 }
 
+/*
+ * @implemented
+ */
+BOOLEAN
+FASTCALL
+KeTryToAcquireSpinLockAtDpcLevel(IN OUT PKSPIN_LOCK SpinLock)
+{
+#ifdef CONFIG_SMP
+    /* Check if it's already acquired */
+    if (!(*SpinLock))
+    {
+        /* Try to acquire it */
+        if (InterlockedBitTestAndSet((PLONG)SpinLock, 0))
+        {
+            /* Someone else acquired it */
+            return FALSE;
+        }
+    }
+    else
+    {
+        /* It was already acquired */
+        return FALSE;
+    }
 
-#undef KefReleaseSpinLockFromDpcLevel
+#if DBG
+    /* On debug builds, we OR in the KTHREAD */
+    *SpinLock = (ULONG_PTR)KeGetCurrentThread() | 1;
+#endif
+#endif
+
+    /* All is well, return TRUE */
+    return TRUE;
+}
 
 /*
  * @implemented
  */
-VOID FASTCALL
-KefReleaseSpinLockFromDpcLevel(PKSPIN_LOCK SpinLock)
+VOID
+FASTCALL
+KeAcquireInStackQueuedSpinLockAtDpcLevel(IN PKSPIN_LOCK SpinLock,
+                                         IN PKLOCK_QUEUE_HANDLE LockHandle)
 {
-  ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
-  KiReleaseSpinLock(SpinLock);  
-}
+#ifdef CONFIG_SMP
+    /* Set it up properly */
+    LockHandle->LockQueue.Next = NULL;
+    LockHandle->LockQueue.Lock = SpinLock;
+#if 0
+    KeAcquireQueuedSpinLockAtDpcLevel(LockHandle->LockQueue.Next);
+#else
+    /* Make sure we are at DPC or above! */
+    if (KeGetCurrentIrql() < DISPATCH_LEVEL)
+    {
+        /* We aren't -- bugcheck */
+        KeBugCheckEx(IRQL_NOT_GREATER_OR_EQUAL,
+                     (ULONG_PTR)LockHandle->LockQueue.Lock,
+                     KeGetCurrentIrql(),
+                     0,
+                     0);
+    }
 
-#undef KeReleaseSpinLockFromDpcLevel
+    /* Acquire the lock */
+    KxAcquireSpinLock(LockHandle->LockQueue.Lock); // HACK
+#endif
+#endif
+}
 
 /*
  * @implemented
  */
-VOID STDCALL
-KeReleaseSpinLockFromDpcLevel (PKSPIN_LOCK     SpinLock)
+VOID
+FASTCALL
+KeReleaseInStackQueuedSpinLockFromDpcLevel(IN PKLOCK_QUEUE_HANDLE LockHandle)
+{
+#ifdef CONFIG_SMP
+#if 0
+    /* Call the internal function */
+    KeReleaseQueuedSpinLockFromDpcLevel(LockHandle->LockQueue.Next);
+#else
+    /* Make sure we are at DPC or above! */
+    if (KeGetCurrentIrql() < DISPATCH_LEVEL)
+    {
+        /* We aren't -- bugcheck */
+        KeBugCheckEx(IRQL_NOT_GREATER_OR_EQUAL,
+                     (ULONG_PTR)LockHandle->LockQueue.Lock,
+                     KeGetCurrentIrql(),
+                     0,
+                     0);
+    }
+
+    /* Release the lock */
+    KxReleaseSpinLock(LockHandle->LockQueue.Lock); // HACK
+#endif
+#endif
+}
+
 /*
- * FUNCTION: Releases a spinlock when the caller was running at dispatch
- * level before acquiring it
- * ARGUMENTS: 
- *         SpinLock = Spinlock to release
+ * @unimplemented
  */
+KIRQL
+FASTCALL
+KeAcquireSpinLockForDpc(IN PKSPIN_LOCK SpinLock)
 {
-  KefReleaseSpinLockFromDpcLevel(SpinLock);
+    UNIMPLEMENTED;
+    return 0;
 }
 
 /*
@@ -159,77 +406,70 @@ KeReleaseSpinLockFromDpcLevel (PKSPIN_LOCK        SpinLock)
  */
 VOID
 FASTCALL
-KeReleaseInStackQueuedSpinLockFromDpcLevel(
-    IN PKLOCK_QUEUE_HANDLE LockHandle
-    )
+KeReleaseSpinLockForDpc(IN PKSPIN_LOCK SpinLock,
+                        IN KIRQL OldIrql)
 {
-       UNIMPLEMENTED;
+    UNIMPLEMENTED;
 }
 
 /*
- * @implemented
+ * @unimplemented
  */
-VOID FASTCALL
-KiAcquireSpinLock(PKSPIN_LOCK SpinLock)
-{
-  ULONG i;
-
-  /*
-   * FIXME: This depends on gcc assembling this test to a single load from
-   * the spinlock's value.
-   */
-  if (*SpinLock >= 2)
-  {
-    DbgPrint("Lock %x has bad value %x\n", SpinLock, *SpinLock);
-    KEBUGCHECK(0);
-  }
-   
-  while ((i = InterlockedExchangeUL(SpinLock, 1)) == 1)
-  {
-#ifdef CONFIG_SMP
-    /* Avoid reading the value again too fast */
-#if 1
-    __asm__ __volatile__ ("1:\n\t"
-                         "cmpl $0,(%0)\n\t"
-                         "jne  1b\n\t"
-                         :
-                          : "r" (SpinLock));
-#else                    
-    while (0 != *(volatile KSPIN_LOCK*)SpinLock);
-#endif
-#else
-    DbgPrint("Spinning on spinlock %x current value %x\n", SpinLock, i);
-    KEBUGCHECK(0);
-#endif /* CONFIG_SMP */
-  }
+KIRQL
+FASTCALL
+KeAcquireInStackQueuedSpinLockForDpc(IN PKSPIN_LOCK SpinLock,
+                                     IN PKLOCK_QUEUE_HANDLE LockHandle)
+{
+    UNIMPLEMENTED;
+    return 0;
 }
 
 /*
- * @implemented
+ * @unimplemented
  */
 VOID
-STDCALL
-KeReleaseInterruptSpinLock(
-       IN PKINTERRUPT Interrupt,
-       IN KIRQL OldIrql
-       )
+FASTCALL
+KeReleaseInStackQueuedSpinLockForDpc(IN PKLOCK_QUEUE_HANDLE LockHandle)
 {
-   KiReleaseSpinLock(Interrupt->ActualLock);
-   KeLowerIrql(OldIrql);
+    UNIMPLEMENTED;
 }
 
 /*
  * @implemented
  */
-VOID FASTCALL
-KiReleaseSpinLock(PKSPIN_LOCK SpinLock)
+BOOLEAN
+FASTCALL
+KeTestSpinLock(IN PKSPIN_LOCK SpinLock)
 {
-  if (*SpinLock != 1)
-  {
-    DbgPrint("Releasing unacquired spinlock %x\n", SpinLock);
-    KEBUGCHECK(0);
-  }
-  (void)InterlockedExchangeUL(SpinLock, 0);
+    /* Test this spinlock */
+    if (*SpinLock)
+    {
+        /* Spinlock is busy, yield execution */
+        YieldProcessor();
+
+        /* Return busy flag */
+        return FALSE;
+    }
+
+    /* Spinlock appears to be free */
+    return TRUE;
 }
 
-/* EOF */
+#ifdef _M_IX86
+VOID
+NTAPI
+Kii386SpinOnSpinLock(PKSPIN_LOCK SpinLock, ULONG Flags)
+{
+    // FIXME: Handle flags
+    UNREFERENCED_PARAMETER(Flags);
+
+    /* Spin until it's unlocked */
+    while (*(volatile KSPIN_LOCK *)SpinLock & 1)
+    {
+        // FIXME: Check for timeout
+
+        /* Yield and keep looping */
+        YieldProcessor();
+    }
+}
+#endif