#include <ntoskrnl.h>
#define NDEBUG
-#include <internal/debug.h>
+#include <debug.h>
#define LQ_WAIT 1
#define LQ_OWN 2
/* PRIVATE FUNCTIONS *********************************************************/
+#if 0
+//
+// FIXME: The queued spinlock routines are broken.
+//
+
VOID
FASTCALL
KeAcquireQueuedSpinLockAtDpcLevel(IN PKSPIN_LOCK_QUEUE LockHandle)
#endif
}
+#else
+//
+// HACK: Hacked to work like normal spinlocks
+//
+
+VOID
+FASTCALL
+KeAcquireQueuedSpinLockAtDpcLevel(IN PKSPIN_LOCK_QUEUE LockHandle)
+{
+#ifdef CONFIG_SMP
+ /* Make sure we are at DPC or above! */
+ if (KeGetCurrentIrql() < DISPATCH_LEVEL)
+ {
+ /* We aren't -- bugcheck */
+ KeBugCheckEx(IRQL_NOT_GREATER_OR_EQUAL,
+ (ULONG_PTR)LockHandle->Lock,
+ KeGetCurrentIrql(),
+ 0,
+ 0);
+ }
+
+ /* Do the inlined function */
+ KxAcquireSpinLock(LockHandle->Lock);
+#endif
+}
+
+VOID
+FASTCALL
+KeReleaseQueuedSpinLockFromDpcLevel(IN PKSPIN_LOCK_QUEUE LockHandle)
+{
+#ifdef CONFIG_SMP
+ /* Make sure we are at DPC or above! */
+ if (KeGetCurrentIrql() < DISPATCH_LEVEL)
+ {
+ /* We aren't -- bugcheck */
+ KeBugCheckEx(IRQL_NOT_GREATER_OR_EQUAL,
+ (ULONG_PTR)LockHandle->Lock,
+ KeGetCurrentIrql(),
+ 0,
+ 0);
+ }
+
+ /* Do the inlined function */
+ KxReleaseSpinLock(LockHandle->Lock);
+#endif
+}
+
+#endif
+
/* PUBLIC FUNCTIONS **********************************************************/
+/*
+ * @implemented
+ */
+KIRQL
+NTAPI
+KeAcquireInterruptSpinLock(IN PKINTERRUPT Interrupt)
+{
+ KIRQL OldIrql;
+
+ /* Raise IRQL */
+ KeRaiseIrql(Interrupt->SynchronizeIrql, &OldIrql);
+
+ /* Acquire spinlock on MP */
+ KeAcquireSpinLockAtDpcLevel(Interrupt->ActualLock);
+ return OldIrql;
+}
+
/*
* @implemented
*/
VOID
NTAPI
-KeInitializeSpinLock(IN PKSPIN_LOCK SpinLock)
+KeReleaseInterruptSpinLock(IN PKINTERRUPT Interrupt,
+ IN KIRQL OldIrql)
+{
+ /* Release lock on MP */
+ KeReleaseSpinLockFromDpcLevel(Interrupt->ActualLock);
+
+ /* Lower IRQL */
+ KeLowerIrql(OldIrql);
+}
+
+/*
+ * @implemented
+ */
+VOID
+NTAPI
+_KeInitializeSpinLock(IN PKSPIN_LOCK SpinLock)
{
/* Clear it */
*SpinLock = 0;
NTAPI
KeAcquireSpinLockAtDpcLevel(IN PKSPIN_LOCK SpinLock)
{
+ /* Make sure we are at DPC or above! */
+ if (KeGetCurrentIrql() < DISPATCH_LEVEL)
+ {
+ /* We aren't -- bugcheck */
+ KeBugCheckEx(IRQL_NOT_GREATER_OR_EQUAL,
+ (ULONG_PTR)SpinLock,
+ KeGetCurrentIrql(),
+ 0,
+ 0);
+ }
+
/* Do the inlined function */
KxAcquireSpinLock(SpinLock);
}
NTAPI
KeReleaseSpinLockFromDpcLevel(IN PKSPIN_LOCK SpinLock)
{
- /* Do the lined function */
+ /* Make sure we are at DPC or above! */
+ if (KeGetCurrentIrql() < DISPATCH_LEVEL)
+ {
+ /* We aren't -- bugcheck */
+ KeBugCheckEx(IRQL_NOT_GREATER_OR_EQUAL,
+ (ULONG_PTR)SpinLock,
+ KeGetCurrentIrql(),
+ 0,
+ 0);
+ }
+
+ /* Do the inlined function */
KxReleaseSpinLock(SpinLock);
}
FASTCALL
KefAcquireSpinLockAtDpcLevel(IN PKSPIN_LOCK SpinLock)
{
+ /* Make sure we are at DPC or above! */
+ if (KeGetCurrentIrql() < DISPATCH_LEVEL)
+ {
+ /* We aren't -- bugcheck */
+ KeBugCheckEx(IRQL_NOT_GREATER_OR_EQUAL,
+ (ULONG_PTR)SpinLock,
+ KeGetCurrentIrql(),
+ 0,
+ 0);
+ }
+
/* Do the inlined function */
KxAcquireSpinLock(SpinLock);
}
FASTCALL
KefReleaseSpinLockFromDpcLevel(IN PKSPIN_LOCK SpinLock)
{
- /* Do the lined function */
+ /* Make sure we are at DPC or above! */
+ if (KeGetCurrentIrql() < DISPATCH_LEVEL)
+ {
+ /* We aren't -- bugcheck */
+ KeBugCheckEx(IRQL_NOT_GREATER_OR_EQUAL,
+ (ULONG_PTR)SpinLock,
+ KeGetCurrentIrql(),
+ 0,
+ 0);
+ }
+
+ /* Do the inlined function */
KxReleaseSpinLock(SpinLock);
}
return FALSE;
}
-#ifdef DBG
+#if DBG
/* On debug builds, we OR in the KTHREAD */
*SpinLock = (ULONG_PTR)KeGetCurrentThread() | 1;
#endif
/* Set it up properly */
LockHandle->LockQueue.Next = NULL;
LockHandle->LockQueue.Lock = SpinLock;
+#if 0
KeAcquireQueuedSpinLockAtDpcLevel(LockHandle->LockQueue.Next);
+#else
+ /* Make sure we are at DPC or above! */
+ if (KeGetCurrentIrql() < DISPATCH_LEVEL)
+ {
+ /* We aren't -- bugcheck */
+ KeBugCheckEx(IRQL_NOT_GREATER_OR_EQUAL,
+ (ULONG_PTR)LockHandle->LockQueue.Lock,
+ KeGetCurrentIrql(),
+ 0,
+ 0);
+ }
+
+ /* Acquire the lock */
+ KxAcquireSpinLock(LockHandle->LockQueue.Lock); // HACK
+#endif
#endif
}
KeReleaseInStackQueuedSpinLockFromDpcLevel(IN PKLOCK_QUEUE_HANDLE LockHandle)
{
#ifdef CONFIG_SMP
+#if 0
/* Call the internal function */
KeReleaseQueuedSpinLockFromDpcLevel(LockHandle->LockQueue.Next);
+#else
+ /* Make sure we are at DPC or above! */
+ if (KeGetCurrentIrql() < DISPATCH_LEVEL)
+ {
+ /* We aren't -- bugcheck */
+ KeBugCheckEx(IRQL_NOT_GREATER_OR_EQUAL,
+ (ULONG_PTR)LockHandle->LockQueue.Lock,
+ KeGetCurrentIrql(),
+ 0,
+ 0);
+ }
+
+ /* Release the lock */
+ KxReleaseSpinLock(LockHandle->LockQueue.Lock); // HACK
+#endif
#endif
}
/*
- * @implemented
+ * @unimplemented
*/
KIRQL
-NTAPI
-KeAcquireInterruptSpinLock(IN PKINTERRUPT Interrupt)
+FASTCALL
+KeAcquireSpinLockForDpc(IN PKSPIN_LOCK SpinLock)
{
- KIRQL OldIrql;
+ UNIMPLEMENTED;
+ return 0;
+}
- /* Raise IRQL */
- KeRaiseIrql(Interrupt->SynchronizeIrql, &OldIrql);
+/*
+ * @unimplemented
+ */
+VOID
+FASTCALL
+KeReleaseSpinLockForDpc(IN PKSPIN_LOCK SpinLock,
+ IN KIRQL OldIrql)
+{
+ UNIMPLEMENTED;
+}
- /* Acquire spinlock on MP */
- KefAcquireSpinLockAtDpcLevel(Interrupt->ActualLock);
- return OldIrql;
+/*
+ * @unimplemented
+ */
+KIRQL
+FASTCALL
+KeAcquireInStackQueuedSpinLockForDpc(IN PKSPIN_LOCK SpinLock,
+ IN PKLOCK_QUEUE_HANDLE LockHandle)
+{
+ UNIMPLEMENTED;
+ return 0;
+}
+
+/*
+ * @unimplemented
+ */
+VOID
+FASTCALL
+KeReleaseInStackQueuedSpinLockForDpc(IN PKLOCK_QUEUE_HANDLE LockHandle)
+{
+ UNIMPLEMENTED;
}
/*
* @implemented
*/
+BOOLEAN
+FASTCALL
+KeTestSpinLock(IN PKSPIN_LOCK SpinLock)
+{
+ /* Test this spinlock */
+ if (*SpinLock)
+ {
+ /* Spinlock is busy, yield execution */
+ YieldProcessor();
+
+ /* Return busy flag */
+ return FALSE;
+ }
+
+ /* Spinlock appears to be free */
+ return TRUE;
+}
+
#ifdef _M_IX86
/*
 * Spins (without attempting acquisition) until the given spinlock is
 * observed free, yielding the processor between polls. x86 only.
 *
 * Flags - spin options; currently ignored (FIXME).
 */
VOID
NTAPI
Kii386SpinOnSpinLock(PKSPIN_LOCK SpinLock, ULONG Flags)
{
    // FIXME: Handle flags
    UNREFERENCED_PARAMETER(Flags);

    /* Spin until the low (owned) bit clears; volatile forces a re-read
       of the lock word on every iteration */
    while (*(volatile KSPIN_LOCK *)SpinLock & 1)
    {
        // FIXME: Check for timeout

        /* Yield and keep looping */
        YieldProcessor();
    }
}
#endif