/* PRIVATE FUNCTIONS *********************************************************/
#if 0
//
// FIXME: The queued spinlock routines are broken.
//

VOID
FASTCALL
KeAcquireQueuedSpinLockAtDpcLevel(IN PKSPIN_LOCK_QUEUE LockHandle)
#endif
}
#else
//
// HACK: Hacked to work like normal spinlocks
//

+VOID
+FASTCALL
+KeAcquireQueuedSpinLockAtDpcLevel(IN PKSPIN_LOCK_QUEUE LockHandle)
+{
+#ifdef CONFIG_SMP
+ /* Make sure we are at DPC or above! */
+ if (KeGetCurrentIrql() < DISPATCH_LEVEL)
+ {
+ /* We aren't -- bugcheck */
+ KeBugCheckEx(IRQL_NOT_GREATER_OR_EQUAL,
+ (ULONG_PTR)LockHandle->Lock,
+ KeGetCurrentIrql(),
+ 0,
+ 0);
+ }
+
+ /* Do the inlined function */
+ KxAcquireSpinLock(LockHandle->Lock);
+#endif
+}
+
+VOID
+FASTCALL
+KeReleaseQueuedSpinLockFromDpcLevel(IN PKSPIN_LOCK_QUEUE LockHandle)
+{
+#ifdef CONFIG_SMP
+ /* Make sure we are at DPC or above! */
+ if (KeGetCurrentIrql() < DISPATCH_LEVEL)
+ {
+ /* We aren't -- bugcheck */
+ KeBugCheckEx(IRQL_NOT_GREATER_OR_EQUAL,
+ (ULONG_PTR)LockHandle->Lock,
+ KeGetCurrentIrql(),
+ 0,
+ 0);
+ }
+
+ /* Do the inlined function */
+ KxReleaseSpinLock(LockHandle->Lock);
+#endif
+}

#endif

/* PUBLIC FUNCTIONS **********************************************************/
-#ifdef _X86_
+/*
+ * @implemented
+ */
+KIRQL
+NTAPI
+KeAcquireInterruptSpinLock(IN PKINTERRUPT Interrupt)
+{
+ KIRQL OldIrql;
+
+ /* Raise IRQL */
+ KeRaiseIrql(Interrupt->SynchronizeIrql, &OldIrql);
+
+ /* Acquire spinlock on MP */
+ KeAcquireSpinLockAtDpcLevel(Interrupt->ActualLock);
+ return OldIrql;
+}
+
/*
* @implemented
*/
VOID
NTAPI
-KeInitializeSpinLock(IN PKSPIN_LOCK SpinLock)
+KeReleaseInterruptSpinLock(IN PKINTERRUPT Interrupt,
+ IN KIRQL OldIrql)
+{
+ /* Release lock on MP */
+ KeReleaseSpinLockFromDpcLevel(Interrupt->ActualLock);
+
+ /* Lower IRQL */
+ KeLowerIrql(OldIrql);
+}
+
+/*
+ * @implemented
+ */
+VOID
+NTAPI
+_KeInitializeSpinLock(IN PKSPIN_LOCK SpinLock)
{
/* Clear it */
*SpinLock = 0;
}
-#endif
/*
* @implemented
NTAPI
KeAcquireSpinLockAtDpcLevel(IN PKSPIN_LOCK SpinLock)
{
+ /* Make sure we are at DPC or above! */
+ if (KeGetCurrentIrql() < DISPATCH_LEVEL)
+ {
+ /* We aren't -- bugcheck */
+ KeBugCheckEx(IRQL_NOT_GREATER_OR_EQUAL,
+ (ULONG_PTR)SpinLock,
+ KeGetCurrentIrql(),
+ 0,
+ 0);
+ }
+
/* Do the inlined function */
KxAcquireSpinLock(SpinLock);
}
NTAPI
KeReleaseSpinLockFromDpcLevel(IN PKSPIN_LOCK SpinLock)
{
- /* Do the lined function */
+ /* Make sure we are at DPC or above! */
+ if (KeGetCurrentIrql() < DISPATCH_LEVEL)
+ {
+ /* We aren't -- bugcheck */
+ KeBugCheckEx(IRQL_NOT_GREATER_OR_EQUAL,
+ (ULONG_PTR)SpinLock,
+ KeGetCurrentIrql(),
+ 0,
+ 0);
+ }
+
+ /* Do the inlined function */
KxReleaseSpinLock(SpinLock);
}
FASTCALL
KefAcquireSpinLockAtDpcLevel(IN PKSPIN_LOCK SpinLock)
{
+ /* Make sure we are at DPC or above! */
+ if (KeGetCurrentIrql() < DISPATCH_LEVEL)
+ {
+ /* We aren't -- bugcheck */
+ KeBugCheckEx(IRQL_NOT_GREATER_OR_EQUAL,
+ (ULONG_PTR)SpinLock,
+ KeGetCurrentIrql(),
+ 0,
+ 0);
+ }
+
/* Do the inlined function */
KxAcquireSpinLock(SpinLock);
}
FASTCALL
KefReleaseSpinLockFromDpcLevel(IN PKSPIN_LOCK SpinLock)
{
- /* Do the lined function */
+ /* Make sure we are at DPC or above! */
+ if (KeGetCurrentIrql() < DISPATCH_LEVEL)
+ {
+ /* We aren't -- bugcheck */
+ KeBugCheckEx(IRQL_NOT_GREATER_OR_EQUAL,
+ (ULONG_PTR)SpinLock,
+ KeGetCurrentIrql(),
+ 0,
+ 0);
+ }
+
+ /* Do the inlined function */
KxReleaseSpinLock(SpinLock);
}
/* Set it up properly */
LockHandle->LockQueue.Next = NULL;
LockHandle->LockQueue.Lock = SpinLock;
+#if 0
KeAcquireQueuedSpinLockAtDpcLevel(LockHandle->LockQueue.Next);
+#else
+ /* Make sure we are at DPC or above! */
+ if (KeGetCurrentIrql() < DISPATCH_LEVEL)
+ {
+ /* We aren't -- bugcheck */
+ KeBugCheckEx(IRQL_NOT_GREATER_OR_EQUAL,
+ (ULONG_PTR)LockHandle->LockQueue.Lock,
+ KeGetCurrentIrql(),
+ 0,
+ 0);
+ }
+
+ /* Acquire the lock */
+ KxAcquireSpinLock(LockHandle->LockQueue.Lock); // HACK
+#endif
#endif
}
KeReleaseInStackQueuedSpinLockFromDpcLevel(IN PKLOCK_QUEUE_HANDLE LockHandle)
{
#ifdef CONFIG_SMP
+#if 0
/* Call the internal function */
KeReleaseQueuedSpinLockFromDpcLevel(LockHandle->LockQueue.Next);
-#endif
-}
-
-/*
- * @implemented
- */
-KIRQL
-NTAPI
-KeAcquireInterruptSpinLock(IN PKINTERRUPT Interrupt)
-{
- KIRQL OldIrql;
-
- /* Raise IRQL */
- KeRaiseIrql(Interrupt->SynchronizeIrql, &OldIrql);
-
- /* Acquire spinlock on MP */
- KefAcquireSpinLockAtDpcLevel(Interrupt->ActualLock);
- return OldIrql;
-}
-
-/*
- * @implemented
- */
-VOID
-NTAPI
-KeReleaseInterruptSpinLock(IN PKINTERRUPT Interrupt,
- IN KIRQL OldIrql)
-{
- /* Release lock on MP */
- KefReleaseSpinLockFromDpcLevel(Interrupt->ActualLock);
+#else
+ /* Make sure we are at DPC or above! */
+ if (KeGetCurrentIrql() < DISPATCH_LEVEL)
+ {
+ /* We aren't -- bugcheck */
+ KeBugCheckEx(IRQL_NOT_GREATER_OR_EQUAL,
+ (ULONG_PTR)LockHandle->LockQueue.Lock,
+ KeGetCurrentIrql(),
+ 0,
+ 0);
+ }
- /* Lower IRQL */
- KeLowerIrql(OldIrql);
+ /* Release the lock */
+ KxReleaseSpinLock(LockHandle->LockQueue.Lock); // HACK
+#endif
+#endif
}
/*
return TRUE;
}
-/* EOF */
+#ifdef _M_IX86
+VOID
+NTAPI
+Kii386SpinOnSpinLock(PKSPIN_LOCK SpinLock, ULONG Flags)
+{
+ // FIXME: Handle flags
+ UNREFERENCED_PARAMETER(Flags);
+
+ /* Spin until it's unlocked */
+ while (*(volatile KSPIN_LOCK *)SpinLock & 1)
+ {
+ // FIXME: Check for timeout
+
+ /* Yield and keep looping */
+ YieldProcessor();
+ }
+}
+#endif