* PROGRAMMERS: Alex Ionescu (alex.ionescu@reactos.org)
*/
-//
-// Thread Dispatcher Header DebugActive Mask
-//
-#define DR_MASK(x) 1 << x
-#define DR_ACTIVE_MASK 0x10
-#define DR_REG_MASK 0x4F
-
-#ifdef _M_IX86
-//
-// Sanitizes a selector
-//
-FORCEINLINE
-ULONG
-Ke386SanitizeSeg(IN ULONG Cs,
- IN KPROCESSOR_MODE Mode)
-{
- //
- // Check if we're in kernel-mode, and force CPL 0 if so.
- // Otherwise, force CPL 3.
- //
- return ((Mode == KernelMode) ?
- (Cs & (0xFFFF & ~RPL_MASK)) :
- (RPL_MASK | (Cs & 0xFFFF)));
-}
-
-//
-// Sanitizes EFLAGS
-//
-FORCEINLINE
-ULONG
-Ke386SanitizeFlags(IN ULONG Eflags,
- IN KPROCESSOR_MODE Mode)
-{
- //
- // Check if we're in kernel-mode, and sanitize EFLAGS if so.
- // Otherwise, also force interrupt mask on.
- //
- return ((Mode == KernelMode) ?
- (Eflags & (EFLAGS_USER_SANITIZE | EFLAGS_INTERRUPT_MASK)) :
- (EFLAGS_INTERRUPT_MASK | (Eflags & EFLAGS_USER_SANITIZE)));
-}
-
-//
-// Gets a DR register from a CONTEXT structure
-//
-FORCEINLINE
-PVOID
-KiDrFromContext(IN ULONG Dr,
- IN PCONTEXT Context)
-{
- return *(PVOID*)((ULONG_PTR)Context + KiDebugRegisterContextOffsets[Dr]);
-}
-
-//
-// Gets a DR register from a KTRAP_FRAME structure
-//
-FORCEINLINE
-PVOID*
-KiDrFromTrapFrame(IN ULONG Dr,
- IN PKTRAP_FRAME TrapFrame)
-{
- return (PVOID*)((ULONG_PTR)TrapFrame + KiDebugRegisterTrapOffsets[Dr]);
-}
-
-//
-//
-//
+#ifndef _M_ARM
FORCEINLINE
-PVOID
-Ke386SanitizeDr(IN PVOID DrAddress,
- IN KPROCESSOR_MODE Mode)
+UCHAR
+KeGetPreviousMode(VOID)
{
- //
- // Check if we're in kernel-mode, and return the address directly if so.
- // Otherwise, make sure it's not inside the kernel-mode address space.
- // If it is, then clear the address.
- //
- return ((Mode == KernelMode) ? DrAddress :
- (DrAddress <= MM_HIGHEST_USER_ADDRESS) ? DrAddress : 0);
+ /* Return the current mode */
+ return KeGetCurrentThread()->PreviousMode;
}
-#endif /* _M_IX86 */
+#endif
//
// Enters a Guarded Region
} \
}
-//
-// TODO: Guarded Mutex Routines
-//
-
//
// Enters a Critical Region
//
} \
}
-#ifndef _CONFIG_SMP
-//
-// Spinlock Acquire at IRQL >= DISPATCH_LEVEL
-//
-FORCEINLINE
-VOID
-KxAcquireSpinLock(IN PKSPIN_LOCK SpinLock)
-{
- /* On UP builds, spinlocks don't exist at IRQL >= DISPATCH */
- UNREFERENCED_PARAMETER(SpinLock);
-}
-
-//
-// Spinlock Release at IRQL >= DISPATCH_LEVEL
-//
-FORCEINLINE
-VOID
-KxReleaseSpinLock(IN PKSPIN_LOCK SpinLock)
-{
- /* On UP builds, spinlocks don't exist at IRQL >= DISPATCH */
- UNREFERENCED_PARAMETER(SpinLock);
-}
+#ifndef CONFIG_SMP
//
// This routine protects against multiple CPU acquires, it's meaningless on UP.
//
-VOID
FORCEINLINE
+VOID
KiAcquireDispatcherObject(IN DISPATCHER_HEADER* Object)
{
UNREFERENCED_PARAMETER(Object);
//
// This routine protects against multiple CPU acquires, it's meaningless on UP.
//
-VOID
FORCEINLINE
+VOID
KiReleaseDispatcherObject(IN DISPATCHER_HEADER* Object)
{
UNREFERENCED_PARAMETER(Object);
}
-KIRQL
FORCEINLINE
+KIRQL
KiAcquireDispatcherLock(VOID)
{
/* Raise to DPC level */
return KeRaiseIrqlToDpcLevel();
}
-VOID
FORCEINLINE
+VOID
KiReleaseDispatcherLock(IN KIRQL OldIrql)
{
/* Just exit the dispatcher */
KiExitDispatcher(OldIrql);
}
-VOID
FORCEINLINE
+VOID
KiAcquireDispatcherLockAtDpcLevel(VOID)
{
/* This is a no-op at DPC Level for UP systems */
return;
}
-VOID
FORCEINLINE
+VOID
KiReleaseDispatcherLockFromDpcLevel(VOID)
{
/* This is a no-op at DPC Level for UP systems */
UNREFERENCED_PARAMETER(Prcb);
}
-FORCEINLINE
-VOID
-KiRundownThread(IN PKTHREAD Thread)
-{
-#if defined(_M_IX86)
- /* Check if this is the NPX Thread */
- if (KeGetCurrentPrcb()->NpxThread == Thread)
- {
- /* Clear it */
- KeGetCurrentPrcb()->NpxThread = NULL;
- KeArchFnInit();
- }
-#endif
-}
-
FORCEINLINE
VOID
KiRequestApcInterrupt(IN BOOLEAN NeedApc,
#else
-//
-// Spinlock Acquisition at IRQL >= DISPATCH_LEVEL
-//
-FORCEINLINE
-VOID
-KxAcquireSpinLock(IN PKSPIN_LOCK SpinLock)
-{
- for (;;)
- {
- /* Try to acquire it */
- if (InterlockedBitTestAndSet((PLONG)SpinLock, 0))
- {
- /* Value changed... wait until it's locked */
- while (*(volatile KSPIN_LOCK *)SpinLock == 1)
- {
-#ifdef DBG
- /* On debug builds, we use a much slower but useful routine */
- Kii386SpinOnSpinLock(SpinLock, 5);
-#else
- /* Otherwise, just yield and keep looping */
- YieldProcessor();
-#endif
- }
- }
- else
- {
-#ifdef DBG
- /* On debug builds, we OR in the KTHREAD */
- *SpinLock = KeGetCurrentThread() | 1;
-#endif
- /* All is well, break out */
- break;
- }
- }
-}
-
-//
-// Spinlock Release at IRQL >= DISPATCH_LEVEL
-//
FORCEINLINE
VOID
-KxReleaseSpinLock(IN PKSPIN_LOCK SpinLock)
-{
-#ifdef DBG
- /* Make sure that the threads match */
- if ((KeGetCurrentThread() | 1) != *SpinLock)
- {
- /* They don't, bugcheck */
- KeBugCheckEx(SPIN_LOCK_NOT_OWNED, SpinLock, 0, 0, 0);
- }
-#endif
- /* Clear the lock */
- InterlockedAnd(SpinLock, 0);
-}
-
-KIRQL
-FORCEINLINE
KiAcquireDispatcherObject(IN DISPATCHER_HEADER* Object)
{
- LONG OldValue, NewValue;
+ LONG OldValue;
/* Make sure we're at a safe level to touch the lock */
ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
do
{
/* Loop until the other CPU releases it */
- while ((UCHAR)Object->Lock & KOBJECT_LOCK_BIT)
+ while (TRUE)
{
+ /* Check if it got released */
+ OldValue = Object->Lock;
+ if ((OldValue & KOBJECT_LOCK_BIT) == 0) break;
+
/* Let the CPU know that this is a loop */
YieldProcessor();
- };
+ }
/* Try acquiring the lock now */
- NewValue = InterlockedCompareExchange(&Object->Lock,
- OldValue | KOBJECT_LOCK_BIT,
- OldValue);
- } while (NewValue != OldValue);
+ } while (InterlockedCompareExchange(&Object->Lock,
+ OldValue | KOBJECT_LOCK_BIT,
+ OldValue) != OldValue);
}
-KIRQL
FORCEINLINE
+VOID
KiReleaseDispatcherObject(IN DISPATCHER_HEADER* Object)
{
/* Make sure we're at a safe level to touch the lock */
InterlockedAnd(&Object->Lock, ~KOBJECT_LOCK_BIT);
}
-KIRQL
FORCEINLINE
+KIRQL
KiAcquireDispatcherLock(VOID)
{
/* Raise to synchronization level and acquire the dispatcher lock */
return KeAcquireQueuedSpinLockRaiseToSynch(LockQueueDispatcherLock);
}
-VOID
FORCEINLINE
+VOID
KiReleaseDispatcherLock(IN KIRQL OldIrql)
{
/* First release the lock */
KiExitDispatcher(OldIrql);
}
+FORCEINLINE
+VOID
+KiAcquireDispatcherLockAtDpcLevel(VOID)
+{
+ /* Acquire the dispatcher lock */
+ KeAcquireQueuedSpinLockAtDpcLevel(&KeGetCurrentPrcb()->
+ LockQueue[LockQueueDispatcherLock]);
+}
+
+FORCEINLINE
+VOID
+KiReleaseDispatcherLockFromDpcLevel(VOID)
+{
+ /* Release the dispatcher lock */
+ KeReleaseQueuedSpinLockFromDpcLevel(&KeGetCurrentPrcb()->
+ LockQueue[LockQueueDispatcherLock]);
+}
+
//
-// This routine inserts a thread into the deferred ready list of the given CPU
+// This routine inserts a thread into the deferred ready list of the current CPU
//
FORCEINLINE
VOID
if ((NewThread) && !(KeGetPcr()->Number == Cpu))
{
/* Send an IPI to request delivery */
- KiIpiSendRequest(AFFINITY_MASK(Cpu), IPI_DPC);
+ KiIpiSend(AFFINITY_MASK(Cpu), IPI_DPC);
}
}
// This routine acquires the PRCB lock so that only one caller can touch
// volatile PRCB data.
//
-// Since this is a simple optimized spin-lock, it must be be only acquired
+// Since this is a simple optimized spin-lock, it must only be acquired
// at dispatcher level or higher!
//
FORCEINLINE
for (;;)
{
/* Acquire the lock and break out if we acquired it first */
- if (!InterlockedExchange(&Prcb->PrcbLock, 1)) break;
+ if (!InterlockedExchange((PLONG)&Prcb->PrcbLock, 1)) break;
/* Loop until the other CPU releases it */
do
VOID
KiReleasePrcbLock(IN PKPRCB Prcb)
{
- /* Make sure it's acquired! */
+ /* Make sure we are above dispatch and the lock is acquired! */
+ ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
ASSERT(Prcb->PrcbLock != 0);
/* Release it */
- InterlockedAnd(&Prcb->PrcbLock, 0);
+ InterlockedAnd((PLONG)&Prcb->PrcbLock, 0);
}
//
for (;;)
{
/* Acquire the lock and break out if we acquired it first */
- if (!InterlockedExchange(&Thread->ThreadLock, 1)) break;
+ if (!InterlockedExchange((PLONG)&Thread->ThreadLock, 1)) break;
/* Loop until the other CPU releases it */
do
VOID
KiReleaseThreadLock(IN PKTHREAD Thread)
{
+ /* Make sure we are still above dispatch */
+ ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
+
/* Release it */
- InterlockedAnd(&Thread->ThreadLock, 0);
+ InterlockedAnd((PLONG)&Thread->ThreadLock, 0);
}
FORCEINLINE
/* Otherwise, try to acquire it and check the result */
Value = 1;
- Value = InterlockedExchange(&Thread->ThreadLock, &Value);
+ Value = InterlockedExchange((PLONG)&Thread->ThreadLock, Value);
/* Return the lock state */
return (Value == TRUE);
if (NeedApc)
{
/* Check if it's on another CPU */
- if (KeGetPcr()->Number != Cpu)
+ if (KeGetPcr()->Number != Processor)
{
/* Send an IPI to request delivery */
- KiIpiSendRequest(AFFINITY_MASK(Cpu), IPI_DPC);
+ KiIpiSend(AFFINITY_MASK(Processor), IPI_APC);
}
else
{
}
}
+FORCEINLINE
+PKSPIN_LOCK_QUEUE
+KiAcquireTimerLock(IN ULONG Hand)
+{
+ PKSPIN_LOCK_QUEUE LockQueue;
+ ULONG LockIndex;
+ ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
+
+ /* Get the lock index */
+ LockIndex = Hand >> LOCK_QUEUE_TIMER_LOCK_SHIFT;
+ LockIndex &= (LOCK_QUEUE_TIMER_TABLE_LOCKS - 1);
+
+ /* Now get the lock */
+ LockQueue = &KeGetCurrentPrcb()->LockQueue[LockQueueTimerTableLock + LockIndex];
+
+ /* Acquire it and return */
+ KeAcquireQueuedSpinLockAtDpcLevel(LockQueue);
+ return LockQueue;
+}
+
+FORCEINLINE
+VOID
+KiReleaseTimerLock(IN PKSPIN_LOCK_QUEUE LockQueue)
+{
+ ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
+
+ /* Release the lock */
+ KeReleaseQueuedSpinLockFromDpcLevel(LockQueue);
+}
+
#endif
FORCEINLINE
//
// Recalculates the due time
//
-PLARGE_INTEGER
FORCEINLINE
+PLARGE_INTEGER
KiRecalculateDueTime(IN PLARGE_INTEGER OriginalDueTime,
IN PLARGE_INTEGER DueTime,
IN OUT PLARGE_INTEGER NewDueTime)
return STATUS_WAIT_0;
}
+FORCEINLINE
+ULONG
+KiComputeTimerTableIndex(IN ULONGLONG DueTime)
+{
+    return (DueTime / KeMaximumIncrement) & (TIMER_TABLE_SIZE - 1);
+}
+
+//
+// Called from KiCompleteTimer, KiInsertTreeTimer, KeSetSystemTime
+// to remove timer entries
+// See Windows HPI blog for more information.
+FORCEINLINE
+VOID
+KiRemoveEntryTimer(IN PKTIMER Timer)
+{
+ ULONG Hand;
+ PKTIMER_TABLE_ENTRY TableEntry;
+
+ /* Remove the timer from the timer list and check if it's empty */
+ Hand = Timer->Header.Hand;
+ if (RemoveEntryList(&Timer->TimerListEntry))
+ {
+ /* Get the respective timer table entry */
+ TableEntry = &KiTimerTableListHead[Hand];
+ if (&TableEntry->Entry == TableEntry->Entry.Flink)
+ {
+ /* Set the entry to an infinite absolute time */
+ TableEntry->Time.HighPart = 0xFFFFFFFF;
+ }
+ }
+
+ /* Clear the list entries on dbg builds so we can tell the timer is gone */
+#if DBG
+ Timer->TimerListEntry.Flink = NULL;
+ Timer->TimerListEntry.Blink = NULL;
+#endif
+}
+
//
// Called by Wait and Queue code to insert a timer for dispatching.
// Also called by KeSetTimerEx to insert a timer from the caller.
//
-VOID
FORCEINLINE
+VOID
KxInsertTimer(IN PKTIMER Timer,
IN ULONG Hand)
{
}
}
+//
+// Called by KeSetTimerEx and KiInsertTreeTimer to calculate Due Time
+// See the Windows HPI Blog for more information
+//
+FORCEINLINE
+BOOLEAN
+KiComputeDueTime(IN PKTIMER Timer,
+ IN LARGE_INTEGER DueTime,
+ OUT PULONG Hand)
+{
+ LARGE_INTEGER InterruptTime, SystemTime, DifferenceTime;
+
+ /* Convert to relative time if needed */
+ Timer->Header.Absolute = FALSE;
+ if (DueTime.HighPart >= 0)
+ {
+ /* Get System Time */
+ KeQuerySystemTime(&SystemTime);
+
+ /* Do the conversion */
+ DifferenceTime.QuadPart = SystemTime.QuadPart - DueTime.QuadPart;
+
+ /* Make sure it hasn't already expired */
+ Timer->Header.Absolute = TRUE;
+ if (DifferenceTime.HighPart >= 0)
+ {
+ /* Cancel everything */
+ Timer->Header.SignalState = TRUE;
+ Timer->Header.Hand = 0;
+ Timer->DueTime.QuadPart = 0;
+ *Hand = 0;
+ return FALSE;
+ }
+
+ /* Set the time as Absolute */
+ DueTime = DifferenceTime;
+ }
+
+ /* Get the Interrupt Time */
+ InterruptTime.QuadPart = KeQueryInterruptTime();
+
+ /* Recalculate due time */
+ Timer->DueTime.QuadPart = InterruptTime.QuadPart - DueTime.QuadPart;
+
+ /* Get the handle */
+ *Hand = KiComputeTimerTableIndex(Timer->DueTime.QuadPart);
+ Timer->Header.Hand = (UCHAR)*Hand;
+ Timer->Header.Inserted = TRUE;
+ return TRUE;
+}
+
//
// Called from Unlink and Queue Insert Code.
// Also called by timer code when canceling an inserted timer.
// Removes a timer from it's tree.
//
-VOID
FORCEINLINE
+VOID
KxRemoveTreeTimer(IN PKTIMER Timer)
{
ULONG Hand = Timer->Header.Hand;
KiReleaseTimerLock(LockQueue);
}
-VOID
FORCEINLINE
+VOID
KxSetTimerForThreadWait(IN PKTIMER Timer,
IN LARGE_INTEGER Interval,
OUT PULONG Hand)
ASSERT(Thread->NextProcessor == Prcb->Number);
/* Check if this thread is allowed to run in this CPU */
-#ifdef _CONFIG_SMP
+#ifdef CONFIG_SMP
if ((Thread->Affinity) & (Prcb->SetMember))
#else
if (TRUE)
KiSelectReadyThread(IN KPRIORITY Priority,
IN PKPRCB Prcb)
{
- ULONG PrioritySet, HighPriority;
+ ULONG PrioritySet;
+ LONG HighPriority;
PLIST_ENTRY ListEntry;
PKTHREAD Thread = NULL;
// This routine computes the new priority for a thread. It is only valid for
// threads with priorities in the dynamic priority range.
//
-SCHAR
FORCEINLINE
+SCHAR
KiComputeNewPriority(IN PKTHREAD Thread,
IN SCHAR Adjustment)
{
return Priority;
}
-#ifndef _M_ARM
-PRKTHREAD
+//
+// Guarded Mutex Routines
+//
FORCEINLINE
-KeGetCurrentThread(VOID)
+VOID
+_KeInitializeGuardedMutex(OUT PKGUARDED_MUTEX GuardedMutex)
{
-#ifdef _M_IX86
- /* Return the current thread */
- return ((PKIPCR)KeGetPcr())->PrcbData.CurrentThread;
-#elif defined (_M_AMD64)
- return (PRKTHREAD)__readgsqword(FIELD_OFFSET(KIPCR, Prcb.CurrentThread));
-#else
- PKPRCB Prcb = KeGetCurrentPrcb();
- return Prcb->CurrentThread;
-#endif
+ /* Setup the Initial Data */
+ GuardedMutex->Count = GM_LOCK_BIT;
+ GuardedMutex->Owner = NULL;
+ GuardedMutex->Contention = 0;
+
+ /* Initialize the Wait Gate */
+ KeInitializeGate(&GuardedMutex->Gate);
}
-UCHAR
FORCEINLINE
-KeGetPreviousMode(VOID)
+VOID
+_KeAcquireGuardedMutexUnsafe(IN OUT PKGUARDED_MUTEX GuardedMutex)
{
- /* Return the current mode */
- return KeGetCurrentThread()->PreviousMode;
+ PKTHREAD Thread = KeGetCurrentThread();
+
+ /* Sanity checks */
+ ASSERT((KeGetCurrentIrql() == APC_LEVEL) ||
+ (Thread->SpecialApcDisable < 0) ||
+ (Thread->Teb == NULL) ||
+ (Thread->Teb >= (PTEB)MM_SYSTEM_RANGE_START));
+ ASSERT(GuardedMutex->Owner != Thread);
+
+ /* Remove the lock */
+ if (!InterlockedBitTestAndReset(&GuardedMutex->Count, GM_LOCK_BIT_V))
+ {
+        /* The Guarded Mutex was already locked, enter contended case */
+ KiAcquireGuardedMutex(GuardedMutex);
+ }
+
+ /* Set the Owner */
+ GuardedMutex->Owner = Thread;
}
-#endif
+FORCEINLINE
VOID
+_KeReleaseGuardedMutexUnsafe(IN OUT PKGUARDED_MUTEX GuardedMutex)
+{
+ LONG OldValue, NewValue;
+
+ /* Sanity checks */
+ ASSERT((KeGetCurrentIrql() == APC_LEVEL) ||
+ (KeGetCurrentThread()->SpecialApcDisable < 0) ||
+ (KeGetCurrentThread()->Teb == NULL) ||
+ (KeGetCurrentThread()->Teb >= (PTEB)MM_SYSTEM_RANGE_START));
+ ASSERT(GuardedMutex->Owner == KeGetCurrentThread());
+
+ /* Destroy the Owner */
+ GuardedMutex->Owner = NULL;
+
+ /* Add the Lock Bit */
+ OldValue = InterlockedExchangeAdd(&GuardedMutex->Count, GM_LOCK_BIT);
+ ASSERT((OldValue & GM_LOCK_BIT) == 0);
+
+ /* Check if it was already locked, but not woken */
+ if ((OldValue) && !(OldValue & GM_LOCK_WAITER_WOKEN))
+ {
+        /* Update the OldValue to what it should be now */
+ OldValue += GM_LOCK_BIT;
+
+ /* The mutex will be woken, minus one waiter */
+ NewValue = OldValue + GM_LOCK_WAITER_WOKEN -
+ GM_LOCK_WAITER_INC;
+
+ /* Remove the Woken bit */
+ if (InterlockedCompareExchange(&GuardedMutex->Count,
+ NewValue,
+ OldValue) == OldValue)
+ {
+ /* Signal the Gate */
+ KeSignalGateBoostPriority(&GuardedMutex->Gate);
+ }
+ }
+}
+
FORCEINLINE
-KeFlushProcessTb(VOID)
+VOID
+_KeAcquireGuardedMutex(IN PKGUARDED_MUTEX GuardedMutex)
{
- /* Flush the TLB by resetting CR3 */
-#ifdef _M_PPC
- __asm__("sync\n\tisync\n\t");
-#elif _M_ARM
- //
- // We need to implement this!
- //
- ASSERTMSG("Need ARM flush routine\n", FALSE);
-#else
- __writecr3(__readcr3());
-#endif
+ PKTHREAD Thread = KeGetCurrentThread();
+
+ /* Sanity checks */
+ ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
+ ASSERT(GuardedMutex->Owner != Thread);
+
+ /* Disable Special APCs */
+ KeEnterGuardedRegion();
+
+ /* Remove the lock */
+ if (!InterlockedBitTestAndReset(&GuardedMutex->Count, GM_LOCK_BIT_V))
+ {
+        /* The Guarded Mutex was already locked, enter contended case */
+ KiAcquireGuardedMutex(GuardedMutex);
+ }
+
+ /* Set the Owner and Special APC Disable state */
+ GuardedMutex->Owner = Thread;
+ GuardedMutex->SpecialApcDisable = Thread->SpecialApcDisable;
+}
+
+FORCEINLINE
+VOID
+_KeReleaseGuardedMutex(IN OUT PKGUARDED_MUTEX GuardedMutex)
+{
+ LONG OldValue, NewValue;
+
+ /* Sanity checks */
+ ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
+ ASSERT(GuardedMutex->Owner == KeGetCurrentThread());
+ ASSERT(KeGetCurrentThread()->SpecialApcDisable ==
+ GuardedMutex->SpecialApcDisable);
+
+ /* Destroy the Owner */
+ GuardedMutex->Owner = NULL;
+
+ /* Add the Lock Bit */
+ OldValue = InterlockedExchangeAdd(&GuardedMutex->Count, GM_LOCK_BIT);
+ ASSERT((OldValue & GM_LOCK_BIT) == 0);
+
+ /* Check if it was already locked, but not woken */
+ if ((OldValue) && !(OldValue & GM_LOCK_WAITER_WOKEN))
+ {
+        /* Update the OldValue to what it should be now */
+ OldValue += GM_LOCK_BIT;
+
+ /* The mutex will be woken, minus one waiter */
+ NewValue = OldValue + GM_LOCK_WAITER_WOKEN -
+ GM_LOCK_WAITER_INC;
+
+ /* Remove the Woken bit */
+ if (InterlockedCompareExchange(&GuardedMutex->Count,
+ NewValue,
+ OldValue) == OldValue)
+ {
+ /* Signal the Gate */
+ KeSignalGateBoostPriority(&GuardedMutex->Gate);
+ }
+ }
+
+ /* Re-enable APCs */
+ KeLeaveGuardedRegion();
+}
+
+FORCEINLINE
+BOOLEAN
+_KeTryToAcquireGuardedMutex(IN OUT PKGUARDED_MUTEX GuardedMutex)
+{
+ PKTHREAD Thread = KeGetCurrentThread();
+
+ /* Block APCs */
+ KeEnterGuardedRegion();
+
+ /* Remove the lock */
+ if (!InterlockedBitTestAndReset(&GuardedMutex->Count, GM_LOCK_BIT_V))
+ {
+ /* Re-enable APCs */
+ KeLeaveGuardedRegion();
+ YieldProcessor();
+
+ /* Return failure */
+ return FALSE;
+ }
+
+ /* Set the Owner and APC State */
+ GuardedMutex->Owner = Thread;
+ GuardedMutex->SpecialApcDisable = Thread->SpecialApcDisable;
+ return TRUE;
+}
+
+
+FORCEINLINE
+VOID
+KiAcquireNmiListLock(OUT PKIRQL OldIrql)
+{
+ KeAcquireSpinLock(&KiNmiCallbackListLock, OldIrql);
}
+FORCEINLINE
+VOID
+KiReleaseNmiListLock(IN KIRQL OldIrql)
+{
+ KeReleaseSpinLock(&KiNmiCallbackListLock, OldIrql);
+}