1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: GPL - See COPYING in the top level directory
4 * FILE: ntoskrnl/include/internal/ke_x.h
5 * PURPOSE: Internal Inlined Functions for the Kernel
6 * PROGRAMMERS: Alex Ionescu (alex.ionescu@reactos.org)
7 */
8
9 //
10 // Thread Dispatcher Header DebugActive Mask
11 //
12 #define DR_MASK(x) (1 << (x))
13 #define DR_ACTIVE_MASK 0x10
14 #define DR_REG_MASK 0x4F
15
16 #ifdef _M_IX86
17 //
18 // Sanitizes a selector
19 //
20 FORCEINLINE
21 ULONG
22 Ke386SanitizeSeg(IN ULONG Cs,
23 IN KPROCESSOR_MODE Mode)
24 {
25 //
26 // Check if we're in kernel-mode, and force CPL 0 if so.
27 // Otherwise, force CPL 3.
28 //
29 return ((Mode == KernelMode) ?
30 (Cs & (0xFFFF & ~RPL_MASK)) :
31 (RPL_MASK | (Cs & 0xFFFF)));
32 }
33
34 //
35 // Sanitizes EFLAGS
36 //
37 FORCEINLINE
38 ULONG
39 Ke386SanitizeFlags(IN ULONG Eflags,
40 IN KPROCESSOR_MODE Mode)
41 {
42 //
43 // Check if we're in kernel-mode, and sanitize EFLAGS if so.
44 // Otherwise, also force interrupt mask on.
45 //
46 return ((Mode == KernelMode) ?
47 (Eflags & (EFLAGS_USER_SANITIZE | EFLAGS_INTERRUPT_MASK)) :
48 (EFLAGS_INTERRUPT_MASK | (Eflags & EFLAGS_USER_SANITIZE)));
49 }
50
51 //
52 // Gets a DR register from a CONTEXT structure
53 //
54 FORCEINLINE
55 PVOID
56 KiDrFromContext(IN ULONG Dr,
57 IN PCONTEXT Context)
58 {
59 return *(PVOID*)((ULONG_PTR)Context + KiDebugRegisterContextOffsets[Dr]);
60 }
61
62 //
63 // Gets a DR register from a KTRAP_FRAME structure
64 //
65 FORCEINLINE
66 PVOID*
67 KiDrFromTrapFrame(IN ULONG Dr,
68 IN PKTRAP_FRAME TrapFrame)
69 {
70 return (PVOID*)((ULONG_PTR)TrapFrame + KiDebugRegisterTrapOffsets[Dr]);
71 }
72
73 //
74 // Sanitizes a debug register address
75 //
76 FORCEINLINE
77 PVOID
78 Ke386SanitizeDr(IN PVOID DrAddress,
79 IN KPROCESSOR_MODE Mode)
80 {
81 //
82 // Check if we're in kernel-mode, and return the address directly if so.
83 // Otherwise, make sure it's not inside the kernel-mode address space.
84 // If it is, then clear the address.
85 //
86 return ((Mode == KernelMode) ? DrAddress :
87 (DrAddress <= MM_HIGHEST_USER_ADDRESS) ? DrAddress : 0);
88 }
89 #endif /* _M_IX86 */
90
91 #ifndef _M_ARM
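//
// Reads the current thread from the PCR/PRCB, and the previous processor
// mode recorded in that thread
//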
92 FORCEINLINE
93 PRKTHREAD
94 KeGetCurrentThread(VOID)
95 {
96 #ifdef _M_IX86
97 /* Return the current thread */
98 return ((PKIPCR)KeGetPcr())->PrcbData.CurrentThread;
99 #else
100 PKPRCB Prcb = KeGetCurrentPrcb();
101 return Prcb->CurrentThread;
102 #endif
103 }
104
105 FORCEINLINE
106 UCHAR
107 KeGetPreviousMode(VOID)
108 {
109 /* Return the current mode */
110 return KeGetCurrentThread()->PreviousMode;
111 }
112 #endif
113
114 FORCEINLINE
115 VOID
116 KeFlushProcessTb(VOID)
117 {
118 /* Flush the TLB by resetting CR3 */
119 #ifdef _M_PPC
120 __asm__("sync\n\tisync\n\t");
121 #elif defined(_M_ARM)
122 //
123 // We need to implement this!
124 //
125 ASSERTMSG("Need ARM flush routine\n", FALSE);
126 #else
127 __writecr3(__readcr3());
128 #endif
129 }
130
131 //
132 // Enters a Guarded Region
133 //
134 #define KeEnterGuardedRegion() \
135 { \
136 PKTHREAD _Thread = KeGetCurrentThread(); \
137 \
138 /* Sanity checks */ \
139 ASSERT(KeGetCurrentIrql() <= APC_LEVEL); \
140 ASSERT(_Thread == KeGetCurrentThread()); \
141 ASSERT((_Thread->SpecialApcDisable <= 0) && \
142 (_Thread->SpecialApcDisable != -32768)); \
143 \
144 /* Disable Special APCs */ \
145 _Thread->SpecialApcDisable--; \
146 }
147
148 //
149 // Leaves a Guarded Region
150 //
151 #define KeLeaveGuardedRegion() \
152 { \
153 PKTHREAD _Thread = KeGetCurrentThread(); \
154 \
155 /* Sanity checks */ \
156 ASSERT(KeGetCurrentIrql() <= APC_LEVEL); \
157 ASSERT(_Thread == KeGetCurrentThread()); \
158 ASSERT(_Thread->SpecialApcDisable < 0); \
159 \
160 /* Leave region and check if APCs are OK now */ \
161 if (!(++_Thread->SpecialApcDisable)) \
162 { \
163 /* Check for Kernel APCs on the list */ \
164 if (!IsListEmpty(&_Thread->ApcState. \
165 ApcListHead[KernelMode])) \
166 { \
167 /* Check for APC Delivery */ \
168 KiCheckForKernelApcDelivery(); \
169 } \
170 } \
171 }
172
173 //
174 // Enters a Critical Region
175 //
176 #define KeEnterCriticalRegion() \
177 { \
178 PKTHREAD _Thread = KeGetCurrentThread(); \
179 \
180 /* Sanity checks */ \
181 ASSERT(_Thread == KeGetCurrentThread()); \
182 ASSERT((_Thread->KernelApcDisable <= 0) && \
183 (_Thread->KernelApcDisable != -32768)); \
184 \
185 /* Disable Kernel APCs */ \
186 _Thread->KernelApcDisable--; \
187 }
188
189 //
190 // Leaves a Critical Region
191 //
192 #define KeLeaveCriticalRegion() \
193 { \
194 PKTHREAD _Thread = KeGetCurrentThread(); \
195 \
196 /* Sanity checks */ \
197 ASSERT(_Thread == KeGetCurrentThread()); \
198 ASSERT(_Thread->KernelApcDisable < 0); \
199 \
200 /* Enable Kernel APCs */ \
201 _Thread->KernelApcDisable++; \
202 \
203 /* Check if Kernel APCs are now enabled */ \
204 if (!(_Thread->KernelApcDisable)) \
205 { \
206 /* Check if we need to request an APC Delivery */ \
207 if (!(IsListEmpty(&_Thread->ApcState.ApcListHead[KernelMode])) && \
208 !(_Thread->SpecialApcDisable)) \
209 { \
210 /* Check for the right environment */ \
211 KiCheckForKernelApcDelivery(); \
212 } \
213 } \
214 }
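//
// Illustrative usage sketch (the routine name below is hypothetical): callers
// pair the two macros around code that must not be interrupted by normal
// kernel APCs; the guarded-region pair above is used the same way but also
// blocks special kernel APCs.
//
//     VOID
//     ExampleCriticalWork(VOID)
//     {
//         KeEnterCriticalRegion();
//         /* ... touch state that a kernel APC routine could also touch ... */
//         KeLeaveCriticalRegion();
//     }
//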
215
216 #ifndef CONFIG_SMP
217 //
218 // Spinlock Acquire at IRQL >= DISPATCH_LEVEL
219 //
220 FORCEINLINE
221 VOID
222 KxAcquireSpinLock(IN PKSPIN_LOCK SpinLock)
223 {
224 /* On UP builds, spinlocks don't exist at IRQL >= DISPATCH */
225 UNREFERENCED_PARAMETER(SpinLock);
226 }
227
228 //
229 // Spinlock Release at IRQL >= DISPATCH_LEVEL
230 //
231 FORCEINLINE
232 VOID
233 KxReleaseSpinLock(IN PKSPIN_LOCK SpinLock)
234 {
235 /* On UP builds, spinlocks don't exist at IRQL >= DISPATCH */
236 UNREFERENCED_PARAMETER(SpinLock);
237 }
238
239 //
240 // This routine protects against multiple CPU acquires; it's meaningless on UP.
241 //
242 FORCEINLINE
243 VOID
244 KiAcquireDispatcherObject(IN DISPATCHER_HEADER* Object)
245 {
246 UNREFERENCED_PARAMETER(Object);
247 }
248
249 //
250 // This routine protects against multiple CPU acquires; it's meaningless on UP.
251 //
252 FORCEINLINE
253 VOID
254 KiReleaseDispatcherObject(IN DISPATCHER_HEADER* Object)
255 {
256 UNREFERENCED_PARAMETER(Object);
257 }
258
259 FORCEINLINE
260 KIRQL
261 KiAcquireDispatcherLock(VOID)
262 {
263 /* Raise to DPC level */
264 return KeRaiseIrqlToDpcLevel();
265 }
266
267 FORCEINLINE
268 VOID
269 KiReleaseDispatcherLock(IN KIRQL OldIrql)
270 {
271 /* Just exit the dispatcher */
272 KiExitDispatcher(OldIrql);
273 }
274
275 FORCEINLINE
276 VOID
277 KiAcquireDispatcherLockAtDpcLevel(VOID)
278 {
279 /* This is a no-op at DPC Level for UP systems */
280 return;
281 }
282
283 FORCEINLINE
284 VOID
285 KiReleaseDispatcherLockFromDpcLevel(VOID)
286 {
287 /* This is a no-op at DPC Level for UP systems */
288 return;
289 }
290
291 //
292 // This routine makes the thread deferred ready on the boot CPU.
293 //
294 FORCEINLINE
295 VOID
296 KiInsertDeferredReadyList(IN PKTHREAD Thread)
297 {
298 /* Set the thread to deferred state and boot CPU */
299 Thread->State = DeferredReady;
300 Thread->DeferredProcessor = 0;
301
302 /* Make the thread ready immediately */
303 KiDeferredReadyThread(Thread);
304 }
305
306 FORCEINLINE
307 VOID
308 KiRescheduleThread(IN BOOLEAN NewThread,
309 IN ULONG Cpu)
310 {
311 /* This is meaningless on UP systems */
312 UNREFERENCED_PARAMETER(NewThread);
313 UNREFERENCED_PARAMETER(Cpu);
314 }
315
316 //
317 // This routine protects against multiple CPU acquires; it's meaningless on UP.
318 //
319 FORCEINLINE
320 VOID
321 KiSetThreadSwapBusy(IN PKTHREAD Thread)
322 {
323 UNREFERENCED_PARAMETER(Thread);
324 }
325
326 //
327 // This routine protects against multiple CPU acquires; it's meaningless on UP.
328 //
329 FORCEINLINE
330 VOID
331 KiAcquirePrcbLock(IN PKPRCB Prcb)
332 {
333 UNREFERENCED_PARAMETER(Prcb);
334 }
335
336 //
337 // This routine protects against multiple CPU acquires; it's meaningless on UP.
338 //
339 FORCEINLINE
340 VOID
341 KiReleasePrcbLock(IN PKPRCB Prcb)
342 {
343 UNREFERENCED_PARAMETER(Prcb);
344 }
345
346 //
347 // This routine protects against multiple CPU acquires; it's meaningless on UP.
348 //
349 FORCEINLINE
350 VOID
351 KiAcquireThreadLock(IN PKTHREAD Thread)
352 {
353 UNREFERENCED_PARAMETER(Thread);
354 }
355
356 //
357 // This routine protects against multiple CPU acquires; it's meaningless on UP.
358 //
359 FORCEINLINE
360 VOID
361 KiReleaseThreadLock(IN PKTHREAD Thread)
362 {
363 UNREFERENCED_PARAMETER(Thread);
364 }
365
366 //
367 // This routine protects against multiple CPU acquires; it's meaningless on UP.
368 //
369 FORCEINLINE
370 BOOLEAN
371 KiTryThreadLock(IN PKTHREAD Thread)
372 {
373 UNREFERENCED_PARAMETER(Thread);
374 return FALSE;
375 }
376
377 FORCEINLINE
378 VOID
379 KiCheckDeferredReadyList(IN PKPRCB Prcb)
380 {
381 /* There are no deferred ready lists on UP systems */
382 UNREFERENCED_PARAMETER(Prcb);
383 }
384
385 FORCEINLINE
386 VOID
387 KiRundownThread(IN PKTHREAD Thread)
388 {
389 #if defined(_M_IX86) || defined(_M_AMD64)
390 /* Check if this is the NPX Thread */
391 if (KeGetCurrentPrcb()->NpxThread == Thread)
392 {
393 /* Clear it */
394 KeGetCurrentPrcb()->NpxThread = NULL;
395 KeArchFnInit();
396 }
397 #endif
398 }
399
400 FORCEINLINE
401 VOID
402 KiRequestApcInterrupt(IN BOOLEAN NeedApc,
403 IN UCHAR Processor)
404 {
405 /* We deliver instantly on UP */
406 UNREFERENCED_PARAMETER(NeedApc);
407 UNREFERENCED_PARAMETER(Processor);
408 }
409
410 FORCEINLINE
411 PKSPIN_LOCK_QUEUE
412 KiAcquireTimerLock(IN ULONG Hand)
413 {
414 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
415
416 /* Nothing to do on UP */
417 UNREFERENCED_PARAMETER(Hand);
418 return NULL;
419 }
420
421 FORCEINLINE
422 VOID
423 KiReleaseTimerLock(IN PKSPIN_LOCK_QUEUE LockQueue)
424 {
425 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
426
427 /* Nothing to do on UP */
428 UNREFERENCED_PARAMETER(LockQueue);
429 }
430
431 #else
432
433 //
434 // Spinlock Acquisition at IRQL >= DISPATCH_LEVEL
435 //
436 FORCEINLINE
437 VOID
438 KxAcquireSpinLock(IN PKSPIN_LOCK SpinLock)
439 {
440 for (;;)
441 {
442 /* Try to acquire it */
443 if (InterlockedBitTestAndSet((PLONG)SpinLock, 0))
444 {
445 /* Value changed... wait until it's locked */
446 while (*(volatile KSPIN_LOCK *)SpinLock == 1)
447 {
448 #if DBG
449 /* On debug builds, we use a much slower but useful routine */
450 //Kii386SpinOnSpinLock(SpinLock, 5);
451
452 /* FIXME: Do normal yield for now */
453 YieldProcessor();
454 #else
455 /* Otherwise, just yield and keep looping */
456 YieldProcessor();
457 #endif
458 }
459 }
460 else
461 {
462 #if DBG
463 /* On debug builds, we OR in the KTHREAD */
464 *SpinLock = (KSPIN_LOCK)KeGetCurrentThread() | 1;
465 #endif
466 /* All is well, break out */
467 break;
468 }
469 }
470 }
471
472 //
473 // Spinlock Release at IRQL >= DISPATCH_LEVEL
474 //
475 FORCEINLINE
476 VOID
477 KxReleaseSpinLock(IN PKSPIN_LOCK SpinLock)
478 {
479 #if DBG
480 /* Make sure that the threads match */
481 if (((KSPIN_LOCK)KeGetCurrentThread() | 1) != *SpinLock)
482 {
483 /* They don't, bugcheck */
484 KeBugCheckEx(SPIN_LOCK_NOT_OWNED, (ULONG_PTR)SpinLock, 0, 0, 0);
485 }
486 #endif
487 /* Clear the lock */
488 InterlockedAnd((PLONG)SpinLock, 0);
489 }
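//
// Illustrative sketch (hypothetical names; assumes the caller is already at
// IRQL >= DISPATCH_LEVEL, as the routines above expect): the two helpers are
// used as a matched pair around the protected region.
//
//     KSPIN_LOCK ExampleLock = 0;
//
//     VOID
//     ExampleAtDpcLevel(VOID)
//     {
//         KxAcquireSpinLock(&ExampleLock);
//         /* ... touch data shared with other processors ... */
//         KxReleaseSpinLock(&ExampleLock);
//     }
//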
490
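//
// Acquires the dispatcher object's embedded lock bit (KOBJECT_LOCK_BIT in the
// header's Lock field) with a test, yield and compare-exchange loop
//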
491 FORCEINLINE
492 VOID
493 KiAcquireDispatcherObject(IN DISPATCHER_HEADER* Object)
494 {
495 LONG OldValue;
496
497 /* Make sure we're at a safe level to touch the lock */
498 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
499
500 /* Start acquire loop */
501 do
502 {
503 /* Loop until the other CPU releases it */
504 while (TRUE)
505 {
506 /* Check if it got released */
507 OldValue = Object->Lock;
508 if ((OldValue & KOBJECT_LOCK_BIT) == 0) break;
509
510 /* Let the CPU know that this is a loop */
511 YieldProcessor();
512 }
513
514 /* Try acquiring the lock now */
515 } while (InterlockedCompareExchange(&Object->Lock,
516 OldValue | KOBJECT_LOCK_BIT,
517 OldValue) != OldValue);
518 }
519
520 FORCEINLINE
521 VOID
522 KiReleaseDispatcherObject(IN DISPATCHER_HEADER* Object)
523 {
524 /* Make sure we're at a safe level to touch the lock */
525 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
526
527 /* Release it */
528 InterlockedAnd(&Object->Lock, ~KOBJECT_LOCK_BIT);
529 }
530
531 FORCEINLINE
532 KIRQL
533 KiAcquireDispatcherLock(VOID)
534 {
535 /* Raise to synchronization level and acquire the dispatcher lock */
536 return KeAcquireQueuedSpinLockRaiseToSynch(LockQueueDispatcherLock);
537 }
538
539 FORCEINLINE
540 VOID
541 KiReleaseDispatcherLock(IN KIRQL OldIrql)
542 {
543 /* First release the lock */
544 KeReleaseQueuedSpinLockFromDpcLevel(&KeGetCurrentPrcb()->
545 LockQueue[LockQueueDispatcherLock]);
546
547 /* Then exit the dispatcher */
548 KiExitDispatcher(OldIrql);
549 }
550
551 FORCEINLINE
552 VOID
553 KiAcquireDispatcherLockAtDpcLevel(VOID)
554 {
555 /* Acquire the dispatcher lock */
556 KeAcquireQueuedSpinLockAtDpcLevel(LockQueueDispatcherLock);
557 }
558
559 FORCEINLINE
560 VOID
561 KiReleaseDispatcherLockFromDpcLevel(VOID)
562 {
563 /* Release the dispatcher lock */
564 KeReleaseQueuedSpinLockFromDpcLevel(LockQueueDispatcherLock);
565 }
566
567 //
568 // This routine inserts a thread into the deferred ready list of the current CPU
569 //
570 FORCEINLINE
571 VOID
572 KiInsertDeferredReadyList(IN PKTHREAD Thread)
573 {
574 PKPRCB Prcb = KeGetCurrentPrcb();
575
576 /* Set the thread to deferred state and CPU */
577 Thread->State = DeferredReady;
578 Thread->DeferredProcessor = Prcb->Number;
579
580 /* Add it on the list */
581 PushEntryList(&Prcb->DeferredReadyListHead, &Thread->SwapListEntry);
582 }
583
584 FORCEINLINE
585 VOID
586 KiRescheduleThread(IN BOOLEAN NewThread,
587 IN ULONG Cpu)
588 {
589 /* Check if a new thread needs to be scheduled on a different CPU */
590 if ((NewThread) && !(KeGetPcr()->Number == Cpu))
591 {
592 /* Send an IPI to request delivery */
593 KiIpiSend(AFFINITY_MASK(Cpu), IPI_DPC);
594 }
595 }
596
597 //
598 // This routine sets the current thread in a swap busy state, which ensures that
599 // nobody else tries to swap it concurrently.
600 //
601 FORCEINLINE
602 VOID
603 KiSetThreadSwapBusy(IN PKTHREAD Thread)
604 {
605 /* Make sure nobody already set it */
606 ASSERT(Thread->SwapBusy == FALSE);
607
608 /* Set it ourselves */
609 Thread->SwapBusy = TRUE;
610 }
611
612 //
613 // This routine acquires the PRCB lock so that only one caller can touch
614 // volatile PRCB data.
615 //
616 // Since this is a simple optimized spin-lock, it must only be acquired
617 // at dispatcher level or higher!
618 //
619 FORCEINLINE
620 VOID
621 KiAcquirePrcbLock(IN PKPRCB Prcb)
622 {
623 /* Make sure we're at a safe level to touch the PRCB lock */
624 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
625
626 /* Start acquire loop */
627 for (;;)
628 {
629 /* Acquire the lock and break out if we acquired it first */
630 if (!InterlockedExchange((PLONG)&Prcb->PrcbLock, 1)) break;
631
632 /* Loop until the other CPU releases it */
633 do
634 {
635 /* Let the CPU know that this is a loop */
636 YieldProcessor();
637 } while (Prcb->PrcbLock);
638 }
639 }
640
641 //
642 // This routine releases the PRCB lock so that other callers can touch
643 // volatile PRCB data.
644 //
645 // Since this is a simple optimized spin-lock, it must only be acquired
646 // at dispatcher level or higher!
647 //
648 FORCEINLINE
649 VOID
650 KiReleasePrcbLock(IN PKPRCB Prcb)
651 {
652 /* Make sure it's acquired! */
653 ASSERT(Prcb->PrcbLock != 0);
654
655 /* Release it */
656 InterlockedAnd((PLONG)&Prcb->PrcbLock, 0);
657 }
658
659 //
660 // This routine acquires the thread lock so that only one caller can touch
661 // volatile thread data.
662 //
663 // Since this is a simple optimized spin-lock, it must only be acquired
664 // at dispatcher level or higher!
665 //
666 FORCEINLINE
667 VOID
668 KiAcquireThreadLock(IN PKTHREAD Thread)
669 {
670 /* Make sure we're at a safe level to touch the thread lock */
671 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
672
673 /* Start acquire loop */
674 for (;;)
675 {
676 /* Acquire the lock and break out if we acquired it first */
677 if (!InterlockedExchange((PLONG)&Thread->ThreadLock, 1)) break;
678
679 /* Loop until the other CPU releases it */
680 do
681 {
682 /* Let the CPU know that this is a loop */
683 YieldProcessor();
684 } while (Thread->ThreadLock);
685 }
686 }
687
688 //
689 // This routine releases the thread lock so that other callers can touch
690 // volatile thread data.
691 //
692 // Since this is a simple optimized spin-lock, it must only be acquired
693 // at dispatcher level or higher!
694 //
695 FORCEINLINE
696 VOID
697 KiReleaseThreadLock(IN PKTHREAD Thread)
698 {
699 /* Release it */
700 InterlockedAnd((PLONG)&Thread->ThreadLock, 0);
701 }
702
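//
// Non-blocking probe of the thread lock: returns FALSE when the lock is free,
// otherwise exchanges it and reports whether it was already held
//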
703 FORCEINLINE
704 BOOLEAN
705 KiTryThreadLock(IN PKTHREAD Thread)
706 {
707 LONG Value;
708
709 /* If the lock isn't acquired, return false */
710 if (!Thread->ThreadLock) return FALSE;
711
712 /* Otherwise, try to acquire it and check the result */
713 Value = 1;
714 Value = InterlockedExchange((PLONG)&Thread->ThreadLock, Value);
715
716 /* Return the lock state */
717 return (Value == TRUE);
718 }
719
720 FORCEINLINE
721 VOID
722 KiCheckDeferredReadyList(IN PKPRCB Prcb)
723 {
724 /* Scan the deferred ready lists if required */
725 if (Prcb->DeferredReadyListHead.Next) KiProcessDeferredReadyList(Prcb);
726 }
727
728 FORCEINLINE
729 VOID
730 KiRundownThread(IN PKTHREAD Thread)
731 {
732 #if defined(_M_IX86) || defined(_M_AMD64)
733 /* FIXME: TODO */
734 ASSERTMSG("Not yet implemented\n", FALSE);
735 #endif
736 }
737
738 FORCEINLINE
739 VOID
740 KiRequestApcInterrupt(IN BOOLEAN NeedApc,
741 IN UCHAR Processor)
742 {
743 /* Check if we need to request APC delivery */
744 if (NeedApc)
745 {
746 /* Check if it's on another CPU */
747 if (KeGetPcr()->Number != Processor)
748 {
749 /* Send an IPI to request delivery */
750 KiIpiSend(AFFINITY_MASK(Processor), IPI_APC);
751 }
752 else
753 {
754 /* Request a software interrupt */
755 HalRequestSoftwareInterrupt(APC_LEVEL);
756 }
757 }
758 }
759
760 FORCEINLINE
761 PKSPIN_LOCK_QUEUE
762 KiAcquireTimerLock(IN ULONG Hand)
763 {
764 PKSPIN_LOCK_QUEUE LockQueue;
765 ULONG LockIndex;
766 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
767
768 /* Get the lock index */
769 LockIndex = Hand >> LOCK_QUEUE_TIMER_LOCK_SHIFT;
770 LockIndex &= (LOCK_QUEUE_TIMER_TABLE_LOCKS - 1);
771
772 /* Now get the lock */
773 LockQueue = &KeGetCurrentPrcb()->LockQueue[LockQueueTimerTableLock + LockIndex];
774
775 /* Acquire it and return */
776 KeAcquireQueuedSpinLockAtDpcLevel(LockQueue);
777 return LockQueue;
778 }
779
780 FORCEINLINE
781 VOID
782 KiReleaseTimerLock(IN PKSPIN_LOCK_QUEUE LockQueue)
783 {
784 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
785
786 /* Release the lock */
787 KeReleaseQueuedSpinLockFromDpcLevel(LockQueue);
788 }
789
790 #endif
791
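//
// APC, process and device-queue lock helpers: thin wrappers around the
// in-stack queued spinlock API at the appropriate IRQL
//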
792 FORCEINLINE
793 VOID
794 KiAcquireApcLock(IN PKTHREAD Thread,
795 IN PKLOCK_QUEUE_HANDLE Handle)
796 {
797 /* Acquire the lock and raise to synchronization level */
798 KeAcquireInStackQueuedSpinLockRaiseToSynch(&Thread->ApcQueueLock, Handle);
799 }
800
801 FORCEINLINE
802 VOID
803 KiAcquireApcLockAtDpcLevel(IN PKTHREAD Thread,
804 IN PKLOCK_QUEUE_HANDLE Handle)
805 {
806 /* Acquire the lock */
807 KeAcquireInStackQueuedSpinLockAtDpcLevel(&Thread->ApcQueueLock, Handle);
808 }
809
810 FORCEINLINE
811 VOID
812 KiAcquireApcLockAtApcLevel(IN PKTHREAD Thread,
813 IN PKLOCK_QUEUE_HANDLE Handle)
814 {
815 /* Acquire the lock */
816 KeAcquireInStackQueuedSpinLock(&Thread->ApcQueueLock, Handle);
817 }
818
819 FORCEINLINE
820 VOID
821 KiReleaseApcLock(IN PKLOCK_QUEUE_HANDLE Handle)
822 {
823 /* Release the lock */
824 KeReleaseInStackQueuedSpinLock(Handle);
825 }
826
827 FORCEINLINE
828 VOID
829 KiReleaseApcLockFromDpcLevel(IN PKLOCK_QUEUE_HANDLE Handle)
830 {
831 /* Release the lock */
832 KeReleaseInStackQueuedSpinLockFromDpcLevel(Handle);
833 }
834
835 FORCEINLINE
836 VOID
837 KiAcquireProcessLock(IN PKPROCESS Process,
838 IN PKLOCK_QUEUE_HANDLE Handle)
839 {
840 /* Acquire the lock and raise to synchronization level */
841 KeAcquireInStackQueuedSpinLockRaiseToSynch(&Process->ProcessLock, Handle);
842 }
843
844 FORCEINLINE
845 VOID
846 KiReleaseProcessLock(IN PKLOCK_QUEUE_HANDLE Handle)
847 {
848 /* Release the lock */
849 KeReleaseInStackQueuedSpinLock(Handle);
850 }
851
852 FORCEINLINE
853 VOID
854 KiReleaseProcessLockFromDpcLevel(IN PKLOCK_QUEUE_HANDLE Handle)
855 {
856 /* Release the lock */
857 KeReleaseInStackQueuedSpinLockFromDpcLevel(Handle);
858 }
859
860 FORCEINLINE
861 VOID
862 KiAcquireDeviceQueueLock(IN PKDEVICE_QUEUE DeviceQueue,
863 IN PKLOCK_QUEUE_HANDLE DeviceLock)
864 {
865 /* Check if we were called from a threaded DPC */
866 if (KeGetCurrentPrcb()->DpcThreadActive)
867 {
868 /* Lock the Queue, we're not at DPC level */
869 KeAcquireInStackQueuedSpinLock(&DeviceQueue->Lock, DeviceLock);
870 }
871 else
872 {
873 /* We must be at DPC level, acquire the lock safely */
874 ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
875 KeAcquireInStackQueuedSpinLockAtDpcLevel(&DeviceQueue->Lock,
876 DeviceLock);
877 }
878 }
879
880 FORCEINLINE
881 VOID
882 KiReleaseDeviceQueueLock(IN PKLOCK_QUEUE_HANDLE DeviceLock)
883 {
884 /* Check if we were called from a threaded DPC */
885 if (KeGetCurrentPrcb()->DpcThreadActive)
886 {
887 /* Unlock the Queue, we're not at DPC level */
888 KeReleaseInStackQueuedSpinLock(DeviceLock);
889 }
890 else
891 {
892 /* We must be at DPC level, release the lock safely */
893 ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
894 KeReleaseInStackQueuedSpinLockFromDpcLevel(DeviceLock);
895 }
896 }
897
898 //
899 // Satisfies the wait of any dispatcher object
900 //
901 #define KiSatisfyObjectWait(Object, Thread) \
902 { \
903 /* Special case for Mutants */ \
904 if ((Object)->Header.Type == MutantObject) \
905 { \
906 /* Decrease the Signal State */ \
907 (Object)->Header.SignalState--; \
908 \
909 /* Check if it's now non-signaled */ \
910 if (!(Object)->Header.SignalState) \
911 { \
912 /* Set the Owner Thread */ \
913 (Object)->OwnerThread = Thread; \
914 \
915 /* Disable APCs if needed */ \
916 Thread->KernelApcDisable = Thread->KernelApcDisable - \
917 (Object)->ApcDisable; \
918 \
919 /* Check if it's abandoned */ \
920 if ((Object)->Abandoned) \
921 { \
922 /* Unabandon it */ \
923 (Object)->Abandoned = FALSE; \
924 \
925 /* Return Status */ \
926 Thread->WaitStatus = STATUS_ABANDONED; \
927 } \
928 \
929 /* Insert it into the Mutant List */ \
930 InsertHeadList(Thread->MutantListHead.Blink, \
931 &(Object)->MutantListEntry); \
932 } \
933 } \
934 else if (((Object)->Header.Type & TIMER_OR_EVENT_TYPE) == \
935 EventSynchronizationObject) \
936 { \
937 /* Synchronization Timers and Events just get un-signaled */ \
938 (Object)->Header.SignalState = 0; \
939 } \
940 else if ((Object)->Header.Type == SemaphoreObject) \
941 { \
942 /* These ones can have multiple states, so we only decrease it */ \
943 (Object)->Header.SignalState--; \
944 } \
945 }
946
947 //
948 // Satisfies the wait of a mutant dispatcher object
949 //
950 #define KiSatisfyMutantWait(Object, Thread) \
951 { \
952 /* Decrease the Signal State */ \
953 (Object)->Header.SignalState--; \
954 \
955 /* Check if it's now non-signaled */ \
956 if (!(Object)->Header.SignalState) \
957 { \
958 /* Set the Owner Thread */ \
959 (Object)->OwnerThread = Thread; \
960 \
961 /* Disable APCs if needed */ \
962 Thread->KernelApcDisable = Thread->KernelApcDisable - \
963 (Object)->ApcDisable; \
964 \
965 /* Check if it's abandoned */ \
966 if ((Object)->Abandoned) \
967 { \
968 /* Unabandon it */ \
969 (Object)->Abandoned = FALSE; \
970 \
971 /* Return Status */ \
972 Thread->WaitStatus = STATUS_ABANDONED; \
973 } \
974 \
975 /* Insert it into the Mutant List */ \
976 InsertHeadList(Thread->MutantListHead.Blink, \
977 &(Object)->MutantListEntry); \
978 } \
979 }
980
981 //
982 // Satisfies the wait of any nonmutant dispatcher object
983 //
984 #define KiSatisfyNonMutantWait(Object) \
985 { \
986 if (((Object)->Header.Type & TIMER_OR_EVENT_TYPE) == \
987 EventSynchronizationObject) \
988 { \
989 /* Synchronization Timers and Events just get un-signaled */ \
990 (Object)->Header.SignalState = 0; \
991 } \
992 else if ((Object)->Header.Type == SemaphoreObject) \
993 { \
994 /* These ones can have multiple states, so we only decrease it */ \
995 (Object)->Header.SignalState--; \
996 } \
997 }
998
999 //
1000 // Recalculates the due time
1001 //
1002 FORCEINLINE
1003 PLARGE_INTEGER
1004 KiRecalculateDueTime(IN PLARGE_INTEGER OriginalDueTime,
1005 IN PLARGE_INTEGER DueTime,
1006 IN OUT PLARGE_INTEGER NewDueTime)
1007 {
1008 /* Don't do anything for absolute waits */
1009 if (OriginalDueTime->QuadPart >= 0) return OriginalDueTime;
1010
1011 /* Otherwise, query the interrupt time and recalculate */
1012 NewDueTime->QuadPart = KeQueryInterruptTime();
1013 NewDueTime->QuadPart -= DueTime->QuadPart;
1014 return NewDueTime;
1015 }
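//
// Worked example (illustrative values): for a relative wait, DueTime holds the
// absolute interrupt-time deadline computed when the wait started. If the
// current interrupt time is 5,000,000 ticks short of that deadline, the result
// is -5,000,000, i.e. the remaining wait re-expressed as a (negative) relative
// interval, which is what the caller passes in when it restarts the wait.
//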
1016
1017 //
1018 // Determines whether a thread's kernel stack may be swapped while it waits (and thus whether it belongs on the wait list)
1019 //
1020 FORCEINLINE
1021 BOOLEAN
1022 KiCheckThreadStackSwap(IN PKTHREAD Thread,
1023 IN KPROCESSOR_MODE WaitMode)
1024 {
1025 /* Check the required conditions */
1026 if ((WaitMode != KernelMode) &&
1027 (Thread->EnableStackSwap) &&
1028 (Thread->Priority >= (LOW_REALTIME_PRIORITY + 9)))
1029 {
1030 /* We are go for swap */
1031 return TRUE;
1032 }
1033 else
1034 {
1035 /* Don't swap the thread */
1036 return FALSE;
1037 }
1038 }
1039
1040 //
1041 // Adds a thread to the wait list
1042 //
1043 #define KiAddThreadToWaitList(Thread, Swappable) \
1044 { \
1045 /* Make sure it's swappable */ \
1046 if (Swappable) \
1047 { \
1048 /* Insert it into the PRCB's List */ \
1049 InsertTailList(&KeGetCurrentPrcb()->WaitListHead, \
1050 &Thread->WaitListEntry); \
1051 } \
1052 }
1053
1054 //
1055 // Checks if a wait in progress should be interrupted by APCs or an alertable
1056 // state.
1057 //
1058 FORCEINLINE
1059 NTSTATUS
1060 KiCheckAlertability(IN PKTHREAD Thread,
1061 IN BOOLEAN Alertable,
1062 IN KPROCESSOR_MODE WaitMode)
1063 {
1064 /* Check if the wait is alertable */
1065 if (Alertable)
1066 {
1067 /* It is, first check if the thread is alerted in this mode */
1068 if (Thread->Alerted[WaitMode])
1069 {
1070 /* It is, so bail out of the wait */
1071 Thread->Alerted[WaitMode] = FALSE;
1072 return STATUS_ALERTED;
1073 }
1074 else if ((WaitMode != KernelMode) &&
1075 (!IsListEmpty(&Thread->ApcState.ApcListHead[UserMode])))
1076 {
1077 /* It isn't, but this is a user wait with queued user APCs */
1078 Thread->ApcState.UserApcPending = TRUE;
1079 return STATUS_USER_APC;
1080 }
1081 else if (Thread->Alerted[KernelMode])
1082 {
1083 /* It isn't that either, but we're alerted in kernel mode */
1084 Thread->Alerted[KernelMode] = FALSE;
1085 return STATUS_ALERTED;
1086 }
1087 }
1088 else if ((WaitMode != KernelMode) && (Thread->ApcState.UserApcPending))
1089 {
1090 /* Not alertable, but this is a user wait with pending user APCs */
1091 return STATUS_USER_APC;
1092 }
1093
1094 /* Otherwise, we're fine */
1095 return STATUS_WAIT_0;
1096 }
1097
1098 //
1099 // Called from KiCompleteTimer, KiInsertTreeTimer, KeSetSystemTime
1100 // to remove timer entries
1101 // See Windows HPI blog for more information.
1102 FORCEINLINE
1103 VOID
1104 KiRemoveEntryTimer(IN PKTIMER Timer)
1105 {
1106 ULONG Hand;
1107 PKTIMER_TABLE_ENTRY TableEntry;
1108
1109 /* Remove the timer from the timer list and check if it's empty */
1110 Hand = Timer->Header.Hand;
1111 if (RemoveEntryList(&Timer->TimerListEntry))
1112 {
1113 /* Get the respective timer table entry */
1114 TableEntry = &KiTimerTableListHead[Hand];
1115 if (&TableEntry->Entry == TableEntry->Entry.Flink)
1116 {
1117 /* Set the entry to an infinite absolute time */
1118 TableEntry->Time.HighPart = 0xFFFFFFFF;
1119 }
1120 }
1121
1122 /* Clear the list entries on dbg builds so we can tell the timer is gone */
1123 #if DBG
1124 Timer->TimerListEntry.Flink = NULL;
1125 Timer->TimerListEntry.Blink = NULL;
1126 #endif
1127 }
1128
1129 //
1130 // Called by Wait and Queue code to insert a timer for dispatching.
1131 // Also called by KeSetTimerEx to insert a timer from the caller.
1132 //
1133 FORCEINLINE
1134 VOID
1135 KxInsertTimer(IN PKTIMER Timer,
1136 IN ULONG Hand)
1137 {
1138 PKSPIN_LOCK_QUEUE LockQueue;
1139
1140 /* Acquire the lock and release the dispatcher lock */
1141 LockQueue = KiAcquireTimerLock(Hand);
1142 KiReleaseDispatcherLockFromDpcLevel();
1143
1144 /* Try to insert the timer */
1145 if (KiInsertTimerTable(Timer, Hand))
1146 {
1147 /* Complete it */
1148 KiCompleteTimer(Timer, LockQueue);
1149 }
1150 else
1151 {
1152 /* Do nothing, just release the lock */
1153 KiReleaseTimerLock(LockQueue);
1154 }
1155 }
1156
1157 //
1158 // Called by KeSetTimerEx and KiInsertTreeTimer to calculate Due Time
1159 // See the Windows HPI Blog for more information
1160 //
1161 FORCEINLINE
1162 BOOLEAN
1163 KiComputeDueTime(IN PKTIMER Timer,
1164 IN LARGE_INTEGER DueTime,
1165 OUT PULONG Hand)
1166 {
1167 LARGE_INTEGER InterruptTime, SystemTime, DifferenceTime;
1168
1169 /* Convert to relative time if needed */
1170 Timer->Header.Absolute = FALSE;
1171 if (DueTime.HighPart >= 0)
1172 {
1173 /* Get System Time */
1174 KeQuerySystemTime(&SystemTime);
1175
1176 /* Do the conversion */
1177 DifferenceTime.QuadPart = SystemTime.QuadPart - DueTime.QuadPart;
1178
1179 /* Make sure it hasn't already expired */
1180 Timer->Header.Absolute = TRUE;
1181 if (DifferenceTime.HighPart >= 0)
1182 {
1183 /* Cancel everything */
1184 Timer->Header.SignalState = TRUE;
1185 Timer->Header.Hand = 0;
1186 Timer->DueTime.QuadPart = 0;
1187 *Hand = 0;
1188 return FALSE;
1189 }
1190
1191 /* Use the relative difference as the due time */
1192 DueTime = DifferenceTime;
1193 }
1194
1195 /* Get the Interrupt Time */
1196 InterruptTime.QuadPart = KeQueryInterruptTime();
1197
1198 /* Recalculate due time */
1199 Timer->DueTime.QuadPart = InterruptTime.QuadPart - DueTime.QuadPart;
1200
1201 /* Get the handle */
1202 *Hand = KiComputeTimerTableIndex(Timer->DueTime.QuadPart);
1203 Timer->Header.Hand = (UCHAR)*Hand;
1204 Timer->Header.Inserted = TRUE;
1205 return TRUE;
1206 }
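//
// Note on the sign convention above: a non-negative DueTime is an absolute
// system time and is first converted into a (negative) relative interval; a
// negative DueTime is already relative. Subtracting that negative interval
// from the current interrupt time then yields the absolute interrupt-time
// deadline stored in Timer->DueTime, from which the timer table hand is
// computed.
//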
1207
1208 //
1209 // Called from Unlink and Queue Insert Code.
1210 // Also called by timer code when canceling an inserted timer.
1211 // Removes a timer from its tree.
1212 //
1213 FORCEINLINE
1214 VOID
1215 KxRemoveTreeTimer(IN PKTIMER Timer)
1216 {
1217 ULONG Hand = Timer->Header.Hand;
1218 PKSPIN_LOCK_QUEUE LockQueue;
1219 PKTIMER_TABLE_ENTRY TimerEntry;
1220
1221 /* Acquire timer lock */
1222 LockQueue = KiAcquireTimerLock(Hand);
1223
1224 /* Set the timer as non-inserted */
1225 Timer->Header.Inserted = FALSE;
1226
1227 /* Remove it from the timer list */
1228 if (RemoveEntryList(&Timer->TimerListEntry))
1229 {
1230 /* Get the entry and check if it's empty */
1231 TimerEntry = &KiTimerTableListHead[Hand];
1232 if (IsListEmpty(&TimerEntry->Entry))
1233 {
1234 /* Clear the time then */
1235 TimerEntry->Time.HighPart = 0xFFFFFFFF;
1236 }
1237 }
1238
1239 /* Release the timer lock */
1240 KiReleaseTimerLock(LockQueue);
1241 }
1242
1243 FORCEINLINE
1244 VOID
1245 KxSetTimerForThreadWait(IN PKTIMER Timer,
1246 IN LARGE_INTEGER Interval,
1247 OUT PULONG Hand)
1248 {
1249 ULONGLONG DueTime;
1250 LARGE_INTEGER InterruptTime, SystemTime, TimeDifference;
1251
1252 /* Check the timer's interval to see if it's absolute */
1253 Timer->Header.Absolute = FALSE;
1254 if (Interval.HighPart >= 0)
1255 {
1256 /* Get the system time and calculate the relative time */
1257 KeQuerySystemTime(&SystemTime);
1258 TimeDifference.QuadPart = SystemTime.QuadPart - Interval.QuadPart;
1259 Timer->Header.Absolute = TRUE;
1260
1261 /* Check if we've already expired */
1262 if (TimeDifference.HighPart >= 0)
1263 {
1264 /* Reset everything */
1265 Timer->DueTime.QuadPart = 0;
1266 *Hand = 0;
1267 Timer->Header.Hand = 0;
1268 return;
1269 }
1270 else
1271 {
1272 /* Update the interval */
1273 Interval = TimeDifference;
1274 }
1275 }
1276
1277 /* Calculate the due time */
1278 InterruptTime.QuadPart = KeQueryInterruptTime();
1279 DueTime = InterruptTime.QuadPart - Interval.QuadPart;
1280 Timer->DueTime.QuadPart = DueTime;
1281
1282 /* Calculate the timer handle */
1283 *Hand = KiComputeTimerTableIndex(DueTime);
1284 Timer->Header.Hand = (UCHAR)*Hand;
1285 }
1286
1287 #define KxDelayThreadWait() \
1288 \
1289 /* Setup the Wait Block */ \
1290 Thread->WaitBlockList = TimerBlock; \
1291 \
1292 /* Setup the timer */ \
1293 KxSetTimerForThreadWait(Timer, *Interval, &Hand); \
1294 \
1295 /* Save the due time for the caller */ \
1296 DueTime.QuadPart = Timer->DueTime.QuadPart; \
1297 \
1298 /* Link the timer to this Wait Block */ \
1299 TimerBlock->NextWaitBlock = TimerBlock; \
1300 Timer->Header.WaitListHead.Flink = &TimerBlock->WaitListEntry; \
1301 Timer->Header.WaitListHead.Blink = &TimerBlock->WaitListEntry; \
1302 \
1303 /* Clear wait status */ \
1304 Thread->WaitStatus = STATUS_SUCCESS; \
1305 \
1306 /* Setup wait fields */ \
1307 Thread->Alertable = Alertable; \
1308 Thread->WaitReason = DelayExecution; \
1309 Thread->WaitMode = WaitMode; \
1310 \
1311 /* Check if we can swap the thread's stack */ \
1312 Thread->WaitListEntry.Flink = NULL; \
1313 Swappable = KiCheckThreadStackSwap(Thread, WaitMode); \
1314 \
1315 /* Set the wait time */ \
1316 Thread->WaitTime = KeTickCount.LowPart;
1317
1318 #define KxMultiThreadWait() \
1319 /* Link wait block array to the thread */ \
1320 Thread->WaitBlockList = WaitBlockArray; \
1321 \
1322 /* Reset the index */ \
1323 Index = 0; \
1324 \
1325 /* Loop wait blocks */ \
1326 do \
1327 { \
1328 /* Fill out the wait block */ \
1329 WaitBlock = &WaitBlockArray[Index]; \
1330 WaitBlock->Object = Object[Index]; \
1331 WaitBlock->WaitKey = (USHORT)Index; \
1332 WaitBlock->WaitType = WaitType; \
1333 WaitBlock->Thread = Thread; \
1334 \
1335 /* Link to next block */ \
1336 WaitBlock->NextWaitBlock = &WaitBlockArray[Index + 1]; \
1337 Index++; \
1338 } while (Index < Count); \
1339 \
1340 /* Link the last block */ \
1341 WaitBlock->NextWaitBlock = WaitBlockArray; \
1342 \
1343 /* Set default wait status */ \
1344 Thread->WaitStatus = STATUS_WAIT_0; \
1345 \
1346 /* Check if we have a timer */ \
1347 if (Timeout) \
1348 { \
1349 /* Link to the block */ \
1350 TimerBlock->NextWaitBlock = WaitBlockArray; \
1351 \
1352 /* Setup the timer */ \
1353 KxSetTimerForThreadWait(Timer, *Timeout, &Hand); \
1354 \
1355 /* Save the due time for the caller */ \
1356 DueTime.QuadPart = Timer->DueTime.QuadPart; \
1357 \
1358 /* Initialize the list */ \
1359 InitializeListHead(&Timer->Header.WaitListHead); \
1360 } \
1361 \
1362 /* Set wait settings */ \
1363 Thread->Alertable = Alertable; \
1364 Thread->WaitMode = WaitMode; \
1365 Thread->WaitReason = WaitReason; \
1366 \
1367 /* Check if we can swap the thread's stack */ \
1368 Thread->WaitListEntry.Flink = NULL; \
1369 Swappable = KiCheckThreadStackSwap(Thread, WaitMode); \
1370 \
1371 /* Set the wait time */ \
1372 Thread->WaitTime = KeTickCount.LowPart;
1373
1374 #define KxSingleThreadWait() \
1375 /* Setup the Wait Block */ \
1376 Thread->WaitBlockList = WaitBlock; \
1377 WaitBlock->WaitKey = STATUS_SUCCESS; \
1378 WaitBlock->Object = Object; \
1379 WaitBlock->WaitType = WaitAny; \
1380 \
1381 /* Clear wait status */ \
1382 Thread->WaitStatus = STATUS_SUCCESS; \
1383 \
1384 /* Check if we have a timer */ \
1385 if (Timeout) \
1386 { \
1387 /* Setup the timer */ \
1388 KxSetTimerForThreadWait(Timer, *Timeout, &Hand); \
1389 \
1390 /* Save the due time for the caller */ \
1391 DueTime.QuadPart = Timer->DueTime.QuadPart; \
1392 \
1393 /* Pointer to timer block */ \
1394 WaitBlock->NextWaitBlock = TimerBlock; \
1395 TimerBlock->NextWaitBlock = WaitBlock; \
1396 \
1397 /* Link the timer to this Wait Block */ \
1398 Timer->Header.WaitListHead.Flink = &TimerBlock->WaitListEntry; \
1399 Timer->Header.WaitListHead.Blink = &TimerBlock->WaitListEntry; \
1400 } \
1401 else \
1402 { \
1403 /* No timer block, just ourselves */ \
1404 WaitBlock->NextWaitBlock = WaitBlock; \
1405 } \
1406 \
1407 /* Set wait settings */ \
1408 Thread->Alertable = Alertable; \
1409 Thread->WaitMode = WaitMode; \
1410 Thread->WaitReason = WaitReason; \
1411 \
1412 /* Check if we can swap the thread's stack */ \
1413 Thread->WaitListEntry.Flink = NULL; \
1414 Swappable = KiCheckThreadStackSwap(Thread, WaitMode); \
1415 \
1416 /* Set the wait time */ \
1417 Thread->WaitTime = KeTickCount.LowPart;
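//
// The block above leaves Thread->WaitBlockList pointing at a small circular
// chain: WaitBlock -> TimerBlock -> WaitBlock when a timeout is supplied, or
// WaitBlock -> WaitBlock when there is none, with the timer's wait list
// containing only the timer wait block.
//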
1418
1419 #define KxQueueThreadWait() \
1420 /* Setup the Wait Block */ \
1421 Thread->WaitBlockList = WaitBlock; \
1422 WaitBlock->WaitKey = STATUS_SUCCESS; \
1423 WaitBlock->Object = Queue; \
1424 WaitBlock->WaitType = WaitAny; \
1425 WaitBlock->Thread = Thread; \
1426 \
1427 /* Clear wait status */ \
1428 Thread->WaitStatus = STATUS_SUCCESS; \
1429 \
1430 /* Check if we have a timer */ \
1431 if (Timeout) \
1432 { \
1433 /* Setup the timer */ \
1434 KxSetTimerForThreadWait(Timer, *Timeout, &Hand); \
1435 \
1436 /* Save the due time for the caller */ \
1437 DueTime.QuadPart = Timer->DueTime.QuadPart; \
1438 \
1439 /* Pointer to timer block */ \
1440 WaitBlock->NextWaitBlock = TimerBlock; \
1441 TimerBlock->NextWaitBlock = WaitBlock; \
1442 \
1443 /* Link the timer to this Wait Block */ \
1444 Timer->Header.WaitListHead.Flink = &TimerBlock->WaitListEntry; \
1445 Timer->Header.WaitListHead.Blink = &TimerBlock->WaitListEntry; \
1446 } \
1447 else \
1448 { \
1449 /* No timer block, just ourselves */ \
1450 WaitBlock->NextWaitBlock = WaitBlock; \
1451 } \
1452 \
1453 /* Set wait settings */ \
1454 Thread->Alertable = FALSE; \
1455 Thread->WaitMode = WaitMode; \
1456 Thread->WaitReason = WrQueue; \
1457 \
1458 /* Check if we can swap the thread's stack */ \
1459 Thread->WaitListEntry.Flink = NULL; \
1460 Swappable = KiCheckThreadStackSwap(Thread, WaitMode); \
1461 \
1462 /* Set the wait time */ \
1463 Thread->WaitTime = KeTickCount.LowPart;
1464
1465 //
1466 // Unwaits a Thread
1467 //
1468 FORCEINLINE
1469 VOID
1470 KxUnwaitThread(IN DISPATCHER_HEADER *Object,
1471 IN KPRIORITY Increment)
1472 {
1473 PLIST_ENTRY WaitEntry, WaitList;
1474 PKWAIT_BLOCK WaitBlock;
1475 PKTHREAD WaitThread;
1476 ULONG WaitKey;
1477
1478 /* Loop the Wait Entries */
1479 WaitList = &Object->WaitListHead;
1480 ASSERT(IsListEmpty(&Object->WaitListHead) == FALSE);
1481 WaitEntry = WaitList->Flink;
1482 do
1483 {
1484 /* Get the current wait block */
1485 WaitBlock = CONTAINING_RECORD(WaitEntry, KWAIT_BLOCK, WaitListEntry);
1486
1487 /* Get the waiting thread */
1488 WaitThread = WaitBlock->Thread;
1489
1490 /* Check the current Wait Mode */
1491 if (WaitBlock->WaitType == WaitAny)
1492 {
1493 /* Use the actual wait key */
1494 WaitKey = WaitBlock->WaitKey;
1495 }
1496 else
1497 {
1498 /* Otherwise, use STATUS_KERNEL_APC */
1499 WaitKey = STATUS_KERNEL_APC;
1500 }
1501
1502 /* Unwait the thread */
1503 KiUnwaitThread(WaitThread, WaitKey, Increment);
1504
1505 /* Next entry */
1506 WaitEntry = WaitList->Flink;
1507 } while (WaitEntry != WaitList);
1508 }
1509
1510 //
1511 // Unwaits a Thread waiting on an event
1512 //
1513 FORCEINLINE
1514 VOID
1515 KxUnwaitThreadForEvent(IN PKEVENT Event,
1516 IN KPRIORITY Increment)
1517 {
1518 PLIST_ENTRY WaitEntry, WaitList;
1519 PKWAIT_BLOCK WaitBlock;
1520 PKTHREAD WaitThread;
1521
1522 /* Loop the Wait Entries */
1523 WaitList = &Event->Header.WaitListHead;
1524 ASSERT(IsListEmpty(&Event->Header.WaitListHead) == FALSE);
1525 WaitEntry = WaitList->Flink;
1526 do
1527 {
1528 /* Get the current wait block */
1529 WaitBlock = CONTAINING_RECORD(WaitEntry, KWAIT_BLOCK, WaitListEntry);
1530
1531 /* Get the waiting thread */
1532 WaitThread = WaitBlock->Thread;
1533
1534 /* Check the current Wait Mode */
1535 if (WaitBlock->WaitType == WaitAny)
1536 {
1537 /* Un-signal it */
1538 Event->Header.SignalState = 0;
1539
1540 /* Un-signal the event and unwait the thread */
1541 KiUnwaitThread(WaitThread, WaitBlock->WaitKey, Increment);
1542 break;
1543 }
1544
1545 /* Unwait the thread with STATUS_KERNEL_APC */
1546 KiUnwaitThread(WaitThread, STATUS_KERNEL_APC, Increment);
1547
1548 /* Next entry */
1549 WaitEntry = WaitList->Flink;
1550 } while (WaitEntry != WaitList);
1551 }
1552
1553 //
1554 // This routine queues a ready thread on the PRCB's dispatcher ready lists.
1555 // If this thread cannot currently run on this CPU, then the thread is
1556 // added to the deferred ready list instead.
1557 //
1558 // This routine must be entered with the PRCB lock held and it will exit
1559 // with the PRCB lock released!
1560 //
1561 FORCEINLINE
1562 VOID
1563 KxQueueReadyThread(IN PKTHREAD Thread,
1564 IN PKPRCB Prcb)
1565 {
1566 BOOLEAN Preempted;
1567 KPRIORITY Priority;
1568
1569 /* Sanity checks */
1570 ASSERT(Prcb == KeGetCurrentPrcb());
1571 ASSERT(Thread->State == Running);
1572 ASSERT(Thread->NextProcessor == Prcb->Number);
1573
1574 /* Check if this thread is allowed to run in this CPU */
1575 #ifdef CONFIG_SMP
1576 if ((Thread->Affinity) & (Prcb->SetMember))
1577 #else
1578 if (TRUE)
1579 #endif
1580 {
1581 /* Set thread ready for execution */
1582 Thread->State = Ready;
1583
1584 /* Save current priority and if someone had pre-empted it */
1585 Priority = Thread->Priority;
1586 Preempted = Thread->Preempted;
1587
1588 /* We're not pre-empting now, and set the wait time */
1589 Thread->Preempted = FALSE;
1590 Thread->WaitTime = KeTickCount.LowPart;
1591
1592 /* Sanity check */
1593 ASSERT((Priority >= 0) && (Priority <= HIGH_PRIORITY));
1594
1595 /* Insert this thread in the appropriate order */
1596 Preempted ? InsertHeadList(&Prcb->DispatcherReadyListHead[Priority],
1597 &Thread->WaitListEntry) :
1598 InsertTailList(&Prcb->DispatcherReadyListHead[Priority],
1599 &Thread->WaitListEntry);
1600
1601 /* Update the ready summary */
1602 Prcb->ReadySummary |= PRIORITY_MASK(Priority);
1603
1604 /* Sanity check */
1605 ASSERT(Priority == Thread->Priority);
1606
1607 /* Release the PRCB lock */
1608 KiReleasePrcbLock(Prcb);
1609 }
1610 else
1611 {
1612 /* Otherwise, prepare this thread to be deferred */
1613 Thread->State = DeferredReady;
1614 Thread->DeferredProcessor = Prcb->Number;
1615
1616 /* Release the lock and defer scheduling */
1617 KiReleasePrcbLock(Prcb);
1618 KiDeferredReadyThread(Thread);
1619 }
1620 }
1621
1622 //
1623 // This routine scans for an appropriate ready thread to select at the
1624 // given priority and for the given CPU.
1625 //
1626 FORCEINLINE
1627 PKTHREAD
1628 KiSelectReadyThread(IN KPRIORITY Priority,
1629 IN PKPRCB Prcb)
1630 {
1631 ULONG PrioritySet;
1632 LONG HighPriority;
1633 PLIST_ENTRY ListEntry;
1634 PKTHREAD Thread = NULL;
1635
1636 /* Save the current mask and get the priority set for the CPU */
1637 PrioritySet = Prcb->ReadySummary >> Priority;
1638 if (!PrioritySet) goto Quickie;
1639
1640 /* Get the highest priority possible */
1641 BitScanReverse((PULONG)&HighPriority, PrioritySet);
1642 ASSERT((PrioritySet & PRIORITY_MASK(HighPriority)) != 0);
1643 HighPriority += Priority;
1644
1645 /* Make sure the list isn't empty at the highest priority */
1646 ASSERT(IsListEmpty(&Prcb->DispatcherReadyListHead[HighPriority]) == FALSE);
1647
1648 /* Get the first thread on the list */
1649 ListEntry = Prcb->DispatcherReadyListHead[HighPriority].Flink;
1650 Thread = CONTAINING_RECORD(ListEntry, KTHREAD, WaitListEntry);
1651
1652 /* Make sure this thread is here for a reason */
1653 ASSERT(HighPriority == Thread->Priority);
1654 ASSERT(Thread->Affinity & AFFINITY_MASK(Prcb->Number));
1655 ASSERT(Thread->NextProcessor == Prcb->Number);
1656
1657 /* Remove it from the list */
1658 if (RemoveEntryList(&Thread->WaitListEntry))
1659 {
1660 /* The list is empty now, reset the ready summary */
1661 Prcb->ReadySummary ^= PRIORITY_MASK(HighPriority);
1662 }
1663
1664 /* Sanity check and return the thread */
1665 Quickie:
1666 ASSERT((Thread == NULL) ||
1667 (Thread->BasePriority == 0) ||
1668 (Thread->Priority != 0));
1669 return Thread;
1670 }
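//
// Worked example (illustrative values): if Prcb->ReadySummary has bits 8 and
// 12 set (0x1100) and the caller passes Priority 8, then PrioritySet is
// 0x1100 >> 8 = 0x11, BitScanReverse yields 4, and HighPriority becomes
// 4 + 8 = 12, so the first thread on the priority-12 ready list is selected.
//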
1671
1672 //
1673 // This routine computes the new priority for a thread. It is only valid for
1674 // threads with priorities in the dynamic priority range.
1675 //
1676 FORCEINLINE
1677 SCHAR
1678 KiComputeNewPriority(IN PKTHREAD Thread,
1679 IN SCHAR Adjustment)
1680 {
1681 SCHAR Priority;
1682
1683 /* Priority sanity checks */
1684 ASSERT((Thread->PriorityDecrement >= 0) &&
1685 (Thread->PriorityDecrement <= Thread->Priority));
1686 ASSERT((Thread->Priority < LOW_REALTIME_PRIORITY) ?
1687 TRUE : (Thread->PriorityDecrement == 0));
1688
1689 /* Get the current priority */
1690 Priority = Thread->Priority;
1691 if (Priority < LOW_REALTIME_PRIORITY)
1692 {
1693 /* Decrease priority by the priority decrement plus the adjustment */
1694 Priority -= (Thread->PriorityDecrement + Adjustment);
1695
1696 /* Don't go out of bounds */
1697 if (Priority < Thread->BasePriority) Priority = Thread->BasePriority;
1698
1699 /* Reset the priority decrement */
1700 Thread->PriorityDecrement = 0;
1701 }
1702
1703 /* Sanity check */
1704 ASSERT((Thread->BasePriority == 0) || (Priority != 0));
1705
1706 /* Return the new priority */
1707 return Priority;
1708 }
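//
// Worked example (illustrative values): a dynamic-priority thread with
// Priority 11, BasePriority 8 and PriorityDecrement 2, given an Adjustment of
// 1, ends up at 11 - (2 + 1) = 8, which is not below its base priority, and
// its PriorityDecrement is reset to 0.
//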
1709
1710 //
1711 // Guarded Mutex Routines
1712 //
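//
// As used below, the Count field is a packed value: GM_LOCK_BIT (tested and
// cleared through bit position GM_LOCK_BIT_V) is set while the mutex is free,
// GM_LOCK_WAITER_WOKEN marks that a waiter has been signalled, and waiters
// are counted in units of GM_LOCK_WAITER_INC.
//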
1713 FORCEINLINE
1714 VOID
1715 _KeInitializeGuardedMutex(OUT PKGUARDED_MUTEX GuardedMutex)
1716 {
1717 /* Setup the Initial Data */
1718 GuardedMutex->Count = GM_LOCK_BIT;
1719 GuardedMutex->Owner = NULL;
1720 GuardedMutex->Contention = 0;
1721
1722 /* Initialize the Wait Gate */
1723 KeInitializeGate(&GuardedMutex->Gate);
1724 }
1725
1726 FORCEINLINE
1727 VOID
1728 _KeAcquireGuardedMutexUnsafe(IN OUT PKGUARDED_MUTEX GuardedMutex)
1729 {
1730 PKTHREAD Thread = KeGetCurrentThread();
1731
1732 /* Sanity checks */
1733 ASSERT((KeGetCurrentIrql() == APC_LEVEL) ||
1734 (Thread->SpecialApcDisable < 0) ||
1735 (Thread->Teb == NULL) ||
1736 (Thread->Teb >= (PTEB)MM_SYSTEM_RANGE_START));
1737 ASSERT(GuardedMutex->Owner != Thread);
1738
1739 /* Remove the lock */
1740 if (!InterlockedBitTestAndReset(&GuardedMutex->Count, GM_LOCK_BIT_V))
1741 {
1742 /* The Guarded Mutex was already locked, enter contended case */
1743 KiAcquireGuardedMutex(GuardedMutex);
1744 }
1745
1746 /* Set the Owner */
1747 GuardedMutex->Owner = Thread;
1748 }
1749
1750 FORCEINLINE
1751 VOID
1752 _KeReleaseGuardedMutexUnsafe(IN OUT PKGUARDED_MUTEX GuardedMutex)
1753 {
1754 LONG OldValue, NewValue;
1755
1756 /* Sanity checks */
1757 ASSERT((KeGetCurrentIrql() == APC_LEVEL) ||
1758 (KeGetCurrentThread()->SpecialApcDisable < 0) ||
1759 (KeGetCurrentThread()->Teb == NULL) ||
1760 (KeGetCurrentThread()->Teb >= (PTEB)MM_SYSTEM_RANGE_START));
1761 ASSERT(GuardedMutex->Owner == KeGetCurrentThread());
1762
1763 /* Destroy the Owner */
1764 GuardedMutex->Owner = NULL;
1765
1766 /* Add the Lock Bit */
1767 OldValue = InterlockedExchangeAdd(&GuardedMutex->Count, GM_LOCK_BIT);
1768 ASSERT((OldValue & GM_LOCK_BIT) == 0);
1769
1770 /* Check if it was already locked, but not woken */
1771 if ((OldValue) && !(OldValue & GM_LOCK_WAITER_WOKEN))
1772 {
1773 /* Update the OldValue to what it should be now */
1774 OldValue += GM_LOCK_BIT;
1775
1776 /* Mark one waiter as woken and remove it from the waiter count */
1777 NewValue = OldValue + GM_LOCK_WAITER_WOKEN -
1778 GM_LOCK_WAITER_INC;
1779
1780 /* Remove the Woken bit */
1781 if (InterlockedCompareExchange(&GuardedMutex->Count,
1782 NewValue,
1783 OldValue) == OldValue)
1784 {
1785 /* Signal the Gate */
1786 KeSignalGateBoostPriority(&GuardedMutex->Gate);
1787 }
1788 }
1789 }
1790
1791 FORCEINLINE
1792 VOID
1793 _KeAcquireGuardedMutex(IN PKGUARDED_MUTEX GuardedMutex)
1794 {
1795 PKTHREAD Thread = KeGetCurrentThread();
1796
1797 /* Sanity checks */
1798 ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
1799 ASSERT(GuardedMutex->Owner != Thread);
1800
1801 /* Disable Special APCs */
1802 KeEnterGuardedRegion();
1803
1804 /* Remove the lock */
1805 if (!InterlockedBitTestAndReset(&GuardedMutex->Count, GM_LOCK_BIT_V))
1806 {
1807 /* The Guarded Mutex was already locked, enter contended case */
1808 KiAcquireGuardedMutex(GuardedMutex);
1809 }
1810
1811 /* Set the Owner and Special APC Disable state */
1812 GuardedMutex->Owner = Thread;
1813 GuardedMutex->SpecialApcDisable = Thread->SpecialApcDisable;
1814 }
1815
1816 FORCEINLINE
1817 VOID
1818 _KeReleaseGuardedMutex(IN OUT PKGUARDED_MUTEX GuardedMutex)
1819 {
1820 LONG OldValue, NewValue;
1821
1822 /* Sanity checks */
1823 ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
1824 ASSERT(GuardedMutex->Owner == KeGetCurrentThread());
1825 ASSERT(KeGetCurrentThread()->SpecialApcDisable ==
1826 GuardedMutex->SpecialApcDisable);
1827
1828 /* Destroy the Owner */
1829 GuardedMutex->Owner = NULL;
1830
1831 /* Add the Lock Bit */
1832 OldValue = InterlockedExchangeAdd(&GuardedMutex->Count, GM_LOCK_BIT);
1833 ASSERT((OldValue & GM_LOCK_BIT) == 0);
1834
1835 /* Check if it was already locked, but not woken */
1836 if ((OldValue) && !(OldValue & GM_LOCK_WAITER_WOKEN))
1837 {
1838 /* Update the OldValue to what it should be now */
1839 OldValue += GM_LOCK_BIT;
1840
1841 /* Mark one waiter as woken and remove it from the waiter count */
1842 NewValue = OldValue + GM_LOCK_WAITER_WOKEN -
1843 GM_LOCK_WAITER_INC;
1844
1845 /* Remove the Woken bit */
1846 if (InterlockedCompareExchange(&GuardedMutex->Count,
1847 NewValue,
1848 OldValue) == OldValue)
1849 {
1850 /* Signal the Gate */
1851 KeSignalGateBoostPriority(&GuardedMutex->Gate);
1852 }
1853 }
1854
1855 /* Re-enable APCs */
1856 KeLeaveGuardedRegion();
1857 }
1858
1859 FORCEINLINE
1860 BOOLEAN
1861 _KeTryToAcquireGuardedMutex(IN OUT PKGUARDED_MUTEX GuardedMutex)
1862 {
1863 PKTHREAD Thread = KeGetCurrentThread();
1864
1865 /* Block APCs */
1866 KeEnterGuardedRegion();
1867
1868 /* Remove the lock */
1869 if (!InterlockedBitTestAndReset(&GuardedMutex->Count, GM_LOCK_BIT_V))
1870 {
1871 /* Re-enable APCs */
1872 KeLeaveGuardedRegion();
1873 YieldProcessor();
1874
1875 /* Return failure */
1876 return FALSE;
1877 }
1878
1879 /* Set the Owner and APC State */
1880 GuardedMutex->Owner = Thread;
1881 GuardedMutex->SpecialApcDisable = Thread->SpecialApcDisable;
1882 return TRUE;
1883 }
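//
// Illustrative usage sketch (hypothetical names): a guarded mutex is used much
// like a fast mutex, pairing acquire and release and never acquiring it
// recursively.
//
//     KGUARDED_MUTEX ExampleMutex;
//
//     VOID
//     ExampleInitAndUse(VOID)
//     {
//         _KeInitializeGuardedMutex(&ExampleMutex);
//
//         _KeAcquireGuardedMutex(&ExampleMutex);
//         /* ... exclusive access, special kernel APCs disabled ... */
//         _KeReleaseGuardedMutex(&ExampleMutex);
//     }
//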