ReactOS SMP Bringup Lite: reactos/ntoskrnl/include/internal/ke_x.h
1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: GPL - See COPYING in the top level directory
4 * FILE: ntoskrnl/include/internal/ke_x.h
5 * PURPOSE: Internal Inlined Functions for the Kernel
6 * PROGRAMMERS: Alex Ionescu (alex.ionescu@reactos.org)
7 */
8
9 //
10 // Thread Dispatcher Header DebugActive Mask
11 //
12 #define DR_MASK(x) (1 << (x))
13 #define DR_ACTIVE_MASK 0x10
14 #define DR_REG_MASK 0x4F
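//
// Illustrative note (a sketch, not from the original header): DR_MASK builds
// the per-register bit that debug-register bookkeeping tests in a thread's
// dispatcher-header DebugActive byte, along the lines of the hypothetical
// check below; the exact field access is assumed here for illustration only.
//
//     if (Thread->Header.DebugActive & DR_MASK(3))
//     {
//         /* DR3 carries an active hardware breakpoint for this thread */
//     }
//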
15
16 #ifdef _M_IX86
17 //
18 // Sanitizes a selector
19 //
20 FORCEINLINE
21 ULONG
22 Ke386SanitizeSeg(IN ULONG Cs,
23 IN KPROCESSOR_MODE Mode)
24 {
25 //
26 // Check if we're in kernel-mode, and force CPL 0 if so.
27 // Otherwise, force CPL 3.
28 //
29 return ((Mode == KernelMode) ?
30 (Cs & (0xFFFF & ~RPL_MASK)) :
31 (RPL_MASK | (Cs & 0xFFFF)));
32 }
33
34 //
35 // Sanitizes EFLAGS
36 //
37 FORCEINLINE
38 ULONG
39 Ke386SanitizeFlags(IN ULONG Eflags,
40 IN KPROCESSOR_MODE Mode)
41 {
42 //
43 // Check if we're in kernel-mode, and sanitize EFLAGS if so.
44 // Otherwise, also force interrupt mask on.
45 //
46 return ((Mode == KernelMode) ?
47 (Eflags & (EFLAGS_USER_SANITIZE | EFLAGS_INTERRUPT_MASK)) :
48 (EFLAGS_INTERRUPT_MASK | (Eflags & EFLAGS_USER_SANITIZE)));
49 }
50
51 //
52 // Gets a DR register from a CONTEXT structure
53 //
54 FORCEINLINE
55 PVOID
56 KiDrFromContext(IN ULONG Dr,
57 IN PCONTEXT Context)
58 {
59 return *(PVOID*)((ULONG_PTR)Context + KiDebugRegisterContextOffsets[Dr]);
60 }
61
62 //
63 // Gets a DR register from a KTRAP_FRAME structure
64 //
65 FORCEINLINE
66 PVOID*
67 KiDrFromTrapFrame(IN ULONG Dr,
68 IN PKTRAP_FRAME TrapFrame)
69 {
70 return (PVOID*)((ULONG_PTR)TrapFrame + KiDebugRegisterTrapOffsets[Dr]);
71 }
72
73 //
74 // Sanitizes a Debug Register
75 //
76 FORCEINLINE
77 PVOID
78 Ke386SanitizeDr(IN PVOID DrAddress,
79 IN KPROCESSOR_MODE Mode)
80 {
81 //
82 // Check if we're in kernel-mode, and return the address directly if so.
83 // Otherwise, make sure it's not inside the kernel-mode address space.
84 // If it is, then clear the address.
85 //
86 return ((Mode == KernelMode) ? DrAddress :
87 (DrAddress <= MM_HIGHEST_USER_ADDRESS) ? DrAddress : 0);
88 }
89 #endif /* _M_IX86 */
90
91 #ifndef _M_ARM
92 FORCEINLINE
93 PRKTHREAD
94 KeGetCurrentThread(VOID)
95 {
96 #ifdef _M_IX86
97 /* Return the current thread */
98 return ((PKIPCR)KeGetPcr())->PrcbData.CurrentThread;
99 #else
100 PKPRCB Prcb = KeGetCurrentPrcb();
101 return Prcb->CurrentThread;
102 #endif
103 }
104
105 FORCEINLINE
106 UCHAR
107 KeGetPreviousMode(VOID)
108 {
109 /* Return the current mode */
110 return KeGetCurrentThread()->PreviousMode;
111 }
112 #endif
113
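//
// Flushes the translation buffer for the current process. On x86 this is done
// by reloading CR3, which discards all non-global TLB entries; global (kernel)
// entries survive and would need INVLPG or a CR4.PGE toggle to be removed.
//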
114 FORCEINLINE
115 VOID
116 KeFlushProcessTb(VOID)
117 {
118 /* Flush the TLB by resetting CR3 */
119 #ifdef _M_PPC
120 __asm__("sync\n\tisync\n\t");
121 #elif defined(_M_ARM)
122 //
123 // We need to implement this!
124 //
125 ASSERTMSG("Need ARM flush routine\n", FALSE);
126 #else
127 __writecr3(__readcr3());
128 #endif
129 }
130
131 //
132 // Enters a Guarded Region
133 //
134 #define KeEnterGuardedRegion() \
135 { \
136 PKTHREAD _Thread = KeGetCurrentThread(); \
137 \
138 /* Sanity checks */ \
139 ASSERT(KeGetCurrentIrql() <= APC_LEVEL); \
140 ASSERT(_Thread == KeGetCurrentThread()); \
141 ASSERT((_Thread->SpecialApcDisable <= 0) && \
142 (_Thread->SpecialApcDisable != -32768)); \
143 \
144 /* Disable Special APCs */ \
145 _Thread->SpecialApcDisable--; \
146 }
147
148 //
149 // Leaves a Guarded Region
150 //
151 #define KeLeaveGuardedRegion() \
152 { \
153 PKTHREAD _Thread = KeGetCurrentThread(); \
154 \
155 /* Sanity checks */ \
156 ASSERT(KeGetCurrentIrql() <= APC_LEVEL); \
157 ASSERT(_Thread == KeGetCurrentThread()); \
158 ASSERT(_Thread->SpecialApcDisable < 0); \
159 \
160 /* Leave region and check if APCs are OK now */ \
161 if (!(++_Thread->SpecialApcDisable)) \
162 { \
163 /* Check for Kernel APCs on the list */ \
164 if (!IsListEmpty(&_Thread->ApcState. \
165 ApcListHead[KernelMode])) \
166 { \
167 /* Check for APC Delivery */ \
168 KiCheckForKernelApcDelivery(); \
169 } \
170 } \
171 }
172
173 //
174 // Enters a Critical Region
175 //
176 #define KeEnterCriticalRegion() \
177 { \
178 PKTHREAD _Thread = KeGetCurrentThread(); \
179 \
180 /* Sanity checks */ \
181 ASSERT(_Thread == KeGetCurrentThread()); \
182 ASSERT((_Thread->KernelApcDisable <= 0) && \
183 (_Thread->KernelApcDisable != -32768)); \
184 \
185 /* Disable Kernel APCs */ \
186 _Thread->KernelApcDisable--; \
187 }
188
189 //
190 // Leaves a Critical Region
191 //
192 #define KeLeaveCriticalRegion() \
193 { \
194 PKTHREAD _Thread = KeGetCurrentThread(); \
195 \
196 /* Sanity checks */ \
197 ASSERT(_Thread == KeGetCurrentThread()); \
198 ASSERT(_Thread->KernelApcDisable < 0); \
199 \
200 /* Enable Kernel APCs */ \
201 _Thread->KernelApcDisable++; \
202 \
203 /* Check if Kernel APCs are now enabled */ \
204 if (!(_Thread->KernelApcDisable)) \
205 { \
206 /* Check if we need to request an APC Delivery */ \
207 if (!(IsListEmpty(&_Thread->ApcState.ApcListHead[KernelMode])) && \
208 !(_Thread->SpecialApcDisable)) \
209 { \
210 /* Check for the right environment */ \
211 KiCheckForKernelApcDelivery(); \
212 } \
213 } \
214 }
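//
// Illustrative pairing (a sketch, not from the original header): a routine
// that must not receive normal kernel APCs while it holds a lock brackets the
// work as below. ExAcquireResourceExclusiveLite/ExReleaseResourceLite are the
// documented executive resource APIs; the resource and routine are
// hypothetical.
//
#if 0
ERESOURCE MyResource;

VOID
MyUpdateProtectedState(VOID)
{
    /* No normal kernel APCs can be delivered while the resource is held */
    KeEnterCriticalRegion();
    ExAcquireResourceExclusiveLite(&MyResource, TRUE);

    /* ... modify the state guarded by MyResource ... */

    ExReleaseResourceLite(&MyResource);
    KeLeaveCriticalRegion();
}
#endif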
215
216 #ifndef CONFIG_SMP
217 //
218 // Spinlock Acquire at IRQL >= DISPATCH_LEVEL
219 //
220 FORCEINLINE
221 VOID
222 KxAcquireSpinLock(IN PKSPIN_LOCK SpinLock)
223 {
224 /* On UP builds, spinlocks don't exist at IRQL >= DISPATCH */
225 UNREFERENCED_PARAMETER(SpinLock);
226 }
227
228 //
229 // Spinlock Release at IRQL >= DISPATCH_LEVEL
230 //
231 FORCEINLINE
232 VOID
233 KxReleaseSpinLock(IN PKSPIN_LOCK SpinLock)
234 {
235 /* On UP builds, spinlocks don't exist at IRQL >= DISPATCH */
236 UNREFERENCED_PARAMETER(SpinLock);
237 }
238
239 //
240 // This routine protects against multiple CPU acquires, it's meaningless on UP.
241 //
242 FORCEINLINE
243 VOID
244 KiAcquireDispatcherObject(IN DISPATCHER_HEADER* Object)
245 {
246 UNREFERENCED_PARAMETER(Object);
247 }
248
249 //
250 // This routine protects against multiple CPU acquires, it's meaningless on UP.
251 //
252 FORCEINLINE
253 VOID
254 KiReleaseDispatcherObject(IN DISPATCHER_HEADER* Object)
255 {
256 UNREFERENCED_PARAMETER(Object);
257 }
258
259 FORCEINLINE
260 KIRQL
261 KiAcquireDispatcherLock(VOID)
262 {
263 /* Raise to DPC level */
264 return KeRaiseIrqlToDpcLevel();
265 }
266
267 FORCEINLINE
268 VOID
269 KiReleaseDispatcherLock(IN KIRQL OldIrql)
270 {
271 /* Just exit the dispatcher */
272 KiExitDispatcher(OldIrql);
273 }
274
275 FORCEINLINE
276 VOID
277 KiAcquireDispatcherLockAtDpcLevel(VOID)
278 {
279 /* This is a no-op at DPC Level for UP systems */
280 return;
281 }
282
283 FORCEINLINE
284 VOID
285 KiReleaseDispatcherLockFromDpcLevel(VOID)
286 {
287 /* This is a no-op at DPC Level for UP systems */
288 return;
289 }
290
291 //
292 // This routine makes the thread deferred ready on the boot CPU.
293 //
294 FORCEINLINE
295 VOID
296 KiInsertDeferredReadyList(IN PKTHREAD Thread)
297 {
298 /* Set the thread to deferred state and boot CPU */
299 Thread->State = DeferredReady;
300 Thread->DeferredProcessor = 0;
301
302 /* Make the thread ready immediately */
303 KiDeferredReadyThread(Thread);
304 }
305
306 FORCEINLINE
307 VOID
308 KiRescheduleThread(IN BOOLEAN NewThread,
309 IN ULONG Cpu)
310 {
311 /* This is meaningless on UP systems */
312 UNREFERENCED_PARAMETER(NewThread);
313 UNREFERENCED_PARAMETER(Cpu);
314 }
315
316 //
317 // This routine protects against multiple CPU acquires, it's meaningless on UP.
318 //
319 FORCEINLINE
320 VOID
321 KiSetThreadSwapBusy(IN PKTHREAD Thread)
322 {
323 UNREFERENCED_PARAMETER(Thread);
324 }
325
326 //
327 // This routine protects against multiple CPU acquires, it's meaningless on UP.
328 //
329 FORCEINLINE
330 VOID
331 KiAcquirePrcbLock(IN PKPRCB Prcb)
332 {
333 UNREFERENCED_PARAMETER(Prcb);
334 }
335
336 //
337 // This routine protects against multiple CPU acquires, it's meaningless on UP.
338 //
339 FORCEINLINE
340 VOID
341 KiReleasePrcbLock(IN PKPRCB Prcb)
342 {
343 UNREFERENCED_PARAMETER(Prcb);
344 }
345
346 //
347 // This routine protects against multiple CPU acquires, it's meaningless on UP.
348 //
349 FORCEINLINE
350 VOID
351 KiAcquireThreadLock(IN PKTHREAD Thread)
352 {
353 UNREFERENCED_PARAMETER(Thread);
354 }
355
356 //
357 // This routine protects against multiple CPU acquires, it's meaningless on UP.
358 //
359 FORCEINLINE
360 VOID
361 KiReleaseThreadLock(IN PKTHREAD Thread)
362 {
363 UNREFERENCED_PARAMETER(Thread);
364 }
365
366 //
367 // This routine protects against multiple CPU acquires, it's meaningless on UP.
368 //
369 FORCEINLINE
370 BOOLEAN
371 KiTryThreadLock(IN PKTHREAD Thread)
372 {
373 UNREFERENCED_PARAMETER(Thread);
374 return FALSE;
375 }
376
377 FORCEINLINE
378 VOID
379 KiCheckDeferredReadyList(IN PKPRCB Prcb)
380 {
381 /* There are no deferred ready lists on UP systems */
382 UNREFERENCED_PARAMETER(Prcb);
383 }
384
385 FORCEINLINE
386 VOID
387 KiRundownThread(IN PKTHREAD Thread)
388 {
389 #if defined(_M_IX86) || defined(_M_AMD64)
390 /* Check if this is the NPX Thread */
391 if (KeGetCurrentPrcb()->NpxThread == Thread)
392 {
393 /* Clear it */
394 KeGetCurrentPrcb()->NpxThread = NULL;
395 Ke386FnInit();
396 }
397 #endif
398 }
399
400 FORCEINLINE
401 VOID
402 KiRequestApcInterrupt(IN BOOLEAN NeedApc,
403 IN UCHAR Processor)
404 {
405 /* We deliver instantly on UP */
406 UNREFERENCED_PARAMETER(NeedApc);
407 UNREFERENCED_PARAMETER(Processor);
408 }
409
410 FORCEINLINE
411 PKSPIN_LOCK_QUEUE
412 KiAcquireTimerLock(IN ULONG Hand)
413 {
414 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
415
416 /* Nothing to do on UP */
417 UNREFERENCED_PARAMETER(Hand);
418 return NULL;
419 }
420
421 FORCEINLINE
422 VOID
423 KiReleaseTimerLock(IN PKSPIN_LOCK_QUEUE LockQueue)
424 {
425 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
426
427 /* Nothing to do on UP */
428 UNREFERENCED_PARAMETER(LockQueue);
429 }
430
431 #else
432
433 //
434 // Spinlock Acquisition at IRQL >= DISPATCH_LEVEL
435 //
436 FORCEINLINE
437 VOID
438 KxAcquireSpinLock(IN PKSPIN_LOCK SpinLock)
439 {
440 /* Make sure that we don't own the lock already */
441 if (((KSPIN_LOCK)KeGetCurrentThread() | 1) == *SpinLock)
442 {
443 /* We do, bugcheck! */
444 KeBugCheckEx(SPIN_LOCK_ALREADY_OWNED, (ULONG_PTR)SpinLock, 0, 0, 0);
445 }
446
447 /* Start acquire loop */
448 for (;;)
449 {
450 /* Try to acquire it */
451 if (InterlockedBitTestAndSet((PLONG)SpinLock, 0))
452 {
453 /* Value changed... wait until it's unlocked */
454 while (*(volatile KSPIN_LOCK *)SpinLock == 1)
455 {
456 #if DBG
457 /* On debug builds, we use a much slower but useful routine */
458 //Kii386SpinOnSpinLock(SpinLock, 5);
459
460 /* FIXME: Do normal yield for now */
461 YieldProcessor();
462 #else
463 /* Otherwise, just yield and keep looping */
464 YieldProcessor();
465 #endif
466 }
467 }
468 else
469 {
470 #if DBG
471 /* On debug builds, we OR in the KTHREAD */
472 *SpinLock = (KSPIN_LOCK)KeGetCurrentThread() | 1;
473 #endif
474 /* All is well, break out */
475 break;
476 }
477 }
478 }
479
480 //
481 // Spinlock Release at IRQL >= DISPATCH_LEVEL
482 //
483 FORCEINLINE
484 VOID
485 KxReleaseSpinLock(IN PKSPIN_LOCK SpinLock)
486 {
487 #if DBG
488 /* Make sure that the threads match */
489 if (((KSPIN_LOCK)KeGetCurrentThread() | 1) != *SpinLock)
490 {
491 /* They don't, bugcheck */
492 KeBugCheckEx(SPIN_LOCK_NOT_OWNED, (ULONG_PTR)SpinLock, 0, 0, 0);
493 }
494 #endif
495 /* Clear the lock */
496 InterlockedAnd((PLONG)SpinLock, 0);
497 }
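//
// Note on the scheme above: on free builds a held lock word is simply 1, so
// the release only has to clear bit 0. On checked builds the acquire path
// stores the owning KTHREAD with bit 0 set, which is what allows
// KxAcquireSpinLock to bugcheck on recursive acquisition and
// KxReleaseSpinLock to bugcheck when a non-owner releases the lock.
//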
498
499 FORCEINLINE
500 VOID
501 KiAcquireDispatcherObject(IN DISPATCHER_HEADER* Object)
502 {
503 LONG OldValue;
504
505 /* Make sure we're at a safe level to touch the lock */
506 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
507
508 /* Start acquire loop */
509 do
510 {
511 /* Loop until the other CPU releases it */
512 while (TRUE)
513 {
514 /* Check if it got released */
515 OldValue = Object->Lock;
516 if ((OldValue & KOBJECT_LOCK_BIT) == 0) break;
517
518 /* Let the CPU know that this is a loop */
519 YieldProcessor();
520 }
521
522 /* Try acquiring the lock now */
523 } while (InterlockedCompareExchange(&Object->Lock,
524 OldValue | KOBJECT_LOCK_BIT,
525 OldValue) != OldValue);
526 }
527
528 FORCEINLINE
529 VOID
530 KiReleaseDispatcherObject(IN DISPATCHER_HEADER* Object)
531 {
532 /* Make sure we're at a safe level to touch the lock */
533 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
534
535 /* Release it */
536 InterlockedAnd(&Object->Lock, ~KOBJECT_LOCK_BIT);
537 }
538
539 FORCEINLINE
540 KIRQL
541 KiAcquireDispatcherLock(VOID)
542 {
543 /* Raise to synchronization level and acquire the dispatcher lock */
544 return KeAcquireQueuedSpinLockRaiseToSynch(LockQueueDispatcherLock);
545 }
546
547 FORCEINLINE
548 VOID
549 KiReleaseDispatcherLock(IN KIRQL OldIrql)
550 {
551 /* First release the lock */
552 KeReleaseQueuedSpinLockFromDpcLevel(&KeGetCurrentPrcb()->
553 LockQueue[LockQueueDispatcherLock]);
554
555 /* Then exit the dispatcher */
556 KiExitDispatcher(OldIrql);
557 }
558
559 FORCEINLINE
560 VOID
561 KiAcquireDispatcherLockAtDpcLevel(VOID)
562 {
563 /* Acquire the dispatcher lock */
564 KeAcquireQueuedSpinLockAtDpcLevel(&KeGetCurrentPrcb()->
565 LockQueue[LockQueueDispatcherLock]);
566 }
567
568 FORCEINLINE
569 VOID
570 KiReleaseDispatcherLockFromDpcLevel(VOID)
571 {
572 /* Release the dispatcher lock */
573 KeReleaseQueuedSpinLockFromDpcLevel(&KeGetCurrentPrcb()->
574 LockQueue[LockQueueDispatcherLock]);
575 }
576
577 //
578 // This routine inserts a thread into the deferred ready list of the current CPU
579 //
580 FORCEINLINE
581 VOID
582 KiInsertDeferredReadyList(IN PKTHREAD Thread)
583 {
584 PKPRCB Prcb = KeGetCurrentPrcb();
585
586 /* Set the thread to deferred state and CPU */
587 Thread->State = DeferredReady;
588 Thread->DeferredProcessor = Prcb->Number;
589
590 /* Add it on the list */
591 PushEntryList(&Prcb->DeferredReadyListHead, &Thread->SwapListEntry);
592 }
593
594 FORCEINLINE
595 VOID
596 KiRescheduleThread(IN BOOLEAN NewThread,
597 IN ULONG Cpu)
598 {
599 /* Check if a new thread needs to be scheduled on a different CPU */
600 if ((NewThread) && !(KeGetPcr()->Number == Cpu))
601 {
602 /* Send an IPI to request delivery */
603 KiIpiSend(AFFINITY_MASK(Cpu), IPI_DPC);
604 }
605 }
606
607 //
608 // This routine sets the current thread in a swap busy state, which ensures that
609 // nobody else tries to swap it concurrently.
610 //
611 FORCEINLINE
612 VOID
613 KiSetThreadSwapBusy(IN PKTHREAD Thread)
614 {
615 /* Make sure nobody already set it */
616 ASSERT(Thread->SwapBusy == FALSE);
617
618 /* Set it ourselves */
619 Thread->SwapBusy = TRUE;
620 }
621
622 //
623 // This routine acquires the PRCB lock so that only one caller can touch
624 // volatile PRCB data.
625 //
626 // Since this is a simple optimized spin-lock, it must only be acquired
627 // at dispatcher level or higher!
628 //
629 FORCEINLINE
630 VOID
631 KiAcquirePrcbLock(IN PKPRCB Prcb)
632 {
633 /* Make sure we're at a safe level to touch the PRCB lock */
634 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
635
636 /* Start acquire loop */
637 for (;;)
638 {
639 /* Acquire the lock and break out if we acquired it first */
640 if (!InterlockedExchange((PLONG)&Prcb->PrcbLock, 1)) break;
641
642 /* Loop until the other CPU releases it */
643 do
644 {
645 /* Let the CPU know that this is a loop */
646 YieldProcessor();
647 } while (Prcb->PrcbLock);
648 }
649 }
650
651 //
652 // This routine releases the PRCB lock so that other callers can touch
653 // volatile PRCB data.
654 //
655 // Since this is a simple optimized spin-lock, it must only be acquired
656 // at dispatcher level or higher!
657 //
658 FORCEINLINE
659 VOID
660 KiReleasePrcbLock(IN PKPRCB Prcb)
661 {
662 /* Make sure we are above dispatch and the lock is acquired! */
663 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
664 ASSERT(Prcb->PrcbLock != 0);
665
666 /* Release it */
667 InterlockedAnd((PLONG)&Prcb->PrcbLock, 0);
668 }
669
670 //
671 // This routine acquires the thread lock so that only one caller can touch
672 // volatile thread data.
673 //
674 // Since this is a simple optimized spin-lock, it must only be acquired
675 // at dispatcher level or higher!
676 //
677 FORCEINLINE
678 VOID
679 KiAcquireThreadLock(IN PKTHREAD Thread)
680 {
681 /* Make sure we're at a safe level to touch the thread lock */
682 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
683
684 /* Start acquire loop */
685 for (;;)
686 {
687 /* Acquire the lock and break out if we acquired it first */
688 if (!InterlockedExchange((PLONG)&Thread->ThreadLock, 1)) break;
689
690 /* Loop until the other CPU releases it */
691 do
692 {
693 /* Let the CPU know that this is a loop */
694 YieldProcessor();
695 } while (Thread->ThreadLock);
696 }
697 }
698
699 //
700 // This routine releases the thread lock so that other callers can touch
701 // volatile thread data.
702 //
703 // Since this is a simple optimized spin-lock, it must only be acquired
704 // at dispatcher level or higher!
705 //
706 FORCEINLINE
707 VOID
708 KiReleaseThreadLock(IN PKTHREAD Thread)
709 {
710 /* Make sure we are still above dispatch */
711 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
712
713 /* Release it */
714 InterlockedAnd((PLONG)&Thread->ThreadLock, 0);
715 }
716
717 FORCEINLINE
718 BOOLEAN
719 KiTryThreadLock(IN PKTHREAD Thread)
720 {
721 LONG Value;
722
723 /* If the lock isn't acquired, return false */
724 if (!Thread->ThreadLock) return FALSE;
725
726 /* Otherwise, try to acquire it and check the result */
727 Value = 1;
728 Value = InterlockedExchange((PLONG)&Thread->ThreadLock, Value);
729
730 /* Return the lock state */
731 return (Value == TRUE);
732 }
733
734 FORCEINLINE
735 VOID
736 KiCheckDeferredReadyList(IN PKPRCB Prcb)
737 {
738 /* Scan the deferred ready lists if required */
739 if (Prcb->DeferredReadyListHead.Next) KiProcessDeferredReadyList(Prcb);
740 }
741
742 FORCEINLINE
743 VOID
744 KiRundownThread(IN PKTHREAD Thread)
745 {
746 /* Nothing to do */
747 return;
748 }
749
750 FORCEINLINE
751 VOID
752 KiRequestApcInterrupt(IN BOOLEAN NeedApc,
753 IN UCHAR Processor)
754 {
755 /* Check if we need to request APC delivery */
756 if (NeedApc)
757 {
758 /* Check if it's on another CPU */
759 if (KeGetPcr()->Number != Processor)
760 {
761 /* Send an IPI to request delivery */
762 KiIpiSend(AFFINITY_MASK(Processor), IPI_APC);
763 }
764 else
765 {
766 /* Request a software interrupt */
767 HalRequestSoftwareInterrupt(APC_LEVEL);
768 }
769 }
770 }
771
772 FORCEINLINE
773 PKSPIN_LOCK_QUEUE
774 KiAcquireTimerLock(IN ULONG Hand)
775 {
776 PKSPIN_LOCK_QUEUE LockQueue;
777 ULONG LockIndex;
778 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
779
780 /* Get the lock index */
781 LockIndex = Hand >> LOCK_QUEUE_TIMER_LOCK_SHIFT;
782 LockIndex &= (LOCK_QUEUE_TIMER_TABLE_LOCKS - 1);
783
784 /* Now get the lock */
785 LockQueue = &KeGetCurrentPrcb()->LockQueue[LockQueueTimerTableLock + LockIndex];
786
787 /* Acquire it and return */
788 KeAcquireQueuedSpinLockAtDpcLevel(LockQueue);
789 return LockQueue;
790 }
791
792 FORCEINLINE
793 VOID
794 KiReleaseTimerLock(IN PKSPIN_LOCK_QUEUE LockQueue)
795 {
796 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
797
798 /* Release the lock */
799 KeReleaseQueuedSpinLockFromDpcLevel(LockQueue);
800 }
801
802 #endif
803
804 FORCEINLINE
805 VOID
806 KiAcquireApcLock(IN PKTHREAD Thread,
807 IN PKLOCK_QUEUE_HANDLE Handle)
808 {
809 /* Acquire the lock and raise to synchronization level */
810 KeAcquireInStackQueuedSpinLockRaiseToSynch(&Thread->ApcQueueLock, Handle);
811 }
812
813 FORCEINLINE
814 VOID
815 KiAcquireApcLockAtDpcLevel(IN PKTHREAD Thread,
816 IN PKLOCK_QUEUE_HANDLE Handle)
817 {
818 /* Acquire the lock */
819 KeAcquireInStackQueuedSpinLockAtDpcLevel(&Thread->ApcQueueLock, Handle);
820 }
821
822 FORCEINLINE
823 VOID
824 KiAcquireApcLockAtApcLevel(IN PKTHREAD Thread,
825 IN PKLOCK_QUEUE_HANDLE Handle)
826 {
827 /* Acquire the lock */
828 KeAcquireInStackQueuedSpinLock(&Thread->ApcQueueLock, Handle);
829 }
830
831 FORCEINLINE
832 VOID
833 KiReleaseApcLock(IN PKLOCK_QUEUE_HANDLE Handle)
834 {
835 /* Release the lock */
836 KeReleaseInStackQueuedSpinLock(Handle);
837 }
838
839 FORCEINLINE
840 VOID
841 KiReleaseApcLockFromDpcLevel(IN PKLOCK_QUEUE_HANDLE Handle)
842 {
843 /* Release the lock */
844 KeReleaseInStackQueuedSpinLockFromDpcLevel(Handle);
845 }
846
847 FORCEINLINE
848 VOID
849 KiAcquireProcessLock(IN PKPROCESS Process,
850 IN PKLOCK_QUEUE_HANDLE Handle)
851 {
852 /* Acquire the lock and raise to synchronization level */
853 KeAcquireInStackQueuedSpinLockRaiseToSynch(&Process->ProcessLock, Handle);
854 }
855
856 FORCEINLINE
857 VOID
858 KiReleaseProcessLock(IN PKLOCK_QUEUE_HANDLE Handle)
859 {
860 /* Release the lock */
861 KeReleaseInStackQueuedSpinLock(Handle);
862 }
863
864 FORCEINLINE
865 VOID
866 KiReleaseProcessLockFromDpcLevel(IN PKLOCK_QUEUE_HANDLE Handle)
867 {
868 /* Release the lock */
869 KeReleaseInStackQueuedSpinLockFromDpcLevel(Handle);
870 }
871
872 FORCEINLINE
873 VOID
874 KiAcquireDeviceQueueLock(IN PKDEVICE_QUEUE DeviceQueue,
875 IN PKLOCK_QUEUE_HANDLE DeviceLock)
876 {
877 /* Check if we were called from a threaded DPC */
878 if (KeGetCurrentPrcb()->DpcThreadActive)
879 {
880 /* Lock the Queue, we're not at DPC level */
881 KeAcquireInStackQueuedSpinLock(&DeviceQueue->Lock, DeviceLock);
882 }
883 else
884 {
885 /* We must be at DPC level, acquire the lock safely */
886 ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
887 KeAcquireInStackQueuedSpinLockAtDpcLevel(&DeviceQueue->Lock,
888 DeviceLock);
889 }
890 }
891
892 FORCEINLINE
893 VOID
894 KiReleaseDeviceQueueLock(IN PKLOCK_QUEUE_HANDLE DeviceLock)
895 {
896 /* Check if we were called from a threaded DPC */
897 if (KeGetCurrentPrcb()->DpcThreadActive)
898 {
899 /* Unlock the Queue, we're not at DPC level */
900 KeReleaseInStackQueuedSpinLock(DeviceLock);
901 }
902 else
903 {
904 /* We must be at DPC level, release the lock safely */
905 ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
906 KeReleaseInStackQueuedSpinLockFromDpcLevel(DeviceLock);
907 }
908 }
909
910 //
911 // Satisfies the wait of any dispatcher object
912 //
913 #define KiSatisfyObjectWait(Object, Thread) \
914 { \
915 /* Special case for Mutants */ \
916 if ((Object)->Header.Type == MutantObject) \
917 { \
918 /* Decrease the Signal State */ \
919 (Object)->Header.SignalState--; \
920 \
921 /* Check if it's now non-signaled */ \
922 if (!(Object)->Header.SignalState) \
923 { \
924 /* Set the Owner Thread */ \
925 (Object)->OwnerThread = Thread; \
926 \
927 /* Disable APCs if needed */ \
928 Thread->KernelApcDisable = Thread->KernelApcDisable - \
929 (Object)->ApcDisable; \
930 \
931 /* Check if it's abandoned */ \
932 if ((Object)->Abandoned) \
933 { \
934 /* Unabandon it */ \
935 (Object)->Abandoned = FALSE; \
936 \
937 /* Return Status */ \
938 Thread->WaitStatus = STATUS_ABANDONED; \
939 } \
940 \
941 /* Insert it into the Mutant List */ \
942 InsertHeadList(Thread->MutantListHead.Blink, \
943 &(Object)->MutantListEntry); \
944 } \
945 } \
946 else if (((Object)->Header.Type & TIMER_OR_EVENT_TYPE) == \
947 EventSynchronizationObject) \
948 { \
949 /* Synchronization Timers and Events just get un-signaled */ \
950 (Object)->Header.SignalState = 0; \
951 } \
952 else if ((Object)->Header.Type == SemaphoreObject) \
953 { \
954 /* These ones can have multiple states, so we only decrease it */ \
955 (Object)->Header.SignalState--; \
956 } \
957 }
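//
// Reminder on the mutant path above: a mutant with SignalState == 1 is free;
// the decrement to 0 makes the waking thread the owner, and further acquires
// by that owner drive SignalState negative. When the object's ApcDisable is
// non-zero, the owner additionally runs with kernel APCs disabled until it
// releases the mutant, which is what the KernelApcDisable adjustment
// implements.
//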
958
959 //
960 // Satisfies the wait of a mutant dispatcher object
961 //
962 #define KiSatisfyMutantWait(Object, Thread) \
963 { \
964 /* Decrease the Signal State */ \
965 (Object)->Header.SignalState--; \
966 \
967 /* Check if it's now non-signaled */ \
968 if (!(Object)->Header.SignalState) \
969 { \
970 /* Set the Owner Thread */ \
971 (Object)->OwnerThread = Thread; \
972 \
973 /* Disable APCs if needed */ \
974 Thread->KernelApcDisable = Thread->KernelApcDisable - \
975 (Object)->ApcDisable; \
976 \
977 /* Check if it's abandoned */ \
978 if ((Object)->Abandoned) \
979 { \
980 /* Unabandon it */ \
981 (Object)->Abandoned = FALSE; \
982 \
983 /* Return Status */ \
984 Thread->WaitStatus = STATUS_ABANDONED; \
985 } \
986 \
987 /* Insert it into the Mutant List */ \
988 InsertHeadList(Thread->MutantListHead.Blink, \
989 &(Object)->MutantListEntry); \
990 } \
991 }
992
993 //
994 // Satisfies the wait of any nonmutant dispatcher object
995 //
996 #define KiSatisfyNonMutantWait(Object) \
997 { \
998 if (((Object)->Header.Type & TIMER_OR_EVENT_TYPE) == \
999 EventSynchronizationObject) \
1000 { \
1001 /* Synchronization Timers and Events just get un-signaled */ \
1002 (Object)->Header.SignalState = 0; \
1003 } \
1004 else if ((Object)->Header.Type == SemaphoreObject) \
1005 { \
1006 /* These ones can have multiple states, so we only decrease it */ \
1007 (Object)->Header.SignalState--; \
1008 } \
1009 }
1010
1011 //
1012 // Recalculates the due time
1013 //
1014 FORCEINLINE
1015 PLARGE_INTEGER
1016 KiRecalculateDueTime(IN PLARGE_INTEGER OriginalDueTime,
1017 IN PLARGE_INTEGER DueTime,
1018 IN OUT PLARGE_INTEGER NewDueTime)
1019 {
1020 /* Don't do anything for absolute waits */
1021 if (OriginalDueTime->QuadPart >= 0) return OriginalDueTime;
1022
1023 /* Otherwise, query the interrupt time and recalculate */
1024 NewDueTime->QuadPart = KeQueryInterruptTime();
1025 NewDueTime->QuadPart -= DueTime->QuadPart;
1026 return NewDueTime;
1027 }
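//
// Worked example of the sign convention (illustrative): timeouts follow the
// usual NT rule that negative values are relative intervals in 100ns units
// and positive values are absolute system times. A 30ms relative timeout is
// therefore passed as -300000; being negative, it gets re-based against the
// current interrupt time here after every premature wakeup, while an
// absolute due time is simply returned unchanged.
//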
1028
1029 //
1030 // Determines whether a thread should be added to the wait list
1031 //
1032 FORCEINLINE
1033 BOOLEAN
1034 KiCheckThreadStackSwap(IN PKTHREAD Thread,
1035 IN KPROCESSOR_MODE WaitMode)
1036 {
1037 /* Check the required conditions */
1038 if ((WaitMode != KernelMode) &&
1039 (Thread->EnableStackSwap) &&
1040 (Thread->Priority >= (LOW_REALTIME_PRIORITY + 9)))
1041 {
1042 /* We are go for swap */
1043 return TRUE;
1044 }
1045 else
1046 {
1047 /* Don't swap the thread */
1048 return FALSE;
1049 }
1050 }
1051
1052 //
1053 // Adds a thread to the wait list
1054 //
1055 #define KiAddThreadToWaitList(Thread, Swappable) \
1056 { \
1057 /* Make sure it's swappable */ \
1058 if (Swappable) \
1059 { \
1060 /* Insert it into the PRCB's List */ \
1061 InsertTailList(&KeGetCurrentPrcb()->WaitListHead, \
1062 &Thread->WaitListEntry); \
1063 } \
1064 }
1065
1066 //
1067 // Checks if a wait in progress should be interrupted by APCs or an alertable
1068 // state.
1069 //
1070 FORCEINLINE
1071 NTSTATUS
1072 KiCheckAlertability(IN PKTHREAD Thread,
1073 IN BOOLEAN Alertable,
1074 IN KPROCESSOR_MODE WaitMode)
1075 {
1076 /* Check if the wait is alertable */
1077 if (Alertable)
1078 {
1079 /* It is, first check if the thread is alerted in this mode */
1080 if (Thread->Alerted[WaitMode])
1081 {
1082 /* It is, so bail out of the wait */
1083 Thread->Alerted[WaitMode] = FALSE;
1084 return STATUS_ALERTED;
1085 }
1086 else if ((WaitMode != KernelMode) &&
1087 (!IsListEmpty(&Thread->ApcState.ApcListHead[UserMode])))
1088 {
1089 /* It isn't, but this is a user wait with queued user APCs */
1090 Thread->ApcState.UserApcPending = TRUE;
1091 return STATUS_USER_APC;
1092 }
1093 else if (Thread->Alerted[KernelMode])
1094 {
1095 /* It isn't that either, but we're alerted in kernel mode */
1096 Thread->Alerted[KernelMode] = FALSE;
1097 return STATUS_ALERTED;
1098 }
1099 }
1100 else if ((WaitMode != KernelMode) && (Thread->ApcState.UserApcPending))
1101 {
1102 /* Not alertable, but this is a user wait with pending user APCs */
1103 return STATUS_USER_APC;
1104 }
1105
1106 /* Otherwise, we're fine */
1107 return STATUS_WAIT_0;
1108 }
1109
1110 //
1111 // Called from KiCompleteTimer, KiInsertTreeTimer, KeSetSystemTime
1112 // to remove timer entries
1113 // See Windows HPI blog for more information.
1114 FORCEINLINE
1115 VOID
1116 KiRemoveEntryTimer(IN PKTIMER Timer)
1117 {
1118 ULONG Hand;
1119 PKTIMER_TABLE_ENTRY TableEntry;
1120
1121 /* Remove the timer from the timer list and check if it's empty */
1122 Hand = Timer->Header.Hand;
1123 if (RemoveEntryList(&Timer->TimerListEntry))
1124 {
1125 /* Get the respective timer table entry */
1126 TableEntry = &KiTimerTableListHead[Hand];
1127 if (&TableEntry->Entry == TableEntry->Entry.Flink)
1128 {
1129 /* Set the entry to an infinite absolute time */
1130 TableEntry->Time.HighPart = 0xFFFFFFFF;
1131 }
1132 }
1133
1134 /* Clear the list entries on dbg builds so we can tell the timer is gone */
1135 #if DBG
1136 Timer->TimerListEntry.Flink = NULL;
1137 Timer->TimerListEntry.Blink = NULL;
1138 #endif
1139 }
1140
1141 //
1142 // Called by Wait and Queue code to insert a timer for dispatching.
1143 // Also called by KeSetTimerEx to insert a timer from the caller.
1144 //
1145 FORCEINLINE
1146 VOID
1147 KxInsertTimer(IN PKTIMER Timer,
1148 IN ULONG Hand)
1149 {
1150 PKSPIN_LOCK_QUEUE LockQueue;
1151
1152 /* Acquire the lock and release the dispatcher lock */
1153 LockQueue = KiAcquireTimerLock(Hand);
1154 KiReleaseDispatcherLockFromDpcLevel();
1155
1156 /* Try to insert the timer */
1157 if (KiInsertTimerTable(Timer, Hand))
1158 {
1159 /* Complete it */
1160 KiCompleteTimer(Timer, LockQueue);
1161 }
1162 else
1163 {
1164 /* Do nothing, just release the lock */
1165 KiReleaseTimerLock(LockQueue);
1166 }
1167 }
1168
1169 //
1170 // Called by KeSetTimerEx and KiInsertTreeTimer to calculate Due Time
1171 // See the Windows HPI Blog for more information
1172 //
1173 FORCEINLINE
1174 BOOLEAN
1175 KiComputeDueTime(IN PKTIMER Timer,
1176 IN LARGE_INTEGER DueTime,
1177 OUT PULONG Hand)
1178 {
1179 LARGE_INTEGER InterruptTime, SystemTime, DifferenceTime;
1180
1181 /* Convert to relative time if needed */
1182 Timer->Header.Absolute = FALSE;
1183 if (DueTime.HighPart >= 0)
1184 {
1185 /* Get System Time */
1186 KeQuerySystemTime(&SystemTime);
1187
1188 /* Do the conversion */
1189 DifferenceTime.QuadPart = SystemTime.QuadPart - DueTime.QuadPart;
1190
1191 /* Make sure it hasn't already expired */
1192 Timer->Header.Absolute = TRUE;
1193 if (DifferenceTime.HighPart >= 0)
1194 {
1195 /* Cancel everything */
1196 Timer->Header.SignalState = TRUE;
1197 Timer->Header.Hand = 0;
1198 Timer->DueTime.QuadPart = 0;
1199 *Hand = 0;
1200 return FALSE;
1201 }
1202
1203 /* Use the relative difference as the due time */
1204 DueTime = DifferenceTime;
1205 }
1206
1207 /* Get the Interrupt Time */
1208 InterruptTime.QuadPart = KeQueryInterruptTime();
1209
1210 /* Recalculate due time */
1211 Timer->DueTime.QuadPart = InterruptTime.QuadPart - DueTime.QuadPart;
1212
1213 /* Get the handle */
1214 *Hand = KiComputeTimerTableIndex(Timer->DueTime.QuadPart);
1215 Timer->Header.Hand = (UCHAR)*Hand;
1216 Timer->Header.Inserted = TRUE;
1217 return TRUE;
1218 }
1219
1220 //
1221 // Called from Unlink and Queue Insert Code.
1222 // Also called by timer code when canceling an inserted timer.
1223 // Removes a timer from its tree.
1224 //
1225 FORCEINLINE
1226 VOID
1227 KxRemoveTreeTimer(IN PKTIMER Timer)
1228 {
1229 ULONG Hand = Timer->Header.Hand;
1230 PKSPIN_LOCK_QUEUE LockQueue;
1231 PKTIMER_TABLE_ENTRY TimerEntry;
1232
1233 /* Acquire timer lock */
1234 LockQueue = KiAcquireTimerLock(Hand);
1235
1236 /* Set the timer as non-inserted */
1237 Timer->Header.Inserted = FALSE;
1238
1239 /* Remove it from the timer list */
1240 if (RemoveEntryList(&Timer->TimerListEntry))
1241 {
1242 /* Get the entry and check if it's empty */
1243 TimerEntry = &KiTimerTableListHead[Hand];
1244 if (IsListEmpty(&TimerEntry->Entry))
1245 {
1246 /* Clear the time then */
1247 TimerEntry->Time.HighPart = 0xFFFFFFFF;
1248 }
1249 }
1250
1251 /* Release the timer lock */
1252 KiReleaseTimerLock(LockQueue);
1253 }
1254
1255 FORCEINLINE
1256 VOID
1257 KxSetTimerForThreadWait(IN PKTIMER Timer,
1258 IN LARGE_INTEGER Interval,
1259 OUT PULONG Hand)
1260 {
1261 ULONGLONG DueTime;
1262 LARGE_INTEGER InterruptTime, SystemTime, TimeDifference;
1263
1264 /* Check the timer's interval to see if it's absolute */
1265 Timer->Header.Absolute = FALSE;
1266 if (Interval.HighPart >= 0)
1267 {
1268 /* Get the system time and calculate the relative time */
1269 KeQuerySystemTime(&SystemTime);
1270 TimeDifference.QuadPart = SystemTime.QuadPart - Interval.QuadPart;
1271 Timer->Header.Absolute = TRUE;
1272
1273 /* Check if we've already expired */
1274 if (TimeDifference.HighPart >= 0)
1275 {
1276 /* Reset everything */
1277 Timer->DueTime.QuadPart = 0;
1278 *Hand = 0;
1279 Timer->Header.Hand = 0;
1280 return;
1281 }
1282 else
1283 {
1284 /* Update the interval */
1285 Interval = TimeDifference;
1286 }
1287 }
1288
1289 /* Calculate the due time */
1290 InterruptTime.QuadPart = KeQueryInterruptTime();
1291 DueTime = InterruptTime.QuadPart - Interval.QuadPart;
1292 Timer->DueTime.QuadPart = DueTime;
1293
1294 /* Calculate the timer handle */
1295 *Hand = KiComputeTimerTableIndex(DueTime);
1296 Timer->Header.Hand = (UCHAR)*Hand;
1297 }
1298
1299 #define KxDelayThreadWait() \
1300 \
1301 /* Setup the Wait Block */ \
1302 Thread->WaitBlockList = TimerBlock; \
1303 \
1304 /* Setup the timer */ \
1305 KxSetTimerForThreadWait(Timer, *Interval, &Hand); \
1306 \
1307 /* Save the due time for the caller */ \
1308 DueTime.QuadPart = Timer->DueTime.QuadPart; \
1309 \
1310 /* Link the timer to this Wait Block */ \
1311 TimerBlock->NextWaitBlock = TimerBlock; \
1312 Timer->Header.WaitListHead.Flink = &TimerBlock->WaitListEntry; \
1313 Timer->Header.WaitListHead.Blink = &TimerBlock->WaitListEntry; \
1314 \
1315 /* Clear wait status */ \
1316 Thread->WaitStatus = STATUS_SUCCESS; \
1317 \
1318 /* Setup wait fields */ \
1319 Thread->Alertable = Alertable; \
1320 Thread->WaitReason = DelayExecution; \
1321 Thread->WaitMode = WaitMode; \
1322 \
1323 /* Check if we can swap the thread's stack */ \
1324 Thread->WaitListEntry.Flink = NULL; \
1325 Swappable = KiCheckThreadStackSwap(Thread, WaitMode); \
1326 \
1327 /* Set the wait time */ \
1328 Thread->WaitTime = KeTickCount.LowPart;
1329
1330 #define KxMultiThreadWait() \
1331 /* Link wait block array to the thread */ \
1332 Thread->WaitBlockList = WaitBlockArray; \
1333 \
1334 /* Reset the index */ \
1335 Index = 0; \
1336 \
1337 /* Loop wait blocks */ \
1338 do \
1339 { \
1340 /* Fill out the wait block */ \
1341 WaitBlock = &WaitBlockArray[Index]; \
1342 WaitBlock->Object = Object[Index]; \
1343 WaitBlock->WaitKey = (USHORT)Index; \
1344 WaitBlock->WaitType = WaitType; \
1345 WaitBlock->Thread = Thread; \
1346 \
1347 /* Link to next block */ \
1348 WaitBlock->NextWaitBlock = &WaitBlockArray[Index + 1]; \
1349 Index++; \
1350 } while (Index < Count); \
1351 \
1352 /* Link the last block */ \
1353 WaitBlock->NextWaitBlock = WaitBlockArray; \
1354 \
1355 /* Set default wait status */ \
1356 Thread->WaitStatus = STATUS_WAIT_0; \
1357 \
1358 /* Check if we have a timer */ \
1359 if (Timeout) \
1360 { \
1361 /* Link to the block */ \
1362 TimerBlock->NextWaitBlock = WaitBlockArray; \
1363 \
1364 /* Setup the timer */ \
1365 KxSetTimerForThreadWait(Timer, *Timeout, &Hand); \
1366 \
1367 /* Save the due time for the caller */ \
1368 DueTime.QuadPart = Timer->DueTime.QuadPart; \
1369 \
1370 /* Initialize the list */ \
1371 InitializeListHead(&Timer->Header.WaitListHead); \
1372 } \
1373 \
1374 /* Set wait settings */ \
1375 Thread->Alertable = Alertable; \
1376 Thread->WaitMode = WaitMode; \
1377 Thread->WaitReason = WaitReason; \
1378 \
1379 /* Check if we can swap the thread's stack */ \
1380 Thread->WaitListEntry.Flink = NULL; \
1381 Swappable = KiCheckThreadStackSwap(Thread, WaitMode); \
1382 \
1383 /* Set the wait time */ \
1384 Thread->WaitTime = KeTickCount.LowPart;
1385
1386 #define KxSingleThreadWait() \
1387 /* Setup the Wait Block */ \
1388 Thread->WaitBlockList = WaitBlock; \
1389 WaitBlock->WaitKey = STATUS_SUCCESS; \
1390 WaitBlock->Object = Object; \
1391 WaitBlock->WaitType = WaitAny; \
1392 \
1393 /* Clear wait status */ \
1394 Thread->WaitStatus = STATUS_SUCCESS; \
1395 \
1396 /* Check if we have a timer */ \
1397 if (Timeout) \
1398 { \
1399 /* Setup the timer */ \
1400 KxSetTimerForThreadWait(Timer, *Timeout, &Hand); \
1401 \
1402 /* Save the due time for the caller */ \
1403 DueTime.QuadPart = Timer->DueTime.QuadPart; \
1404 \
1405 /* Pointer to timer block */ \
1406 WaitBlock->NextWaitBlock = TimerBlock; \
1407 TimerBlock->NextWaitBlock = WaitBlock; \
1408 \
1409 /* Link the timer to this Wait Block */ \
1410 Timer->Header.WaitListHead.Flink = &TimerBlock->WaitListEntry; \
1411 Timer->Header.WaitListHead.Blink = &TimerBlock->WaitListEntry; \
1412 } \
1413 else \
1414 { \
1415 /* No timer block, just ourselves */ \
1416 WaitBlock->NextWaitBlock = WaitBlock; \
1417 } \
1418 \
1419 /* Set wait settings */ \
1420 Thread->Alertable = Alertable; \
1421 Thread->WaitMode = WaitMode; \
1422 Thread->WaitReason = WaitReason; \
1423 \
1424 /* Check if we can swap the thread's stack */ \
1425 Thread->WaitListEntry.Flink = NULL; \
1426 Swappable = KiCheckThreadStackSwap(Thread, WaitMode); \
1427 \
1428 /* Set the wait time */ \
1429 Thread->WaitTime = KeTickCount.LowPart;
1430
1431 #define KxQueueThreadWait() \
1432 /* Setup the Wait Block */ \
1433 Thread->WaitBlockList = WaitBlock; \
1434 WaitBlock->WaitKey = STATUS_SUCCESS; \
1435 WaitBlock->Object = Queue; \
1436 WaitBlock->WaitType = WaitAny; \
1437 WaitBlock->Thread = Thread; \
1438 \
1439 /* Clear wait status */ \
1440 Thread->WaitStatus = STATUS_SUCCESS; \
1441 \
1442 /* Check if we have a timer */ \
1443 if (Timeout) \
1444 { \
1445 /* Setup the timer */ \
1446 KxSetTimerForThreadWait(Timer, *Timeout, &Hand); \
1447 \
1448 /* Save the due time for the caller */ \
1449 DueTime.QuadPart = Timer->DueTime.QuadPart; \
1450 \
1451 /* Pointer to timer block */ \
1452 WaitBlock->NextWaitBlock = TimerBlock; \
1453 TimerBlock->NextWaitBlock = WaitBlock; \
1454 \
1455 /* Link the timer to this Wait Block */ \
1456 Timer->Header.WaitListHead.Flink = &TimerBlock->WaitListEntry; \
1457 Timer->Header.WaitListHead.Blink = &TimerBlock->WaitListEntry; \
1458 } \
1459 else \
1460 { \
1461 /* No timer block, just ourselves */ \
1462 WaitBlock->NextWaitBlock = WaitBlock; \
1463 } \
1464 \
1465 /* Set wait settings */ \
1466 Thread->Alertable = FALSE; \
1467 Thread->WaitMode = WaitMode; \
1468 Thread->WaitReason = WrQueue; \
1469 \
1470 /* Check if we can swap the thread's stack */ \
1471 Thread->WaitListEntry.Flink = NULL; \
1472 Swappable = KiCheckThreadStackSwap(Thread, WaitMode); \
1473 \
1474 /* Set the wait time */ \
1475 Thread->WaitTime = KeTickCount.LowPart;
1476
1477 //
1478 // Unwaits a Thread
1479 //
1480 FORCEINLINE
1481 VOID
1482 KxUnwaitThread(IN DISPATCHER_HEADER *Object,
1483 IN KPRIORITY Increment)
1484 {
1485 PLIST_ENTRY WaitEntry, WaitList;
1486 PKWAIT_BLOCK WaitBlock;
1487 PKTHREAD WaitThread;
1488 ULONG WaitKey;
1489
1490 /* Loop the Wait Entries */
1491 WaitList = &Object->WaitListHead;
1492 ASSERT(IsListEmpty(&Object->WaitListHead) == FALSE);
1493 WaitEntry = WaitList->Flink;
1494 do
1495 {
1496 /* Get the current wait block */
1497 WaitBlock = CONTAINING_RECORD(WaitEntry, KWAIT_BLOCK, WaitListEntry);
1498
1499 /* Get the waiting thread */
1500 WaitThread = WaitBlock->Thread;
1501
1502 /* Check the current Wait Mode */
1503 if (WaitBlock->WaitType == WaitAny)
1504 {
1505 /* Use the actual wait key */
1506 WaitKey = WaitBlock->WaitKey;
1507 }
1508 else
1509 {
1510 /* Otherwise, use STATUS_KERNEL_APC */
1511 WaitKey = STATUS_KERNEL_APC;
1512 }
1513
1514 /* Unwait the thread */
1515 KiUnwaitThread(WaitThread, WaitKey, Increment);
1516
1517 /* Next entry */
1518 WaitEntry = WaitList->Flink;
1519 } while (WaitEntry != WaitList);
1520 }
1521
1522 //
1523 // Unwaits a Thread waiting on an event
1524 //
1525 FORCEINLINE
1526 VOID
1527 KxUnwaitThreadForEvent(IN PKEVENT Event,
1528 IN KPRIORITY Increment)
1529 {
1530 PLIST_ENTRY WaitEntry, WaitList;
1531 PKWAIT_BLOCK WaitBlock;
1532 PKTHREAD WaitThread;
1533
1534 /* Loop the Wait Entries */
1535 WaitList = &Event->Header.WaitListHead;
1536 ASSERT(IsListEmpty(&Event->Header.WaitListHead) == FALSE);
1537 WaitEntry = WaitList->Flink;
1538 do
1539 {
1540 /* Get the current wait block */
1541 WaitBlock = CONTAINING_RECORD(WaitEntry, KWAIT_BLOCK, WaitListEntry);
1542
1543 /* Get the waiting thread */
1544 WaitThread = WaitBlock->Thread;
1545
1546 /* Check the current Wait Mode */
1547 if (WaitBlock->WaitType == WaitAny)
1548 {
1549 /* Un-signal it */
1550 Event->Header.SignalState = 0;
1551
1552 /* Un-signal the event and unwait the thread */
1553 KiUnwaitThread(WaitThread, WaitBlock->WaitKey, Increment);
1554 break;
1555 }
1556
1557 /* Unwait the thread with STATUS_KERNEL_APC */
1558 KiUnwaitThread(WaitThread, STATUS_KERNEL_APC, Increment);
1559
1560 /* Next entry */
1561 WaitEntry = WaitList->Flink;
1562 } while (WaitEntry != WaitList);
1563 }
1564
1565 //
1566 // This routine queues a thread that is ready on the PRCB's ready lists.
1567 // If this thread cannot currently run on this CPU, then the thread is
1568 // added to the deferred ready list instead.
1569 //
1570 // This routine must be entered with the PRCB lock held and it will exit
1571 // with the PRCB lock released!
1572 //
1573 FORCEINLINE
1574 VOID
1575 KxQueueReadyThread(IN PKTHREAD Thread,
1576 IN PKPRCB Prcb)
1577 {
1578 BOOLEAN Preempted;
1579 KPRIORITY Priority;
1580
1581 /* Sanity checks */
1582 ASSERT(Prcb == KeGetCurrentPrcb());
1583 ASSERT(Thread->State == Running);
1584 ASSERT(Thread->NextProcessor == Prcb->Number);
1585
1586 /* Check if this thread is allowed to run in this CPU */
1587 #ifdef CONFIG_SMP
1588 if ((Thread->Affinity) & (Prcb->SetMember))
1589 #else
1590 if (TRUE)
1591 #endif
1592 {
1593 /* Set thread ready for execution */
1594 Thread->State = Ready;
1595
1596 /* Save current priority and if someone had pre-empted it */
1597 Priority = Thread->Priority;
1598 Preempted = Thread->Preempted;
1599
1600 /* Clear the preempted flag and set the wait time */
1601 Thread->Preempted = FALSE;
1602 Thread->WaitTime = KeTickCount.LowPart;
1603
1604 /* Sanity check */
1605 ASSERT((Priority >= 0) && (Priority <= HIGH_PRIORITY));
1606
1607 /* Insert this thread in the appropriate order */
1608 Preempted ? InsertHeadList(&Prcb->DispatcherReadyListHead[Priority],
1609 &Thread->WaitListEntry) :
1610 InsertTailList(&Prcb->DispatcherReadyListHead[Priority],
1611 &Thread->WaitListEntry);
1612
1613 /* Update the ready summary */
1614 Prcb->ReadySummary |= PRIORITY_MASK(Priority);
1615
1616 /* Sanity check */
1617 ASSERT(Priority == Thread->Priority);
1618
1619 /* Release the PRCB lock */
1620 KiReleasePrcbLock(Prcb);
1621 }
1622 else
1623 {
1624 /* Otherwise, prepare this thread to be deferred */
1625 Thread->State = DeferredReady;
1626 Thread->DeferredProcessor = Prcb->Number;
1627
1628 /* Release the lock and defer scheduling */
1629 KiReleasePrcbLock(Prcb);
1630 KiDeferredReadyThread(Thread);
1631 }
1632 }
1633
1634 //
1635 // This routine scans for an appropriate ready thread to select at the
1636 // given priority and for the given CPU.
1637 //
1638 FORCEINLINE
1639 PKTHREAD
1640 KiSelectReadyThread(IN KPRIORITY Priority,
1641 IN PKPRCB Prcb)
1642 {
1643 ULONG PrioritySet;
1644 LONG HighPriority;
1645 PLIST_ENTRY ListEntry;
1646 PKTHREAD Thread = NULL;
1647
1648 /* Save the current mask and get the priority set for the CPU */
1649 PrioritySet = Prcb->ReadySummary >> Priority;
1650 if (!PrioritySet) goto Quickie;
1651
1652 /* Get the highest priority possible */
1653 BitScanReverse((PULONG)&HighPriority, PrioritySet);
1654 ASSERT((PrioritySet & PRIORITY_MASK(HighPriority)) != 0);
1655 HighPriority += Priority;
1656
1657 /* Make sure the list isn't empty at the highest priority */
1658 ASSERT(IsListEmpty(&Prcb->DispatcherReadyListHead[HighPriority]) == FALSE);
1659
1660 /* Get the first thread on the list */
1661 ListEntry = Prcb->DispatcherReadyListHead[HighPriority].Flink;
1662 Thread = CONTAINING_RECORD(ListEntry, KTHREAD, WaitListEntry);
1663
1664 /* Make sure this thread is here for a reason */
1665 ASSERT(HighPriority == Thread->Priority);
1666 ASSERT(Thread->Affinity & AFFINITY_MASK(Prcb->Number));
1667 ASSERT(Thread->NextProcessor == Prcb->Number);
1668
1669 /* Remove it from the list */
1670 if (RemoveEntryList(&Thread->WaitListEntry))
1671 {
1672 /* The list is empty now, reset the ready summary */
1673 Prcb->ReadySummary ^= PRIORITY_MASK(HighPriority);
1674 }
1675
1676 /* Sanity check and return the thread */
1677 Quickie:
1678 ASSERT((Thread == NULL) ||
1679 (Thread->BasePriority == 0) ||
1680 (Thread->Priority != 0));
1681 return Thread;
1682 }
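//
// Worked example (illustrative): ReadySummary keeps one bit per non-empty
// ready list. With ReadySummary == 0x900 (threads queued at priorities 8 and
// 11) and Priority == 8, PrioritySet becomes 0x9; BitScanReverse returns 3,
// so HighPriority ends up as 11 and the scan dequeues from the priority 11
// list first.
//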
1683
1684 //
1685 // This routine computes the new priority for a thread. It is only valid for
1686 // threads with priorities in the dynamic priority range.
1687 //
1688 FORCEINLINE
1689 SCHAR
1690 KiComputeNewPriority(IN PKTHREAD Thread,
1691 IN SCHAR Adjustment)
1692 {
1693 SCHAR Priority;
1694
1695 /* Priority sanity checks */
1696 ASSERT((Thread->PriorityDecrement >= 0) &&
1697 (Thread->PriorityDecrement <= Thread->Priority));
1698 ASSERT((Thread->Priority < LOW_REALTIME_PRIORITY) ?
1699 TRUE : (Thread->PriorityDecrement == 0));
1700
1701 /* Get the current priority */
1702 Priority = Thread->Priority;
1703 if (Priority < LOW_REALTIME_PRIORITY)
1704 {
1705 /* Decrease priority by the priority decrement */
1706 Priority -= (Thread->PriorityDecrement + Adjustment);
1707
1708 /* Don't go out of bounds */
1709 if (Priority < Thread->BasePriority) Priority = Thread->BasePriority;
1710
1711 /* Reset the priority decrement */
1712 Thread->PriorityDecrement = 0;
1713 }
1714
1715 /* Sanity check */
1716 ASSERT((Thread->BasePriority == 0) || (Priority != 0));
1717
1718 /* Return the new priority */
1719 return Priority;
1720 }
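//
// Worked example (illustrative): a dynamic-priority thread currently at 14
// with PriorityDecrement == 3 and an Adjustment of 1 comes out at 10; if its
// BasePriority were 12, the result would instead be clamped up to 12, and in
// either case PriorityDecrement is reset to zero.
//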
1721
1722 //
1723 // Guarded Mutex Routines
1724 //
1725 FORCEINLINE
1726 VOID
1727 _KeInitializeGuardedMutex(OUT PKGUARDED_MUTEX GuardedMutex)
1728 {
1729 /* Setup the Initial Data */
1730 GuardedMutex->Count = GM_LOCK_BIT;
1731 GuardedMutex->Owner = NULL;
1732 GuardedMutex->Contention = 0;
1733
1734 /* Initialize the Wait Gate */
1735 KeInitializeGate(&GuardedMutex->Gate);
1736 }
1737
1738 FORCEINLINE
1739 VOID
1740 _KeAcquireGuardedMutexUnsafe(IN OUT PKGUARDED_MUTEX GuardedMutex)
1741 {
1742 PKTHREAD Thread = KeGetCurrentThread();
1743
1744 /* Sanity checks */
1745 ASSERT((KeGetCurrentIrql() == APC_LEVEL) ||
1746 (Thread->SpecialApcDisable < 0) ||
1747 (Thread->Teb == NULL) ||
1748 (Thread->Teb >= (PTEB)MM_SYSTEM_RANGE_START));
1749 ASSERT(GuardedMutex->Owner != Thread);
1750
1751 /* Remove the lock */
1752 if (!InterlockedBitTestAndReset(&GuardedMutex->Count, GM_LOCK_BIT_V))
1753 {
1754 /* The Guarded Mutex was already locked, enter contended case */
1755 KiAcquireGuardedMutex(GuardedMutex);
1756 }
1757
1758 /* Set the Owner */
1759 GuardedMutex->Owner = Thread;
1760 }
1761
1762 FORCEINLINE
1763 VOID
1764 _KeReleaseGuardedMutexUnsafe(IN OUT PKGUARDED_MUTEX GuardedMutex)
1765 {
1766 LONG OldValue, NewValue;
1767
1768 /* Sanity checks */
1769 ASSERT((KeGetCurrentIrql() == APC_LEVEL) ||
1770 (KeGetCurrentThread()->SpecialApcDisable < 0) ||
1771 (KeGetCurrentThread()->Teb == NULL) ||
1772 (KeGetCurrentThread()->Teb >= (PTEB)MM_SYSTEM_RANGE_START));
1773 ASSERT(GuardedMutex->Owner == KeGetCurrentThread());
1774
1775 /* Destroy the Owner */
1776 GuardedMutex->Owner = NULL;
1777
1778 /* Add the Lock Bit */
1779 OldValue = InterlockedExchangeAdd(&GuardedMutex->Count, GM_LOCK_BIT);
1780 ASSERT((OldValue & GM_LOCK_BIT) == 0);
1781
1782 /* Check if it was already locked, but not woken */
1783 if ((OldValue) && !(OldValue & GM_LOCK_WAITER_WOKEN))
1784 {
1785 /* Update OldValue to what it should be now */
1786 OldValue += GM_LOCK_BIT;
1787
1788 /* The mutex will be woken, minus one waiter */
1789 NewValue = OldValue + GM_LOCK_WAITER_WOKEN -
1790 GM_LOCK_WAITER_INC;
1791
1792 /* Remove the Woken bit */
1793 if (InterlockedCompareExchange(&GuardedMutex->Count,
1794 NewValue,
1795 OldValue) == OldValue)
1796 {
1797 /* Signal the Gate */
1798 KeSignalGateBoostPriority(&GuardedMutex->Gate);
1799 }
1800 }
1801 }
1802
1803 FORCEINLINE
1804 VOID
1805 _KeAcquireGuardedMutex(IN PKGUARDED_MUTEX GuardedMutex)
1806 {
1807 PKTHREAD Thread = KeGetCurrentThread();
1808
1809 /* Sanity checks */
1810 ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
1811 ASSERT(GuardedMutex->Owner != Thread);
1812
1813 /* Disable Special APCs */
1814 KeEnterGuardedRegion();
1815
1816 /* Remove the lock */
1817 if (!InterlockedBitTestAndReset(&GuardedMutex->Count, GM_LOCK_BIT_V))
1818 {
1819 /* The Guarded Mutex was already locked, enter contended case */
1820 KiAcquireGuardedMutex(GuardedMutex);
1821 }
1822
1823 /* Set the Owner and Special APC Disable state */
1824 GuardedMutex->Owner = Thread;
1825 GuardedMutex->SpecialApcDisable = Thread->SpecialApcDisable;
1826 }
1827
1828 FORCEINLINE
1829 VOID
1830 _KeReleaseGuardedMutex(IN OUT PKGUARDED_MUTEX GuardedMutex)
1831 {
1832 LONG OldValue, NewValue;
1833
1834 /* Sanity checks */
1835 ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
1836 ASSERT(GuardedMutex->Owner == KeGetCurrentThread());
1837 ASSERT(KeGetCurrentThread()->SpecialApcDisable ==
1838 GuardedMutex->SpecialApcDisable);
1839
1840 /* Destroy the Owner */
1841 GuardedMutex->Owner = NULL;
1842
1843 /* Add the Lock Bit */
1844 OldValue = InterlockedExchangeAdd(&GuardedMutex->Count, GM_LOCK_BIT);
1845 ASSERT((OldValue & GM_LOCK_BIT) == 0);
1846
1847 /* Check if it was already locked, but not woken */
1848 if ((OldValue) && !(OldValue & GM_LOCK_WAITER_WOKEN))
1849 {
1850 /* Update OldValue to what it should be now */
1851 OldValue += GM_LOCK_BIT;
1852
1853 /* The mutex will be woken, minus one waiter */
1854 NewValue = OldValue + GM_LOCK_WAITER_WOKEN -
1855 GM_LOCK_WAITER_INC;
1856
1857 /* Remove the Woken bit */
1858 if (InterlockedCompareExchange(&GuardedMutex->Count,
1859 NewValue,
1860 OldValue) == OldValue)
1861 {
1862 /* Signal the Gate */
1863 KeSignalGateBoostPriority(&GuardedMutex->Gate);
1864 }
1865 }
1866
1867 /* Re-enable APCs */
1868 KeLeaveGuardedRegion();
1869 }
1870
1871 FORCEINLINE
1872 BOOLEAN
1873 _KeTryToAcquireGuardedMutex(IN OUT PKGUARDED_MUTEX GuardedMutex)
1874 {
1875 PKTHREAD Thread = KeGetCurrentThread();
1876
1877 /* Block APCs */
1878 KeEnterGuardedRegion();
1879
1880 /* Remove the lock */
1881 if (!InterlockedBitTestAndReset(&GuardedMutex->Count, GM_LOCK_BIT_V))
1882 {
1883 /* Re-enable APCs */
1884 KeLeaveGuardedRegion();
1885 YieldProcessor();
1886
1887 /* Return failure */
1888 return FALSE;
1889 }
1890
1891 /* Set the Owner and APC State */
1892 GuardedMutex->Owner = Thread;
1893 GuardedMutex->SpecialApcDisable = Thread->SpecialApcDisable;
1894 return TRUE;
1895 }
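//
// Usage sketch (illustrative, not part of the original header): the inlines
// above provide the guarded-mutex primitives behind the exported
// KeInitializeGuardedMutex/KeAcquireGuardedMutex/KeReleaseGuardedMutex API.
// A typical caller pairs acquire and release around the shared state; the
// structure and routine below are hypothetical.
//
#if 0
typedef struct _MY_CACHE
{
    KGUARDED_MUTEX Lock;
    LIST_ENTRY EntryList;
} MY_CACHE, *PMY_CACHE;

VOID
MyCacheInsert(IN PMY_CACHE Cache,
              IN PLIST_ENTRY Entry)
{
    /* All APCs are blocked while the list is inconsistent */
    KeAcquireGuardedMutex(&Cache->Lock);
    InsertTailList(&Cache->EntryList, Entry);
    KeReleaseGuardedMutex(&Cache->Lock);
}
#endif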