1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: GPL - See COPYING in the top level directory
4 * FILE: ntoskrnl/include/internal/ke_x.h
5 * PURPOSE: Internal Inlined Functions for the Kernel
6 * PROGRAMMERS: Alex Ionescu (alex.ionescu@reactos.org)
7 */
8
9 //
10 // Thread Dispatcher Header DebugActive Mask
11 //
12 #define DR_MASK(x) (1 << (x))
13 #define DR_ACTIVE_MASK 0x10
14 #define DR_REG_MASK 0x4F
15
16 #ifdef _M_IX86
17 //
18 // Sanitizes a selector
19 //
20 FORCEINLINE
21 ULONG
22 Ke386SanitizeSeg(IN ULONG Cs,
23 IN KPROCESSOR_MODE Mode)
24 {
25 //
26 // Check if we're in kernel-mode, and force CPL 0 if so.
27 // Otherwise, force CPL 3.
28 //
29 return ((Mode == KernelMode) ?
30 (Cs & (0xFFFF & ~RPL_MASK)) :
31 (RPL_MASK | (Cs & 0xFFFF)));
32 }
33
34 //
35 // Sanitizes EFLAGS
36 //
37 FORCEINLINE
38 ULONG
39 Ke386SanitizeFlags(IN ULONG Eflags,
40 IN KPROCESSOR_MODE Mode)
41 {
42 //
43 // If we're in kernel-mode, sanitize EFLAGS but preserve the interrupt flag.
44 // Otherwise, sanitize EFLAGS and force the interrupt flag on.
45 //
46 return ((Mode == KernelMode) ?
47 (Eflags & (EFLAGS_USER_SANITIZE | EFLAGS_INTERRUPT_MASK)) :
48 (EFLAGS_INTERRUPT_MASK | (Eflags & EFLAGS_USER_SANITIZE)));
49 }
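//
// Usage sketch (illustrative, not part of the original header): a routine that
// copies a user-supplied CONTEXT back into a trap frame would sanitize the
// selector and flags first. "Context", "TrapFrame" and "PreviousMode" are
// assumed locals of a hypothetical caller (KeContextToTrapFrame-style code).
//
//     TrapFrame->SegCs = Ke386SanitizeSeg(Context->SegCs, PreviousMode);
//     TrapFrame->EFlags = Ke386SanitizeFlags(Context->EFlags, PreviousMode);
//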
50
51 //
52 // Gets a DR register from a CONTEXT structure
53 //
54 FORCEINLINE
55 PVOID
56 KiDrFromContext(IN ULONG Dr,
57 IN PCONTEXT Context)
58 {
59 return *(PVOID*)((ULONG_PTR)Context + KiDebugRegisterContextOffsets[Dr]);
60 }
61
62 //
63 // Gets a DR register from a KTRAP_FRAME structure
64 //
65 FORCEINLINE
66 PVOID*
67 KiDrFromTrapFrame(IN ULONG Dr,
68 IN PKTRAP_FRAME TrapFrame)
69 {
70 return (PVOID*)((ULONG_PTR)TrapFrame + KiDebugRegisterTrapOffsets[Dr]);
71 }
72
73 //
74 // Sanitizes a debug register address
75 //
76 FORCEINLINE
77 PVOID
78 Ke386SanitizeDr(IN PVOID DrAddress,
79 IN KPROCESSOR_MODE Mode)
80 {
81 //
82 // Check if we're in kernel-mode, and return the address directly if so.
83 // Otherwise, make sure it's not inside the kernel-mode address space.
84 // If it is, then clear the address.
85 //
86 return ((Mode == KernelMode) ? DrAddress :
87 (DrAddress <= MM_HIGHEST_USER_ADDRESS) ? DrAddress : 0);
88 }
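//
// Usage sketch (illustrative only): when debug registers are copied from a
// user-supplied CONTEXT, the value is fetched by index and then sanitized so
// user mode cannot plant a kernel-space breakpoint address. "Dr", "Context",
// "TrapFrame" and "PreviousMode" are assumed locals of a hypothetical caller.
//
//     *KiDrFromTrapFrame(Dr, TrapFrame) =
//         Ke386SanitizeDr(KiDrFromContext(Dr, Context), PreviousMode);
//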
89 #endif /* _M_IX86 */
90
91 #ifndef _M_ARM
92 PRKTHREAD
93 FORCEINLINE
94 KeGetCurrentThread(VOID)
95 {
96 #ifdef _M_IX86
97 /* Return the current thread */
98 return ((PKIPCR)KeGetPcr())->PrcbData.CurrentThread;
99 #elif defined (_M_AMD64)
100 return (PRKTHREAD)__readgsqword(FIELD_OFFSET(KIPCR, Prcb.CurrentThread));
101 #else
102 PKPRCB Prcb = KeGetCurrentPrcb();
103 return Prcb->CurrentThread;
104 #endif
105 }
106
107 UCHAR
108 FORCEINLINE
109 KeGetPreviousMode(VOID)
110 {
111 /* Return the current mode */
112 return KeGetCurrentThread()->PreviousMode;
113 }
114 #endif
115
116 VOID
117 FORCEINLINE
118 KeFlushProcessTb(VOID)
119 {
120 /* Flush the TLB by resetting CR3 */
121 #ifdef _M_PPC
122 __asm__("sync\n\tisync\n\t");
123 #elif _M_ARM
124 //
125 // We need to implement this!
126 //
127 ASSERTMSG("Need ARM flush routine\n", FALSE);
128 #else
129 __writecr3(__readcr3());
130 #endif
131 }
132
133 //
134 // Enters a Guarded Region
135 //
136 #define KeEnterGuardedRegion() \
137 { \
138 PKTHREAD _Thread = KeGetCurrentThread(); \
139 \
140 /* Sanity checks */ \
141 ASSERT(KeGetCurrentIrql() <= APC_LEVEL); \
142 ASSERT(_Thread == KeGetCurrentThread()); \
143 ASSERT((_Thread->SpecialApcDisable <= 0) && \
144 (_Thread->SpecialApcDisable != -32768)); \
145 \
146 /* Disable Special APCs */ \
147 _Thread->SpecialApcDisable--; \
148 }
149
150 //
151 // Leaves a Guarded Region
152 //
153 #define KeLeaveGuardedRegion() \
154 { \
155 PKTHREAD _Thread = KeGetCurrentThread(); \
156 \
157 /* Sanity checks */ \
158 ASSERT(KeGetCurrentIrql() <= APC_LEVEL); \
159 ASSERT(_Thread == KeGetCurrentThread()); \
160 ASSERT(_Thread->SpecialApcDisable < 0); \
161 \
162 /* Leave region and check if APCs are OK now */ \
163 if (!(++_Thread->SpecialApcDisable)) \
164 { \
165 /* Check for Kernel APCs on the list */ \
166 if (!IsListEmpty(&_Thread->ApcState. \
167 ApcListHead[KernelMode])) \
168 { \
169 /* Check for APC Delivery */ \
170 KiCheckForKernelApcDelivery(); \
171 } \
172 } \
173 }
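//
// Usage sketch (illustrative only): guarded regions are entered and left in
// strictly nested pairs around code that must not be interrupted by any kernel
// APC, including special kernel APCs. The body below is hypothetical.
//
//     KeEnterGuardedRegion();
//     /* ... touch state that no kernel APC may preempt ... */
//     KeLeaveGuardedRegion();
//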
174
175 //
176 // TODO: Guarded Mutex Routines
177 //
178
179 //
180 // Enters a Critical Region
181 //
182 #define KeEnterCriticalRegion() \
183 { \
184 PKTHREAD _Thread = KeGetCurrentThread(); \
185 \
186 /* Sanity checks */ \
187 ASSERT(_Thread == KeGetCurrentThread()); \
188 ASSERT((_Thread->KernelApcDisable <= 0) && \
189 (_Thread->KernelApcDisable != -32768)); \
190 \
191 /* Disable Kernel APCs */ \
192 _Thread->KernelApcDisable--; \
193 }
194
195 //
196 // Leaves a Critical Region
197 //
198 #define KeLeaveCriticalRegion() \
199 { \
200 PKTHREAD _Thread = KeGetCurrentThread(); \
201 \
202 /* Sanity checks */ \
203 ASSERT(_Thread == KeGetCurrentThread()); \
204 ASSERT(_Thread->KernelApcDisable < 0); \
205 \
206 /* Enable Kernel APCs */ \
207 _Thread->KernelApcDisable++; \
208 \
209 /* Check if Kernel APCs are now enabled */ \
210 if (!(_Thread->KernelApcDisable)) \
211 { \
212 /* Check if we need to request an APC Delivery */ \
213 if (!(IsListEmpty(&_Thread->ApcState.ApcListHead[KernelMode])) && \
214 !(_Thread->SpecialApcDisable)) \
215 { \
216 /* Check for the right environment */ \
217 KiCheckForKernelApcDelivery(); \
218 } \
219 } \
220 }
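//
// Usage sketch (illustrative only): critical regions only disable normal
// kernel APCs (special kernel APCs are still delivered), so they are the
// lighter-weight choice for protecting resources that a normal kernel APC
// routine might also acquire. The body below is hypothetical.
//
//     KeEnterCriticalRegion();
//     /* ... acquire a resource a normal kernel APC could also take ... */
//     KeLeaveCriticalRegion();
//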
221
222 #ifndef CONFIG_SMP
223 //
224 // Spinlock Acquire at IRQL >= DISPATCH_LEVEL
225 //
226 FORCEINLINE
227 VOID
228 KxAcquireSpinLock(IN PKSPIN_LOCK SpinLock)
229 {
230 /* On UP builds, spinlocks don't exist at IRQL >= DISPATCH */
231 UNREFERENCED_PARAMETER(SpinLock);
232 }
233
234 //
235 // Spinlock Release at IRQL >= DISPATCH_LEVEL
236 //
237 FORCEINLINE
238 VOID
239 KxReleaseSpinLock(IN PKSPIN_LOCK SpinLock)
240 {
241 /* On UP builds, spinlocks don't exist at IRQL >= DISPATCH */
242 UNREFERENCED_PARAMETER(SpinLock);
243 }
244
245 //
246 // This routine protects against multiple CPU acquires; it's meaningless on UP.
247 //
248 VOID
249 FORCEINLINE
250 KiAcquireDispatcherObject(IN DISPATCHER_HEADER* Object)
251 {
252 UNREFERENCED_PARAMETER(Object);
253 }
254
255 //
256 // This routine protects against multiple CPU acquires; it's meaningless on UP.
257 //
258 VOID
259 FORCEINLINE
260 KiReleaseDispatcherObject(IN DISPATCHER_HEADER* Object)
261 {
262 UNREFERENCED_PARAMETER(Object);
263 }
264
265 KIRQL
266 FORCEINLINE
267 KiAcquireDispatcherLock(VOID)
268 {
269 /* Raise to DPC level */
270 return KeRaiseIrqlToDpcLevel();
271 }
272
273 VOID
274 FORCEINLINE
275 KiReleaseDispatcherLock(IN KIRQL OldIrql)
276 {
277 /* Just exit the dispatcher */
278 KiExitDispatcher(OldIrql);
279 }
280
281 VOID
282 FORCEINLINE
283 KiAcquireDispatcherLockAtDpcLevel(VOID)
284 {
285 /* This is a no-op at DPC Level for UP systems */
286 return;
287 }
288
289 VOID
290 FORCEINLINE
291 KiReleaseDispatcherLockFromDpcLevel(VOID)
292 {
293 /* This is a no-op at DPC Level for UP systems */
294 return;
295 }
296
297 //
298 // This routine makes the thread deferred ready on the boot CPU.
299 //
300 FORCEINLINE
301 VOID
302 KiInsertDeferredReadyList(IN PKTHREAD Thread)
303 {
304 /* Set the thread to deferred state and boot CPU */
305 Thread->State = DeferredReady;
306 Thread->DeferredProcessor = 0;
307
308 /* Make the thread ready immediately */
309 KiDeferredReadyThread(Thread);
310 }
311
312 FORCEINLINE
313 VOID
314 KiRescheduleThread(IN BOOLEAN NewThread,
315 IN ULONG Cpu)
316 {
317 /* This is meaningless on UP systems */
318 UNREFERENCED_PARAMETER(NewThread);
319 UNREFERENCED_PARAMETER(Cpu);
320 }
321
322 //
323 // This routine protects against multiple CPU acquires; it's meaningless on UP.
324 //
325 FORCEINLINE
326 VOID
327 KiSetThreadSwapBusy(IN PKTHREAD Thread)
328 {
329 UNREFERENCED_PARAMETER(Thread);
330 }
331
332 //
333 // This routine protects against multiple CPU acquires; it's meaningless on UP.
334 //
335 FORCEINLINE
336 VOID
337 KiAcquirePrcbLock(IN PKPRCB Prcb)
338 {
339 UNREFERENCED_PARAMETER(Prcb);
340 }
341
342 //
343 // This routine protects against multiple CPU acquires; it's meaningless on UP.
344 //
345 FORCEINLINE
346 VOID
347 KiReleasePrcbLock(IN PKPRCB Prcb)
348 {
349 UNREFERENCED_PARAMETER(Prcb);
350 }
351
352 //
353 // This routine protects against multiple CPU acquires; it's meaningless on UP.
354 //
355 FORCEINLINE
356 VOID
357 KiAcquireThreadLock(IN PKTHREAD Thread)
358 {
359 UNREFERENCED_PARAMETER(Thread);
360 }
361
362 //
363 // This routine protects against multiple CPU acquires; it's meaningless on UP.
364 //
365 FORCEINLINE
366 VOID
367 KiReleaseThreadLock(IN PKTHREAD Thread)
368 {
369 UNREFERENCED_PARAMETER(Thread);
370 }
371
372 //
373 // This routine protects against multiple CPU acquires; it's meaningless on UP.
374 //
375 FORCEINLINE
376 BOOLEAN
377 KiTryThreadLock(IN PKTHREAD Thread)
378 {
379 UNREFERENCED_PARAMETER(Thread);
380 return FALSE;
381 }
382
383 FORCEINLINE
384 VOID
385 KiCheckDeferredReadyList(IN PKPRCB Prcb)
386 {
387 /* There are no deferred ready lists on UP systems */
388 UNREFERENCED_PARAMETER(Prcb);
389 }
390
391 FORCEINLINE
392 VOID
393 KiRundownThread(IN PKTHREAD Thread)
394 {
395 #if defined(_M_IX86)
396 /* Check if this is the NPX Thread */
397 if (KeGetCurrentPrcb()->NpxThread == Thread)
398 {
399 /* Clear it */
400 KeGetCurrentPrcb()->NpxThread = NULL;
401 KeArchFnInit();
402 }
403 #endif
404 }
405
406 FORCEINLINE
407 VOID
408 KiRequestApcInterrupt(IN BOOLEAN NeedApc,
409 IN UCHAR Processor)
410 {
411 /* We deliver instantly on UP */
412 UNREFERENCED_PARAMETER(NeedApc);
413 UNREFERENCED_PARAMETER(Processor);
414 }
415
416 FORCEINLINE
417 PKSPIN_LOCK_QUEUE
418 KiAcquireTimerLock(IN ULONG Hand)
419 {
420 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
421
422 /* Nothing to do on UP */
423 UNREFERENCED_PARAMETER(Hand);
424 return NULL;
425 }
426
427 FORCEINLINE
428 VOID
429 KiReleaseTimerLock(IN PKSPIN_LOCK_QUEUE LockQueue)
430 {
431 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
432
433 /* Nothing to do on UP */
434 UNREFERENCED_PARAMETER(LockQueue);
435 }
436
437 #else
438
439 //
440 // Spinlock Acquisition at IRQL >= DISPATCH_LEVEL
441 //
442 FORCEINLINE
443 VOID
444 KxAcquireSpinLock(IN PKSPIN_LOCK SpinLock)
445 {
446 for (;;)
447 {
448 /* Try to acquire it */
449 if (InterlockedBitTestAndSet((PLONG)SpinLock, 0))
450 {
451 /* Value changed... wait until it's locked */
452 while (*(volatile KSPIN_LOCK *)SpinLock == 1)
453 {
454 #ifdef DBG
455 /* On debug builds, we use a much slower but useful routine */
456 //Kii386SpinOnSpinLock(SpinLock, 5);
457
458 /* FIXME: Do normal yield for now */
459 YieldProcessor();
460 #else
461 /* Otherwise, just yield and keep looping */
462 YieldProcessor();
463 #endif
464 }
465 }
466 else
467 {
468 #ifdef DBG
469 /* On debug builds, we OR in the KTHREAD */
470 *SpinLock = (KSPIN_LOCK)KeGetCurrentThread() | 1;
471 #endif
472 /* All is well, break out */
473 break;
474 }
475 }
476 }
477
478 //
479 // Spinlock Release at IRQL >= DISPATCH_LEVEL
480 //
481 FORCEINLINE
482 VOID
483 KxReleaseSpinLock(IN PKSPIN_LOCK SpinLock)
484 {
485 #ifdef DBG
486 /* Make sure that the threads match */
487 if (((KSPIN_LOCK)KeGetCurrentThread() | 1) != *SpinLock)
488 {
489 /* They don't, bugcheck */
490 KeBugCheckEx(SPIN_LOCK_NOT_OWNED, (ULONG_PTR)SpinLock, 0, 0, 0);
491 }
492 #endif
493 /* Clear the lock */
494 InterlockedAnd((PLONG)SpinLock, 0);
495 }
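//
// Usage sketch (illustrative only): the Kx spinlock helpers neither raise nor
// lower IRQL, so the caller must already be at DISPATCH_LEVEL or above (for
// example, inside a higher-level KeAcquireSpinLock-style wrapper). "MyLock" is
// a hypothetical KSPIN_LOCK owned by the caller.
//
//     ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
//     KxAcquireSpinLock(&MyLock);
//     /* ... touch the data protected by MyLock ... */
//     KxReleaseSpinLock(&MyLock);
//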
496
497 VOID
498 FORCEINLINE
499 KiAcquireDispatcherObject(IN DISPATCHER_HEADER* Object)
500 {
501 LONG OldValue;
502
503 /* Make sure we're at a safe level to touch the lock */
504 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
505
506 /* Start acquire loop */
507 do
508 {
509 /* Loop until the other CPU releases it */
510 while (TRUE)
511 {
512 /* Check if it got released */
513 OldValue = Object->Lock;
514 if ((OldValue & KOBJECT_LOCK_BIT) == 0) break;
515
516 /* Let the CPU know that this is a loop */
517 YieldProcessor();
518 }
519
520 /* Try acquiring the lock now */
521 } while (InterlockedCompareExchange(&Object->Lock,
522 OldValue | KOBJECT_LOCK_BIT,
523 OldValue) != OldValue);
524 }
525
526 VOID
527 FORCEINLINE
528 KiReleaseDispatcherObject(IN DISPATCHER_HEADER* Object)
529 {
530 /* Make sure we're at a safe level to touch the lock */
531 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
532
533 /* Release it */
534 InterlockedAnd(&Object->Lock, ~KOBJECT_LOCK_BIT);
535 }
536
537 KIRQL
538 FORCEINLINE
539 KiAcquireDispatcherLock(VOID)
540 {
541 /* Raise to synchronization level and acquire the dispatcher lock */
542 return KeAcquireQueuedSpinLockRaiseToSynch(LockQueueDispatcherLock);
543 }
544
545 VOID
546 FORCEINLINE
547 KiReleaseDispatcherLock(IN KIRQL OldIrql)
548 {
549 /* First release the lock */
550 KeReleaseQueuedSpinLockFromDpcLevel(&KeGetCurrentPrcb()->
551 LockQueue[LockQueueDispatcherLock]);
552
553 /* Then exit the dispatcher */
554 KiExitDispatcher(OldIrql);
555 }
556
557 VOID
558 FORCEINLINE
559 KiAcquireDispatcherLockAtDpcLevel(VOID)
560 {
561 /* Acquire the dispatcher lock */
562 KeAcquireQueuedSpinLockAtDpcLevel(LockQueueDispatcherLock);
563 }
564
565 VOID
566 FORCEINLINE
567 KiReleaseDispatcherLockFromDpcLevel(VOID)
568 {
569 /* Release the dispatcher lock */
570 KeReleaseQueuedSpinLockFromDpcLevel(LockQueueDispatcherLock);
571 }
572
573 //
574 // This routine inserts a thread into the deferred ready list of the given CPU
575 //
576 FORCEINLINE
577 VOID
578 KiInsertDeferredReadyList(IN PKTHREAD Thread)
579 {
580 PKPRCB Prcb = KeGetCurrentPrcb();
581
582 /* Set the thread to deferred state and CPU */
583 Thread->State = DeferredReady;
584 Thread->DeferredProcessor = Prcb->Number;
585
586 /* Add it on the list */
587 PushEntryList(&Prcb->DeferredReadyListHead, &Thread->SwapListEntry);
588 }
589
590 FORCEINLINE
591 VOID
592 KiRescheduleThread(IN BOOLEAN NewThread,
593 IN ULONG Cpu)
594 {
595 /* Check if a new thread needs to be scheduled on a different CPU */
596 if ((NewThread) && !(KeGetPcr()->Number == Cpu))
597 {
598 /* Send an IPI to request delivery */
599 KiIpiSendRequest(AFFINITY_MASK(Cpu), IPI_DPC);
600 }
601 }
602
603 //
604 // This routine sets the current thread in a swap busy state, which ensures that
605 // nobody else tries to swap it concurrently.
606 //
607 FORCEINLINE
608 VOID
609 KiSetThreadSwapBusy(IN PKTHREAD Thread)
610 {
611 /* Make sure nobody already set it */
612 ASSERT(Thread->SwapBusy == FALSE);
613
614 /* Set it ourselves */
615 Thread->SwapBusy = TRUE;
616 }
617
618 //
619 // This routine acquires the PRCB lock so that only one caller can touch
620 // volatile PRCB data.
621 //
622 // Since this is a simple optimized spin-lock, it must only be acquired
623 // at dispatcher level or higher!
624 //
625 FORCEINLINE
626 VOID
627 KiAcquirePrcbLock(IN PKPRCB Prcb)
628 {
629 /* Make sure we're at a safe level to touch the PRCB lock */
630 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
631
632 /* Start acquire loop */
633 for (;;)
634 {
635 /* Acquire the lock and break out if we acquired it first */
636 if (!InterlockedExchange((PLONG)&Prcb->PrcbLock, 1)) break;
637
638 /* Loop until the other CPU releases it */
639 do
640 {
641 /* Let the CPU know that this is a loop */
642 YieldProcessor();
643 } while (Prcb->PrcbLock);
644 }
645 }
646
647 //
648 // This routine releases the PRCB lock so that other callers can touch
649 // volatile PRCB data.
650 //
651 // Since this is a simple optimized spin-lock, it must only be acquired
652 // at dispatcher level or higher!
653 //
654 FORCEINLINE
655 VOID
656 KiReleasePrcbLock(IN PKPRCB Prcb)
657 {
658 /* Make sure it's acquired! */
659 ASSERT(Prcb->PrcbLock != 0);
660
661 /* Release it */
662 InterlockedAnd((PLONG)&Prcb->PrcbLock, 0);
663 }
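//
// Usage sketch (illustrative only): the PRCB lock brackets updates of volatile
// per-CPU scheduler state such as the dispatcher ready lists and the ready
// summary; the caller must already be at DISPATCH_LEVEL or above.
//
//     PKPRCB Prcb = KeGetCurrentPrcb();
//     KiAcquirePrcbLock(Prcb);
//     /* ... update Prcb->DispatcherReadyListHead / Prcb->ReadySummary ... */
//     KiReleasePrcbLock(Prcb);
//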
664
665 //
666 // This routine acquires the thread lock so that only one caller can touch
667 // volatile thread data.
668 //
669 // Since this is a simple optimized spin-lock, it must only be acquired
670 // at dispatcher level or higher!
671 //
672 FORCEINLINE
673 VOID
674 KiAcquireThreadLock(IN PKTHREAD Thread)
675 {
676 /* Make sure we're at a safe level to touch the thread lock */
677 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
678
679 /* Start acquire loop */
680 for (;;)
681 {
682 /* Acquire the lock and break out if we acquired it first */
683 if (!InterlockedExchange((PLONG)&Thread->ThreadLock, 1)) break;
684
685 /* Loop until the other CPU releases it */
686 do
687 {
688 /* Let the CPU know that this is a loop */
689 YieldProcessor();
690 } while (Thread->ThreadLock);
691 }
692 }
693
694 //
695 // This routine releases the thread lock so that other callers can touch
696 // volatile thread data.
697 //
698 // Since this is a simple optimized spin-lock, it must only be acquired
699 // at dispatcher level or higher!
700 //
701 FORCEINLINE
702 VOID
703 KiReleaseThreadLock(IN PKTHREAD Thread)
704 {
705 /* Release it */
706 InterlockedAnd((PLONG)&Thread->ThreadLock, 0);
707 }
708
709 FORCEINLINE
710 BOOLEAN
711 KiTryThreadLock(IN PKTHREAD Thread)
712 {
713 LONG Value;
714
715 /* If the lock isn't acquired, return false */
716 if (!Thread->ThreadLock) return FALSE;
717
718 /* Otherwise, try to acquire it and check the result */
719 Value = 1;
720 Value = InterlockedExchange((PLONG)&Thread->ThreadLock, Value);
721
722 /* Return the lock state */
723 return (Value == TRUE);
724 }
725
726 FORCEINLINE
727 VOID
728 KiCheckDeferredReadyList(IN PKPRCB Prcb)
729 {
730 /* Scan the deferred ready lists if required */
731 if (Prcb->DeferredReadyListHead.Next) KiProcessDeferredReadyList(Prcb);
732 }
733
734 FORCEINLINE
735 VOID
736 KiRundownThread(IN PKTHREAD Thread)
737 {
738 #if defined(_M_IX86) || defined(_M_AMD64)
739 /* FIXME: TODO */
740 ASSERTMSG("Not yet implemented\n", FALSE);
741 #endif
742 }
743
744 FORCEINLINE
745 VOID
746 KiRequestApcInterrupt(IN BOOLEAN NeedApc,
747 IN UCHAR Processor)
748 {
749 /* Check if we need to request APC delivery */
750 if (NeedApc)
751 {
752 /* Check if it's on another CPU */
753 if (KeGetPcr()->Number != Processor)
754 {
755 /* Send an IPI to request delivery */
756 KiIpiSendRequest(AFFINITY_MASK(Processor), IPI_APC);
757 }
758 else
759 {
760 /* Request a software interrupt */
761 HalRequestSoftwareInterrupt(APC_LEVEL);
762 }
763 }
764 }
765
766 FORCEINLINE
767 PKSPIN_LOCK_QUEUE
768 KiAcquireTimerLock(IN ULONG Hand)
769 {
770 PKSPIN_LOCK_QUEUE LockQueue;
771 ULONG LockIndex;
772 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
773
774 /* Get the lock index */
775 LockIndex = Hand >> LOCK_QUEUE_TIMER_LOCK_SHIFT;
776 LockIndex &= (LOCK_QUEUE_TIMER_TABLE_LOCKS - 1);
777
778 /* Now get the lock */
779 LockQueue = &KeGetCurrentPrcb()->LockQueue[LockQueueTimerTableLock + LockIndex];
780
781 /* Acquire it and return */
782 KeAcquireQueuedSpinLockAtDpcLevel(LockQueue);
783 return LockQueue;
784 }
785
786 FORCEINLINE
787 VOID
788 KiReleaseTimerLock(IN PKSPIN_LOCK_QUEUE LockQueue)
789 {
790 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
791
792 /* Release the lock */
793 KeReleaseQueuedSpinLockFromDpcLevel(LockQueue);
794 }
795
796 #endif
797
798 FORCEINLINE
799 VOID
800 KiAcquireApcLock(IN PKTHREAD Thread,
801 IN PKLOCK_QUEUE_HANDLE Handle)
802 {
803 /* Acquire the lock and raise to synchronization level */
804 KeAcquireInStackQueuedSpinLockRaiseToSynch(&Thread->ApcQueueLock, Handle);
805 }
806
807 FORCEINLINE
808 VOID
809 KiAcquireApcLockAtDpcLevel(IN PKTHREAD Thread,
810 IN PKLOCK_QUEUE_HANDLE Handle)
811 {
812 /* Acquire the lock */
813 KeAcquireInStackQueuedSpinLockAtDpcLevel(&Thread->ApcQueueLock, Handle);
814 }
815
816 FORCEINLINE
817 VOID
818 KiAcquireApcLockAtApcLevel(IN PKTHREAD Thread,
819 IN PKLOCK_QUEUE_HANDLE Handle)
820 {
821 /* Acquire the lock */
822 KeAcquireInStackQueuedSpinLock(&Thread->ApcQueueLock, Handle);
823 }
824
825 FORCEINLINE
826 VOID
827 KiReleaseApcLock(IN PKLOCK_QUEUE_HANDLE Handle)
828 {
829 /* Release the lock */
830 KeReleaseInStackQueuedSpinLock(Handle);
831 }
832
833 FORCEINLINE
834 VOID
835 KiReleaseApcLockFromDpcLevel(IN PKLOCK_QUEUE_HANDLE Handle)
836 {
837 /* Release the lock */
838 KeReleaseInStackQueuedSpinLockFromDpcLevel(Handle);
839 }
840
841 FORCEINLINE
842 VOID
843 KiAcquireProcessLock(IN PKPROCESS Process,
844 IN PKLOCK_QUEUE_HANDLE Handle)
845 {
846 /* Acquire the lock and raise to synchronization level */
847 KeAcquireInStackQueuedSpinLockRaiseToSynch(&Process->ProcessLock, Handle);
848 }
849
850 FORCEINLINE
851 VOID
852 KiReleaseProcessLock(IN PKLOCK_QUEUE_HANDLE Handle)
853 {
854 /* Release the lock */
855 KeReleaseInStackQueuedSpinLock(Handle);
856 }
857
858 FORCEINLINE
859 VOID
860 KiReleaseProcessLockFromDpcLevel(IN PKLOCK_QUEUE_HANDLE Handle)
861 {
862 /* Release the lock */
863 KeReleaseInStackQueuedSpinLockFromDpcLevel(Handle);
864 }
865
866 FORCEINLINE
867 VOID
868 KiAcquireDeviceQueueLock(IN PKDEVICE_QUEUE DeviceQueue,
869 IN PKLOCK_QUEUE_HANDLE DeviceLock)
870 {
871 /* Check if we were called from a threaded DPC */
872 if (KeGetCurrentPrcb()->DpcThreadActive)
873 {
874 /* Lock the Queue, we're not at DPC level */
875 KeAcquireInStackQueuedSpinLock(&DeviceQueue->Lock, DeviceLock);
876 }
877 else
878 {
879 /* We must be at DPC level, acquire the lock safely */
880 ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
881 KeAcquireInStackQueuedSpinLockAtDpcLevel(&DeviceQueue->Lock,
882 DeviceLock);
883 }
884 }
885
886 FORCEINLINE
887 VOID
888 KiReleaseDeviceQueueLock(IN PKLOCK_QUEUE_HANDLE DeviceLock)
889 {
890 /* Check if we were called from a threaded DPC */
891 if (KeGetCurrentPrcb()->DpcThreadActive)
892 {
893 /* Unlock the Queue, we're not at DPC level */
894 KeReleaseInStackQueuedSpinLock(DeviceLock);
895 }
896 else
897 {
898 /* We must be at DPC level, release the lock safely */
899 ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
900 KeReleaseInStackQueuedSpinLockFromDpcLevel(DeviceLock);
901 }
902 }
903
904 //
905 // Satisfies the wait of any dispatcher object
906 //
907 #define KiSatisfyObjectWait(Object, Thread) \
908 { \
909 /* Special case for Mutants */ \
910 if ((Object)->Header.Type == MutantObject) \
911 { \
912 /* Decrease the Signal State */ \
913 (Object)->Header.SignalState--; \
914 \
915 /* Check if it's now non-signaled */ \
916 if (!(Object)->Header.SignalState) \
917 { \
918 /* Set the Owner Thread */ \
919 (Object)->OwnerThread = Thread; \
920 \
921 /* Disable APCs if needed */ \
922 Thread->KernelApcDisable = Thread->KernelApcDisable - \
923 (Object)->ApcDisable; \
924 \
925 /* Check if it's abandoned */ \
926 if ((Object)->Abandoned) \
927 { \
928 /* Unabandon it */ \
929 (Object)->Abandoned = FALSE; \
930 \
931 /* Return Status */ \
932 Thread->WaitStatus = STATUS_ABANDONED; \
933 } \
934 \
935 /* Insert it into the Mutant List */ \
936 InsertHeadList(Thread->MutantListHead.Blink, \
937 &(Object)->MutantListEntry); \
938 } \
939 } \
940 else if (((Object)->Header.Type & TIMER_OR_EVENT_TYPE) == \
941 EventSynchronizationObject) \
942 { \
943 /* Synchronization Timers and Events just get un-signaled */ \
944 (Object)->Header.SignalState = 0; \
945 } \
946 else if ((Object)->Header.Type == SemaphoreObject) \
947 { \
948 /* These ones can have multiple states, so we only decrease it */ \
949 (Object)->Header.SignalState--; \
950 } \
951 }
952
953 //
954 // Satisfies the wait of a mutant dispatcher object
955 //
956 #define KiSatisfyMutantWait(Object, Thread) \
957 { \
958 /* Decrease the Signal State */ \
959 (Object)->Header.SignalState--; \
960 \
961 /* Check if it's now non-signaled */ \
962 if (!(Object)->Header.SignalState) \
963 { \
964 /* Set the Owner Thread */ \
965 (Object)->OwnerThread = Thread; \
966 \
967 /* Disable APCs if needed */ \
968 Thread->KernelApcDisable = Thread->KernelApcDisable - \
969 (Object)->ApcDisable; \
970 \
971 /* Check if it's abandoned */ \
972 if ((Object)->Abandoned) \
973 { \
974 /* Unabandon it */ \
975 (Object)->Abandoned = FALSE; \
976 \
977 /* Return Status */ \
978 Thread->WaitStatus = STATUS_ABANDONED; \
979 } \
980 \
981 /* Insert it into the Mutant List */ \
982 InsertHeadList(Thread->MutantListHead.Blink, \
983 &(Object)->MutantListEntry); \
984 } \
985 }
986
987 //
988 // Satisfies the wait of any nonmutant dispatcher object
989 //
990 #define KiSatisfyNonMutantWait(Object) \
991 { \
992 if (((Object)->Header.Type & TIMER_OR_EVENT_TYPE) == \
993 EventSynchronizationObject) \
994 { \
995 /* Synchronization Timers and Events just get un-signaled */ \
996 (Object)->Header.SignalState = 0; \
997 } \
998 else if ((Object)->Header.Type == SemaphoreObject) \
999 { \
1000 /* These ones can have multiple states, so we only decrease it */ \
1001 (Object)->Header.SignalState--; \
1002 } \
1003 }
1004
1005 //
1006 // Recalculates the due time
1007 //
1008 PLARGE_INTEGER
1009 FORCEINLINE
1010 KiRecalculateDueTime(IN PLARGE_INTEGER OriginalDueTime,
1011 IN PLARGE_INTEGER DueTime,
1012 IN OUT PLARGE_INTEGER NewDueTime)
1013 {
1014 /* Don't do anything for absolute waits */
1015 if (OriginalDueTime->QuadPart >= 0) return OriginalDueTime;
1016
1017 /* Otherwise, query the interrupt time and recalculate */
1018 NewDueTime->QuadPart = KeQueryInterruptTime();
1019 NewDueTime->QuadPart -= DueTime->QuadPart;
1020 return NewDueTime;
1021 }
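//
// Worked example (illustrative only): relative waits use negative due times.
// If a wait started with OriginalDueTime = -10000000 (one second in 100ns
// units) and DueTime holds the absolute interrupt time at which the wait
// timer expires (as saved by the wait macros below), the recalculated value
// is "current interrupt time - DueTime": a negative quantity equal to minus
// the time still remaining, suitable for restarting the wait.
//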
1022
1023 //
1024 // Determines whether a thread should be added to the wait list
1025 //
1026 FORCEINLINE
1027 BOOLEAN
1028 KiCheckThreadStackSwap(IN PKTHREAD Thread,
1029 IN KPROCESSOR_MODE WaitMode)
1030 {
1031 /* Check the required conditions */
1032 if ((WaitMode != KernelMode) &&
1033 (Thread->EnableStackSwap) &&
1034 (Thread->Priority >= (LOW_REALTIME_PRIORITY + 9)))
1035 {
1036 /* We are go for swap */
1037 return TRUE;
1038 }
1039 else
1040 {
1041 /* Don't swap the thread */
1042 return FALSE;
1043 }
1044 }
1045
1046 //
1047 // Adds a thread to the wait list
1048 //
1049 #define KiAddThreadToWaitList(Thread, Swappable) \
1050 { \
1051 /* Make sure it's swappable */ \
1052 if (Swappable) \
1053 { \
1054 /* Insert it into the PRCB's List */ \
1055 InsertTailList(&KeGetCurrentPrcb()->WaitListHead, \
1056 &Thread->WaitListEntry); \
1057 } \
1058 }
1059
1060 //
1061 // Checks if a wait in progress should be interrupted by APCs or an alertable
1062 // state.
1063 //
1064 FORCEINLINE
1065 NTSTATUS
1066 KiCheckAlertability(IN PKTHREAD Thread,
1067 IN BOOLEAN Alertable,
1068 IN KPROCESSOR_MODE WaitMode)
1069 {
1070 /* Check if the wait is alertable */
1071 if (Alertable)
1072 {
1073 /* It is, first check if the thread is alerted in this mode */
1074 if (Thread->Alerted[WaitMode])
1075 {
1076 /* It is, so bail out of the wait */
1077 Thread->Alerted[WaitMode] = FALSE;
1078 return STATUS_ALERTED;
1079 }
1080 else if ((WaitMode != KernelMode) &&
1081 (!IsListEmpty(&Thread->ApcState.ApcListHead[UserMode])))
1082 {
1083 /* It isn't, but this is a user wait with queued user APCs */
1084 Thread->ApcState.UserApcPending = TRUE;
1085 return STATUS_USER_APC;
1086 }
1087 else if (Thread->Alerted[KernelMode])
1088 {
1089 /* It isn't that either, but we're alerted in kernel mode */
1090 Thread->Alerted[KernelMode] = FALSE;
1091 return STATUS_ALERTED;
1092 }
1093 }
1094 else if ((WaitMode != KernelMode) && (Thread->ApcState.UserApcPending))
1095 {
1096 /* Not alertable, but this is a user wait with pending user APCs */
1097 return STATUS_USER_APC;
1098 }
1099
1100 /* Otherwise, we're fine */
1101 return STATUS_WAIT_0;
1102 }
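//
// Usage sketch (illustrative only): the wait entry points call this before
// committing to a wait and abandon the wait on a non-success status, roughly
// as below. "WaitStatus" is an assumed caller local.
//
//     WaitStatus = KiCheckAlertability(Thread, Alertable, WaitMode);
//     if (WaitStatus != STATUS_WAIT_0) return WaitStatus; /* alerted or APC */
//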
1103
1104 //
1105 // Called by Wait and Queue code to insert a timer for dispatching.
1106 // Also called by KeSetTimerEx to insert a timer from the caller.
1107 //
1108 VOID
1109 FORCEINLINE
1110 KxInsertTimer(IN PKTIMER Timer,
1111 IN ULONG Hand)
1112 {
1113 PKSPIN_LOCK_QUEUE LockQueue;
1114
1115 /* Acquire the lock and release the dispatcher lock */
1116 LockQueue = KiAcquireTimerLock(Hand);
1117 KiReleaseDispatcherLockFromDpcLevel();
1118
1119 /* Try to insert the timer */
1120 if (KiInsertTimerTable(Timer, Hand))
1121 {
1122 /* Complete it */
1123 KiCompleteTimer(Timer, LockQueue);
1124 }
1125 else
1126 {
1127 /* Do nothing, just release the lock */
1128 KiReleaseTimerLock(LockQueue);
1129 }
1130 }
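//
// Usage sketch (illustrative only): the caller is expected to hold the
// dispatcher lock at DISPATCH_LEVEL; this routine exchanges it for the
// per-hand timer lock before touching the timer table. A caller that has
// already raised to DISPATCH_LEVEL might look roughly like:
//
//     KiAcquireDispatcherLockAtDpcLevel();
//     /* ... compute the due time and Hand ... */
//     KxInsertTimer(Timer, Hand);    /* releases the dispatcher lock */
//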
1131
1132 //
1133 // Called from Unlink and Queue Insert Code.
1134 // Also called by timer code when canceling an inserted timer.
1135 // Removes a timer from its tree.
1136 //
1137 VOID
1138 FORCEINLINE
1139 KxRemoveTreeTimer(IN PKTIMER Timer)
1140 {
1141 ULONG Hand = Timer->Header.Hand;
1142 PKSPIN_LOCK_QUEUE LockQueue;
1143 PKTIMER_TABLE_ENTRY TimerEntry;
1144
1145 /* Acquire timer lock */
1146 LockQueue = KiAcquireTimerLock(Hand);
1147
1148 /* Set the timer as non-inserted */
1149 Timer->Header.Inserted = FALSE;
1150
1151 /* Remove it from the timer list */
1152 if (RemoveEntryList(&Timer->TimerListEntry))
1153 {
1154 /* Get the entry and check if it's empty */
1155 TimerEntry = &KiTimerTableListHead[Hand];
1156 if (IsListEmpty(&TimerEntry->Entry))
1157 {
1158 /* Clear the time then */
1159 TimerEntry->Time.HighPart = 0xFFFFFFFF;
1160 }
1161 }
1162
1163 /* Release the timer lock */
1164 KiReleaseTimerLock(LockQueue);
1165 }
1166
1167 VOID
1168 FORCEINLINE
1169 KxSetTimerForThreadWait(IN PKTIMER Timer,
1170 IN LARGE_INTEGER Interval,
1171 OUT PULONG Hand)
1172 {
1173 ULONGLONG DueTime;
1174 LARGE_INTEGER InterruptTime, SystemTime, TimeDifference;
1175
1176 /* Check the timer's interval to see if it's absolute */
1177 Timer->Header.Absolute = FALSE;
1178 if (Interval.HighPart >= 0)
1179 {
1180 /* Get the system time and calculate the relative time */
1181 KeQuerySystemTime(&SystemTime);
1182 TimeDifference.QuadPart = SystemTime.QuadPart - Interval.QuadPart;
1183 Timer->Header.Absolute = TRUE;
1184
1185 /* Check if we've already expired */
1186 if (TimeDifference.HighPart >= 0)
1187 {
1188 /* Reset everything */
1189 Timer->DueTime.QuadPart = 0;
1190 *Hand = 0;
1191 Timer->Header.Hand = 0;
1192 return;
1193 }
1194 else
1195 {
1196 /* Update the interval */
1197 Interval = TimeDifference;
1198 }
1199 }
1200
1201 /* Calculate the due time */
1202 InterruptTime.QuadPart = KeQueryInterruptTime();
1203 DueTime = InterruptTime.QuadPart - Interval.QuadPart;
1204 Timer->DueTime.QuadPart = DueTime;
1205
1206 /* Calculate the timer handle */
1207 *Hand = KiComputeTimerTableIndex(DueTime);
1208 Timer->Header.Hand = (UCHAR)*Hand;
1209 }
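//
// Worked example (illustrative only): NT-style timeouts are positive for
// absolute system times and negative for relative intervals. A relative 15ms
// wait arrives as Interval = -150000 (100ns units); the absolute branch is
// skipped, DueTime = InterruptTime - (-150000) lands 15ms in the future, and
// the timer hand is derived from that due time via KiComputeTimerTableIndex.
//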
1210
1211 #define KxDelayThreadWait() \
1212 \
1213 /* Setup the Wait Block */ \
1214 Thread->WaitBlockList = TimerBlock; \
1215 \
1216 /* Setup the timer */ \
1217 KxSetTimerForThreadWait(Timer, *Interval, &Hand); \
1218 \
1219 /* Save the due time for the caller */ \
1220 DueTime.QuadPart = Timer->DueTime.QuadPart; \
1221 \
1222 /* Link the timer to this Wait Block */ \
1223 TimerBlock->NextWaitBlock = TimerBlock; \
1224 Timer->Header.WaitListHead.Flink = &TimerBlock->WaitListEntry; \
1225 Timer->Header.WaitListHead.Blink = &TimerBlock->WaitListEntry; \
1226 \
1227 /* Clear wait status */ \
1228 Thread->WaitStatus = STATUS_SUCCESS; \
1229 \
1230 /* Setup wait fields */ \
1231 Thread->Alertable = Alertable; \
1232 Thread->WaitReason = DelayExecution; \
1233 Thread->WaitMode = WaitMode; \
1234 \
1235 /* Check if we can swap the thread's stack */ \
1236 Thread->WaitListEntry.Flink = NULL; \
1237 Swappable = KiCheckThreadStackSwap(Thread, WaitMode); \
1238 \
1239 /* Set the wait time */ \
1240 Thread->WaitTime = KeTickCount.LowPart;
1241
1242 #define KxMultiThreadWait() \
1243 /* Link wait block array to the thread */ \
1244 Thread->WaitBlockList = WaitBlockArray; \
1245 \
1246 /* Reset the index */ \
1247 Index = 0; \
1248 \
1249 /* Loop wait blocks */ \
1250 do \
1251 { \
1252 /* Fill out the wait block */ \
1253 WaitBlock = &WaitBlockArray[Index]; \
1254 WaitBlock->Object = Object[Index]; \
1255 WaitBlock->WaitKey = (USHORT)Index; \
1256 WaitBlock->WaitType = WaitType; \
1257 WaitBlock->Thread = Thread; \
1258 \
1259 /* Link to next block */ \
1260 WaitBlock->NextWaitBlock = &WaitBlockArray[Index + 1]; \
1261 Index++; \
1262 } while (Index < Count); \
1263 \
1264 /* Link the last block */ \
1265 WaitBlock->NextWaitBlock = WaitBlockArray; \
1266 \
1267 /* Set default wait status */ \
1268 Thread->WaitStatus = STATUS_WAIT_0; \
1269 \
1270 /* Check if we have a timer */ \
1271 if (Timeout) \
1272 { \
1273 /* Link to the block */ \
1274 TimerBlock->NextWaitBlock = WaitBlockArray; \
1275 \
1276 /* Setup the timer */ \
1277 KxSetTimerForThreadWait(Timer, *Timeout, &Hand); \
1278 \
1279 /* Save the due time for the caller */ \
1280 DueTime.QuadPart = Timer->DueTime.QuadPart; \
1281 \
1282 /* Initialize the list */ \
1283 InitializeListHead(&Timer->Header.WaitListHead); \
1284 } \
1285 \
1286 /* Set wait settings */ \
1287 Thread->Alertable = Alertable; \
1288 Thread->WaitMode = WaitMode; \
1289 Thread->WaitReason = WaitReason; \
1290 \
1291 /* Check if we can swap the thread's stack */ \
1292 Thread->WaitListEntry.Flink = NULL; \
1293 Swappable = KiCheckThreadStackSwap(Thread, WaitMode); \
1294 \
1295 /* Set the wait time */ \
1296 Thread->WaitTime = KeTickCount.LowPart;
1297
1298 #define KxSingleThreadWait() \
1299 /* Setup the Wait Block */ \
1300 Thread->WaitBlockList = WaitBlock; \
1301 WaitBlock->WaitKey = STATUS_SUCCESS; \
1302 WaitBlock->Object = Object; \
1303 WaitBlock->WaitType = WaitAny; \
1304 \
1305 /* Clear wait status */ \
1306 Thread->WaitStatus = STATUS_SUCCESS; \
1307 \
1308 /* Check if we have a timer */ \
1309 if (Timeout) \
1310 { \
1311 /* Setup the timer */ \
1312 KxSetTimerForThreadWait(Timer, *Timeout, &Hand); \
1313 \
1314 /* Save the due time for the caller */ \
1315 DueTime.QuadPart = Timer->DueTime.QuadPart; \
1316 \
1317 /* Pointer to timer block */ \
1318 WaitBlock->NextWaitBlock = TimerBlock; \
1319 TimerBlock->NextWaitBlock = WaitBlock; \
1320 \
1321 /* Link the timer to this Wait Block */ \
1322 Timer->Header.WaitListHead.Flink = &TimerBlock->WaitListEntry; \
1323 Timer->Header.WaitListHead.Blink = &TimerBlock->WaitListEntry; \
1324 } \
1325 else \
1326 { \
1327 /* No timer block, just ourselves */ \
1328 WaitBlock->NextWaitBlock = WaitBlock; \
1329 } \
1330 \
1331 /* Set wait settings */ \
1332 Thread->Alertable = Alertable; \
1333 Thread->WaitMode = WaitMode; \
1334 Thread->WaitReason = WaitReason; \
1335 \
1336 /* Check if we can swap the thread's stack */ \
1337 Thread->WaitListEntry.Flink = NULL; \
1338 Swappable = KiCheckThreadStackSwap(Thread, WaitMode); \
1339 \
1340 /* Set the wait time */ \
1341 Thread->WaitTime = KeTickCount.LowPart;
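//
// Usage sketch (illustrative only): the Kx*ThreadWait macros expand inside the
// wait entry points and rely on locals declared by the caller, roughly:
//
//     PKTHREAD Thread;          /* current thread                     */
//     PKWAIT_BLOCK WaitBlock;   /* built-in or caller-supplied block  */
//     PKWAIT_BLOCK TimerBlock;  /* wait block used for the timeout    */
//     PKTIMER Timer;            /* the wait timer, typically &Thread->Timer */
//     LARGE_INTEGER DueTime;    /* due time returned to the caller    */
//     ULONG Hand;               /* timer table hand                   */
//     BOOLEAN Swappable;        /* may the kernel stack be swapped?   */
//
// plus the Object/Queue, Timeout or Interval, Alertable, WaitMode and
// WaitReason parameters of the surrounding wait routine.
//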
1342
1343 #define KxQueueThreadWait() \
1344 /* Setup the Wait Block */ \
1345 Thread->WaitBlockList = WaitBlock; \
1346 WaitBlock->WaitKey = STATUS_SUCCESS; \
1347 WaitBlock->Object = Queue; \
1348 WaitBlock->WaitType = WaitAny; \
1349 WaitBlock->Thread = Thread; \
1350 \
1351 /* Clear wait status */ \
1352 Thread->WaitStatus = STATUS_SUCCESS; \
1353 \
1354 /* Check if we have a timer */ \
1355 if (Timeout) \
1356 { \
1357 /* Setup the timer */ \
1358 KxSetTimerForThreadWait(Timer, *Timeout, &Hand); \
1359 \
1360 /* Save the due time for the caller */ \
1361 DueTime.QuadPart = Timer->DueTime.QuadPart; \
1362 \
1363 /* Pointer to timer block */ \
1364 WaitBlock->NextWaitBlock = TimerBlock; \
1365 TimerBlock->NextWaitBlock = WaitBlock; \
1366 \
1367 /* Link the timer to this Wait Block */ \
1368 Timer->Header.WaitListHead.Flink = &TimerBlock->WaitListEntry; \
1369 Timer->Header.WaitListHead.Blink = &TimerBlock->WaitListEntry; \
1370 } \
1371 else \
1372 { \
1373 /* No timer block, just ourselves */ \
1374 WaitBlock->NextWaitBlock = WaitBlock; \
1375 } \
1376 \
1377 /* Set wait settings */ \
1378 Thread->Alertable = FALSE; \
1379 Thread->WaitMode = WaitMode; \
1380 Thread->WaitReason = WrQueue; \
1381 \
1382 /* Check if we can swap the thread's stack */ \
1383 Thread->WaitListEntry.Flink = NULL; \
1384 Swappable = KiCheckThreadStackSwap(Thread, WaitMode); \
1385 \
1386 /* Set the wait time */ \
1387 Thread->WaitTime = KeTickCount.LowPart;
1388
1389 //
1390 // Unwaits a Thread
1391 //
1392 FORCEINLINE
1393 VOID
1394 KxUnwaitThread(IN DISPATCHER_HEADER *Object,
1395 IN KPRIORITY Increment)
1396 {
1397 PLIST_ENTRY WaitEntry, WaitList;
1398 PKWAIT_BLOCK WaitBlock;
1399 PKTHREAD WaitThread;
1400 ULONG WaitKey;
1401
1402 /* Loop the Wait Entries */
1403 WaitList = &Object->WaitListHead;
1404 ASSERT(IsListEmpty(&Object->WaitListHead) == FALSE);
1405 WaitEntry = WaitList->Flink;
1406 do
1407 {
1408 /* Get the current wait block */
1409 WaitBlock = CONTAINING_RECORD(WaitEntry, KWAIT_BLOCK, WaitListEntry);
1410
1411 /* Get the waiting thread */
1412 WaitThread = WaitBlock->Thread;
1413
1414 /* Check the current Wait Mode */
1415 if (WaitBlock->WaitType == WaitAny)
1416 {
1417 /* Use the actual wait key */
1418 WaitKey = WaitBlock->WaitKey;
1419 }
1420 else
1421 {
1422 /* Otherwise, use STATUS_KERNEL_APC */
1423 WaitKey = STATUS_KERNEL_APC;
1424 }
1425
1426 /* Unwait the thread */
1427 KiUnwaitThread(WaitThread, WaitKey, Increment);
1428
1429 /* Next entry */
1430 WaitEntry = WaitList->Flink;
1431 } while (WaitEntry != WaitList);
1432 }
1433
1434 //
1435 // Unwaits a Thread waiting on an event
1436 //
1437 FORCEINLINE
1438 VOID
1439 KxUnwaitThreadForEvent(IN PKEVENT Event,
1440 IN KPRIORITY Increment)
1441 {
1442 PLIST_ENTRY WaitEntry, WaitList;
1443 PKWAIT_BLOCK WaitBlock;
1444 PKTHREAD WaitThread;
1445
1446 /* Loop the Wait Entries */
1447 WaitList = &Event->Header.WaitListHead;
1448 ASSERT(IsListEmpty(&Event->Header.WaitListHead) == FALSE);
1449 WaitEntry = WaitList->Flink;
1450 do
1451 {
1452 /* Get the current wait block */
1453 WaitBlock = CONTAINING_RECORD(WaitEntry, KWAIT_BLOCK, WaitListEntry);
1454
1455 /* Get the waiting thread */
1456 WaitThread = WaitBlock->Thread;
1457
1458 /* Check the current Wait Mode */
1459 if (WaitBlock->WaitType == WaitAny)
1460 {
1461 /* Un-signal it */
1462 Event->Header.SignalState = 0;
1463
1464 /* Un-signal the event and unwait the thread */
1465 KiUnwaitThread(WaitThread, WaitBlock->WaitKey, Increment);
1466 break;
1467 }
1468
1469 /* Unwait the thread with STATUS_KERNEL_APC */
1470 KiUnwaitThread(WaitThread, STATUS_KERNEL_APC, Increment);
1471
1472 /* Next entry */
1473 WaitEntry = WaitList->Flink;
1474 } while (WaitEntry != WaitList);
1475 }
1476
1477 //
1478 // This routine queues a ready thread on the given PRCB's ready lists.
1479 // If this thread cannot currently run on this CPU, then the thread is
1480 // added to the deferred ready list instead.
1481 //
1482 // This routine must be entered with the PRCB lock held and it will exit
1483 // with the PRCB lock released!
1484 //
1485 FORCEINLINE
1486 VOID
1487 KxQueueReadyThread(IN PKTHREAD Thread,
1488 IN PKPRCB Prcb)
1489 {
1490 BOOLEAN Preempted;
1491 KPRIORITY Priority;
1492
1493 /* Sanity checks */
1494 ASSERT(Prcb == KeGetCurrentPrcb());
1495 ASSERT(Thread->State == Running);
1496 ASSERT(Thread->NextProcessor == Prcb->Number);
1497
1498 /* Check if this thread is allowed to run in this CPU */
1499 #ifdef CONFIG_SMP
1500 if ((Thread->Affinity) & (Prcb->SetMember))
1501 #else
1502 if (TRUE)
1503 #endif
1504 {
1505 /* Set thread ready for execution */
1506 Thread->State = Ready;
1507
1508 /* Save current priority and if someone had pre-empted it */
1509 Priority = Thread->Priority;
1510 Preempted = Thread->Preempted;
1511
1512 /* Clear the preempted flag and set the wait time */
1513 Thread->Preempted = FALSE;
1514 Thread->WaitTime = KeTickCount.LowPart;
1515
1516 /* Sanity check */
1517 ASSERT((Priority >= 0) && (Priority <= HIGH_PRIORITY));
1518
1519 /* Insert this thread in the appropriate order */
1520 Preempted ? InsertHeadList(&Prcb->DispatcherReadyListHead[Priority],
1521 &Thread->WaitListEntry) :
1522 InsertTailList(&Prcb->DispatcherReadyListHead[Priority],
1523 &Thread->WaitListEntry);
1524
1525 /* Update the ready summary */
1526 Prcb->ReadySummary |= PRIORITY_MASK(Priority);
1527
1528 /* Sanity check */
1529 ASSERT(Priority == Thread->Priority);
1530
1531 /* Release the PRCB lock */
1532 KiReleasePrcbLock(Prcb);
1533 }
1534 else
1535 {
1536 /* Otherwise, prepare this thread to be deferred */
1537 Thread->State = DeferredReady;
1538 Thread->DeferredProcessor = Prcb->Number;
1539
1540 /* Release the lock and defer scheduling */
1541 KiReleasePrcbLock(Prcb);
1542 KiDeferredReadyThread(Thread);
1543 }
1544 }
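//
// Usage sketch (illustrative only): per the contract above, the PRCB lock is
// taken by the caller and is always released by this routine.
//
//     KiAcquirePrcbLock(Prcb);
//     /* ... decide that Thread should run on this CPU ... */
//     KxQueueReadyThread(Thread, Prcb);   /* returns with the lock released */
//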
1545
1546 //
1547 // This routine scans for an appropriate ready thread to select at the
1548 // given priority and for the given CPU.
1549 //
1550 FORCEINLINE
1551 PKTHREAD
1552 KiSelectReadyThread(IN KPRIORITY Priority,
1553 IN PKPRCB Prcb)
1554 {
1555 ULONG PrioritySet, HighPriority;
1556 PLIST_ENTRY ListEntry;
1557 PKTHREAD Thread = NULL;
1558
1559 /* Save the current mask and get the priority set for the CPU */
1560 PrioritySet = Prcb->ReadySummary >> Priority;
1561 if (!PrioritySet) goto Quickie;
1562
1563 /* Get the highest priority possible */
1564 BitScanReverse((PULONG)&HighPriority, PrioritySet);
1565 ASSERT((PrioritySet & PRIORITY_MASK(HighPriority)) != 0);
1566 HighPriority += Priority;
1567
1568 /* Make sure the list isn't empty at the highest priority */
1569 ASSERT(IsListEmpty(&Prcb->DispatcherReadyListHead[HighPriority]) == FALSE);
1570
1571 /* Get the first thread on the list */
1572 ListEntry = Prcb->DispatcherReadyListHead[HighPriority].Flink;
1573 Thread = CONTAINING_RECORD(ListEntry, KTHREAD, WaitListEntry);
1574
1575 /* Make sure this thread is here for a reason */
1576 ASSERT(HighPriority == Thread->Priority);
1577 ASSERT(Thread->Affinity & AFFINITY_MASK(Prcb->Number));
1578 ASSERT(Thread->NextProcessor == Prcb->Number);
1579
1580 /* Remove it from the list */
1581 if (RemoveEntryList(&Thread->WaitListEntry))
1582 {
1583 /* The list is empty now, reset the ready summary */
1584 Prcb->ReadySummary ^= PRIORITY_MASK(HighPriority);
1585 }
1586
1587 /* Sanity check and return the thread */
1588 Quickie:
1589 ASSERT((Thread == NULL) ||
1590 (Thread->BasePriority == 0) ||
1591 (Thread->Priority != 0));
1592 return Thread;
1593 }
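//
// Worked example (illustrative only): with ReadySummary = 0xA00 (threads
// queued at priorities 9 and 11) and a requested minimum Priority of 8, the
// shifted set is 0xA00 >> 8 = 0xA, BitScanReverse returns bit 3, and the
// thread is removed from DispatcherReadyListHead[8 + 3 = 11], the highest
// ready priority at or above the request.
//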
1594
1595 //
1596 // This routine computes the new priority for a thread. It is only valid for
1597 // threads with priorities in the dynamic priority range.
1598 //
1599 SCHAR
1600 FORCEINLINE
1601 KiComputeNewPriority(IN PKTHREAD Thread,
1602 IN SCHAR Adjustment)
1603 {
1604 SCHAR Priority;
1605
1606 /* Priority sanity checks */
1607 ASSERT((Thread->PriorityDecrement >= 0) &&
1608 (Thread->PriorityDecrement <= Thread->Priority));
1609 ASSERT((Thread->Priority < LOW_REALTIME_PRIORITY) ?
1610 TRUE : (Thread->PriorityDecrement == 0));
1611
1612 /* Get the current priority */
1613 Priority = Thread->Priority;
1614 if (Priority < LOW_REALTIME_PRIORITY)
1615 {
1616 /* Decrease priority by the priority decrement */
1617 Priority -= (Thread->PriorityDecrement + Adjustment);
1618
1619 /* Don't go out of bounds */
1620 if (Priority < Thread->BasePriority) Priority = Thread->BasePriority;
1621
1622 /* Reset the priority decrement */
1623 Thread->PriorityDecrement = 0;
1624 }
1625
1626 /* Sanity check */
1627 ASSERT((Thread->BasePriority == 0) || (Priority != 0));
1628
1629 /* Return the new priority */
1630 return Priority;
1631 }
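//
// Worked example (illustrative only): a dynamic-priority thread at priority 14
// with PriorityDecrement = 4 and Adjustment = 1 is recomputed as
// 14 - (4 + 1) = 9; if that falls below its BasePriority (say 10) it is
// clamped back to 10, and the decrement is reset to 0. Threads at or above
// LOW_REALTIME_PRIORITY are returned with their priority unchanged.
//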