1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: GPL - See COPYING in the top level directory
4 * FILE: ntoskrnl/include/internal/ke_x.h
5 * PURPOSE: Internal Inlined Functions for the Kernel
6 * PROGRAMMERS: Alex Ionescu (alex.ionescu@reactos.org)
7 */
8
9 //
10 // Thread Dispatcher Header DebugActive Mask
11 //
12 #define DR_MASK(x) (1 << (x))
13 #define DR_ACTIVE_MASK 0x10
14 #define DR_REG_MASK 0x4F
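//
// For example, DR_MASK(4) evaluates to 0x10, the same value as DR_ACTIVE_MASK.
//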
15
16 #ifdef _M_IX86
17 //
18 // Sanitizes a selector
19 //
20 FORCEINLINE
21 ULONG
22 Ke386SanitizeSeg(IN ULONG Cs,
23 IN KPROCESSOR_MODE Mode)
24 {
25 //
26 // Check if we're in kernel-mode, and force CPL 0 if so.
27 // Otherwise, force CPL 3.
28 //
29 return ((Mode == KernelMode) ?
30 (Cs & (0xFFFF & ~RPL_MASK)) :
31 (RPL_MASK | (Cs & 0xFFFF)));
32 }
33
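//
// Worked example for Ke386SanitizeSeg (RPL_MASK is 3): a kernel-mode caller
// passing CS = 0x1B gets back 0x18 (RPL forced to 0), while a user-mode
// caller passing CS = 0x08 gets back 0x0B (RPL forced to 3).
//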
34 //
35 // Sanitizes EFLAGS
36 //
37 FORCEINLINE
38 ULONG
39 Ke386SanitizeFlags(IN ULONG Eflags,
40 IN KPROCESSOR_MODE Mode)
41 {
42 //
43 // If we're in kernel-mode, sanitize EFLAGS but preserve the interrupt flag.
44 // Otherwise, sanitize EFLAGS and force the interrupt flag on.
45 //
46 return ((Mode == KernelMode) ?
47 (Eflags & (EFLAGS_USER_SANITIZE | EFLAGS_INTERRUPT_MASK)) :
48 (EFLAGS_INTERRUPT_MASK | (Eflags & EFLAGS_USER_SANITIZE)));
49 }
50
51 //
52 // Gets a DR register from a CONTEXT structure
53 //
54 FORCEINLINE
55 PVOID
56 KiDrFromContext(IN ULONG Dr,
57 IN PCONTEXT Context)
58 {
59 return *(PVOID*)((ULONG_PTR)Context + KiDebugRegisterContextOffsets[Dr]);
60 }
61
62 //
63 // Gets a DR register from a KTRAP_FRAME structure
64 //
65 FORCEINLINE
66 PVOID*
67 KiDrFromTrapFrame(IN ULONG Dr,
68 IN PKTRAP_FRAME TrapFrame)
69 {
70 return (PVOID*)((ULONG_PTR)TrapFrame + KiDebugRegisterTrapOffsets[Dr]);
71 }
72
73 //
74 // Sanitizes a debug register address
75 //
76 FORCEINLINE
77 PVOID
78 Ke386SanitizeDr(IN PVOID DrAddress,
79 IN KPROCESSOR_MODE Mode)
80 {
81 //
82 // Check if we're in kernel-mode, and return the address directly if so.
83 // Otherwise, make sure it's not inside the kernel-mode address space.
84 // If it is, then clear the address.
85 //
86 return ((Mode == KernelMode) ? DrAddress :
87 (DrAddress <= MM_HIGHEST_USER_ADDRESS) ? DrAddress : 0);
88 }
89 #endif /* _M_IX86 */
90
91 #ifndef _M_ARM
92 PRKTHREAD
93 FORCEINLINE
94 KeGetCurrentThread(VOID)
95 {
96 #ifdef _M_IX86
97 /* Return the current thread */
98 return ((PKIPCR)KeGetPcr())->PrcbData.CurrentThread;
99 #else
100 PKPRCB Prcb = KeGetCurrentPrcb();
101 return Prcb->CurrentThread;
102 #endif
103 }
104
105 UCHAR
106 FORCEINLINE
107 KeGetPreviousMode(VOID)
108 {
109 /* Return the current mode */
110 return KeGetCurrentThread()->PreviousMode;
111 }
112 #endif
113
114 VOID
115 FORCEINLINE
116 KeFlushProcessTb(VOID)
117 {
118 /* Flush the TLB by resetting CR3 */
119 #ifdef _M_PPC
120 __asm__("sync\n\tisync\n\t");
121 #elif defined(_M_ARM)
122 //
123 // We need to implement this!
124 //
125 ASSERTMSG("Need ARM flush routine\n", FALSE);
126 #else
127 __writecr3(__readcr3());
128 #endif
129 }
130
131 //
132 // Enters a Guarded Region
133 //
134 #define KeEnterGuardedRegion() \
135 { \
136 PKTHREAD _Thread = KeGetCurrentThread(); \
137 \
138 /* Sanity checks */ \
139 ASSERT(KeGetCurrentIrql() <= APC_LEVEL); \
140 ASSERT(_Thread == KeGetCurrentThread()); \
141 ASSERT((_Thread->SpecialApcDisable <= 0) && \
142 (_Thread->SpecialApcDisable != -32768)); \
143 \
144 /* Disable Special APCs */ \
145 _Thread->SpecialApcDisable--; \
146 }
147
148 //
149 // Leaves a Guarded Region
150 //
151 #define KeLeaveGuardedRegion() \
152 { \
153 PKTHREAD _Thread = KeGetCurrentThread(); \
154 \
155 /* Sanity checks */ \
156 ASSERT(KeGetCurrentIrql() <= APC_LEVEL); \
157 ASSERT(_Thread == KeGetCurrentThread()); \
158 ASSERT(_Thread->SpecialApcDisable < 0); \
159 \
160 /* Leave region and check if APCs are OK now */ \
161 if (!(++_Thread->SpecialApcDisable)) \
162 { \
163 /* Check for Kernel APCs on the list */ \
164 if (!IsListEmpty(&_Thread->ApcState. \
165 ApcListHead[KernelMode])) \
166 { \
167 /* Check for APC Delivery */ \
168 KiCheckForKernelApcDelivery(); \
169 } \
170 } \
171 }
172
173 //
174 // TODO: Guarded Mutex Routines
175 //
176
177 //
178 // Enters a Critical Region
179 //
180 #define KeEnterCriticalRegion() \
181 { \
182 PKTHREAD _Thread = KeGetCurrentThread(); \
183 \
184 /* Sanity checks */ \
185 ASSERT(_Thread == KeGetCurrentThread()); \
186 ASSERT((_Thread->KernelApcDisable <= 0) && \
187 (_Thread->KernelApcDisable != -32768)); \
188 \
189 /* Disable Kernel APCs */ \
190 _Thread->KernelApcDisable--; \
191 }
192
193 //
194 // Leaves a Critical Region
195 //
196 #define KeLeaveCriticalRegion() \
197 { \
198 PKTHREAD _Thread = KeGetCurrentThread(); \
199 \
200 /* Sanity checks */ \
201 ASSERT(_Thread == KeGetCurrentThread()); \
202 ASSERT(_Thread->KernelApcDisable < 0); \
203 \
204 /* Enable Kernel APCs */ \
205 _Thread->KernelApcDisable++; \
206 \
207 /* Check if Kernel APCs are now enabled */ \
208 if (!(_Thread->KernelApcDisable)) \
209 { \
210 /* Check if we need to request an APC Delivery */ \
211 if (!(IsListEmpty(&_Thread->ApcState.ApcListHead[KernelMode])) && \
212 !(_Thread->SpecialApcDisable)) \
213 { \
214 /* Check for the right environment */ \
215 KiCheckForKernelApcDelivery(); \
216 } \
217 } \
218 }
219
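//
// Illustrative usage of the region macros above: both pairs bracket code that
// must not be interrupted by kernel APCs, e.g.
//
//     KeEnterCriticalRegion();    /* blocks normal kernel APCs */
//     /* ... touch state that a kernel APC routine might also touch ... */
//     KeLeaveCriticalRegion();
//
// The guarded-region pair follows the same pattern but additionally blocks
// special kernel APCs.
//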
220 #ifndef CONFIG_SMP
221 //
222 // Spinlock Acquire at IRQL >= DISPATCH_LEVEL
223 //
224 FORCEINLINE
225 VOID
226 KxAcquireSpinLock(IN PKSPIN_LOCK SpinLock)
227 {
228 /* On UP builds, spinlocks don't exist at IRQL >= DISPATCH */
229 UNREFERENCED_PARAMETER(SpinLock);
230 }
231
232 //
233 // Spinlock Release at IRQL >= DISPATCH_LEVEL
234 //
235 FORCEINLINE
236 VOID
237 KxReleaseSpinLock(IN PKSPIN_LOCK SpinLock)
238 {
239 /* On UP builds, spinlocks don't exist at IRQL >= DISPATCH */
240 UNREFERENCED_PARAMETER(SpinLock);
241 }
242
243 //
244 // This routine protects against multiple CPU acquires; it's meaningless on UP.
245 //
246 VOID
247 FORCEINLINE
248 KiAcquireDispatcherObject(IN DISPATCHER_HEADER* Object)
249 {
250 UNREFERENCED_PARAMETER(Object);
251 }
252
253 //
254 // This routine protects against multiple CPU acquires; it's meaningless on UP.
255 //
256 VOID
257 FORCEINLINE
258 KiReleaseDispatcherObject(IN DISPATCHER_HEADER* Object)
259 {
260 UNREFERENCED_PARAMETER(Object);
261 }
262
263 KIRQL
264 FORCEINLINE
265 KiAcquireDispatcherLock(VOID)
266 {
267 /* Raise to DPC level */
268 return KeRaiseIrqlToDpcLevel();
269 }
270
271 VOID
272 FORCEINLINE
273 KiReleaseDispatcherLock(IN KIRQL OldIrql)
274 {
275 /* Just exit the dispatcher */
276 KiExitDispatcher(OldIrql);
277 }
278
279 VOID
280 FORCEINLINE
281 KiAcquireDispatcherLockAtDpcLevel(VOID)
282 {
283 /* This is a no-op at DPC Level for UP systems */
284 return;
285 }
286
287 VOID
288 FORCEINLINE
289 KiReleaseDispatcherLockFromDpcLevel(VOID)
290 {
291 /* This is a no-op at DPC Level for UP systems */
292 return;
293 }
294
295 //
296 // This routine makes the thread deferred ready on the boot CPU.
297 //
298 FORCEINLINE
299 VOID
300 KiInsertDeferredReadyList(IN PKTHREAD Thread)
301 {
302 /* Set the thread to deferred state and boot CPU */
303 Thread->State = DeferredReady;
304 Thread->DeferredProcessor = 0;
305
306 /* Make the thread ready immediately */
307 KiDeferredReadyThread(Thread);
308 }
309
310 FORCEINLINE
311 VOID
312 KiRescheduleThread(IN BOOLEAN NewThread,
313 IN ULONG Cpu)
314 {
315 /* This is meaningless on UP systems */
316 UNREFERENCED_PARAMETER(NewThread);
317 UNREFERENCED_PARAMETER(Cpu);
318 }
319
320 //
321 // This routine protects against multiple CPU acquires; it's meaningless on UP.
322 //
323 FORCEINLINE
324 VOID
325 KiSetThreadSwapBusy(IN PKTHREAD Thread)
326 {
327 UNREFERENCED_PARAMETER(Thread);
328 }
329
330 //
331 // This routine protects against multiple CPU acquires; it's meaningless on UP.
332 //
333 FORCEINLINE
334 VOID
335 KiAcquirePrcbLock(IN PKPRCB Prcb)
336 {
337 UNREFERENCED_PARAMETER(Prcb);
338 }
339
340 //
341 // This routine protects against multiple CPU acquires; it's meaningless on UP.
342 //
343 FORCEINLINE
344 VOID
345 KiReleasePrcbLock(IN PKPRCB Prcb)
346 {
347 UNREFERENCED_PARAMETER(Prcb);
348 }
349
350 //
351 // This routine protects against multiple CPU acquires; it's meaningless on UP.
352 //
353 FORCEINLINE
354 VOID
355 KiAcquireThreadLock(IN PKTHREAD Thread)
356 {
357 UNREFERENCED_PARAMETER(Thread);
358 }
359
360 //
361 // This routine protects against multiple CPU acquires; it's meaningless on UP.
362 //
363 FORCEINLINE
364 VOID
365 KiReleaseThreadLock(IN PKTHREAD Thread)
366 {
367 UNREFERENCED_PARAMETER(Thread);
368 }
369
370 //
371 // This routine protects against multiple CPU acquires; it's meaningless on UP.
372 //
373 FORCEINLINE
374 BOOLEAN
375 KiTryThreadLock(IN PKTHREAD Thread)
376 {
377 UNREFERENCED_PARAMETER(Thread);
378 return FALSE;
379 }
380
381 FORCEINLINE
382 VOID
383 KiCheckDeferredReadyList(IN PKPRCB Prcb)
384 {
385 /* There are no deferred ready lists on UP systems */
386 UNREFERENCED_PARAMETER(Prcb);
387 }
388
389 FORCEINLINE
390 VOID
391 KiRundownThread(IN PKTHREAD Thread)
392 {
393 #if defined(_M_IX86) || defined(_M_AMD64)
394 /* Check if this is the NPX Thread */
395 if (KeGetCurrentPrcb()->NpxThread == Thread)
396 {
397 /* Clear it */
398 KeGetCurrentPrcb()->NpxThread = NULL;
399 KeArchFnInit();
400 }
401 #endif
402 }
403
404 FORCEINLINE
405 VOID
406 KiRequestApcInterrupt(IN BOOLEAN NeedApc,
407 IN UCHAR Processor)
408 {
409 /* We deliver instantly on UP */
410 UNREFERENCED_PARAMETER(NeedApc);
411 UNREFERENCED_PARAMETER(Processor);
412 }
413
414 FORCEINLINE
415 PKSPIN_LOCK_QUEUE
416 KiAcquireTimerLock(IN ULONG Hand)
417 {
418 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
419
420 /* Nothing to do on UP */
421 UNREFERENCED_PARAMETER(Hand);
422 return NULL;
423 }
424
425 FORCEINLINE
426 VOID
427 KiReleaseTimerLock(IN PKSPIN_LOCK_QUEUE LockQueue)
428 {
429 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
430
431 /* Nothing to do on UP */
432 UNREFERENCED_PARAMETER(LockQueue);
433 }
434
435 #else
436
437 //
438 // Spinlock Acquisition at IRQL >= DISPATCH_LEVEL
439 //
440 FORCEINLINE
441 VOID
442 KxAcquireSpinLock(IN PKSPIN_LOCK SpinLock)
443 {
444 for (;;)
445 {
446 /* Try to acquire it */
447 if (InterlockedBitTestAndSet((PLONG)SpinLock, 0))
448 {
449 /* It was already owned... spin until it gets released */
450 while ((*(volatile KSPIN_LOCK *)SpinLock) & 1)
451 {
452 #ifdef DBG
453 /* On debug builds, we use a much slower but useful routine */
454 //Kii386SpinOnSpinLock(SpinLock, 5);
455
456 /* FIXME: Do normal yield for now */
457 YieldProcessor();
458 #else
459 /* Otherwise, just yield and keep looping */
460 YieldProcessor();
461 #endif
462 }
463 }
464 else
465 {
466 #ifdef DBG
467 /* On debug builds, we OR in the KTHREAD */
468 *SpinLock = (KSPIN_LOCK)KeGetCurrentThread() | 1;
469 #endif
470 /* All is well, break out */
471 break;
472 }
473 }
474 }
475
476 //
477 // Spinlock Release at IRQL >= DISPATCH_LEVEL
478 //
479 FORCEINLINE
480 VOID
481 KxReleaseSpinLock(IN PKSPIN_LOCK SpinLock)
482 {
483 #ifdef DBG
484 /* Make sure that the threads match */
485 if (((KSPIN_LOCK)KeGetCurrentThread() | 1) != *SpinLock)
486 {
487 /* They don't, bugcheck */
488 KeBugCheckEx(SPIN_LOCK_NOT_OWNED, (ULONG_PTR)SpinLock, 0, 0, 0);
489 }
490 #endif
491 /* Clear the lock */
492 InterlockedAnd((PLONG)SpinLock, 0);
493 }
494
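//
// Illustrative usage of KxAcquireSpinLock/KxReleaseSpinLock: the caller is
// assumed to already be at IRQL >= DISPATCH_LEVEL (for example inside a DPC),
// so no IRQL raise happens here; SomeLock stands for any caller-owned
// KSPIN_LOCK.
//
//     KxAcquireSpinLock(&SomeLock);
//     /* ... touch the data that SomeLock protects ... */
//     KxReleaseSpinLock(&SomeLock);
//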
495 VOID
496 FORCEINLINE
497 KiAcquireDispatcherObject(IN DISPATCHER_HEADER* Object)
498 {
499 LONG OldValue;
500
501 /* Make sure we're at a safe level to touch the lock */
502 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
503
504 /* Start acquire loop */
505 do
506 {
507 /* Loop until the other CPU releases it */
508 while (TRUE)
509 {
510 /* Check if it got released */
511 OldValue = Object->Lock;
512 if ((OldValue & KOBJECT_LOCK_BIT) == 0) break;
513
514 /* Let the CPU know that this is a loop */
515 YieldProcessor();
516 }
517
518 /* Try acquiring the lock now */
519 } while (InterlockedCompareExchange(&Object->Lock,
520 OldValue | KOBJECT_LOCK_BIT,
521 OldValue) != OldValue);
522 }
523
524 VOID
525 FORCEINLINE
526 KiReleaseDispatcherObject(IN DISPATCHER_HEADER* Object)
527 {
528 /* Make sure we're at a safe level to touch the lock */
529 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
530
531 /* Release it */
532 InterlockedAnd(&Object->Lock, ~KOBJECT_LOCK_BIT);
533 }
534
535 KIRQL
536 FORCEINLINE
537 KiAcquireDispatcherLock(VOID)
538 {
539 /* Raise to synchronization level and acquire the dispatcher lock */
540 return KeAcquireQueuedSpinLockRaiseToSynch(LockQueueDispatcherLock);
541 }
542
543 VOID
544 FORCEINLINE
545 KiReleaseDispatcherLock(IN KIRQL OldIrql)
546 {
547 /* First release the lock */
548 KeReleaseQueuedSpinLockFromDpcLevel(&KeGetCurrentPrcb()->
549 LockQueue[LockQueueDispatcherLock]);
550
551 /* Then exit the dispatcher */
552 KiExitDispatcher(OldIrql);
553 }
554
555 VOID
556 FORCEINLINE
557 KiAcquireDispatcherLockAtDpcLevel(VOID)
558 {
559 /* Acquire the dispatcher lock */
560 KeAcquireQueuedSpinLockAtDpcLevel(LockQueueDispatcherLock);
561 }
562
563 VOID
564 FORCEINLINE
565 KiReleaseDispatcherLockFromDpcLevel(VOID)
566 {
567 /* Release the dispatcher lock */
568 KeReleaseQueuedSpinLockFromDpcLevel(LockQueueDispatcherLock);
569 }
570
571 //
572 // This routine inserts a thread into the deferred ready list of the given CPU
573 //
574 FORCEINLINE
575 VOID
576 KiInsertDeferredReadyList(IN PKTHREAD Thread)
577 {
578 PKPRCB Prcb = KeGetCurrentPrcb();
579
580 /* Set the thread to deferred state and CPU */
581 Thread->State = DeferredReady;
582 Thread->DeferredProcessor = Prcb->Number;
583
584 /* Add it on the list */
585 PushEntryList(&Prcb->DeferredReadyListHead, &Thread->SwapListEntry);
586 }
587
588 FORCEINLINE
589 VOID
590 KiRescheduleThread(IN BOOLEAN NewThread,
591 IN ULONG Cpu)
592 {
593 /* Check if a new thread needs to be scheduled on a different CPU */
594 if ((NewThread) && !(KeGetPcr()->Number == Cpu))
595 {
596 /* Send an IPI to request delivery */
597 KiIpiSend(AFFINITY_MASK(Cpu), IPI_DPC);
598 }
599 }
600
601 //
602 // This routine sets the current thread in a swap busy state, which ensures that
603 // nobody else tries to swap it concurrently.
604 //
605 FORCEINLINE
606 VOID
607 KiSetThreadSwapBusy(IN PKTHREAD Thread)
608 {
609 /* Make sure nobody already set it */
610 ASSERT(Thread->SwapBusy == FALSE);
611
612 /* Set it ourselves */
613 Thread->SwapBusy = TRUE;
614 }
615
616 //
617 // This routine acquires the PRCB lock so that only one caller can touch
618 // volatile PRCB data.
619 //
620 // Since this is a simple optimized spin-lock, it must only be acquired
621 // at dispatcher level or higher!
622 //
623 FORCEINLINE
624 VOID
625 KiAcquirePrcbLock(IN PKPRCB Prcb)
626 {
627 /* Make sure we're at a safe level to touch the PRCB lock */
628 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
629
630 /* Start acquire loop */
631 for (;;)
632 {
633 /* Acquire the lock and break out if we acquired it first */
634 if (!InterlockedExchange((PLONG)&Prcb->PrcbLock, 1)) break;
635
636 /* Loop until the other CPU releases it */
637 do
638 {
639 /* Let the CPU know that this is a loop */
640 YieldProcessor();
641 } while (Prcb->PrcbLock);
642 }
643 }
644
645 //
646 // This routine releases the PRCB lock so that other callers can touch
647 // volatile PRCB data.
648 //
649 // Since this is a simple optimized spin-lock, it must only be acquired
650 // at dispatcher level or higher!
651 //
652 FORCEINLINE
653 VOID
654 KiReleasePrcbLock(IN PKPRCB Prcb)
655 {
656 /* Make sure it's acquired! */
657 ASSERT(Prcb->PrcbLock != 0);
658
659 /* Release it */
660 InterlockedAnd((PLONG)&Prcb->PrcbLock, 0);
661 }
662
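//
// Illustrative usage of KiAcquirePrcbLock/KiReleasePrcbLock: short updates to
// per-CPU scheduler state are bracketed by the lock, with the caller already
// at DISPATCH_LEVEL or above and Prcb typically being KeGetCurrentPrcb().
// KxQueueReadyThread below is a real caller that releases this lock.
//
//     KiAcquirePrcbLock(Prcb);
//     /* ... update the PRCB ready lists and ready summary ... */
//     KiReleasePrcbLock(Prcb);
//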
663 //
664 // This routine acquires the thread lock so that only one caller can touch
665 // volatile thread data.
666 //
667 // Since this is a simple optimized spin-lock, it must only be acquired
668 // at dispatcher level or higher!
669 //
670 FORCEINLINE
671 VOID
672 KiAcquireThreadLock(IN PKTHREAD Thread)
673 {
674 /* Make sure we're at a safe level to touch the thread lock */
675 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
676
677 /* Start acquire loop */
678 for (;;)
679 {
680 /* Acquire the lock and break out if we acquired it first */
681 if (!InterlockedExchange((PLONG)&Thread->ThreadLock, 1)) break;
682
683 /* Loop until the other CPU releases it */
684 do
685 {
686 /* Let the CPU know that this is a loop */
687 YieldProcessor();
688 } while (Thread->ThreadLock);
689 }
690 }
691
692 //
693 // This routine releases the thread lock so that other callers can touch
694 // volatile thread data.
695 //
696 // Since this is a simple optimized spin-lock, it must only be acquired
697 // at dispatcher level or higher!
698 //
699 FORCEINLINE
700 VOID
701 KiReleaseThreadLock(IN PKTHREAD Thread)
702 {
703 /* Release it */
704 InterlockedAnd((PLONG)&Thread->ThreadLock, 0);
705 }
706
707 FORCEINLINE
708 BOOLEAN
709 KiTryThreadLock(IN PKTHREAD Thread)
710 {
711 LONG Value;
712
713 /* If the lock isn't acquired, return false */
714 if (!Thread->ThreadLock) return FALSE;
715
716 /* Otherwise, try to acquire it and check the result */
717 Value = 1;
718 Value = InterlockedExchange((PLONG)&Thread->ThreadLock, Value);
719
720 /* Return the lock state */
721 return (Value == TRUE);
722 }
723
724 FORCEINLINE
725 VOID
726 KiCheckDeferredReadyList(IN PKPRCB Prcb)
727 {
728 /* Scan the deferred ready lists if required */
729 if (Prcb->DeferredReadyListHead.Next) KiProcessDeferredReadyList(Prcb);
730 }
731
732 FORCEINLINE
733 VOID
734 KiRundownThread(IN PKTHREAD Thread)
735 {
736 #if defined(_M_IX86) || defined(_M_AMD64)
737 /* FIXME: TODO */
738 ASSERTMSG("Not yet implemented\n", FALSE);
739 #endif
740 }
741
742 FORCEINLINE
743 VOID
744 KiRequestApcInterrupt(IN BOOLEAN NeedApc,
745 IN UCHAR Processor)
746 {
747 /* Check if we need to request APC delivery */
748 if (NeedApc)
749 {
750 /* Check if it's on another CPU */
751 if (KeGetPcr()->Number != Processor)
752 {
753 /* Send an IPI to request delivery */
754 KiIpiSend(AFFINITY_MASK(Processor), IPI_APC);
755 }
756 else
757 {
758 /* Request a software interrupt */
759 HalRequestSoftwareInterrupt(APC_LEVEL);
760 }
761 }
762 }
763
764 FORCEINLINE
765 PKSPIN_LOCK_QUEUE
766 KiAcquireTimerLock(IN ULONG Hand)
767 {
768 PKSPIN_LOCK_QUEUE LockQueue;
769 ULONG LockIndex;
770 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
771
772 /* Get the lock index */
773 LockIndex = Hand >> LOCK_QUEUE_TIMER_LOCK_SHIFT;
774 LockIndex &= (LOCK_QUEUE_TIMER_TABLE_LOCKS - 1);
775
776 /* Now get the lock */
777 LockQueue = &KeGetCurrentPrcb()->LockQueue[LockQueueTimerTableLock + LockIndex];
778
779 /* Acquire it and return */
780 KeAcquireQueuedSpinLockAtDpcLevel(LockQueue);
781 return LockQueue;
782 }
783
784 FORCEINLINE
785 VOID
786 KiReleaseTimerLock(IN PKSPIN_LOCK_QUEUE LockQueue)
787 {
788 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
789
790 /* Release the lock */
791 KeReleaseQueuedSpinLockFromDpcLevel(LockQueue);
792 }
793
794 #endif
795
796 FORCEINLINE
797 VOID
798 KiAcquireApcLock(IN PKTHREAD Thread,
799 IN PKLOCK_QUEUE_HANDLE Handle)
800 {
801 /* Acquire the lock and raise to synchronization level */
802 KeAcquireInStackQueuedSpinLockRaiseToSynch(&Thread->ApcQueueLock, Handle);
803 }
804
805 FORCEINLINE
806 VOID
807 KiAcquireApcLockAtDpcLevel(IN PKTHREAD Thread,
808 IN PKLOCK_QUEUE_HANDLE Handle)
809 {
810 /* Acquire the lock */
811 KeAcquireInStackQueuedSpinLockAtDpcLevel(&Thread->ApcQueueLock, Handle);
812 }
813
814 FORCEINLINE
815 VOID
816 KiAcquireApcLockAtApcLevel(IN PKTHREAD Thread,
817 IN PKLOCK_QUEUE_HANDLE Handle)
818 {
819 /* Acquire the lock */
820 KeAcquireInStackQueuedSpinLock(&Thread->ApcQueueLock, Handle);
821 }
822
823 FORCEINLINE
824 VOID
825 KiReleaseApcLock(IN PKLOCK_QUEUE_HANDLE Handle)
826 {
827 /* Release the lock */
828 KeReleaseInStackQueuedSpinLock(Handle);
829 }
830
831 FORCEINLINE
832 VOID
833 KiReleaseApcLockFromDpcLevel(IN PKLOCK_QUEUE_HANDLE Handle)
834 {
835 /* Release the lock */
836 KeReleaseInStackQueuedSpinLockFromDpcLevel(Handle);
837 }
838
839 FORCEINLINE
840 VOID
841 KiAcquireProcessLock(IN PKPROCESS Process,
842 IN PKLOCK_QUEUE_HANDLE Handle)
843 {
844 /* Acquire the lock and raise to synchronization level */
845 KeAcquireInStackQueuedSpinLockRaiseToSynch(&Process->ProcessLock, Handle);
846 }
847
848 FORCEINLINE
849 VOID
850 KiReleaseProcessLock(IN PKLOCK_QUEUE_HANDLE Handle)
851 {
852 /* Release the lock */
853 KeReleaseInStackQueuedSpinLock(Handle);
854 }
855
856 FORCEINLINE
857 VOID
858 KiReleaseProcessLockFromDpcLevel(IN PKLOCK_QUEUE_HANDLE Handle)
859 {
860 /* Release the lock */
861 KeReleaseInStackQueuedSpinLockFromDpcLevel(Handle);
862 }
863
864 FORCEINLINE
865 VOID
866 KiAcquireDeviceQueueLock(IN PKDEVICE_QUEUE DeviceQueue,
867 IN PKLOCK_QUEUE_HANDLE DeviceLock)
868 {
869 /* Check if we were called from a threaded DPC */
870 if (KeGetCurrentPrcb()->DpcThreadActive)
871 {
872 /* Lock the Queue, we're not at DPC level */
873 KeAcquireInStackQueuedSpinLock(&DeviceQueue->Lock, DeviceLock);
874 }
875 else
876 {
877 /* We must be at DPC level, acquire the lock safely */
878 ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
879 KeAcquireInStackQueuedSpinLockAtDpcLevel(&DeviceQueue->Lock,
880 DeviceLock);
881 }
882 }
883
884 FORCEINLINE
885 VOID
886 KiReleaseDeviceQueueLock(IN PKLOCK_QUEUE_HANDLE DeviceLock)
887 {
888 /* Check if we were called from a threaded DPC */
889 if (KeGetCurrentPrcb()->DpcThreadActive)
890 {
891 /* Unlock the Queue, we're not at DPC level */
892 KeReleaseInStackQueuedSpinLock(DeviceLock);
893 }
894 else
895 {
896 /* We must be at DPC level, release the lock safely */
897 ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
898 KeReleaseInStackQueuedSpinLockFromDpcLevel(DeviceLock);
899 }
900 }
901
902 //
903 // Satisfies the wait of any dispatcher object
904 //
905 #define KiSatisfyObjectWait(Object, Thread) \
906 { \
907 /* Special case for Mutants */ \
908 if ((Object)->Header.Type == MutantObject) \
909 { \
910 /* Decrease the Signal State */ \
911 (Object)->Header.SignalState--; \
912 \
913 /* Check if it's now non-signaled */ \
914 if (!(Object)->Header.SignalState) \
915 { \
916 /* Set the Owner Thread */ \
917 (Object)->OwnerThread = Thread; \
918 \
919 /* Disable APCs if needed */ \
920 Thread->KernelApcDisable = Thread->KernelApcDisable - \
921 (Object)->ApcDisable; \
922 \
923 /* Check if it's abandoned */ \
924 if ((Object)->Abandoned) \
925 { \
926 /* Unabandon it */ \
927 (Object)->Abandoned = FALSE; \
928 \
929 /* Return Status */ \
930 Thread->WaitStatus = STATUS_ABANDONED; \
931 } \
932 \
933 /* Insert it into the Mutant List */ \
934 InsertHeadList(Thread->MutantListHead.Blink, \
935 &(Object)->MutantListEntry); \
936 } \
937 } \
938 else if (((Object)->Header.Type & TIMER_OR_EVENT_TYPE) == \
939 EventSynchronizationObject) \
940 { \
941 /* Synchronization Timers and Events just get un-signaled */ \
942 (Object)->Header.SignalState = 0; \
943 } \
944 else if ((Object)->Header.Type == SemaphoreObject) \
945 { \
946 /* These ones can have multiple states, so we only decrease it */ \
947 (Object)->Header.SignalState--; \
948 } \
949 }
950
951 //
952 // Satisfies the wait of a mutant dispatcher object
953 //
954 #define KiSatisfyMutantWait(Object, Thread) \
955 { \
956 /* Decrease the Signal State */ \
957 (Object)->Header.SignalState--; \
958 \
959 /* Check if it's now non-signaled */ \
960 if (!(Object)->Header.SignalState) \
961 { \
962 /* Set the Owner Thread */ \
963 (Object)->OwnerThread = Thread; \
964 \
965 /* Disable APCs if needed */ \
966 Thread->KernelApcDisable = Thread->KernelApcDisable - \
967 (Object)->ApcDisable; \
968 \
969 /* Check if it's abandoned */ \
970 if ((Object)->Abandoned) \
971 { \
972 /* Unabandon it */ \
973 (Object)->Abandoned = FALSE; \
974 \
975 /* Return Status */ \
976 Thread->WaitStatus = STATUS_ABANDONED; \
977 } \
978 \
979 /* Insert it into the Mutant List */ \
980 InsertHeadList(Thread->MutantListHead.Blink, \
981 &(Object)->MutantListEntry); \
982 } \
983 }
984
985 //
986 // Satisfies the wait of any nonmutant dispatcher object
987 //
988 #define KiSatisfyNonMutantWait(Object) \
989 { \
990 if (((Object)->Header.Type & TIMER_OR_EVENT_TYPE) == \
991 EventSynchronizationObject) \
992 { \
993 /* Synchronization Timers and Events just get un-signaled */ \
994 (Object)->Header.SignalState = 0; \
995 } \
996 else if ((Object)->Header.Type == SemaphoreObject) \
997 { \
998 /* These ones can have multiple states, so we only decrease it */ \
999 (Object)->Header.SignalState--; \
1000 } \
1001 }
1002
1003 //
1004 // Recalculates the due time
1005 //
1006 PLARGE_INTEGER
1007 FORCEINLINE
1008 KiRecalculateDueTime(IN PLARGE_INTEGER OriginalDueTime,
1009 IN PLARGE_INTEGER DueTime,
1010 IN OUT PLARGE_INTEGER NewDueTime)
1011 {
1012 /* Don't do anything for absolute waits */
1013 if (OriginalDueTime->QuadPart >= 0) return OriginalDueTime;
1014
1015 /* Otherwise, query the interrupt time and recalculate */
1016 NewDueTime->QuadPart = KeQueryInterruptTime();
1017 NewDueTime->QuadPart -= DueTime->QuadPart;
1018 return NewDueTime;
1019 }
1020
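//
// Worked example for KiRecalculateDueTime: a 1-second relative wait is passed
// as -10000000 (100 ns units), so OriginalDueTime is negative and the
// recalculation runs. If the absolute deadline in DueTime is still 4000000
// ticks ahead of the current interrupt time, NewDueTime becomes
// InterruptTime - DueTime = -4000000, i.e. the remaining relative interval
// for the restarted wait.
//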
1021 //
1022 // Determines whether a waiting thread's stack can be swapped out, and thus whether it should be added to the wait list
1023 //
1024 FORCEINLINE
1025 BOOLEAN
1026 KiCheckThreadStackSwap(IN PKTHREAD Thread,
1027 IN KPROCESSOR_MODE WaitMode)
1028 {
1029 /* Check the required conditions */
1030 if ((WaitMode != KernelMode) &&
1031 (Thread->EnableStackSwap) &&
1032 (Thread->Priority >= (LOW_REALTIME_PRIORITY + 9)))
1033 {
1034 /* We are go for swap */
1035 return TRUE;
1036 }
1037 else
1038 {
1039 /* Don't swap the thread */
1040 return FALSE;
1041 }
1042 }
1043
1044 //
1045 // Adds a thread to the wait list
1046 //
1047 #define KiAddThreadToWaitList(Thread, Swappable) \
1048 { \
1049 /* Make sure it's swappable */ \
1050 if (Swappable) \
1051 { \
1052 /* Insert it into the PRCB's List */ \
1053 InsertTailList(&KeGetCurrentPrcb()->WaitListHead, \
1054 &Thread->WaitListEntry); \
1055 } \
1056 }
1057
1058 //
1059 // Checks if a wait in progress should be interrupted by APCs or an alertable
1060 // state.
1061 //
1062 FORCEINLINE
1063 NTSTATUS
1064 KiCheckAlertability(IN PKTHREAD Thread,
1065 IN BOOLEAN Alertable,
1066 IN KPROCESSOR_MODE WaitMode)
1067 {
1068 /* Check if the wait is alertable */
1069 if (Alertable)
1070 {
1071 /* It is, first check if the thread is alerted in this mode */
1072 if (Thread->Alerted[WaitMode])
1073 {
1074 /* It is, so bail out of the wait */
1075 Thread->Alerted[WaitMode] = FALSE;
1076 return STATUS_ALERTED;
1077 }
1078 else if ((WaitMode != KernelMode) &&
1079 (!IsListEmpty(&Thread->ApcState.ApcListHead[UserMode])))
1080 {
1081 /* It isn't, but this is a user wait with queued user APCs */
1082 Thread->ApcState.UserApcPending = TRUE;
1083 return STATUS_USER_APC;
1084 }
1085 else if (Thread->Alerted[KernelMode])
1086 {
1087 /* It isn't that either, but we're alerted in kernel mode */
1088 Thread->Alerted[KernelMode] = FALSE;
1089 return STATUS_ALERTED;
1090 }
1091 }
1092 else if ((WaitMode != KernelMode) && (Thread->ApcState.UserApcPending))
1093 {
1094 /* Not alertable, but this is a user wait with pending user APCs */
1095 return STATUS_USER_APC;
1096 }
1097
1098 /* Otherwise, we're fine */
1099 return STATUS_WAIT_0;
1100 }
1101
1102 //
1103 // Called by Wait and Queue code to insert a timer for dispatching.
1104 // Also called by KeSetTimerEx to insert a timer from the caller.
1105 //
1106 VOID
1107 FORCEINLINE
1108 KxInsertTimer(IN PKTIMER Timer,
1109 IN ULONG Hand)
1110 {
1111 PKSPIN_LOCK_QUEUE LockQueue;
1112
1113 /* Acquire the lock and release the dispatcher lock */
1114 LockQueue = KiAcquireTimerLock(Hand);
1115 KiReleaseDispatcherLockFromDpcLevel();
1116
1117 /* Try to insert the timer */
1118 if (KiInsertTimerTable(Timer, Hand))
1119 {
1120 /* Complete it */
1121 KiCompleteTimer(Timer, LockQueue);
1122 }
1123 else
1124 {
1125 /* Do nothing, just release the lock */
1126 KiReleaseTimerLock(LockQueue);
1127 }
1128 }
1129
1130 //
1131 // Called from Unlink and Queue Insert Code.
1132 // Also called by timer code when canceling an inserted timer.
1133 // Removes a timer from its tree.
1134 //
1135 VOID
1136 FORCEINLINE
1137 KxRemoveTreeTimer(IN PKTIMER Timer)
1138 {
1139 ULONG Hand = Timer->Header.Hand;
1140 PKSPIN_LOCK_QUEUE LockQueue;
1141 PKTIMER_TABLE_ENTRY TimerEntry;
1142
1143 /* Acquire timer lock */
1144 LockQueue = KiAcquireTimerLock(Hand);
1145
1146 /* Set the timer as non-inserted */
1147 Timer->Header.Inserted = FALSE;
1148
1149 /* Remove it from the timer list */
1150 if (RemoveEntryList(&Timer->TimerListEntry))
1151 {
1152 /* Get the entry and check if it's empty */
1153 TimerEntry = &KiTimerTableListHead[Hand];
1154 if (IsListEmpty(&TimerEntry->Entry))
1155 {
1156 /* Clear the time then */
1157 TimerEntry->Time.HighPart = 0xFFFFFFFF;
1158 }
1159 }
1160
1161 /* Release the timer lock */
1162 KiReleaseTimerLock(LockQueue);
1163 }
1164
1165 VOID
1166 FORCEINLINE
1167 KxSetTimerForThreadWait(IN PKTIMER Timer,
1168 IN LARGE_INTEGER Interval,
1169 OUT PULONG Hand)
1170 {
1171 ULONGLONG DueTime;
1172 LARGE_INTEGER InterruptTime, SystemTime, TimeDifference;
1173
1174 /* Check the timer's interval to see if it's absolute */
1175 Timer->Header.Absolute = FALSE;
1176 if (Interval.HighPart >= 0)
1177 {
1178 /* Get the system time and calculate the relative time */
1179 KeQuerySystemTime(&SystemTime);
1180 TimeDifference.QuadPart = SystemTime.QuadPart - Interval.QuadPart;
1181 Timer->Header.Absolute = TRUE;
1182
1183 /* Check if we've already expired */
1184 if (TimeDifference.HighPart >= 0)
1185 {
1186 /* Reset everything */
1187 Timer->DueTime.QuadPart = 0;
1188 *Hand = 0;
1189 Timer->Header.Hand = 0;
1190 return;
1191 }
1192 else
1193 {
1194 /* Update the interval */
1195 Interval = TimeDifference;
1196 }
1197 }
1198
1199 /* Calculate the due time */
1200 InterruptTime.QuadPart = KeQueryInterruptTime();
1201 DueTime = InterruptTime.QuadPart - Interval.QuadPart;
1202 Timer->DueTime.QuadPart = DueTime;
1203
1204 /* Calculate the timer handle */
1205 *Hand = KiComputeTimerTableIndex(DueTime);
1206 Timer->Header.Hand = (UCHAR)*Hand;
1207 }
1208
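//
// Worked example for KxSetTimerForThreadWait: a positive Interval is an
// absolute system time, so it is first converted to a relative value against
// KeQuerySystemTime; a negative Interval (e.g. -10000000 for one second) is
// already relative and is simply subtracted from the current interrupt time
// to yield the absolute DueTime and the timer table hand.
//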
1209 #define KxDelayThreadWait() \
1210 \
1211 /* Setup the Wait Block */ \
1212 Thread->WaitBlockList = TimerBlock; \
1213 \
1214 /* Setup the timer */ \
1215 KxSetTimerForThreadWait(Timer, *Interval, &Hand); \
1216 \
1217 /* Save the due time for the caller */ \
1218 DueTime.QuadPart = Timer->DueTime.QuadPart; \
1219 \
1220 /* Link the timer to this Wait Block */ \
1221 TimerBlock->NextWaitBlock = TimerBlock; \
1222 Timer->Header.WaitListHead.Flink = &TimerBlock->WaitListEntry; \
1223 Timer->Header.WaitListHead.Blink = &TimerBlock->WaitListEntry; \
1224 \
1225 /* Clear wait status */ \
1226 Thread->WaitStatus = STATUS_SUCCESS; \
1227 \
1228 /* Setup wait fields */ \
1229 Thread->Alertable = Alertable; \
1230 Thread->WaitReason = DelayExecution; \
1231 Thread->WaitMode = WaitMode; \
1232 \
1233 /* Check if we can swap the thread's stack */ \
1234 Thread->WaitListEntry.Flink = NULL; \
1235 Swappable = KiCheckThreadStackSwap(Thread, WaitMode); \
1236 \
1237 /* Set the wait time */ \
1238 Thread->WaitTime = KeTickCount.LowPart;
1239
1240 #define KxMultiThreadWait() \
1241 /* Link wait block array to the thread */ \
1242 Thread->WaitBlockList = WaitBlockArray; \
1243 \
1244 /* Reset the index */ \
1245 Index = 0; \
1246 \
1247 /* Loop wait blocks */ \
1248 do \
1249 { \
1250 /* Fill out the wait block */ \
1251 WaitBlock = &WaitBlockArray[Index]; \
1252 WaitBlock->Object = Object[Index]; \
1253 WaitBlock->WaitKey = (USHORT)Index; \
1254 WaitBlock->WaitType = WaitType; \
1255 WaitBlock->Thread = Thread; \
1256 \
1257 /* Link to next block */ \
1258 WaitBlock->NextWaitBlock = &WaitBlockArray[Index + 1]; \
1259 Index++; \
1260 } while (Index < Count); \
1261 \
1262 /* Link the last block */ \
1263 WaitBlock->NextWaitBlock = WaitBlockArray; \
1264 \
1265 /* Set default wait status */ \
1266 Thread->WaitStatus = STATUS_WAIT_0; \
1267 \
1268 /* Check if we have a timer */ \
1269 if (Timeout) \
1270 { \
1271 /* Link to the block */ \
1272 TimerBlock->NextWaitBlock = WaitBlockArray; \
1273 \
1274 /* Setup the timer */ \
1275 KxSetTimerForThreadWait(Timer, *Timeout, &Hand); \
1276 \
1277 /* Save the due time for the caller */ \
1278 DueTime.QuadPart = Timer->DueTime.QuadPart; \
1279 \
1280 /* Initialize the list */ \
1281 InitializeListHead(&Timer->Header.WaitListHead); \
1282 } \
1283 \
1284 /* Set wait settings */ \
1285 Thread->Alertable = Alertable; \
1286 Thread->WaitMode = WaitMode; \
1287 Thread->WaitReason = WaitReason; \
1288 \
1289 /* Check if we can swap the thread's stack */ \
1290 Thread->WaitListEntry.Flink = NULL; \
1291 Swappable = KiCheckThreadStackSwap(Thread, WaitMode); \
1292 \
1293 /* Set the wait time */ \
1294 Thread->WaitTime = KeTickCount.LowPart;
1295
1296 #define KxSingleThreadWait() \
1297 /* Setup the Wait Block */ \
1298 Thread->WaitBlockList = WaitBlock; \
1299 WaitBlock->WaitKey = STATUS_SUCCESS; \
1300 WaitBlock->Object = Object; \
1301 WaitBlock->WaitType = WaitAny; \
1302 \
1303 /* Clear wait status */ \
1304 Thread->WaitStatus = STATUS_SUCCESS; \
1305 \
1306 /* Check if we have a timer */ \
1307 if (Timeout) \
1308 { \
1309 /* Setup the timer */ \
1310 KxSetTimerForThreadWait(Timer, *Timeout, &Hand); \
1311 \
1312 /* Save the due time for the caller */ \
1313 DueTime.QuadPart = Timer->DueTime.QuadPart; \
1314 \
1315 /* Pointer to timer block */ \
1316 WaitBlock->NextWaitBlock = TimerBlock; \
1317 TimerBlock->NextWaitBlock = WaitBlock; \
1318 \
1319 /* Link the timer to this Wait Block */ \
1320 Timer->Header.WaitListHead.Flink = &TimerBlock->WaitListEntry; \
1321 Timer->Header.WaitListHead.Blink = &TimerBlock->WaitListEntry; \
1322 } \
1323 else \
1324 { \
1325 /* No timer block, just ourselves */ \
1326 WaitBlock->NextWaitBlock = WaitBlock; \
1327 } \
1328 \
1329 /* Set wait settings */ \
1330 Thread->Alertable = Alertable; \
1331 Thread->WaitMode = WaitMode; \
1332 Thread->WaitReason = WaitReason; \
1333 \
1334 /* Check if we can swap the thread's stack */ \
1335 Thread->WaitListEntry.Flink = NULL; \
1336 Swappable = KiCheckThreadStackSwap(Thread, WaitMode); \
1337 \
1338 /* Set the wait time */ \
1339 Thread->WaitTime = KeTickCount.LowPart;
1340
1341 #define KxQueueThreadWait() \
1342 /* Setup the Wait Block */ \
1343 Thread->WaitBlockList = WaitBlock; \
1344 WaitBlock->WaitKey = STATUS_SUCCESS; \
1345 WaitBlock->Object = Queue; \
1346 WaitBlock->WaitType = WaitAny; \
1347 WaitBlock->Thread = Thread; \
1348 \
1349 /* Clear wait status */ \
1350 Thread->WaitStatus = STATUS_SUCCESS; \
1351 \
1352 /* Check if we have a timer */ \
1353 if (Timeout) \
1354 { \
1355 /* Setup the timer */ \
1356 KxSetTimerForThreadWait(Timer, *Timeout, &Hand); \
1357 \
1358 /* Save the due time for the caller */ \
1359 DueTime.QuadPart = Timer->DueTime.QuadPart; \
1360 \
1361 /* Pointer to timer block */ \
1362 WaitBlock->NextWaitBlock = TimerBlock; \
1363 TimerBlock->NextWaitBlock = WaitBlock; \
1364 \
1365 /* Link the timer to this Wait Block */ \
1366 Timer->Header.WaitListHead.Flink = &TimerBlock->WaitListEntry; \
1367 Timer->Header.WaitListHead.Blink = &TimerBlock->WaitListEntry; \
1368 } \
1369 else \
1370 { \
1371 /* No timer block, just ourselves */ \
1372 WaitBlock->NextWaitBlock = WaitBlock; \
1373 } \
1374 \
1375 /* Set wait settings */ \
1376 Thread->Alertable = FALSE; \
1377 Thread->WaitMode = WaitMode; \
1378 Thread->WaitReason = WrQueue; \
1379 \
1380 /* Check if we can swap the thread's stack */ \
1381 Thread->WaitListEntry.Flink = NULL; \
1382 Swappable = KiCheckThreadStackSwap(Thread, WaitMode); \
1383 \
1384 /* Set the wait time */ \
1385 Thread->WaitTime = KeTickCount.LowPart;
1386
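//
// Note on the Kx*ThreadWait macros above: they expand inside the wait
// routines that use them and rely on locals declared by those callers (such
// as Thread, WaitBlock/WaitBlockArray, TimerBlock, Timer, Timeout, Hand,
// DueTime, Swappable, Alertable, WaitMode and WaitReason), which is why they
// are macros rather than functions.
//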
1387 //
1388 // Unwaits a Thread
1389 //
1390 FORCEINLINE
1391 VOID
1392 KxUnwaitThread(IN DISPATCHER_HEADER *Object,
1393 IN KPRIORITY Increment)
1394 {
1395 PLIST_ENTRY WaitEntry, WaitList;
1396 PKWAIT_BLOCK WaitBlock;
1397 PKTHREAD WaitThread;
1398 ULONG WaitKey;
1399
1400 /* Loop the Wait Entries */
1401 WaitList = &Object->WaitListHead;
1402 ASSERT(IsListEmpty(&Object->WaitListHead) == FALSE);
1403 WaitEntry = WaitList->Flink;
1404 do
1405 {
1406 /* Get the current wait block */
1407 WaitBlock = CONTAINING_RECORD(WaitEntry, KWAIT_BLOCK, WaitListEntry);
1408
1409 /* Get the waiting thread */
1410 WaitThread = WaitBlock->Thread;
1411
1412 /* Check the current wait type */
1413 if (WaitBlock->WaitType == WaitAny)
1414 {
1415 /* Use the actual wait key */
1416 WaitKey = WaitBlock->WaitKey;
1417 }
1418 else
1419 {
1420 /* Otherwise, use STATUS_KERNEL_APC */
1421 WaitKey = STATUS_KERNEL_APC;
1422 }
1423
1424 /* Unwait the thread */
1425 KiUnwaitThread(WaitThread, WaitKey, Increment);
1426
1427 /* Next entry */
1428 WaitEntry = WaitList->Flink;
1429 } while (WaitEntry != WaitList);
1430 }
1431
1432 //
1433 // Unwaits a Thread waiting on an event
1434 //
1435 FORCEINLINE
1436 VOID
1437 KxUnwaitThreadForEvent(IN PKEVENT Event,
1438 IN KPRIORITY Increment)
1439 {
1440 PLIST_ENTRY WaitEntry, WaitList;
1441 PKWAIT_BLOCK WaitBlock;
1442 PKTHREAD WaitThread;
1443
1444 /* Loop the Wait Entries */
1445 WaitList = &Event->Header.WaitListHead;
1446 ASSERT(IsListEmpty(&Event->Header.WaitListHead) == FALSE);
1447 WaitEntry = WaitList->Flink;
1448 do
1449 {
1450 /* Get the current wait block */
1451 WaitBlock = CONTAINING_RECORD(WaitEntry, KWAIT_BLOCK, WaitListEntry);
1452
1453 /* Get the waiting thread */
1454 WaitThread = WaitBlock->Thread;
1455
1456 /* Check the current wait type */
1457 if (WaitBlock->WaitType == WaitAny)
1458 {
1459 /* Un-signal it */
1460 Event->Header.SignalState = 0;
1461
1462 /* Unwait the thread and stop scanning */
1463 KiUnwaitThread(WaitThread, WaitBlock->WaitKey, Increment);
1464 break;
1465 }
1466
1467 /* Unwait the thread with STATUS_KERNEL_APC */
1468 KiUnwaitThread(WaitThread, STATUS_KERNEL_APC, Increment);
1469
1470 /* Next entry */
1471 WaitEntry = WaitList->Flink;
1472 } while (WaitEntry != WaitList);
1473 }
1474
1475 //
1476 // This routine queues a thread that is ready on the PRCB's ready lists.
1477 // If this thread cannot currently run on this CPU, then the thread is
1478 // added to the deferred ready list instead.
1479 //
1480 // This routine must be entered with the PRCB lock held and it will exit
1481 // with the PRCB lock released!
1482 //
1483 FORCEINLINE
1484 VOID
1485 KxQueueReadyThread(IN PKTHREAD Thread,
1486 IN PKPRCB Prcb)
1487 {
1488 BOOLEAN Preempted;
1489 KPRIORITY Priority;
1490
1491 /* Sanity checks */
1492 ASSERT(Prcb == KeGetCurrentPrcb());
1493 ASSERT(Thread->State == Running);
1494 ASSERT(Thread->NextProcessor == Prcb->Number);
1495
1496 /* Check if this thread is allowed to run in this CPU */
1497 #ifdef CONFIG_SMP
1498 if ((Thread->Affinity) & (Prcb->SetMember))
1499 #else
1500 if (TRUE)
1501 #endif
1502 {
1503 /* Set thread ready for execution */
1504 Thread->State = Ready;
1505
1506 /* Save current priority and if someone had pre-empted it */
1507 Priority = Thread->Priority;
1508 Preempted = Thread->Preempted;
1509
1510 /* Clear the preempted flag and set the wait time */
1511 Thread->Preempted = FALSE;
1512 Thread->WaitTime = KeTickCount.LowPart;
1513
1514 /* Sanity check */
1515 ASSERT((Priority >= 0) && (Priority <= HIGH_PRIORITY));
1516
1517 /* Insert this thread in the appropriate order */
1518 Preempted ? InsertHeadList(&Prcb->DispatcherReadyListHead[Priority],
1519 &Thread->WaitListEntry) :
1520 InsertTailList(&Prcb->DispatcherReadyListHead[Priority],
1521 &Thread->WaitListEntry);
1522
1523 /* Update the ready summary */
1524 Prcb->ReadySummary |= PRIORITY_MASK(Priority);
1525
1526 /* Sanity check */
1527 ASSERT(Priority == Thread->Priority);
1528
1529 /* Release the PRCB lock */
1530 KiReleasePrcbLock(Prcb);
1531 }
1532 else
1533 {
1534 /* Otherwise, prepare this thread to be deferred */
1535 Thread->State = DeferredReady;
1536 Thread->DeferredProcessor = Prcb->Number;
1537
1538 /* Release the lock and defer scheduling */
1539 KiReleasePrcbLock(Prcb);
1540 KiDeferredReadyThread(Thread);
1541 }
1542 }
1543
1544 //
1545 // This routine scans for an appropriate ready thread to select at the
1546 // given priority and for the given CPU.
1547 //
1548 FORCEINLINE
1549 PKTHREAD
1550 KiSelectReadyThread(IN KPRIORITY Priority,
1551 IN PKPRCB Prcb)
1552 {
1553 ULONG PrioritySet, HighPriority;
1554 PLIST_ENTRY ListEntry;
1555 PKTHREAD Thread = NULL;
1556
1557 /* Get the ready summary bits at or above the requested priority */
1558 PrioritySet = Prcb->ReadySummary >> Priority;
1559 if (!PrioritySet) goto Quickie;
1560
1561 /* Get the highest priority possible */
1562 BitScanReverse((PULONG)&HighPriority, PrioritySet);
1563 ASSERT((PrioritySet & PRIORITY_MASK(HighPriority)) != 0);
1564 HighPriority += Priority;
1565
1566 /* Make sure the list isn't empty at the highest priority */
1567 ASSERT(IsListEmpty(&Prcb->DispatcherReadyListHead[HighPriority]) == FALSE);
1568
1569 /* Get the first thread on the list */
1570 ListEntry = Prcb->DispatcherReadyListHead[HighPriority].Flink;
1571 Thread = CONTAINING_RECORD(ListEntry, KTHREAD, WaitListEntry);
1572
1573 /* Make sure this thread is here for a reason */
1574 ASSERT(HighPriority == Thread->Priority);
1575 ASSERT(Thread->Affinity & AFFINITY_MASK(Prcb->Number));
1576 ASSERT(Thread->NextProcessor == Prcb->Number);
1577
1578 /* Remove it from the list */
1579 if (RemoveEntryList(&Thread->WaitListEntry))
1580 {
1581 /* The list is empty now, reset the ready summary */
1582 Prcb->ReadySummary ^= PRIORITY_MASK(HighPriority);
1583 }
1584
1585 /* Sanity check and return the thread */
1586 Quickie:
1587 ASSERT((Thread == NULL) ||
1588 (Thread->BasePriority == 0) ||
1589 (Thread->Priority != 0));
1590 return Thread;
1591 }
1592
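//
// Worked example for KiSelectReadyThread: with ReadySummary = 0x0500 (threads
// queued at priorities 8 and 10) and a requested Priority of 8, PrioritySet
// is 0x05, BitScanReverse yields 2, and HighPriority becomes 10, so the
// thread is taken from the priority-10 ready list.
//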
1593 //
1594 // This routine computes the new priority for a thread. It is only valid for
1595 // threads with priorities in the dynamic priority range.
1596 //
1597 SCHAR
1598 FORCEINLINE
1599 KiComputeNewPriority(IN PKTHREAD Thread,
1600 IN SCHAR Adjustment)
1601 {
1602 SCHAR Priority;
1603
1604 /* Priority sanity checks */
1605 ASSERT((Thread->PriorityDecrement >= 0) &&
1606 (Thread->PriorityDecrement <= Thread->Priority));
1607 ASSERT((Thread->Priority < LOW_REALTIME_PRIORITY) ?
1608 TRUE : (Thread->PriorityDecrement == 0));
1609
1610 /* Get the current priority */
1611 Priority = Thread->Priority;
1612 if (Priority < LOW_REALTIME_PRIORITY)
1613 {
1614 /* Decrease priority by the priority decrement */
1615 Priority -= (Thread->PriorityDecrement + Adjustment);
1616
1617 /* Don't go out of bounds */
1618 if (Priority < Thread->BasePriority) Priority = Thread->BasePriority;
1619
1620 /* Reset the priority decrement */
1621 Thread->PriorityDecrement = 0;
1622 }
1623
1624 /* Sanity check */
1625 ASSERT((Thread->BasePriority == 0) || (Priority != 0));
1626
1627 /* Return the new priority */
1628 return Priority;
1629 }
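//
// Worked example for KiComputeNewPriority: a thread at Priority 14 with
// BasePriority 10, PriorityDecrement 3 and an Adjustment of 1 drops to
// 14 - (3 + 1) = 10. Had the result fallen below BasePriority it would have
// been clamped to BasePriority, and PriorityDecrement is reset to 0 either way.
//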