- Implement KiAcquireDispatcherObject, KiReleaseDispatcherObject, used on SMP for...
[reactos.git] / reactos / ntoskrnl / include / internal / ke_x.h
1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: GPL - See COPYING in the top level directory
 * FILE:            ntoskrnl/include/internal/ke_x.h
5 * PURPOSE: Internal Inlined Functions for the Kernel
6 * PROGRAMMERS: Alex Ionescu (alex.ionescu@reactos.org)
7 */
8
//
// Enters a Guarded Region
//
// Disables special kernel APC delivery to the current thread by
// decrementing SpecialApcDisable. Must be invoked at IRQL <= APC_LEVEL.
// Wrapped in do { } while (0) so the macro expands as a single statement
// and remains safe inside an unbraced if/else.
//
#define KeEnterGuardedRegion()                                              \
do                                                                          \
{                                                                           \
    PKTHREAD Thread = KeGetCurrentThread();                                 \
                                                                            \
    /* Sanity checks */                                                     \
    ASSERT_IRQL_LESS_OR_EQUAL(APC_LEVEL);                                   \
    ASSERT(Thread == KeGetCurrentThread());                                 \
    ASSERT((Thread->SpecialApcDisable <= 0) &&                              \
           (Thread->SpecialApcDisable != -32768));                          \
                                                                            \
    /* Disable Special APCs */                                              \
    Thread->SpecialApcDisable--;                                            \
} while (0)
25
//
// Leaves a Guarded Region
//
// Re-enables special kernel APC delivery by incrementing SpecialApcDisable.
// When the count returns to zero and kernel-mode APCs are queued, delivery
// is requested immediately. Wrapped in do { } while (0) for statement-safe
// expansion.
//
#define KeLeaveGuardedRegion()                                              \
do                                                                          \
{                                                                           \
    PKTHREAD Thread = KeGetCurrentThread();                                 \
                                                                            \
    /* Sanity checks */                                                     \
    ASSERT_IRQL_LESS_OR_EQUAL(APC_LEVEL);                                   \
    ASSERT(Thread == KeGetCurrentThread());                                 \
    ASSERT(Thread->SpecialApcDisable < 0);                                  \
                                                                            \
    /* Leave region and check if APCs are OK now */                         \
    if (!(++Thread->SpecialApcDisable))                                     \
    {                                                                       \
        /* Check for Kernel APCs on the list */                             \
        if (!IsListEmpty(&Thread->ApcState.                                 \
                         ApcListHead[KernelMode]))                          \
        {                                                                   \
            /* Check for APC Delivery */                                    \
            KiCheckForKernelApcDelivery();                                  \
        }                                                                   \
    }                                                                       \
} while (0)
50
51 //
52 // TODO: Guarded Mutex Routines
53 //
54
//
// Enters a Critical Region
//
// Disables normal kernel APC delivery by decrementing KernelApcDisable.
// The NULL check allows very early boot callers that run before a thread
// exists. Wrapped in do { } while (0) for statement-safe expansion.
//
#define KeEnterCriticalRegion()                                             \
do                                                                          \
{                                                                           \
    PKTHREAD Thread = KeGetCurrentThread();                                 \
    if (Thread)                                                             \
    {                                                                       \
        /* Sanity checks */                                                 \
        ASSERT(Thread == KeGetCurrentThread());                             \
        ASSERT((Thread->KernelApcDisable <= 0) &&                           \
               (Thread->KernelApcDisable != -32768));                       \
                                                                            \
        /* Disable Kernel APCs */                                           \
        Thread->KernelApcDisable--;                                         \
    }                                                                       \
} while (0)
72
//
// Leaves a Critical Region
//
// Re-enables normal kernel APC delivery by incrementing KernelApcDisable.
// When the count returns to zero and kernel-mode APCs are queued, delivery
// is requested. The original body re-tested KernelApcDisable a second time
// inside the already-guarded branch; that redundant check is removed.
// Wrapped in do { } while (0) for statement-safe expansion.
//
#define KeLeaveCriticalRegion()                                             \
do                                                                          \
{                                                                           \
    PKTHREAD Thread = KeGetCurrentThread();                                 \
    if (Thread)                                                             \
    {                                                                       \
        /* Sanity checks */                                                 \
        ASSERT(Thread == KeGetCurrentThread());                             \
        ASSERT(Thread->KernelApcDisable < 0);                               \
                                                                            \
        /* Enable Kernel APCs */                                            \
        Thread->KernelApcDisable++;                                         \
                                                                            \
        /* Check if Kernel APCs are now enabled */                          \
        if (!(Thread->KernelApcDisable))                                    \
        {                                                                   \
            /* Check if we need to request an APC Delivery */               \
            if (!IsListEmpty(&Thread->ApcState.ApcListHead[KernelMode]))    \
            {                                                               \
                /* Check for the right environment */                       \
                KiCheckForKernelApcDelivery();                              \
            }                                                               \
        }                                                                   \
    }                                                                       \
} while (0)
101
//
// Satisfies the wait of any dispatcher object
//
// Mutants: decrement the signal state; on the 0 -> owned transition, record
// the owner, apply the APC-disable delta, report abandonment through the
// thread's WaitStatus, and link the mutant into the owner's mutant list.
// Synchronization events/timers: auto-clear to non-signaled.
// Semaphores: decrement only (they can hold multiple signals).
// Both arguments are now parenthesized at every use (macro hygiene) and the
// body is do { } while (0)-wrapped for statement-safe expansion.
//
#define KiSatisfyObjectWait(Object, Thread)                                 \
do                                                                          \
{                                                                           \
    /* Special case for Mutants */                                          \
    if ((Object)->Header.Type == MutantObject)                              \
    {                                                                       \
        /* Decrease the Signal State */                                     \
        (Object)->Header.SignalState--;                                     \
                                                                            \
        /* Check if it's now non-signaled */                                \
        if (!(Object)->Header.SignalState)                                  \
        {                                                                   \
            /* Set the Owner Thread */                                      \
            (Object)->OwnerThread = (Thread);                               \
                                                                            \
            /* Disable APCs if needed */                                    \
            (Thread)->KernelApcDisable -= (Object)->ApcDisable;             \
                                                                            \
            /* Check if it's abandoned */                                   \
            if ((Object)->Abandoned)                                        \
            {                                                               \
                /* Unabandon it */                                          \
                (Object)->Abandoned = FALSE;                                \
                                                                            \
                /* Return Status */                                         \
                (Thread)->WaitStatus = STATUS_ABANDONED;                    \
            }                                                               \
                                                                            \
            /* Insert it into the Mutant List */                            \
            InsertHeadList((Thread)->MutantListHead.Blink,                  \
                           &(Object)->MutantListEntry);                     \
        }                                                                   \
    }                                                                       \
    else if (((Object)->Header.Type & TIMER_OR_EVENT_TYPE) ==               \
             EventSynchronizationObject)                                    \
    {                                                                       \
        /* Synchronization Timers and Events just get un-signaled */        \
        (Object)->Header.SignalState = 0;                                   \
    }                                                                       \
    else if ((Object)->Header.Type == SemaphoreObject)                      \
    {                                                                       \
        /* These ones can have multiple states, so we only decrease it */   \
        (Object)->Header.SignalState--;                                     \
    }                                                                       \
} while (0)
149
//
// Satisfies the wait of a mutant dispatcher object
//
// Specialized fast path of KiSatisfyObjectWait for mutants only: decrement
// the signal state, and on the 0 -> owned transition record the owner,
// apply the APC-disable delta, report abandonment via WaitStatus, and link
// the mutant into the owner's mutant list. Arguments parenthesized and the
// body do { } while (0)-wrapped for macro hygiene.
//
#define KiSatisfyMutantWait(Object, Thread)                                 \
do                                                                          \
{                                                                           \
    /* Decrease the Signal State */                                         \
    (Object)->Header.SignalState--;                                         \
                                                                            \
    /* Check if it's now non-signaled */                                    \
    if (!(Object)->Header.SignalState)                                      \
    {                                                                       \
        /* Set the Owner Thread */                                          \
        (Object)->OwnerThread = (Thread);                                   \
                                                                            \
        /* Disable APCs if needed */                                        \
        (Thread)->KernelApcDisable -= (Object)->ApcDisable;                 \
                                                                            \
        /* Check if it's abandoned */                                       \
        if ((Object)->Abandoned)                                            \
        {                                                                   \
            /* Unabandon it */                                              \
            (Object)->Abandoned = FALSE;                                    \
                                                                            \
            /* Return Status */                                             \
            (Thread)->WaitStatus = STATUS_ABANDONED;                        \
        }                                                                   \
                                                                            \
        /* Insert it into the Mutant List */                                \
        InsertHeadList((Thread)->MutantListHead.Blink,                      \
                       &(Object)->MutantListEntry);                         \
    }                                                                       \
} while (0)
182
//
// Satisfies the wait of any nonmutant dispatcher object
//
// Synchronization events/timers auto-clear; semaphores decrement. All other
// object types keep their signal state. The Thread argument is unused here
// but kept so the macro stays call-compatible with KiSatisfyObjectWait.
// do { } while (0)-wrapped for statement-safe expansion.
//
#define KiSatisfyNonMutantWait(Object, Thread)                              \
do                                                                          \
{                                                                           \
    if (((Object)->Header.Type & TIMER_OR_EVENT_TYPE) ==                    \
        EventSynchronizationObject)                                         \
    {                                                                       \
        /* Synchronization Timers and Events just get un-signaled */        \
        (Object)->Header.SignalState = 0;                                   \
    }                                                                       \
    else if ((Object)->Header.Type == SemaphoreObject)                      \
    {                                                                       \
        /* These ones can have multiple states, so we only decrease it */   \
        (Object)->Header.SignalState--;                                     \
    }                                                                       \
} while (0)
200
201 //
202 // Recalculates the due time
203 //
204 PLARGE_INTEGER
205 FORCEINLINE
206 KiRecalculateDueTime(IN PLARGE_INTEGER OriginalDueTime,
207 IN PLARGE_INTEGER DueTime,
208 IN OUT PLARGE_INTEGER NewDueTime)
209 {
210 /* Don't do anything for absolute waits */
211 if (OriginalDueTime->QuadPart >= 0) return OriginalDueTime;
212
213 /* Otherwise, query the interrupt time and recalculate */
214 NewDueTime->QuadPart = KeQueryInterruptTime();
215 NewDueTime->QuadPart -= DueTime->QuadPart;
216 return NewDueTime;
217 }
218
//
// Determines whether a thread's stack may be swapped out while it waits
//
// Kernel-mode waiters and high-priority (>= LOW_REALTIME_PRIORITY + 9)
// threads are never swappable; otherwise the thread's EnableStackSwap flag
// decides. The result is computed as a single boolean expression instead
// of a TRUE/FALSE branch pair, arguments are parenthesized, and the body
// is do { } while (0)-wrapped for macro hygiene.
//
#define KiCheckThreadStackSwap(WaitMode, Thread, Swappable)                 \
do                                                                          \
{                                                                           \
    /* Swappable only for user-mode, swap-enabled, lower-priority waits */  \
    Swappable = (((WaitMode) != KernelMode) &&                              \
                 ((Thread)->EnableStackSwap) &&                             \
                 ((Thread)->Priority < (LOW_REALTIME_PRIORITY + 9)));       \
} while (0)
238
//
// Adds a thread to the wait list
//
// Only swappable waiters are tracked on the current PRCB's wait list (the
// balance set manager scans that list for stacks it may page out).
// Thread is parenthesized and the body do { } while (0)-wrapped for
// macro hygiene.
//
#define KiAddThreadToWaitList(Thread, Swappable)                            \
do                                                                          \
{                                                                           \
    /* Make sure it's swappable */                                          \
    if (Swappable)                                                          \
    {                                                                       \
        /* Insert it into the PRCB's List */                                \
        InsertTailList(&KeGetCurrentPrcb()->WaitListHead,                   \
                       &(Thread)->WaitListEntry);                           \
    }                                                                       \
} while (0)
252
//
// Rules for checking alertability:
// - For Alertable waits ONLY:
//   * We don't wait and return STATUS_ALERTED if the thread is alerted
//     in EITHER the specified wait mode OR in Kernel Mode.
// - For BOTH Alertable AND Non-Alertable waits:
//   * We don't wait and return STATUS_USER_APC if the User Mode APC list
//     is not empty AND the wait mode is User Mode.
//
// NOTE: this macro is deliberately NOT wrapped in do { } while (0): the
// break statements are meant to break out of the CALLER's wait loop, and
// the macro reads the caller's locals Alertable, WaitMode, CurrentThread
// and WaitStatus. It must only be expanded inside a wait loop that
// declares all four.
//
#define KiCheckAlertability()                                               \
{                                                                           \
    if (Alertable)                                                          \
    {                                                                       \
        /* Alerted in the requested mode: consume the alert and bail */     \
        if (CurrentThread->Alerted[(int)WaitMode])                          \
        {                                                                   \
            CurrentThread->Alerted[(int)WaitMode] = FALSE;                  \
            WaitStatus = STATUS_ALERTED;                                    \
            break;                                                          \
        }                                                                   \
        else if ((WaitMode != KernelMode) &&                                \
                 (!IsListEmpty(&CurrentThread->                             \
                               ApcState.ApcListHead[UserMode])))            \
        {                                                                   \
            /* User-mode wait with queued user APCs: deliver instead */     \
            CurrentThread->ApcState.UserApcPending = TRUE;                  \
            WaitStatus = STATUS_USER_APC;                                   \
            break;                                                          \
        }                                                                   \
        else if (CurrentThread->Alerted[KernelMode])                        \
        {                                                                   \
            /* Kernel-mode alert also satisfies an alertable wait */        \
            CurrentThread->Alerted[KernelMode] = FALSE;                     \
            WaitStatus = STATUS_ALERTED;                                    \
            break;                                                          \
        }                                                                   \
    }                                                                       \
    else if ((WaitMode != KernelMode) &&                                    \
             (CurrentThread->ApcState.UserApcPending))                      \
    {                                                                       \
        /* Non-alertable user wait still yields to a pending user APC */    \
        WaitStatus = STATUS_USER_APC;                                       \
        break;                                                              \
    }                                                                       \
}
294
//
// Unwaits a Thread
//
// Walks the wait list of a signaled dispatcher object and wakes every
// waiter. WaitAny waiters are satisfied with their own wait key; WaitAll
// waiters are woken with STATUS_KERNEL_APC so they re-evaluate the full
// wait. NOTE(review): the loop re-reads WaitList->Flink after each
// KiUnwaitThread call, so it relies on KiUnwaitThread unlinking the wait
// block from this list — confirm against KiUnwaitThread, otherwise this
// would spin forever on the same entry.
//
FORCEINLINE
VOID
KxUnwaitThread(IN DISPATCHER_HEADER *Object,
               IN KPRIORITY Increment)
{
    PLIST_ENTRY WaitEntry, WaitList;
    PKWAIT_BLOCK CurrentWaitBlock;
    PKTHREAD WaitThread;
    ULONG WaitKey;

    /* Loop the Wait Entries */
    WaitList = &Object->WaitListHead;
    WaitEntry = WaitList->Flink;
    do
    {
        /* Get the current wait block */
        CurrentWaitBlock = CONTAINING_RECORD(WaitEntry,
                                             KWAIT_BLOCK,
                                             WaitListEntry);

        /* Get the waiting thread */
        WaitThread = CurrentWaitBlock->Thread;

        /* Check the current Wait Mode */
        if (CurrentWaitBlock->WaitType == WaitAny)
        {
            /* Use the actual wait key */
            WaitKey = CurrentWaitBlock->WaitKey;
        }
        else
        {
            /* Otherwise, use STATUS_KERNEL_APC */
            WaitKey = STATUS_KERNEL_APC;
        }

        /* Unwait the thread */
        KiUnwaitThread(WaitThread, WaitKey, Increment);

        /* Next entry (head was relinked by the unwait above) */
        WaitEntry = WaitList->Flink;
    } while (WaitEntry != WaitList);
}
340
//
// Unwaits a Thread waiting on an event
//
// Walks the event's wait list: the first WaitAny waiter consumes the
// signal (SignalState is cleared) and the scan stops; WaitAll waiters
// encountered before it are woken with STATUS_KERNEL_APC to re-evaluate
// their wait. NOTE(review): as with KxUnwaitThread, the loop re-reads
// WaitList->Flink and relies on KiUnwaitThread unlinking the wait block.
//
FORCEINLINE
VOID
KxUnwaitThreadForEvent(IN PKEVENT Event,
                       IN KPRIORITY Increment)
{
    PLIST_ENTRY WaitEntry, WaitList;
    PKWAIT_BLOCK CurrentWaitBlock;
    PKTHREAD WaitThread;

    /* Loop the Wait Entries */
    WaitList = &Event->Header.WaitListHead;
    WaitEntry = WaitList->Flink;
    do
    {
        /* Get the current wait block */
        CurrentWaitBlock = CONTAINING_RECORD(WaitEntry,
                                             KWAIT_BLOCK,
                                             WaitListEntry);

        /* Get the waiting thread */
        WaitThread = CurrentWaitBlock->Thread;

        /* Check the current Wait Mode */
        if (CurrentWaitBlock->WaitType == WaitAny)
        {
            /* Un-signal it */
            Event->Header.SignalState = 0;

            /* Un-signal the event and unwait the thread */
            KiUnwaitThread(WaitThread, CurrentWaitBlock->WaitKey, Increment);
            break;
        }
        else
        {
            /* Unwait the thread with STATUS_KERNEL_APC */
            KiUnwaitThread(WaitThread, STATUS_KERNEL_APC, Increment);
        }

        /* Next entry */
        WaitEntry = WaitList->Flink;
    } while (WaitEntry != WaitList);
}
386
387 #ifndef _CONFIG_SMP
//
// Spinlock Acquire at IRQL >= DISPATCH_LEVEL
// (UP stub: being at or above DISPATCH_LEVEL already guarantees exclusion)
//
FORCEINLINE
VOID
KxAcquireSpinLock(IN PKSPIN_LOCK SpinLock)
{
    /* On UP builds, spinlocks don't exist at IRQL >= DISPATCH */
    UNREFERENCED_PARAMETER(SpinLock);
}

//
// Spinlock Release at IRQL >= DISPATCH_LEVEL
// (UP stub: nothing was taken, so nothing to release)
//
FORCEINLINE
VOID
KxReleaseSpinLock(IN PKSPIN_LOCK SpinLock)
{
    /* On UP builds, spinlocks don't exist at IRQL >= DISPATCH */
    UNREFERENCED_PARAMETER(SpinLock);
}
409
410 //
411 // This routine protects against multiple CPU acquires, it's meaningless on UP.
412 //
413 VOID
414 FORCEINLINE
415 KiAcquireDispatcherObject(IN DISPATCHER_HEADER* Object)
416 {
417 UNREFERENCED_PARAMETER(Object);
418 }
419
420 //
421 // This routine protects against multiple CPU acquires, it's meaningless on UP.
422 //
423 VOID
424 FORCEINLINE
425 KiReleaseDispatcherObject(IN DISPATCHER_HEADER* Object)
426 {
427 UNREFERENCED_PARAMETER(Object);
428 }
429
//
// Dispatcher lock, UP flavor: there is no actual lock word — raising to
// DISPATCH_LEVEL is sufficient to lock out all other dispatcher work.
//
KIRQL
FORCEINLINE
KiAcquireDispatcherLock(VOID)
{
    /* Raise to DPC level */
    return KeRaiseIrqlToDpcLevel();
}

VOID
FORCEINLINE
KiReleaseDispatcherLock(IN KIRQL OldIrql)
{
    /* Just exit the dispatcher (handles deferred ready work + IRQL drop) */
    KiExitDispatcher(OldIrql);
}

VOID
FORCEINLINE
KiAcquireDispatcherLockAtDpcLevel(VOID)
{
    /* This is a no-op at DPC Level for UP systems */
    return;
}

VOID
FORCEINLINE
KiReleaseDispatcherLockFromDpcLevel(VOID)
{
    /* This is a no-op at DPC Level for UP systems */
    return;
}
461
//
// This routine makes the thread deferred ready on the boot CPU.
// On UP there is no deferred list to queue onto, so the thread is made
// ready immediately.
//
FORCEINLINE
VOID
KiInsertDeferredReadyList(IN PKTHREAD Thread)
{
    /* Set the thread to deferred state and boot CPU */
    Thread->State = DeferredReady;
    Thread->DeferredProcessor = 0;

    /* Make the thread ready immediately */
    KiDeferredReadyThread(Thread);
}

//
// Cross-CPU reschedule request — meaningless with a single CPU.
//
FORCEINLINE
VOID
KiRescheduleThread(IN BOOLEAN NewThread,
                   IN ULONG Cpu)
{
    /* This is meaningless on UP systems */
    UNREFERENCED_PARAMETER(NewThread);
    UNREFERENCED_PARAMETER(Cpu);
}
486
//
// UP stubs for the per-thread / per-PRCB spin locks: with one CPU there is
// nothing to protect against, so these compile away entirely.
//

//
// This routine protects against multiple CPU acquires, it's meaningless on UP.
//
FORCEINLINE
VOID
KiSetThreadSwapBusy(IN PKTHREAD Thread)
{
    UNREFERENCED_PARAMETER(Thread);
}

//
// This routine protects against multiple CPU acquires, it's meaningless on UP.
//
FORCEINLINE
VOID
KiAcquirePrcbLock(IN PKPRCB Prcb)
{
    UNREFERENCED_PARAMETER(Prcb);
}

//
// This routine protects against multiple CPU acquires, it's meaningless on UP.
//
FORCEINLINE
VOID
KiReleasePrcbLock(IN PKPRCB Prcb)
{
    UNREFERENCED_PARAMETER(Prcb);
}

//
// This routine protects against multiple CPU acquires, it's meaningless on UP.
//
FORCEINLINE
VOID
KiAcquireThreadLock(IN PKTHREAD Thread)
{
    UNREFERENCED_PARAMETER(Thread);
}

//
// This routine protects against multiple CPU acquires, it's meaningless on UP.
//
FORCEINLINE
VOID
KiReleaseThreadLock(IN PKTHREAD Thread)
{
    UNREFERENCED_PARAMETER(Thread);
}

//
// This routine protects against multiple CPU acquires, it's meaningless on UP.
// The UP stub never reports the lock as taken.
//
FORCEINLINE
BOOLEAN
KiTryThreadLock(IN PKTHREAD Thread)
{
    UNREFERENCED_PARAMETER(Thread);
    return FALSE;
}
547
//
// Deferred ready scan — UP systems have no deferred ready lists.
//
FORCEINLINE
VOID
KiCheckDeferredReadyList(IN PKPRCB Prcb)
{
    /* There are no deferred ready lists on UP systems */
    UNREFERENCED_PARAMETER(Prcb);
}

//
// Rundown of per-CPU state owned by a dying thread. On UP only the FPU
// ownership needs clearing; fninit resets the x87 state so no stale
// context leaks to the next FPU owner.
//
FORCEINLINE
VOID
KiRundownThread(IN PKTHREAD Thread)
{
    /* Check if this is the NPX Thread */
    if (KeGetCurrentPrcb()->NpxThread == Thread)
    {
        /* Clear it */
        KeGetCurrentPrcb()->NpxThread = NULL;
#ifdef __GNUC__
        __asm__("fninit\n\t");
#else
        __asm fninit;
#endif
    }
}

//
// APC interrupt request — on UP the APC is delivered in place, so no
// interrupt needs to be raised here.
//
FORCEINLINE
VOID
KiRequestApcInterrupt(IN BOOLEAN NeedApc,
                      IN UCHAR Processor)
{
    /* We deliver instantly on UP */
    UNREFERENCED_PARAMETER(NeedApc);
    UNREFERENCED_PARAMETER(Processor);
}
582
583 #else
584
585 //
586 // Spinlock Acquisition at IRQL >= DISPATCH_LEVEL
587 //
588 FORCEINLINE
589 VOID
590 KxAcquireSpinLock(IN PKSPIN_LOCK SpinLock)
591 {
592 for (;;)
593 {
594 /* Try to acquire it */
595 if (InterlockedBitTestAndSet((PLONG)SpinLock, 0))
596 {
597 /* Value changed... wait until it's locked */
598 while (*(volatile KSPIN_LOCK *)SpinLock == 1)
599 {
600 #ifdef DBG
601 /* On debug builds, we use a much slower but useful routine */
602 Kii386SpinOnSpinLock(SpinLock, 5);
603 #else
604 /* Otherwise, just yield and keep looping */
605 YieldProcessor();
606 #endif
607 }
608 }
609 else
610 {
611 #ifdef DBG
612 /* On debug builds, we OR in the KTHREAD */
613 *SpinLock = KeGetCurrentThread() | 1;
614 #endif
615 /* All is well, break out */
616 break;
617 }
618 }
619 }
620
621 //
622 // Spinlock Release at IRQL >= DISPATCH_LEVEL
623 //
624 FORCEINLINE
625 VOID
626 KxReleaseSpinLock(IN PKSPIN_LOCK SpinLock)
627 {
628 #ifdef DBG
629 /* Make sure that the threads match */
630 if ((KeGetCurrentThread() | 1) != *SpinLock)
631 {
632 /* They don't, bugcheck */
633 KeBugCheckEx(SPIN_LOCK_NOT_OWNED, SpinLock, 0, 0, 0);
634 }
635 #endif
636 /* Clear the lock */
637 InterlockedAnd(SpinLock, 0);
638 }
639
640 KIRQL
641 FORCEINLINE
642 KiAcquireDispatcherObject(IN DISPATCHER_HEADER* Object)
643 {
644 LONG OldValue, NewValue;
645
646 /* Make sure we're at a safe level to touch the lock */
647 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
648
649 /* Start acquire loop */
650 do
651 {
652 /* Loop until the other CPU releases it */
653 while ((UCHAR)Object->Lock & KOBJECT_LOCK_BIT)
654 {
655 /* Let the CPU know that this is a loop */
656 YieldProcessor();
657 };
658
659 /* Try acquiring the lock now */
660 NewValue = InterlockedCompareExchange(&Object->Lock,
661 OldValue | KOBJECT_LOCK_BIT,
662 OldValue);
663 } while (NewValue != OldValue);
664 }
665
666 KIRQL
667 FORCEINLINE
668 KiReleaseDispatcherObject(IN DISPATCHER_HEADER* Object)
669 {
670 /* Make sure we're at a safe level to touch the lock */
671 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
672
673 /* Release it */
674 InterlockedAnd(&Object->Lock, ~KOBJECT_LOCK_BIT);
675 }
676
//
// Dispatcher lock, SMP flavor: backed by the queued dispatcher spinlock.
//
KIRQL
FORCEINLINE
KiAcquireDispatcherLock(VOID)
{
    /* Raise to synchronization level and acquire the dispatcher lock */
    return KeAcquireQueuedSpinLockRaiseToSynch(LockQueueDispatcherLock);
}

VOID
FORCEINLINE
KiReleaseDispatcherLock(IN KIRQL OldIrql)
{
    /* First release the lock (still at dispatch level) */
    KeReleaseQueuedSpinLockFromDpcLevel(&KeGetCurrentPrcb()->
                                        LockQueue[LockQueueDispatcherLock]);

    /* Then exit the dispatcher (deferred ready work + IRQL drop) */
    KiExitDispatcher(OldIrql);
}
696
//
// This routine inserts a thread into the deferred ready list of the given CPU
// (the current PRCB's singly-linked deferred list; processed later by
// KiProcessDeferredReadyList).
//
FORCEINLINE
VOID
KiInsertDeferredReadyList(IN PKTHREAD Thread)
{
    PKPRCB Prcb = KeGetCurrentPrcb();

    /* Set the thread to deferred state and CPU */
    Thread->State = DeferredReady;
    Thread->DeferredProcessor = Prcb->Number;

    /* Add it on the list */
    PushEntryList(&Prcb->DeferredReadyListHead, &Thread->SwapListEntry);
}

//
// Requests a reschedule on another CPU when a new thread was selected there.
//
FORCEINLINE
VOID
KiRescheduleThread(IN BOOLEAN NewThread,
                   IN ULONG Cpu)
{
    /* Check if a new thread needs to be scheduled on a different CPU */
    if ((NewThread) && !(KeGetPcr()->Number == Cpu))
    {
        /* Send an IPI to request delivery */
        KiIpiSendRequest(AFFINITY_MASK(Cpu), IPI_DPC);
    }
}
726
//
// This routine sets the current thread in a swap busy state, which ensures
// that nobody else tries to swap it concurrently.
// NOTE(review): plain store, not interlocked — presumably callers already
// hold the thread/PRCB lock when flipping this; confirm at the call sites.
//
FORCEINLINE
VOID
KiSetThreadSwapBusy(IN PKTHREAD Thread)
{
    /* Make sure nobody already set it */
    ASSERT(Thread->SwapBusy == FALSE);

    /* Set it ourselves */
    Thread->SwapBusy = TRUE;
}
741
//
// This routine acquires the PRCB lock so that only one caller can touch
// volatile PRCB data.
//
// Since this is a simple optimized spin-lock, it must only be acquired
// at dispatcher level or higher!
//
FORCEINLINE
VOID
KiAcquirePrcbLock(IN PKPRCB Prcb)
{
    /* Make sure we're at a safe level to touch the PRCB lock */
    ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);

    /* Start acquire loop */
    for (;;)
    {
        /* Acquire the lock and break out if we acquired it first */
        if (!InterlockedExchange(&Prcb->PrcbLock, 1)) break;

        /* Loop until the other CPU releases it (plain reads, no bus lock) */
        do
        {
            /* Let the CPU know that this is a loop */
            YieldProcessor();
        } while (Prcb->PrcbLock);
    }
}

//
// This routine releases the PRCB lock so that other callers can touch
// volatile PRCB data.
//
// Since this is a simple optimized spin-lock, it must only be released
// at dispatcher level or higher!
//
FORCEINLINE
VOID
KiReleasePrcbLock(IN PKPRCB Prcb)
{
    /* Make sure it's acquired! */
    ASSERT(Prcb->PrcbLock != 0);

    /* Release it */
    InterlockedAnd(&Prcb->PrcbLock, 0);
}
788
//
// This routine acquires the thread lock so that only one caller can touch
// volatile thread data.
//
// Since this is a simple optimized spin-lock, it must only be acquired
// at dispatcher level or higher!
//
FORCEINLINE
VOID
KiAcquireThreadLock(IN PKTHREAD Thread)
{
    /* Make sure we're at a safe level to touch the thread lock */
    ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);

    /* Start acquire loop */
    for (;;)
    {
        /* Acquire the lock and break out if we acquired it first */
        if (!InterlockedExchange(&Thread->ThreadLock, 1)) break;

        /* Loop until the other CPU releases it (plain reads, no bus lock) */
        do
        {
            /* Let the CPU know that this is a loop */
            YieldProcessor();
        } while (Thread->ThreadLock);
    }
}

//
// This routine releases the thread lock so that other callers can touch
// volatile thread data.
//
// Since this is a simple optimized spin-lock, it must only be released
// at dispatcher level or higher!
//
FORCEINLINE
VOID
KiReleaseThreadLock(IN PKTHREAD Thread)
{
    /* Release it */
    InterlockedAnd(&Thread->ThreadLock, 0);
}
832
833 FORCEINLINE
834 BOOLEAN
835 KiTryThreadLock(IN PKTHREAD Thread)
836 {
837 LONG Value;
838
839 /* If the lock isn't acquired, return false */
840 if (!Thread->ThreadLock) return FALSE;
841
842 /* Otherwise, try to acquire it and check the result */
843 Value = 1;
844 Value = InterlockedExchange(&Thread->ThreadLock, &Value);
845
846 /* Return the lock state */
847 return (Value == TRUE);
848 }
849
//
// Processes the PRCB's deferred ready list if it is non-empty.
//
FORCEINLINE
VOID
KiCheckDeferredReadyList(IN PKPRCB Prcb)
{
    /* Scan the deferred ready lists if required */
    if (Prcb->DeferredReadyListHead.Next) KiProcessDeferredReadyList(Prcb);
}
857
858 FORCEINLINE
859 VOID
860 KiRequestApcInterrupt(IN BOOLEAN NeedApc,
861 IN UCHAR Processor)
862 {
863 /* Check if we need to request APC delivery */
864 if (NeedApc)
865 {
866 /* Check if it's on another CPU */
867 if (KeGetPcr()->Number != Cpu)
868 {
869 /* Send an IPI to request delivery */
870 KiIpiSendRequest(AFFINITY_MASK(Cpu), IPI_DPC);
871 }
872 else
873 {
874 /* Request a software interrupt */
875 HalRequestSoftwareInterrupt(APC_LEVEL);
876 }
877 }
878 }
879
880 #endif
881
//
// Per-thread APC queue lock helpers. All variants use the in-stack queued
// spinlock API; the suffix states the IRQL contract of the caller.
//
FORCEINLINE
VOID
KiAcquireApcLock(IN PKTHREAD Thread,
                 IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Acquire the lock and raise to synchronization level */
    KeAcquireInStackQueuedSpinLockRaiseToSynch(&Thread->ApcQueueLock, Handle);
}

FORCEINLINE
VOID
KiAcquireApcLockAtDpcLevel(IN PKTHREAD Thread,
                           IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Acquire the lock (caller is already at DISPATCH_LEVEL or above) */
    KeAcquireInStackQueuedSpinLockAtDpcLevel(&Thread->ApcQueueLock, Handle);
}

FORCEINLINE
VOID
KiAcquireApcLockAtApcLevel(IN PKTHREAD Thread,
                           IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Acquire the lock (raises IRQL itself) */
    KeAcquireInStackQueuedSpinLock(&Thread->ApcQueueLock, Handle);
}

FORCEINLINE
VOID
KiReleaseApcLock(IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Release the lock and restore the saved IRQL */
    KeReleaseInStackQueuedSpinLock(Handle);
}

FORCEINLINE
VOID
KiReleaseApcLockFromDpcLevel(IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Release the lock without lowering IRQL */
    KeReleaseInStackQueuedSpinLockFromDpcLevel(Handle);
}
924
//
// Per-process lock helpers, same in-stack queued spinlock pattern as the
// APC lock helpers above.
//
FORCEINLINE
VOID
KiAcquireProcessLock(IN PKPROCESS Process,
                     IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Acquire the lock and raise to synchronization level */
    KeAcquireInStackQueuedSpinLockRaiseToSynch(&Process->ProcessLock, Handle);
}

FORCEINLINE
VOID
KiReleaseProcessLock(IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Release the lock and restore the saved IRQL */
    KeReleaseInStackQueuedSpinLock(Handle);
}

FORCEINLINE
VOID
KiReleaseProcessLockFromDpcLevel(IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Release the lock without lowering IRQL */
    KeReleaseInStackQueuedSpinLockFromDpcLevel(Handle);
}
949
//
// Device queue lock helpers. Threaded DPCs run at PASSIVE_LEVEL, so the
// full acquire (which raises IRQL) is needed there; ordinary DPC callers
// are already at DISPATCH_LEVEL and take the cheaper AtDpcLevel path.
// Acquire and release must use matching paths — DpcThreadActive decides.
//
FORCEINLINE
VOID
KiAcquireDeviceQueueLock(IN PKDEVICE_QUEUE DeviceQueue,
                         IN PKLOCK_QUEUE_HANDLE DeviceLock)
{
    /* Check if we were called from a threaded DPC */
    if (KeGetCurrentPrcb()->DpcThreadActive)
    {
        /* Lock the Queue, we're not at DPC level */
        KeAcquireInStackQueuedSpinLock(&DeviceQueue->Lock, DeviceLock);
    }
    else
    {
        /* We must be at DPC level, acquire the lock safely */
        ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
        KeAcquireInStackQueuedSpinLockAtDpcLevel(&DeviceQueue->Lock,
                                                 DeviceLock);
    }
}

FORCEINLINE
VOID
KiReleaseDeviceQueueLock(IN PKLOCK_QUEUE_HANDLE DeviceLock)
{
    /* Check if we were called from a threaded DPC */
    if (KeGetCurrentPrcb()->DpcThreadActive)
    {
        /* Unlock the Queue, we're not at DPC level */
        KeReleaseInStackQueuedSpinLock(DeviceLock);
    }
    else
    {
        /* We must be at DPC level, release the lock safely */
        ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
        KeReleaseInStackQueuedSpinLockFromDpcLevel(DeviceLock);
    }
}
987
988 //
989 // This routine queues a thread that is ready on the PRCB's ready lists.
990 // If this thread cannot currently run on this CPU, then the thread is
991 // added to the deferred ready list instead.
992 //
993 // This routine must be entered with the PRCB lock held and it will exit
994 // with the PRCB lock released!
995 //
996 FORCEINLINE
997 VOID
998 KxQueueReadyThread(IN PKTHREAD Thread,
999 IN PKPRCB Prcb)
1000 {
1001 BOOLEAN Preempted;
1002 KPRIORITY Priority;
1003
1004 /* Sanity checks */
1005 ASSERT(Prcb == KeGetCurrentPrcb());
1006 ASSERT(Thread->State == Running);
1007 ASSERT(Thread->NextProcessor == Prcb->Number);
1008
1009 /* Check if this thread is allowed to run in this CPU */
1010 #ifdef _CONFIG_SMP
1011 if ((Thread->Affinity) & (Prcb->SetMember))
1012 #else
1013 if (TRUE)
1014 #endif
1015 {
1016 /* Set thread ready for execution */
1017 Thread->State = Ready;
1018
1019 /* Save current priority and if someone had pre-empted it */
1020 Priority = Thread->Priority;
1021 Preempted = Thread->Preempted;
1022
1023 /* We're not pre-empting now, and set the wait time */
1024 Thread->Preempted = FALSE;
1025 Thread->WaitTime = KeTickCount.LowPart;
1026
1027 /* Sanity check */
1028 ASSERT((Priority >= 0) && (Priority <= HIGH_PRIORITY));
1029
1030 /* Insert this thread in the appropriate order */
1031 Preempted ? InsertHeadList(&Prcb->DispatcherReadyListHead[Priority],
1032 &Thread->WaitListEntry) :
1033 InsertTailList(&Prcb->DispatcherReadyListHead[Priority],
1034 &Thread->WaitListEntry);
1035
1036 /* Update the ready summary */
1037 Prcb->ReadySummary |= PRIORITY_MASK(Priority);
1038
1039 /* Sanity check */
1040 ASSERT(Priority == Thread->Priority);
1041
1042 /* Release the PRCB lock */
1043 KiReleasePrcbLock(Prcb);
1044 }
1045 else
1046 {
1047 /* Otherwise, prepare this thread to be deferred */
1048 Thread->State = DeferredReady;
1049 Thread->DeferredProcessor = Prcb->Number;
1050
1051 /* Release the lock and defer scheduling */
1052 KiReleasePrcbLock(Prcb);
1053 KiDeferredReadyThread(Thread);
1054 }
1055 }
1056
1057 //
1058 // This routine scans for an appropriate ready thread to select at the
1059 // given priority and for the given CPU.
1060 //
1061 FORCEINLINE
1062 PKTHREAD
1063 KiSelectReadyThread(IN KPRIORITY Priority,
1064 IN PKPRCB Prcb)
1065 {
1066 LONG PriorityMask, PrioritySet, HighPriority;
1067 PLIST_ENTRY ListEntry;
1068 PKTHREAD Thread;
1069
1070 /* Save the current mask and get the priority set for the CPU */
1071 PriorityMask = Priority;
1072 PrioritySet = Prcb->ReadySummary >> (UCHAR)Priority;
1073 if (!PrioritySet) return NULL;
1074
1075 /* Get the highest priority possible */
1076 BitScanReverse((PULONG)&HighPriority, PrioritySet);
1077 ASSERT((PrioritySet & PRIORITY_MASK(HighPriority)) != 0);
1078 HighPriority += PriorityMask;
1079
1080 /* Make sure the list isn't at highest priority */
1081 ASSERT(IsListEmpty(&Prcb->DispatcherReadyListHead[HighPriority]) == FALSE);
1082
1083 /* Get the first thread on the list */
1084 ListEntry = &Prcb->DispatcherReadyListHead[HighPriority];
1085 Thread = CONTAINING_RECORD(ListEntry, KTHREAD, WaitListEntry);
1086
1087 /* Make sure this thread is here for a reason */
1088 ASSERT(HighPriority == Thread->Priority);
1089 ASSERT(Thread->Affinity & AFFINITY_MASK(Prcb->Number));
1090 ASSERT(Thread->NextProcessor == Prcb->Number);
1091
1092 /* Remove it from the list */
1093 RemoveEntryList(&Thread->WaitListEntry);
1094 if (IsListEmpty(&Thread->WaitListEntry))
1095 {
1096 /* The list is empty now, reset the ready summary */
1097 Prcb->ReadySummary ^= PRIORITY_MASK(HighPriority);
1098 }
1099
1100 /* Sanity check and return the thread */
1101 ASSERT((Thread == NULL) ||
1102 (Thread->BasePriority == 0) ||
1103 (Thread->Priority != 0));
1104 return Thread;
1105 }
1106
//
// This routine computes the new priority for a thread. It is only valid for
// threads with priorities in the dynamic priority range (below
// LOW_REALTIME_PRIORITY); real-time priorities pass through unchanged and
// must have a zero PriorityDecrement (asserted below).
//
SCHAR
FORCEINLINE
KiComputeNewPriority(IN PKTHREAD Thread)
{
    SCHAR Priority;

    /* Priority sanity checks */
    ASSERT((Thread->PriorityDecrement >= 0) &&
           (Thread->PriorityDecrement <= Thread->Priority));
    ASSERT((Thread->Priority < LOW_REALTIME_PRIORITY) ?
            TRUE : (Thread->PriorityDecrement == 0));

    /* Get the current priority */
    Priority = Thread->Priority;
    if (Priority < LOW_REALTIME_PRIORITY)
    {
        /* Decay: undo the boost plus one extra quantum-end step */
        Priority -= (Thread->PriorityDecrement + 1);

        /* Don't go out of bounds (never below the base priority) */
        if (Priority < Thread->BasePriority) Priority = Thread->BasePriority;

        /* Reset the priority decrement */
        Thread->PriorityDecrement = 0;
    }

    /* Sanity check */
    ASSERT((Thread->BasePriority == 0) || (Priority != 0));

    /* Return the new priority */
    return Priority;
}
1143