- Converted some macros to inlined functions.
[reactos.git] / reactos / ntoskrnl / include / internal / ke_x.h
1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: GPL - See COPYING in the top level directory
4 * FILE: ntoskrnl/include/ke_x.h
5 * PURPOSE: Internal Inlined Functions for the Kernel
6 * PROGRAMMERS: Alex Ionescu (alex.ionescu@reactos.org)
7 */
8
//
// Enters a Guarded Region
//
// Disables special kernel APC delivery for the current thread by biasing
// SpecialApcDisable negative. Must be balanced by KeLeaveGuardedRegion.
//
#define KeEnterGuardedRegion()                                              \
{                                                                           \
    PKTHREAD Thread = KeGetCurrentThread();                                 \
                                                                            \
    /* Sanity checks */                                                     \
    ASSERT_IRQL_LESS_OR_EQUAL(APC_LEVEL);                                   \
    ASSERT(Thread == KeGetCurrentThread());                                 \
    /* Counter must be <= 0 and not about to underflow (-32768 = MINSHORT) */ \
    ASSERT((Thread->SpecialApcDisable <= 0) &&                              \
           (Thread->SpecialApcDisable != -32768));                          \
                                                                            \
    /* Disable Special APCs */                                              \
    Thread->SpecialApcDisable--;                                            \
}
25
//
// Leaves a Guarded Region
//
// Re-enables special kernel APC delivery; when the disable count returns
// to zero, delivers any kernel-mode APCs that queued up meanwhile.
//
#define KeLeaveGuardedRegion()                                              \
{                                                                           \
    PKTHREAD Thread = KeGetCurrentThread();                                 \
                                                                            \
    /* Sanity checks */                                                     \
    ASSERT_IRQL_LESS_OR_EQUAL(APC_LEVEL);                                   \
    ASSERT(Thread == KeGetCurrentThread());                                 \
    ASSERT(Thread->SpecialApcDisable < 0);                                  \
                                                                            \
    /* Leave region and check if APCs are OK now */                         \
    if (!(++Thread->SpecialApcDisable))                                     \
    {                                                                       \
        /* Check for Kernel APCs on the list */                             \
        if (!IsListEmpty(&Thread->ApcState.                                 \
                         ApcListHead[KernelMode]))                          \
        {                                                                   \
            /* Check for APC Delivery */                                    \
            KiCheckForKernelApcDelivery();                                  \
        }                                                                   \
    }                                                                       \
}
50
51 //
52 // TODO: Guarded Mutex Routines
53 //
54
//
// Enters a Critical Region
//
// Disables normal kernel APC delivery by biasing KernelApcDisable
// negative. Unlike the guarded-region macro, this tolerates a NULL
// current thread (early boot / no thread context).
//
#define KeEnterCriticalRegion()                                             \
{                                                                           \
    PKTHREAD Thread = KeGetCurrentThread();                                 \
    if (Thread)                                                             \
    {                                                                       \
        /* Sanity checks */                                                 \
        ASSERT(Thread == KeGetCurrentThread());                             \
        /* Counter must be <= 0 and not about to underflow (-32768) */      \
        ASSERT((Thread->KernelApcDisable <= 0) &&                           \
               (Thread->KernelApcDisable != -32768));                       \
                                                                            \
        /* Disable Kernel APCs */                                           \
        Thread->KernelApcDisable--;                                         \
    }                                                                       \
}
72
//
// Leaves a Critical Region
//
// Re-enables normal kernel APC delivery; when the disable count returns
// to zero and kernel APCs are queued, triggers their delivery.
//
#define KeLeaveCriticalRegion()                                             \
{                                                                           \
    PKTHREAD Thread = KeGetCurrentThread();                                 \
    if (Thread)                                                             \
    {                                                                       \
        /* Sanity checks */                                                 \
        ASSERT(Thread == KeGetCurrentThread());                             \
        ASSERT(Thread->KernelApcDisable < 0);                               \
                                                                            \
        /* Enable Kernel APCs */                                            \
        Thread->KernelApcDisable++;                                         \
                                                                            \
        /* Check if Kernel APCs are now enabled */                          \
        if (!(Thread->KernelApcDisable))                                    \
        {                                                                   \
            /* Request delivery if kernel APCs are queued */                \
            if (!IsListEmpty(&Thread->ApcState.ApcListHead[KernelMode]))    \
            {                                                               \
                /* Check for the right environment */                       \
                KiCheckForKernelApcDelivery();                              \
            }                                                               \
        }                                                                   \
    }                                                                       \
}
101
//
// Satisfies the wait of any dispatcher object
//
// Adjusts the object's signal state after a successful wait: mutants also
// record ownership and abandonment, synchronization events/timers auto-
// reset, semaphores lose one count. Presumably runs under the dispatcher
// lock -- TODO confirm at call sites.
//
#define KiSatisfyObjectWait(Object, Thread)                                 \
{                                                                           \
    /* Special case for Mutants */                                          \
    if ((Object)->Header.Type == MutantObject)                              \
    {                                                                       \
        /* Decrease the Signal State */                                     \
        (Object)->Header.SignalState--;                                     \
                                                                            \
        /* Check if it's now non-signaled */                                \
        if (!(Object)->Header.SignalState)                                  \
        {                                                                   \
            /* Set the Owner Thread */                                      \
            (Object)->OwnerThread = Thread;                                 \
                                                                            \
            /* Disable APCs if needed */                                    \
            Thread->KernelApcDisable -= (Object)->ApcDisable;               \
                                                                            \
            /* Check if it's abandoned */                                   \
            if ((Object)->Abandoned)                                        \
            {                                                               \
                /* Unabandon it */                                          \
                (Object)->Abandoned = FALSE;                                \
                                                                            \
                /* Return Status */                                         \
                Thread->WaitStatus = STATUS_ABANDONED;                      \
            }                                                               \
                                                                            \
            /* Insert it into the Mutant List */                            \
            InsertHeadList(Thread->MutantListHead.Blink,                    \
                           &(Object)->MutantListEntry);                     \
        }                                                                   \
    }                                                                       \
    else if (((Object)->Header.Type & TIMER_OR_EVENT_TYPE) ==               \
             EventSynchronizationObject)                                    \
    {                                                                       \
        /* Synchronization Timers and Events just get un-signaled */        \
        (Object)->Header.SignalState = 0;                                   \
    }                                                                       \
    else if ((Object)->Header.Type == SemaphoreObject)                      \
    {                                                                       \
        /* These ones can have multiple states, so we only decrease it */   \
        (Object)->Header.SignalState--;                                     \
    }                                                                       \
}
149
//
// Satisfies the wait of a mutant dispatcher object
//
// Same as the mutant branch of KiSatisfyObjectWait, for callers that
// already know the object is a mutant.
//
#define KiSatisfyMutantWait(Object, Thread)                                 \
{                                                                           \
    /* Decrease the Signal State */                                         \
    (Object)->Header.SignalState--;                                         \
                                                                            \
    /* Check if it's now non-signaled */                                    \
    if (!(Object)->Header.SignalState)                                      \
    {                                                                       \
        /* Set the Owner Thread */                                          \
        (Object)->OwnerThread = Thread;                                     \
                                                                            \
        /* Disable APCs if needed */                                        \
        Thread->KernelApcDisable -= (Object)->ApcDisable;                   \
                                                                            \
        /* Check if it's abandoned */                                       \
        if ((Object)->Abandoned)                                            \
        {                                                                   \
            /* Unabandon it */                                              \
            (Object)->Abandoned = FALSE;                                    \
                                                                            \
            /* Return Status */                                             \
            Thread->WaitStatus = STATUS_ABANDONED;                          \
        }                                                                   \
                                                                            \
        /* Insert it into the Mutant List */                                \
        InsertHeadList(Thread->MutantListHead.Blink,                        \
                       &(Object)->MutantListEntry);                         \
    }                                                                       \
}
182
//
// Satisfies the wait of any nonmutant dispatcher object
//
// Synchronization events/timers auto-reset; semaphores lose one count;
// all other object types keep their signal state unchanged.
//
#define KiSatisfyNonMutantWait(Object, Thread)                              \
{                                                                           \
    if (((Object)->Header.Type & TIMER_OR_EVENT_TYPE) ==                    \
        EventSynchronizationObject)                                         \
    {                                                                       \
        /* Synchronization Timers and Events just get un-signaled */        \
        (Object)->Header.SignalState = 0;                                   \
    }                                                                       \
    else if ((Object)->Header.Type == SemaphoreObject)                      \
    {                                                                       \
        /* These ones can have multiple states, so we only decrease it */   \
        (Object)->Header.SignalState--;                                     \
    }                                                                       \
}
200
201 //
202 // Recalculates the due time
203 //
204 PLARGE_INTEGER
205 FORCEINLINE
206 KiRecalculateDueTime(IN PLARGE_INTEGER OriginalDueTime,
207 IN PLARGE_INTEGER DueTime,
208 IN OUT PLARGE_INTEGER NewDueTime)
209 {
210 /* Don't do anything for absolute waits */
211 if (OriginalDueTime->QuadPart >= 0) return OriginalDueTime;
212
213 /* Otherwise, query the interrupt time and recalculate */
214 NewDueTime->QuadPart = KeQueryInterruptTime();
215 NewDueTime->QuadPart -= DueTime->QuadPart;
216 return NewDueTime;
217 }
218
219 //
220 // Determines wether a thread should be added to the wait list
221 //
222 FORCEINLINE
223 BOOLEAN
224 KiCheckThreadStackSwap(IN PKTHREAD Thread,
225 IN KPROCESSOR_MODE WaitMode)
226 {
227 /* Check the required conditions */
228 if ((WaitMode != KernelMode) &&
229 (Thread->EnableStackSwap) &&
230 (Thread->Priority >= (LOW_REALTIME_PRIORITY + 9)))
231 {
232 /* We are go for swap */
233 return TRUE;
234 }
235 else
236 {
237 /* Don't swap the thread */
238 return FALSE;
239 }
240 }
241
//
// Adds a thread to the wait list
//
// Only swappable threads are linked into the PRCB wait list; the balance
// set manager scans that list for stacks it may page out.
//
#define KiAddThreadToWaitList(Thread, Swappable)                            \
{                                                                           \
    /* Make sure it's swappable */                                          \
    if (Swappable)                                                          \
    {                                                                       \
        /* Insert it into the PRCB's List */                                \
        InsertTailList(&KeGetCurrentPrcb()->WaitListHead,                   \
                       &Thread->WaitListEntry);                             \
    }                                                                       \
}
255
//
// Checks if a wait in progress should be interrupted by APCs or an alertable
// state.
//
// Returns STATUS_ALERTED, STATUS_USER_APC, or STATUS_WAIT_0 (continue the
// wait). NOTE: the order of the checks is significant -- a wait-mode alert
// outranks pending user APCs, which outrank a kernel-mode alert.
//
FORCEINLINE
NTSTATUS
KiCheckAlertability(IN PKTHREAD Thread,
                    IN BOOLEAN Alertable,
                    IN KPROCESSOR_MODE WaitMode)
{
    /* Check if the wait is alertable */
    if (Alertable)
    {
        /* It is, first check if the thread is alerted in this mode */
        if (Thread->Alerted[WaitMode])
        {
            /* It is, so bail out of the wait */
            Thread->Alerted[WaitMode] = FALSE;
            return STATUS_ALERTED;
        }
        else if ((WaitMode != KernelMode) &&
                 (!IsListEmpty(&Thread->ApcState.ApcListHead[UserMode])))
        {
            /* It isn't, but this is a user wait with queued user APCs */
            Thread->ApcState.UserApcPending = TRUE;
            return STATUS_USER_APC;
        }
        else if (Thread->Alerted[KernelMode])
        {
            /* It isn't that either, but we're alerted in kernel mode */
            Thread->Alerted[KernelMode] = FALSE;
            return STATUS_ALERTED;
        }
    }
    else if ((WaitMode != KernelMode) && (Thread->ApcState.UserApcPending))
    {
        /* Not alertable, but this is a user wait with pending user APCs */
        return STATUS_USER_APC;
    }

    /* Otherwise, we're fine */
    return STATUS_WAIT_0;
}
299
//
// Prepares the current thread for a KeDelayExecutionThread-style wait:
// builds a single-entry wait block list around the thread's built-in
// timer, fills in the wait bookkeeping fields, and returns whether the
// thread's stack is swappable during the wait.
//
FORCEINLINE
BOOLEAN
KxDelayThreadWait(IN PKTHREAD Thread,
                  IN BOOLEAN Alertable,
                  IN KPROCESSOR_MODE WaitMode)
{
    BOOLEAN Swappable;
    PKWAIT_BLOCK TimerBlock = &Thread->WaitBlock[TIMER_WAIT_BLOCK];

    /* Setup the Wait Block (circular list of one: the timer block) */
    Thread->WaitBlockList = TimerBlock;
    TimerBlock->NextWaitBlock = TimerBlock;

    /* Link the timer to this Wait Block */
    Thread->Timer.Header.WaitListHead.Flink = &TimerBlock->WaitListEntry;
    Thread->Timer.Header.WaitListHead.Blink = &TimerBlock->WaitListEntry;

    /* Clear wait status */
    Thread->WaitStatus = STATUS_WAIT_0;

    /* Setup wait fields */
    Thread->Alertable = Alertable;
    Thread->WaitReason = DelayExecution;
    Thread->WaitMode = WaitMode;

    /* Check if we can swap the thread's stack */
    Thread->WaitListEntry.Flink = NULL;
    Swappable = KiCheckThreadStackSwap(Thread, WaitMode);

    /* Set the wait time (low 32 bits of the tick count) */
    Thread->WaitTime = ((PLARGE_INTEGER)&KeTickCount)->LowPart;
    return Swappable;
}
333
//
// Prepares the current thread for a single-object wait: initializes the
// caller-supplied wait block, optionally chains in the thread's timer
// block when a timeout is given, fills in the wait bookkeeping fields,
// and returns whether the thread's stack is swappable during the wait.
//
FORCEINLINE
BOOLEAN
KxSingleThreadWait(IN PKTHREAD Thread,
                   IN PKWAIT_BLOCK WaitBlock,
                   IN PVOID Object,
                   IN PLARGE_INTEGER Timeout,
                   IN BOOLEAN Alertable,
                   IN KWAIT_REASON WaitReason,
                   IN KPROCESSOR_MODE WaitMode)
{
    BOOLEAN Swappable;
    PKWAIT_BLOCK TimerBlock = &Thread->WaitBlock[TIMER_WAIT_BLOCK];

    /* Setup the Wait Block */
    Thread->WaitBlockList = WaitBlock;
    WaitBlock->WaitKey = STATUS_WAIT_0;
    WaitBlock->Object = Object;
    WaitBlock->WaitType = WaitAny;

    /* Clear wait status */
    Thread->WaitStatus = STATUS_WAIT_0;

    /* Check if we have a timer */
    if (Timeout)
    {
        /* Circular list of two: object block -> timer block -> object */
        WaitBlock->NextWaitBlock = TimerBlock;
        TimerBlock->NextWaitBlock = WaitBlock;

        /* Link the timer to this Wait Block */
        Thread->Timer.Header.WaitListHead.Flink = &TimerBlock->WaitListEntry;
        Thread->Timer.Header.WaitListHead.Blink = &TimerBlock->WaitListEntry;
    }
    else
    {
        /* No timer block, just ourselves */
        WaitBlock->NextWaitBlock = WaitBlock;
    }

    /* Setup wait fields */
    Thread->Alertable = Alertable;
    Thread->WaitReason = WaitReason;
    Thread->WaitMode = WaitMode;

    /* Check if we can swap the thread's stack */
    Thread->WaitListEntry.Flink = NULL;
    Swappable = KiCheckThreadStackSwap(Thread, WaitMode);

    /* Set the wait time (low 32 bits of the tick count) */
    Thread->WaitTime = ((PLARGE_INTEGER)&KeTickCount)->LowPart;
    return Swappable;
}
386
//
// Unwaits a Thread
//
// Wakes every thread waiting on the given dispatcher object. WaitAny
// waiters receive their wait block's key; WaitAll waiters receive
// STATUS_KERNEL_APC so they re-evaluate their wait.
//
// NOTE(review): termination of this loop relies on KiUnwaitThread
// unlinking the thread's wait blocks from Object->WaitListHead (the loop
// always re-reads WaitList->Flink) -- KiUnwaitThread's body is not
// visible here; confirm it removes the entries.
//
FORCEINLINE
VOID
KxUnwaitThread(IN DISPATCHER_HEADER *Object,
               IN KPRIORITY Increment)
{
    PLIST_ENTRY WaitEntry, WaitList;
    PKWAIT_BLOCK CurrentWaitBlock;
    PKTHREAD WaitThread;
    ULONG WaitKey;

    /* Loop the Wait Entries */
    WaitList = &Object->WaitListHead;
    WaitEntry = WaitList->Flink;
    do
    {
        /* Get the current wait block */
        CurrentWaitBlock = CONTAINING_RECORD(WaitEntry,
                                             KWAIT_BLOCK,
                                             WaitListEntry);

        /* Get the waiting thread */
        WaitThread = CurrentWaitBlock->Thread;

        /* Check the current Wait Mode */
        if (CurrentWaitBlock->WaitType == WaitAny)
        {
            /* Use the actual wait key */
            WaitKey = CurrentWaitBlock->WaitKey;
        }
        else
        {
            /* Otherwise, use STATUS_KERNEL_APC */
            WaitKey = STATUS_KERNEL_APC;
        }

        /* Unwait the thread */
        KiUnwaitThread(WaitThread, WaitKey, Increment);

        /* Next entry (the unwait is expected to have unlinked this one) */
        WaitEntry = WaitList->Flink;
    } while (WaitEntry != WaitList);
}
432
//
// Unwaits a Thread waiting on an event
//
// Walks the event's waiter list: WaitAll waiters are woken with
// STATUS_KERNEL_APC to re-evaluate; the first WaitAny waiter consumes the
// event (it is un-signaled) and the scan stops there.
//
// NOTE(review): like KxUnwaitThread, loop progress relies on
// KiUnwaitThread unlinking the wait block -- confirm.
//
FORCEINLINE
VOID
KxUnwaitThreadForEvent(IN PKEVENT Event,
                       IN KPRIORITY Increment)
{
    PLIST_ENTRY WaitEntry, WaitList;
    PKWAIT_BLOCK CurrentWaitBlock;
    PKTHREAD WaitThread;

    /* Loop the Wait Entries */
    WaitList = &Event->Header.WaitListHead;
    WaitEntry = WaitList->Flink;
    do
    {
        /* Get the current wait block */
        CurrentWaitBlock = CONTAINING_RECORD(WaitEntry,
                                             KWAIT_BLOCK,
                                             WaitListEntry);

        /* Get the waiting thread */
        WaitThread = CurrentWaitBlock->Thread;

        /* Check the current Wait Mode */
        if (CurrentWaitBlock->WaitType == WaitAny)
        {
            /* Un-signal it */
            Event->Header.SignalState = 0;

            /* Un-signal the event and unwait the thread */
            KiUnwaitThread(WaitThread, CurrentWaitBlock->WaitKey, Increment);
            break;
        }
        else
        {
            /* Unwait the thread with STATUS_KERNEL_APC */
            KiUnwaitThread(WaitThread, STATUS_KERNEL_APC, Increment);
        }

        /* Next entry */
        WaitEntry = WaitList->Flink;
    } while (WaitEntry != WaitList);
}
478
479 #ifndef _CONFIG_SMP
//
// Spinlock Acquire at IRQL >= DISPATCH_LEVEL
//
// (On UP, being at DISPATCH_LEVEL already serializes against all other
// spinlock holders, so the lock itself is a no-op.)
//
FORCEINLINE
VOID
KxAcquireSpinLock(IN PKSPIN_LOCK SpinLock)
{
    /* On UP builds, spinlocks don't exist at IRQL >= DISPATCH */
    UNREFERENCED_PARAMETER(SpinLock);
}
490
//
// Spinlock Release at IRQL >= DISPATCH_LEVEL
//
// (Counterpart to the UP KxAcquireSpinLock no-op above.)
//
FORCEINLINE
VOID
KxReleaseSpinLock(IN PKSPIN_LOCK SpinLock)
{
    /* On UP builds, spinlocks don't exist at IRQL >= DISPATCH */
    UNREFERENCED_PARAMETER(SpinLock);
}
501
//
// This routine protects against multiple CPU acquires, it's meaningless on UP.
//
VOID
FORCEINLINE
KiAcquireDispatcherObject(IN DISPATCHER_HEADER* Object)
{
    /* Single CPU: nothing to lock */
    UNREFERENCED_PARAMETER(Object);
}
511
//
// This routine protects against multiple CPU acquires, it's meaningless on UP.
//
VOID
FORCEINLINE
KiReleaseDispatcherObject(IN DISPATCHER_HEADER* Object)
{
    /* Single CPU: nothing to unlock */
    UNREFERENCED_PARAMETER(Object);
}
521
//
// Acquires the dispatcher database "lock" on UP: raising to DPC level is
// all the serialization needed. Returns the previous IRQL.
//
KIRQL
FORCEINLINE
KiAcquireDispatcherLock(VOID)
{
    /* Raise to DPC level */
    return KeRaiseIrqlToDpcLevel();
}
529
//
// Releases the dispatcher "lock" on UP by exiting the dispatcher, which
// also lowers back to the given IRQL.
//
VOID
FORCEINLINE
KiReleaseDispatcherLock(IN KIRQL OldIrql)
{
    /* Just exit the dispatcher */
    KiExitDispatcher(OldIrql);
}
537
//
// Acquires the dispatcher lock when already at DPC level.
//
VOID
FORCEINLINE
KiAcquireDispatcherLockAtDpcLevel(VOID)
{
    /* This is a no-op at DPC Level for UP systems */
    return;
}
545
//
// Releases the dispatcher lock without lowering IRQL.
//
VOID
FORCEINLINE
KiReleaseDispatcherLockFromDpcLevel(VOID)
{
    /* This is a no-op at DPC Level for UP systems */
    return;
}
553
//
// This routine makes the thread deferred ready on the boot CPU.
//
// (On UP there is no deferred ready list; the thread is made ready
// immediately.)
//
FORCEINLINE
VOID
KiInsertDeferredReadyList(IN PKTHREAD Thread)
{
    /* Set the thread to deferred state and boot CPU */
    Thread->State = DeferredReady;
    Thread->DeferredProcessor = 0;

    /* Make the thread ready immediately */
    KiDeferredReadyThread(Thread);
}
568
//
// Requests a reschedule on another CPU -- no-op with a single CPU.
//
FORCEINLINE
VOID
KiRescheduleThread(IN BOOLEAN NewThread,
                   IN ULONG Cpu)
{
    /* This is meaningless on UP systems */
    UNREFERENCED_PARAMETER(NewThread);
    UNREFERENCED_PARAMETER(Cpu);
}
578
//
// This routine protects against multiple CPU acquires, it's meaningless on UP.
//
FORCEINLINE
VOID
KiSetThreadSwapBusy(IN PKTHREAD Thread)
{
    /* No concurrent swapper on UP */
    UNREFERENCED_PARAMETER(Thread);
}
588
//
// This routine protects against multiple CPU acquires, it's meaningless on UP.
//
FORCEINLINE
VOID
KiAcquirePrcbLock(IN PKPRCB Prcb)
{
    /* Single CPU: the PRCB needs no lock */
    UNREFERENCED_PARAMETER(Prcb);
}
598
//
// This routine protects against multiple CPU acquires, it's meaningless on UP.
//
FORCEINLINE
VOID
KiReleasePrcbLock(IN PKPRCB Prcb)
{
    /* Single CPU: the PRCB needs no lock */
    UNREFERENCED_PARAMETER(Prcb);
}
608
//
// This routine protects against multiple CPU acquires, it's meaningless on UP.
//
FORCEINLINE
VOID
KiAcquireThreadLock(IN PKTHREAD Thread)
{
    /* Single CPU: the thread needs no lock */
    UNREFERENCED_PARAMETER(Thread);
}
618
//
// This routine protects against multiple CPU acquires, it's meaningless on UP.
//
FORCEINLINE
VOID
KiReleaseThreadLock(IN PKTHREAD Thread)
{
    /* Single CPU: the thread needs no lock */
    UNREFERENCED_PARAMETER(Thread);
}
628
//
// This routine protects against multiple CPU acquires, it's meaningless on UP.
//
// NOTE(review): the UP stub always returns FALSE -- callers apparently
// treat FALSE as the uncontended outcome; confirm against the SMP
// variant's return convention.
//
FORCEINLINE
BOOLEAN
KiTryThreadLock(IN PKTHREAD Thread)
{
    UNREFERENCED_PARAMETER(Thread);
    return FALSE;
}
639
//
// Processes any deferred-ready threads -- nothing to do on UP, where
// KiInsertDeferredReadyList readies threads immediately.
//
FORCEINLINE
VOID
KiCheckDeferredReadyList(IN PKPRCB Prcb)
{
    /* There are no deferred ready lists on UP systems */
    UNREFERENCED_PARAMETER(Prcb);
}
647
//
// Runs down per-CPU state owned by a dying thread: if it currently owns
// the FPU (NPX), drop the ownership and reset the FPU with FNINIT.
//
FORCEINLINE
VOID
KiRundownThread(IN PKTHREAD Thread)
{
    /* Check if this is the NPX Thread */
    if (KeGetCurrentPrcb()->NpxThread == Thread)
    {
        /* Clear it */
        KeGetCurrentPrcb()->NpxThread = NULL;
#ifdef __GNUC__
        __asm__("fninit\n\t");
#else
        __asm fninit;
#endif
    }
}
664
//
// Requests an APC interrupt on the target processor -- unnecessary on UP,
// where APC delivery happens directly.
//
FORCEINLINE
VOID
KiRequestApcInterrupt(IN BOOLEAN NeedApc,
                      IN UCHAR Processor)
{
    /* We deliver instantly on UP */
    UNREFERENCED_PARAMETER(NeedApc);
    UNREFERENCED_PARAMETER(Processor);
}
674
675 #else
676
677 //
678 // Spinlock Acquisition at IRQL >= DISPATCH_LEVEL
679 //
680 FORCEINLINE
681 VOID
682 KxAcquireSpinLock(IN PKSPIN_LOCK SpinLock)
683 {
684 for (;;)
685 {
686 /* Try to acquire it */
687 if (InterlockedBitTestAndSet((PLONG)SpinLock, 0))
688 {
689 /* Value changed... wait until it's locked */
690 while (*(volatile KSPIN_LOCK *)SpinLock == 1)
691 {
692 #ifdef DBG
693 /* On debug builds, we use a much slower but useful routine */
694 Kii386SpinOnSpinLock(SpinLock, 5);
695 #else
696 /* Otherwise, just yield and keep looping */
697 YieldProcessor();
698 #endif
699 }
700 }
701 else
702 {
703 #ifdef DBG
704 /* On debug builds, we OR in the KTHREAD */
705 *SpinLock = KeGetCurrentThread() | 1;
706 #endif
707 /* All is well, break out */
708 break;
709 }
710 }
711 }
712
713 //
714 // Spinlock Release at IRQL >= DISPATCH_LEVEL
715 //
716 FORCEINLINE
717 VOID
718 KxReleaseSpinLock(IN PKSPIN_LOCK SpinLock)
719 {
720 #ifdef DBG
721 /* Make sure that the threads match */
722 if ((KeGetCurrentThread() | 1) != *SpinLock)
723 {
724 /* They don't, bugcheck */
725 KeBugCheckEx(SPIN_LOCK_NOT_OWNED, SpinLock, 0, 0, 0);
726 }
727 #endif
728 /* Clear the lock */
729 InterlockedAnd(SpinLock, 0);
730 }
731
732 KIRQL
733 FORCEINLINE
734 KiAcquireDispatcherObject(IN DISPATCHER_HEADER* Object)
735 {
736 LONG OldValue, NewValue;
737
738 /* Make sure we're at a safe level to touch the lock */
739 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
740
741 /* Start acquire loop */
742 do
743 {
744 /* Loop until the other CPU releases it */
745 while ((UCHAR)Object->Lock & KOBJECT_LOCK_BIT)
746 {
747 /* Let the CPU know that this is a loop */
748 YieldProcessor();
749 };
750
751 /* Try acquiring the lock now */
752 NewValue = InterlockedCompareExchange(&Object->Lock,
753 OldValue | KOBJECT_LOCK_BIT,
754 OldValue);
755 } while (NewValue != OldValue);
756 }
757
758 KIRQL
759 FORCEINLINE
760 KiReleaseDispatcherObject(IN DISPATCHER_HEADER* Object)
761 {
762 /* Make sure we're at a safe level to touch the lock */
763 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
764
765 /* Release it */
766 InterlockedAnd(&Object->Lock, ~KOBJECT_LOCK_BIT);
767 }
768
//
// Acquires the global dispatcher database queued spinlock, raising to
// SYNCH level. Returns the previous IRQL for KiReleaseDispatcherLock.
//
KIRQL
FORCEINLINE
KiAcquireDispatcherLock(VOID)
{
    /* Raise to synchronization level and acquire the dispatcher lock */
    return KeAcquireQueuedSpinLockRaiseToSynch(LockQueueDispatcherLock);
}
776
//
// Releases the dispatcher queued spinlock (without lowering IRQL yet) and
// then exits the dispatcher, which lowers back to OldIrql.
//
VOID
FORCEINLINE
KiReleaseDispatcherLock(IN KIRQL OldIrql)
{
    /* First release the lock */
    KeReleaseQueuedSpinLockFromDpcLevel(&KeGetCurrentPrcb()->
                                        LockQueue[LockQueueDispatcherLock]);

    /* Then exit the dispatcher */
    KiExitDispatcher(OldIrql);
}
788
//
// This routine inserts a thread into the deferred ready list of the given CPU
//
// The list is processed later by KiCheckDeferredReadyList /
// KiProcessDeferredReadyList.
//
FORCEINLINE
VOID
KiInsertDeferredReadyList(IN PKTHREAD Thread)
{
    PKPRCB Prcb = KeGetCurrentPrcb();

    /* Set the thread to deferred state and CPU */
    Thread->State = DeferredReady;
    Thread->DeferredProcessor = Prcb->Number;

    /* Add it on the list (singly-linked, LIFO) */
    PushEntryList(&Prcb->DeferredReadyListHead, &Thread->SwapListEntry);
}
805
806 FORCEINLINE
807 VOID
808 KiRescheduleThread(IN BOOLEAN NewThread,
809 IN ULONG Cpu)
810 {
811 /* Check if a new thread needs to be scheduled on a different CPU */
812 if ((NewThread) && !(KeGetPcr()->Number == Cpu))
813 {
814 /* Send an IPI to request delivery */
815 KiIpiSendRequest(AFFINITY_MASK(Cpu), IPI_DPC);
816 }
817 }
818
//
// This routine sets the current thread in a swap busy state, which ensure that
// nobody else tries to swap it concurrently.
//
FORCEINLINE
VOID
KiSetThreadSwapBusy(IN PKTHREAD Thread)
{
    /* Make sure nobody already set it */
    ASSERT(Thread->SwapBusy == FALSE);

    /* Set it ourselves */
    Thread->SwapBusy = TRUE;
}
833
//
// This routine acquires the PRCB lock so that only one caller can touch
// volatile PRCB data.
//
// Since this is a simple optimized spin-lock, it must be only acquired
// at dispatcher level or higher!
//
FORCEINLINE
VOID
KiAcquirePrcbLock(IN PKPRCB Prcb)
{
    /* Make sure we're at a safe level to touch the PRCB lock */
    ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);

    /* Start acquire loop */
    for (;;)
    {
        /* Acquire the lock and break out if we acquired it first */
        if (!InterlockedExchange(&Prcb->PrcbLock, 1)) break;

        /* Loop until the other CPU releases it (read-only spin) */
        do
        {
            /* Let the CPU know that this is a loop */
            YieldProcessor();
        } while (Prcb->PrcbLock);
    }
}
862
//
// This routine releases the PRCB lock so that other callers can touch
// volatile PRCB data.
//
// Since this is a simple optimized spin-lock, it must be only acquired
// at dispatcher level or higher!
//
FORCEINLINE
VOID
KiReleasePrcbLock(IN PKPRCB Prcb)
{
    /* Make sure it's acquired! */
    ASSERT(Prcb->PrcbLock != 0);

    /* Release it (atomic clear) */
    InterlockedAnd(&Prcb->PrcbLock, 0);
}
880
//
// This routine acquires the thread lock so that only one caller can touch
// volatile thread data.
//
// Since this is a simple optimized spin-lock, it must be only acquired
// at dispatcher level or higher!
//
FORCEINLINE
VOID
KiAcquireThreadLock(IN PKTHREAD Thread)
{
    /* Make sure we're at a safe level to touch the thread lock */
    ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);

    /* Start acquire loop */
    for (;;)
    {
        /* Acquire the lock and break out if we acquired it first */
        if (!InterlockedExchange(&Thread->ThreadLock, 1)) break;

        /* Loop until the other CPU releases it (read-only spin) */
        do
        {
            /* Let the CPU know that this is a loop */
            YieldProcessor();
        } while (Thread->ThreadLock);
    }
}
909
//
// This routine releases the thread lock so that other callers can touch
// volatile thread data.
//
// Since this is a simple optimized spin-lock, it must be only acquired
// at dispatcher level or higher!
//
FORCEINLINE
VOID
KiReleaseThreadLock(IN PKTHREAD Thread)
{
    /* Release it (atomic clear) */
    InterlockedAnd(&Thread->ThreadLock, 0);
}
924
925 FORCEINLINE
926 BOOLEAN
927 KiTryThreadLock(IN PKTHREAD Thread)
928 {
929 LONG Value;
930
931 /* If the lock isn't acquired, return false */
932 if (!Thread->ThreadLock) return FALSE;
933
934 /* Otherwise, try to acquire it and check the result */
935 Value = 1;
936 Value = InterlockedExchange(&Thread->ThreadLock, &Value);
937
938 /* Return the lock state */
939 return (Value == TRUE);
940 }
941
//
// Drains this CPU's deferred-ready thread list, if it is non-empty.
//
FORCEINLINE
VOID
KiCheckDeferredReadyList(IN PKPRCB Prcb)
{
    /* Scan the deferred ready lists if required */
    if (Prcb->DeferredReadyListHead.Next) KiProcessDeferredReadyList(Prcb);
}
949
950 FORCEINLINE
951 VOID
952 KiRequestApcInterrupt(IN BOOLEAN NeedApc,
953 IN UCHAR Processor)
954 {
955 /* Check if we need to request APC delivery */
956 if (NeedApc)
957 {
958 /* Check if it's on another CPU */
959 if (KeGetPcr()->Number != Cpu)
960 {
961 /* Send an IPI to request delivery */
962 KiIpiSendRequest(AFFINITY_MASK(Cpu), IPI_DPC);
963 }
964 else
965 {
966 /* Request a software interrupt */
967 HalRequestSoftwareInterrupt(APC_LEVEL);
968 }
969 }
970 }
971
972 #endif
973
//
// Acquires the thread's APC queue lock, raising to SYNCH level.
//
FORCEINLINE
VOID
KiAcquireApcLock(IN PKTHREAD Thread,
                 IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Acquire the lock and raise to synchronization level */
    KeAcquireInStackQueuedSpinLockRaiseToSynch(&Thread->ApcQueueLock, Handle);
}
982
//
// Acquires the thread's APC queue lock when already at DISPATCH_LEVEL
// (or higher); does not change IRQL.
//
FORCEINLINE
VOID
KiAcquireApcLockAtDpcLevel(IN PKTHREAD Thread,
                           IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Acquire the lock */
    KeAcquireInStackQueuedSpinLockAtDpcLevel(&Thread->ApcQueueLock, Handle);
}
991
//
// Acquires the thread's APC queue lock from APC level.
//
// NOTE(review): despite the name, this calls the plain
// KeAcquireInStackQueuedSpinLock (which raises to DISPATCH_LEVEL) --
// confirm this is the intended variant.
//
FORCEINLINE
VOID
KiAcquireApcLockAtApcLevel(IN PKTHREAD Thread,
                           IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Acquire the lock */
    KeAcquireInStackQueuedSpinLock(&Thread->ApcQueueLock, Handle);
}
1000
//
// Releases the APC queue lock and restores the IRQL saved in the handle.
//
FORCEINLINE
VOID
KiReleaseApcLock(IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Release the lock */
    KeReleaseInStackQueuedSpinLock(Handle);
}
1008
//
// Releases the APC queue lock without lowering IRQL.
//
FORCEINLINE
VOID
KiReleaseApcLockFromDpcLevel(IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Release the lock */
    KeReleaseInStackQueuedSpinLockFromDpcLevel(Handle);
}
1016
//
// Acquires the process lock, raising to SYNCH level.
//
FORCEINLINE
VOID
KiAcquireProcessLock(IN PKPROCESS Process,
                     IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Acquire the lock and raise to synchronization level */
    KeAcquireInStackQueuedSpinLockRaiseToSynch(&Process->ProcessLock, Handle);
}
1025
//
// Releases the process lock and restores the IRQL saved in the handle.
//
FORCEINLINE
VOID
KiReleaseProcessLock(IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Release the lock */
    KeReleaseInStackQueuedSpinLock(Handle);
}
1033
//
// Releases the process lock without lowering IRQL.
//
FORCEINLINE
VOID
KiReleaseProcessLockFromDpcLevel(IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Release the lock */
    KeReleaseInStackQueuedSpinLockFromDpcLevel(Handle);
}
1041
//
// Acquires a device queue's lock, choosing the variant that matches the
// caller's context: a threaded DPC runs below DISPATCH_LEVEL and needs
// the raising acquire; an ordinary DPC is already at DISPATCH_LEVEL.
//
FORCEINLINE
VOID
KiAcquireDeviceQueueLock(IN PKDEVICE_QUEUE DeviceQueue,
                         IN PKLOCK_QUEUE_HANDLE DeviceLock)
{
    /* Check if we were called from a threaded DPC */
    if (KeGetCurrentPrcb()->DpcThreadActive)
    {
        /* Lock the Queue, we're not at DPC level */
        KeAcquireInStackQueuedSpinLock(&DeviceQueue->Lock, DeviceLock);
    }
    else
    {
        /* We must be at DPC level, acquire the lock safely */
        ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
        KeAcquireInStackQueuedSpinLockAtDpcLevel(&DeviceQueue->Lock,
                                                 DeviceLock);
    }
}
1061
//
// Releases a device queue's lock, mirroring the context check made by
// KiAcquireDeviceQueueLock.
//
FORCEINLINE
VOID
KiReleaseDeviceQueueLock(IN PKLOCK_QUEUE_HANDLE DeviceLock)
{
    /* Check if we were called from a threaded DPC */
    if (KeGetCurrentPrcb()->DpcThreadActive)
    {
        /* Unlock the Queue, we're not at DPC level */
        KeReleaseInStackQueuedSpinLock(DeviceLock);
    }
    else
    {
        /* We must be at DPC level, release the lock safely */
        ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
        KeReleaseInStackQueuedSpinLockFromDpcLevel(DeviceLock);
    }
}
1079
1080 //
1081 // This routine queues a thread that is ready on the PRCB's ready lists.
1082 // If this thread cannot currently run on this CPU, then the thread is
1083 // added to the deferred ready list instead.
1084 //
1085 // This routine must be entered with the PRCB lock held and it will exit
1086 // with the PRCB lock released!
1087 //
1088 FORCEINLINE
1089 VOID
1090 KxQueueReadyThread(IN PKTHREAD Thread,
1091 IN PKPRCB Prcb)
1092 {
1093 BOOLEAN Preempted;
1094 KPRIORITY Priority;
1095
1096 /* Sanity checks */
1097 ASSERT(Prcb == KeGetCurrentPrcb());
1098 ASSERT(Thread->State == Running);
1099 ASSERT(Thread->NextProcessor == Prcb->Number);
1100
1101 /* Check if this thread is allowed to run in this CPU */
1102 #ifdef _CONFIG_SMP
1103 if ((Thread->Affinity) & (Prcb->SetMember))
1104 #else
1105 if (TRUE)
1106 #endif
1107 {
1108 /* Set thread ready for execution */
1109 Thread->State = Ready;
1110
1111 /* Save current priority and if someone had pre-empted it */
1112 Priority = Thread->Priority;
1113 Preempted = Thread->Preempted;
1114
1115 /* We're not pre-empting now, and set the wait time */
1116 Thread->Preempted = FALSE;
1117 Thread->WaitTime = KeTickCount.LowPart;
1118
1119 /* Sanity check */
1120 ASSERT((Priority >= 0) && (Priority <= HIGH_PRIORITY));
1121
1122 /* Insert this thread in the appropriate order */
1123 Preempted ? InsertHeadList(&Prcb->DispatcherReadyListHead[Priority],
1124 &Thread->WaitListEntry) :
1125 InsertTailList(&Prcb->DispatcherReadyListHead[Priority],
1126 &Thread->WaitListEntry);
1127
1128 /* Update the ready summary */
1129 Prcb->ReadySummary |= PRIORITY_MASK(Priority);
1130
1131 /* Sanity check */
1132 ASSERT(Priority == Thread->Priority);
1133
1134 /* Release the PRCB lock */
1135 KiReleasePrcbLock(Prcb);
1136 }
1137 else
1138 {
1139 /* Otherwise, prepare this thread to be deferred */
1140 Thread->State = DeferredReady;
1141 Thread->DeferredProcessor = Prcb->Number;
1142
1143 /* Release the lock and defer scheduling */
1144 KiReleasePrcbLock(Prcb);
1145 KiDeferredReadyThread(Thread);
1146 }
1147 }
1148
1149 //
1150 // This routine scans for an appropriate ready thread to select at the
1151 // given priority and for the given CPU.
1152 //
1153 FORCEINLINE
1154 PKTHREAD
1155 KiSelectReadyThread(IN KPRIORITY Priority,
1156 IN PKPRCB Prcb)
1157 {
1158 LONG PriorityMask, PrioritySet, HighPriority;
1159 PLIST_ENTRY ListEntry;
1160 PKTHREAD Thread;
1161
1162 /* Save the current mask and get the priority set for the CPU */
1163 PriorityMask = Priority;
1164 PrioritySet = Prcb->ReadySummary >> (UCHAR)Priority;
1165 if (!PrioritySet) return NULL;
1166
1167 /* Get the highest priority possible */
1168 BitScanReverse((PULONG)&HighPriority, PrioritySet);
1169 ASSERT((PrioritySet & PRIORITY_MASK(HighPriority)) != 0);
1170 HighPriority += PriorityMask;
1171
1172 /* Make sure the list isn't at highest priority */
1173 ASSERT(IsListEmpty(&Prcb->DispatcherReadyListHead[HighPriority]) == FALSE);
1174
1175 /* Get the first thread on the list */
1176 ListEntry = &Prcb->DispatcherReadyListHead[HighPriority];
1177 Thread = CONTAINING_RECORD(ListEntry, KTHREAD, WaitListEntry);
1178
1179 /* Make sure this thread is here for a reason */
1180 ASSERT(HighPriority == Thread->Priority);
1181 ASSERT(Thread->Affinity & AFFINITY_MASK(Prcb->Number));
1182 ASSERT(Thread->NextProcessor == Prcb->Number);
1183
1184 /* Remove it from the list */
1185 RemoveEntryList(&Thread->WaitListEntry);
1186 if (IsListEmpty(&Thread->WaitListEntry))
1187 {
1188 /* The list is empty now, reset the ready summary */
1189 Prcb->ReadySummary ^= PRIORITY_MASK(HighPriority);
1190 }
1191
1192 /* Sanity check and return the thread */
1193 ASSERT((Thread == NULL) ||
1194 (Thread->BasePriority == 0) ||
1195 (Thread->Priority != 0));
1196 return Thread;
1197 }
1198
//
// This routine computes the new priority for a thread. It is only valid for
// threads with priorities in the dynamic priority range.
//
// Dynamic-priority threads decay by PriorityDecrement + 1 (never below
// their base priority) and the decrement is consumed; realtime threads
// are returned unchanged.
//
SCHAR
FORCEINLINE
KiComputeNewPriority(IN PKTHREAD Thread)
{
    SCHAR Priority;

    /* Priority sanity checks */
    ASSERT((Thread->PriorityDecrement >= 0) &&
           (Thread->PriorityDecrement <= Thread->Priority));
    ASSERT((Thread->Priority < LOW_REALTIME_PRIORITY) ?
            TRUE : (Thread->PriorityDecrement == 0));

    /* Get the current priority */
    Priority = Thread->Priority;
    if (Priority < LOW_REALTIME_PRIORITY)
    {
        /* Decrease priority by the priority decrement */
        Priority -= (Thread->PriorityDecrement + 1);

        /* Don't go out of bounds */
        if (Priority < Thread->BasePriority) Priority = Thread->BasePriority;

        /* Reset the priority decrement */
        Thread->PriorityDecrement = 0;
    }

    /* Sanity check */
    ASSERT((Thread->BasePriority == 0) || (Priority != 0));

    /* Return the new priority */
    return Priority;
}
1235