- Fix lock acquisition/release mismatches in KiInsertQueueApc.
reactos/ntoskrnl/include/internal/ke_x.h
1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: GPL - See COPYING in the top level directory
4 * FILE: ntoskrnl/include/internal/ke_x.h
5 * PURPOSE: Internal Inlined Functions for the Kernel
6 * PROGRAMMERS: Alex Ionescu (alex.ionescu@reactos.org)
7 */
8
9 //
10 // Enters a Guarded Region
11 //
12 #define KeEnterGuardedRegion() \
13 { \
14 PKTHREAD Thread = KeGetCurrentThread(); \
15 \
16 /* Sanity checks */ \
17 ASSERT_IRQL_LESS_OR_EQUAL(APC_LEVEL); \
18 ASSERT(Thread == KeGetCurrentThread()); \
19 ASSERT((Thread->SpecialApcDisable <= 0) && \
20 (Thread->SpecialApcDisable != -32768)); \
21 \
22 /* Disable Special APCs */ \
23 Thread->SpecialApcDisable--; \
24 }
25
26 //
27 // Leaves a Guarded Region
28 //
29 #define KeLeaveGuardedRegion() \
30 { \
31 PKTHREAD Thread = KeGetCurrentThread(); \
32 \
33 /* Sanity checks */ \
34 ASSERT_IRQL_LESS_OR_EQUAL(APC_LEVEL); \
35 ASSERT(Thread == KeGetCurrentThread()); \
36 ASSERT(Thread->SpecialApcDisable < 0); \
37 \
38 /* Leave region and check if APCs are OK now */ \
39 if (!(++Thread->SpecialApcDisable)) \
40 { \
41 /* Check for Kernel APCs on the list */ \
42 if (!IsListEmpty(&Thread->ApcState. \
43 ApcListHead[KernelMode])) \
44 { \
45 /* Check for APC Delivery */ \
46 KiCheckForKernelApcDelivery(); \
47 } \
48 } \
49 }
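
//
// Illustrative sketch: one way a caller might pair the two macros above. The
// routine name ExampleDoWorkWithSpecialApcsDisabled is hypothetical.
//
FORCEINLINE
VOID
ExampleDoWorkWithSpecialApcsDisabled(VOID)
{
    /* Disable all APC delivery, including special kernel APCs */
    KeEnterGuardedRegion();

    /* ... touch per-thread state that no APC may interrupt ... */

    /* Balance the enter; a pending kernel APC may be delivered on exit */
    KeLeaveGuardedRegion();
}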
50
51 //
52 // TODO: Guarded Mutex Routines
53 //
54
55 //
56 // Enters a Critical Region
57 //
58 #define KeEnterCriticalRegion() \
59 { \
60 PKTHREAD Thread = KeGetCurrentThread(); \
61 if (Thread) \
62 { \
63 /* Sanity checks */ \
64 ASSERT(Thread == KeGetCurrentThread()); \
65 ASSERT((Thread->KernelApcDisable <= 0) && \
66 (Thread->KernelApcDisable != -32768)); \
67 \
68 /* Disable Kernel APCs */ \
69 Thread->KernelApcDisable--; \
70 } \
71 }
72
73 //
74 // Leaves a Critical Region
75 //
76 #define KeLeaveCriticalRegion() \
77 { \
78 PKTHREAD Thread = KeGetCurrentThread(); \
79 if (Thread) \
80 { \
81 /* Sanity checks */ \
82 ASSERT(Thread == KeGetCurrentThread()); \
83 ASSERT(Thread->KernelApcDisable < 0); \
84 \
85 /* Enable Kernel APCs */ \
86 Thread->KernelApcDisable++; \
87 \
88 /* Check if Kernel APCs are now enabled */ \
89 if (!(Thread->KernelApcDisable)) \
90 { \
91 /* Check if we need to request an APC Delivery */ \
92 if (!(IsListEmpty(&Thread->ApcState.ApcListHead[KernelMode])) && \
93 !(Thread->SpecialApcDisable)) \
94 { \
95 /* Check for the right environment */ \
96 KiCheckForKernelApcDelivery(); \
97 } \
98 } \
99 } \
100 }
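
//
// Illustrative sketch: the usual pairing of the two macros above, disabling
// normal kernel APC delivery across a region of code. The routine name
// ExampleDoWorkWithKernelApcsDisabled is hypothetical.
//
FORCEINLINE
VOID
ExampleDoWorkWithKernelApcsDisabled(VOID)
{
    /* Block normal kernel APC delivery for the current thread */
    KeEnterCriticalRegion();

    /* ... e.g. acquire and use a resource that must not be suspended ... */

    /* Re-enable kernel APCs; a pending one may be delivered right here */
    KeLeaveCriticalRegion();
}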
101
102 //
103 // Satisfies the wait of any dispatcher object
104 //
105 #define KiSatisfyObjectWait(Object, Thread) \
106 { \
107 /* Special case for Mutants */ \
108 if ((Object)->Header.Type == MutantObject) \
109 { \
110 /* Decrease the Signal State */ \
111 (Object)->Header.SignalState--; \
112 \
113 /* Check if it's now non-signaled */ \
114 if (!(Object)->Header.SignalState) \
115 { \
116 /* Set the Owner Thread */ \
117 (Object)->OwnerThread = Thread; \
118 \
119 /* Disable APCs if needed */ \
120 Thread->KernelApcDisable -= (Object)->ApcDisable; \
121 \
122 /* Check if it's abandoned */ \
123 if ((Object)->Abandoned) \
124 { \
125 /* Unabandon it */ \
126 (Object)->Abandoned = FALSE; \
127 \
128 /* Return Status */ \
129 Thread->WaitStatus = STATUS_ABANDONED; \
130 } \
131 \
132 /* Insert it into the Mutant List */ \
133 InsertHeadList(Thread->MutantListHead.Blink, \
134 &(Object)->MutantListEntry); \
135 } \
136 } \
137 else if (((Object)->Header.Type & TIMER_OR_EVENT_TYPE) == \
138 EventSynchronizationObject) \
139 { \
140 /* Synchronization Timers and Events just get un-signaled */ \
141 (Object)->Header.SignalState = 0; \
142 } \
143 else if ((Object)->Header.Type == SemaphoreObject) \
144 { \
145 /* These ones can have multiple states, so we only decrease it */ \
146 (Object)->Header.SignalState--; \
147 } \
148 }
149
150 //
151 // Satisfies the wait of a mutant dispatcher object
152 //
153 #define KiSatisfyMutantWait(Object, Thread) \
154 { \
155 /* Decrease the Signal State */ \
156 (Object)->Header.SignalState--; \
157 \
158 /* Check if it's now non-signaled */ \
159 if (!(Object)->Header.SignalState) \
160 { \
161 /* Set the Owner Thread */ \
162 (Object)->OwnerThread = Thread; \
163 \
164 /* Disable APCs if needed */ \
165 Thread->KernelApcDisable -= (Object)->ApcDisable; \
166 \
167 /* Check if it's abandoned */ \
168 if ((Object)->Abandoned) \
169 { \
170 /* Unabandon it */ \
171 (Object)->Abandoned = FALSE; \
172 \
173 /* Return Status */ \
174 Thread->WaitStatus = STATUS_ABANDONED; \
175 } \
176 \
177 /* Insert it into the Mutant List */ \
178 InsertHeadList(Thread->MutantListHead.Blink, \
179 &(Object)->MutantListEntry); \
180 } \
181 }
182
183 //
184 // Satisfies the wait of any nonmutant dispatcher object
185 //
186 #define KiSatisfyNonMutantWait(Object, Thread) \
187 { \
188 if (((Object)->Header.Type & TIMER_OR_EVENT_TYPE) == \
189 EventSynchronizationObject) \
190 { \
191 /* Synchronization Timers and Events just get un-signaled */ \
192 (Object)->Header.SignalState = 0; \
193 } \
194 else if ((Object)->Header.Type == SemaphoreObject) \
195 { \
196 /* These ones can have multiple states, so we only decrease it */ \
197 (Object)->Header.SignalState--; \
198 } \
199 }
200
201 //
202 // Recalculates the due time
203 //
204 PLARGE_INTEGER
205 FORCEINLINE
206 KiRecalculateDueTime(IN PLARGE_INTEGER OriginalDueTime,
207 IN PLARGE_INTEGER DueTime,
208 IN OUT PLARGE_INTEGER NewDueTime)
209 {
210 /* Don't do anything for absolute waits */
211 if (OriginalDueTime->QuadPart >= 0) return OriginalDueTime;
212
213 /* Otherwise, query the interrupt time and recalculate */
214 NewDueTime->QuadPart = KeQueryInterruptTime();
215 NewDueTime->QuadPart -= DueTime->QuadPart;
216 return NewDueTime;
217 }
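
//
// Illustrative sketch of the convention assumed above: a negative due time is
// a relative interval in 100ns units, a non-negative one is an absolute time,
// so only relative waits are recomputed against the interrupt time. The
// routine name ExampleRemainingWaitTime and its locals are hypothetical.
//
FORCEINLINE
PLARGE_INTEGER
ExampleRemainingWaitTime(IN PLARGE_INTEGER OriginalDueTime,
                         IN PKTIMER Timer,
                         OUT PLARGE_INTEGER NewDueTime)
{
    LARGE_INTEGER DueTime;

    /* The timer's due time is the absolute interrupt-time deadline */
    DueTime.QuadPart = Timer->DueTime.QuadPart;

    /* For a relative wait (e.g. -1000000 for 100ms) this returns the
       still-negative remainder; an absolute wait is returned unchanged */
    return KiRecalculateDueTime(OriginalDueTime, &DueTime, NewDueTime);
}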
218
219 //
220 // Determines whether a thread should be added to the wait list
221 //
222 #define KiCheckThreadStackSwap(WaitMode, Thread, Swappable) \
223 { \
224 /* Check the required conditions */ \
225 if ((WaitMode != KernelMode) && \
226 (Thread->EnableStackSwap) && \
227 (Thread->Priority >= (LOW_REALTIME_PRIORITY + 9))) \
228 { \
229 /* We are go for swap */ \
230 Swappable = TRUE; \
231 } \
232 else \
233 { \
234 /* Don't swap the thread */ \
235 Swappable = FALSE; \
236 } \
237 }
238
239 //
240 // Adds a thread to the wait list
241 //
242 #define KiAddThreadToWaitList(Thread, Swappable) \
243 { \
244 /* Make sure it's swappable */ \
245 if (Swappable) \
246 { \
247 /* Insert it into the PRCB's List */ \
248 InsertTailList(&KeGetCurrentPrcb()->WaitListHead, \
249 &Thread->WaitListEntry); \
250 } \
251 }
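
//
// Illustrative sketch: how a wait routine might combine the two helpers above
// before blocking. The routine name ExampleQueueWaitingThread is hypothetical.
//
FORCEINLINE
VOID
ExampleQueueWaitingThread(IN PKTHREAD Thread,
                          IN KPROCESSOR_MODE WaitMode)
{
    BOOLEAN Swappable;

    /* Decide whether this waiter's kernel stack may be swapped */
    KiCheckThreadStackSwap(WaitMode, Thread, Swappable);

    /* Only swappable waiters are put on the PRCB's wait list */
    KiAddThreadToWaitList(Thread, Swappable);
}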
252
253 //
254 // Rules for checking alertability:
255 // - For Alertable waits ONLY:
256 // * We don't wait and return STATUS_ALERTED if the thread is alerted
257 // in EITHER the specified wait mode OR in Kernel Mode.
258 // - For BOTH Alertable AND Non-Alertable waits:
259 // * We don't wait and return STATUS_USER_APC if the User Mode APC list
260 // is not empty AND the wait mode is User Mode.
261 //
262 #define KiCheckAlertability() \
263 { \
264 if (Alertable) \
265 { \
266 if (CurrentThread->Alerted[(int)WaitMode]) \
267 { \
268 CurrentThread->Alerted[(int)WaitMode] = FALSE; \
269 WaitStatus = STATUS_ALERTED; \
270 break; \
271 } \
272 else if ((WaitMode != KernelMode) && \
273 (!IsListEmpty(&CurrentThread-> \
274 ApcState.ApcListHead[UserMode]))) \
275 { \
276 CurrentThread->ApcState.UserApcPending = TRUE; \
277 WaitStatus = STATUS_USER_APC; \
278 break; \
279 } \
280 else if (CurrentThread->Alerted[KernelMode]) \
281 { \
282 CurrentThread->Alerted[KernelMode] = FALSE; \
283 WaitStatus = STATUS_ALERTED; \
284 break; \
285 } \
286 } \
287 else if ((WaitMode != KernelMode) && \
288 (CurrentThread->ApcState.UserApcPending)) \
289 { \
290 WaitStatus = STATUS_USER_APC; \
291 break; \
292 } \
293 }
294
295 //
296 // Thread Scheduling Routines
297 //
298 #ifndef _CONFIG_SMP
299 KIRQL
300 FORCEINLINE
301 KiAcquireDispatcherLock(VOID)
302 {
303 /* Raise to DPC level */
304 return KeRaiseIrqlToDpcLevel();
305 }
306
307 VOID
308 FORCEINLINE
309 KiReleaseDispatcherLock(IN KIRQL OldIrql)
310 {
311 /* Just exit the dispatcher */
312 KiExitDispatcher(OldIrql);
313 }
314
315 VOID
316 FORCEINLINE
317 KiAcquireDispatcherLockAtDpcLevel(VOID)
318 {
319 /* This is a no-op at DPC Level for UP systems */
320 return;
321 }
322
323 VOID
324 FORCEINLINE
325 KiReleaseDispatcherLockFromDpcLevel(VOID)
326 {
327 /* This is a no-op at DPC Level for UP systems */
328 return;
329 }
330
331 //
332 // This routine makes the thread deferred ready on the boot CPU.
333 //
334 FORCEINLINE
335 VOID
336 KiInsertDeferredReadyList(IN PKTHREAD Thread)
337 {
338 /* Set the thread to deferred state and boot CPU */
339 Thread->State = DeferredReady;
340 Thread->DeferredProcessor = 0;
341
342 /* Make the thread ready immediately */
343 KiDeferredReadyThread(Thread);
344 }
345
346 FORCEINLINE
347 VOID
348 KiRescheduleThread(IN BOOLEAN NewThread,
349 IN ULONG Cpu)
350 {
351 /* This is meaningless on UP systems */
352 UNREFERENCED_PARAMETER(NewThread);
353 UNREFERENCED_PARAMETER(Cpu);
354 }
355
356 //
357 // This routine protects against multiple CPU acquires; it's meaningless on UP.
358 //
359 FORCEINLINE
360 VOID
361 KiSetThreadSwapBusy(IN PKTHREAD Thread)
362 {
363 UNREFERENCED_PARAMETER(Thread);
364 }
365
366 //
367 // This routine protects against multiple CPU acquires; it's meaningless on UP.
368 //
369 FORCEINLINE
370 VOID
371 KiAcquirePrcbLock(IN PKPRCB Prcb)
372 {
373 UNREFERENCED_PARAMETER(Prcb);
374 }
375
376 //
377 // This routine protects against multiple CPU acquires; it's meaningless on UP.
378 //
379 FORCEINLINE
380 VOID
381 KiReleasePrcbLock(IN PKPRCB Prcb)
382 {
383 UNREFERENCED_PARAMETER(Prcb);
384 }
385
386 //
387 // This routine protects against multiple CPU acquires; it's meaningless on UP.
388 //
389 FORCEINLINE
390 VOID
391 KiAcquireThreadLock(IN PKTHREAD Thread)
392 {
393 UNREFERENCED_PARAMETER(Thread);
394 }
395
396 //
397 // This routine protects against multiple CPU acquires; it's meaningless on UP.
398 //
399 FORCEINLINE
400 VOID
401 KiReleaseThreadLock(IN PKTHREAD Thread)
402 {
403 UNREFERENCED_PARAMETER(Thread);
404 }
405
406 FORCEINLINE
407 VOID
408 KiCheckDeferredReadyList(IN PKPRCB Prcb)
409 {
410 /* There are no deferred ready lists on UP systems */
411 UNREFERENCED_PARAMETER(Prcb);
412 }
413
414 FORCEINLINE
415 VOID
416 KiRundownThread(IN PKTHREAD Thread)
417 {
418 /* Check if this is the NPX Thread */
419 if (KeGetCurrentPrcb()->NpxThread == Thread)
420 {
421 /* Clear it */
422 KeGetCurrentPrcb()->NpxThread = NULL;
423 #ifdef __GNUC__
424 __asm__("fninit\n\t");
425 #else
426 __asm fninit;
427 #endif
428 }
429 }
430
431 FORCEINLINE
432 VOID
433 KiRequestApcInterrupt(IN BOOLEAN NeedApc,
434 IN UCHAR Processor)
435 {
436 /* We deliver instantly on UP */
437 UNREFERENCED_PARAMETER(NeedApc);
438 UNREFERENCED_PARAMETER(Processor);
439 }
440
441 #else
442
443 KIRQL
444 FORCEINLINE
445 KiAcquireDispatcherLock(VOID)
446 {
447 /* Raise to synchronization level and acquire the dispatcher lock */
448 return KeAcquireQueuedSpinLockRaiseToSynch(LockQueueDispatcherLock);
449 }
450
451 VOID
452 FORCEINLINE
453 KiReleaseDispatcherLock(IN KIRQL OldIrql)
454 {
455 /* First release the lock */
456 KeReleaseQueuedSpinLockFromDpcLevel(&KeGetCurrentPrcb()->
457 LockQueue[LockQueueDispatcherLock]);
458
459 /* Then exit the dispatcher */
460 KiExitDispatcher(OldIrql);
461 }
462
463 //
464 // This routine inserts a thread into the deferred ready list of the current CPU
465 //
466 FORCEINLINE
467 VOID
468 KiInsertDeferredReadyList(IN PKTHREAD Thread)
469 {
470 PKPRCB Prcb = KeGetCurrentPrcb();
471
472 /* Set the thread to deferred state and CPU */
473 Thread->State = DeferredReady;
474 Thread->DeferredProcessor = Prcb->Number;
475
476 /* Add it on the list */
477 PushEntryList(&Prcb->DeferredReadyListHead, &Thread->SwapListEntry);
478 }
479
480 FORCEINLINE
481 VOID
482 KiRescheduleThread(IN BOOLEAN NewThread,
483 IN ULONG Cpu)
484 {
485 /* Check if a new thread needs to be scheduled on a different CPU */
486 if ((NewThread) && !(KeGetPcr()->Number == Cpu))
487 {
488 /* Send an IPI to request delivery */
489 KiIpiSendRequest(AFFINITY_MASK(Cpu), IPI_DPC);
490 }
491 }
492
493 //
494 // This routine puts the current thread into a swap-busy state, which ensures
495 // that nobody else tries to swap it concurrently.
496 //
497 FORCEINLINE
498 VOID
499 KiSetThreadSwapBusy(IN PKTHREAD Thread)
500 {
501 /* Make sure nobody already set it */
502 ASSERT(Thread->SwapBusy == FALSE);
503
504 /* Set it ourselves */
505 Thread->SwapBusy = TRUE;
506 }
507
508 //
509 // This routine acquires the PRCB lock so that only one caller can touch
510 // volatile PRCB data.
511 //
512 // Since this is a simple optimized spin-lock, it must only be acquired
513 // at dispatcher level or higher!
514 //
515 FORCEINLINE
516 VOID
517 KiAcquirePrcbLock(IN PKPRCB Prcb)
518 {
519 /* Make sure we're at a safe level to touch the PRCB lock */
520 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
521
522 /* Start acquire loop */
523 for (;;)
524 {
525 /* Acquire the lock and break out if we acquired it first */
526 if (!InterlockedExchange(&Prcb->PrcbLock, 1)) break;
527
528 /* Loop until the other CPU releases it */
529 do
530 {
531 /* Let the CPU know that this is a loop */
532 YieldProcessor();
533 } while (Prcb->PrcbLock);
534 }
535 }
536
537 //
538 // This routine releases the PRCB lock so that other callers can touch
539 // volatile PRCB data.
540 //
541 // Since this is a simple optimized spin-lock, it must only be acquired
542 // at dispatcher level or higher!
543 //
544 FORCEINLINE
545 VOID
546 KiReleasePrcbLock(IN PKPRCB Prcb)
547 {
548 /* Make sure it's acquired! */
549 ASSERT(Prcb->PrcbLock != 0);
550
551 /* Release it */
552 InterlockedAnd(&Prcb->PrcbLock, 0);
553 }
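
//
// Illustrative sketch of the intended acquire/release pairing for the PRCB
// lock. The routine name ExampleTouchPrcbData is hypothetical; the caller is
// assumed to already be at DISPATCH_LEVEL or above.
//
FORCEINLINE
VOID
ExampleTouchPrcbData(IN PKPRCB Prcb)
{
    /* Serialize against other processors touching volatile PRCB data */
    KiAcquirePrcbLock(Prcb);

    /* ... read or modify per-CPU scheduler state here ... */

    /* Always release on the same path that acquired */
    KiReleasePrcbLock(Prcb);
}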
554
555 //
556 // This routine acquires the thread lock so that only one caller can touch
557 // volatile thread data.
558 //
559 // Since this is a simple optimized spin-lock, it must only be acquired
560 // at dispatcher level or higher!
561 //
562 FORCEINLINE
563 VOID
564 KiAcquireThreadLock(IN PKTHREAD Thread)
565 {
566 /* Make sure we're at a safe level to touch the thread lock */
567 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
568
569 /* Start acquire loop */
570 for (;;)
571 {
572 /* Acquire the lock and break out if we acquired it first */
573 if (!InterlockedExchange(&Thread->ThreadLock, 1)) break;
574
575 /* Loop until the other CPU releases it */
576 do
577 {
578 /* Let the CPU know that this is a loop */
579 YieldProcessor();
580 } while (Thread->ThreadLock);
581 }
582 }
583
584 //
585 // This routine releases the thread lock so that other callers can touch
586 // volatile thread data.
587 //
588 // Since this is a simple optimized spin-lock, it must only be acquired
589 // at dispatcher level or higher!
590 //
591 FORCEINLINE
592 VOID
593 KiReleaseThreadLock(IN PKTHREAD Thread)
594 {
595 /* Release it */
596 InterlockedAnd(&Thread->ThreadLock, 0);
597 }
598
599 FORCEINLINE
600 VOID
601 KiCheckDeferredReadyList(IN PKPRCB Prcb)
602 {
603 /* Scan the deferred ready lists if required */
604 if (Prcb->DeferredReadyListHead.Next) KiProcessDeferredReadyList(Prcb);
605 }
606
607 FORCEINLINE
608 VOID
609 KiRequestApcInterrupt(IN BOOLEAN NeedApc,
610 IN UCHAR Processor)
611 {
612 /* Check if we need to request APC delivery */
613 if (NeedApc)
614 {
615 /* Check if it's on another CPU */
616 if (KeGetPcr()->Number != Processor)
617 {
618 /* Send an IPI to request delivery */
619 KiIpiSendRequest(AFFINITY_MASK(Processor), IPI_DPC);
620 }
621 else
622 {
623 /* Request a software interrupt */
624 HalRequestSoftwareInterrupt(APC_LEVEL);
625 }
626 }
627 }
628
629 #endif
630
631 FORCEINLINE
632 VOID
633 KiAcquireApcLock(IN PKTHREAD Thread,
634 IN PKLOCK_QUEUE_HANDLE Handle)
635 {
636 /* Acquire the lock and raise to synchronization level */
637 KeAcquireInStackQueuedSpinLockRaiseToSynch(&Thread->ApcQueueLock, Handle);
638 }
639
640 FORCEINLINE
641 VOID
642 KiAcquireApcLockAtDpcLevel(IN PKTHREAD Thread,
643 IN PKLOCK_QUEUE_HANDLE Handle)
644 {
645 /* Acquire the lock */
646 KeAcquireInStackQueuedSpinLockAtDpcLevel(&Thread->ApcQueueLock, Handle);
647 }
648
649 FORCEINLINE
650 VOID
651 KiReleaseApcLock(IN PKLOCK_QUEUE_HANDLE Handle)
652 {
653 /* Release the lock */
654 KeReleaseInStackQueuedSpinLock(Handle);
655 }
656
657 FORCEINLINE
658 VOID
659 KiReleaseApcLockFromDpcLevel(IN PKLOCK_QUEUE_HANDLE Handle)
660 {
661 /* Release the lock */
662 KeReleaseInStackQueuedSpinLockFromDpcLevel(Handle);
663 }
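
//
// Illustrative sketch: balanced use of the per-thread APC queue lock, the
// pattern that a routine such as KiInsertQueueApc is expected to follow. The
// routine name ExampleInspectApcQueue is hypothetical.
//
FORCEINLINE
VOID
ExampleInspectApcQueue(IN PKTHREAD Thread)
{
    KLOCK_QUEUE_HANDLE ApcLock;

    /* Raise to synchronization level and take the APC queue lock */
    KiAcquireApcLock(Thread, &ApcLock);

    /* ... examine or modify Thread->ApcState under the lock ... */

    /* Every acquire must be matched by exactly one release */
    KiReleaseApcLock(&ApcLock);
}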
664
665 FORCEINLINE
666 VOID
667 KiAcquireProcessLock(IN PKPROCESS Process,
668 IN PKLOCK_QUEUE_HANDLE Handle)
669 {
670 /* Acquire the lock and raise to synchronization level */
671 KeAcquireInStackQueuedSpinLockRaiseToSynch(&Process->ProcessLock, Handle);
672 }
673
674 FORCEINLINE
675 VOID
676 KiReleaseProcessLock(IN PKLOCK_QUEUE_HANDLE Handle)
677 {
678 /* Release the lock */
679 KeReleaseInStackQueuedSpinLock(Handle);
680 }
681
682 //
683 // This routine queues a ready thread on the PRCB's ready lists.
684 // If this thread cannot currently run on this CPU, then the thread is
685 // added to the deferred ready list instead.
686 //
687 // This routine must be entered with the PRCB lock held and it will exit
688 // with the PRCB lock released!
689 //
690 FORCEINLINE
691 VOID
692 KxQueueReadyThread(IN PKTHREAD Thread,
693 IN PKPRCB Prcb)
694 {
695 BOOLEAN Preempted;
696 KPRIORITY Priority;
697
698 /* Sanity checks */
699 ASSERT(Prcb == KeGetCurrentPrcb());
700 ASSERT(Thread->State == Running);
701 ASSERT(Thread->NextProcessor == Prcb->Number);
702
703 /* Check if this thread is allowed to run in this CPU */
704 #ifdef _CONFIG_SMP
705 if ((Thread->Affinity) & (Prcb->SetMember))
706 #else
707 if (TRUE)
708 #endif
709 {
710 /* Set thread ready for execution */
711 Thread->State = Ready;
712
713 /* Save current priority and if someone had pre-empted it */
714 Priority = Thread->Priority;
715 Preempted = Thread->Preempted;
716
717 /* We're not pre-empting now, and set the wait time */
718 Thread->Preempted = FALSE;
719 Thread->WaitTime = KeTickCount.LowPart;
720
721 /* Sanity check */
722 ASSERT((Priority >= 0) && (Priority <= HIGH_PRIORITY));
723
724 /* Insert this thread in the appropriate order */
725 Preempted ? InsertHeadList(&Prcb->DispatcherReadyListHead[Priority],
726 &Thread->WaitListEntry) :
727 InsertTailList(&Prcb->DispatcherReadyListHead[Priority],
728 &Thread->WaitListEntry);
729
730 /* Update the ready summary */
731 Prcb->ReadySummary |= PRIORITY_MASK(Priority);
732
733 /* Sanity check */
734 ASSERT(Priority == Thread->Priority);
735
736 /* Release the PRCB lock */
737 KiReleasePrcbLock(Prcb);
738 }
739 else
740 {
741 /* Otherwise, prepare this thread to be deferred */
742 Thread->State = DeferredReady;
743 Thread->DeferredProcessor = Prcb->Number;
744
745 /* Release the lock and defer scheduling */
746 KiReleasePrcbLock(Prcb);
747 KiDeferredReadyThread(Thread);
748 }
749 }
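
//
// Illustrative sketch of the locking contract above: the caller acquires the
// PRCB lock, and KxQueueReadyThread releases it on every path. The routine
// name ExampleReadyCurrentThread is hypothetical; the thread is assumed to be
// the running thread being re-queued, at DISPATCH_LEVEL or above.
//
FORCEINLINE
VOID
ExampleReadyCurrentThread(IN PKTHREAD Thread)
{
    PKPRCB Prcb = KeGetCurrentPrcb();

    /* Take the PRCB lock; KxQueueReadyThread drops it before returning */
    KiAcquirePrcbLock(Prcb);
    KxQueueReadyThread(Thread, Prcb);

    /* No KiReleasePrcbLock here: the lock has already been released */
}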
750
751 //
752 // This routine scans for an appropriate ready thread to select at the
753 // given priority and for the given CPU.
754 //
755 FORCEINLINE
756 PKTHREAD
757 KiSelectReadyThread(IN KPRIORITY Priority,
758 IN PKPRCB Prcb)
759 {
760 LONG PriorityMask, PrioritySet, HighPriority;
761 PLIST_ENTRY ListEntry;
762 PKTHREAD Thread;
763
764 /* Save the current mask and get the priority set for the CPU */
765 PriorityMask = Priority;
766 PrioritySet = Prcb->ReadySummary >> (UCHAR)Priority;
767 if (!PrioritySet) return NULL;
768
769 /* Get the highest priority possible */
770 BitScanReverse((PULONG)&HighPriority, PrioritySet);
771 ASSERT((PrioritySet & PRIORITY_MASK(HighPriority)) != 0);
772 HighPriority += PriorityMask;
773
774 /* Make sure the list isn't empty at the highest priority */
775 ASSERT(IsListEmpty(&Prcb->DispatcherReadyListHead[HighPriority]) == FALSE);
776
777 /* Get the first thread on the list */
778 ListEntry = Prcb->DispatcherReadyListHead[HighPriority].Flink;
779 Thread = CONTAINING_RECORD(ListEntry, KTHREAD, WaitListEntry);
780
781 /* Make sure this thread is here for a reason */
782 ASSERT(HighPriority == Thread->Priority);
783 ASSERT(Thread->Affinity & AFFINITY_MASK(Prcb->Number));
784 ASSERT(Thread->NextProcessor == Prcb->Number);
785
786 /* Remove it from the list */
787 if (RemoveEntryList(&Thread->WaitListEntry))
789 {
790 /* The list is empty now, reset the ready summary */
791 Prcb->ReadySummary ^= PRIORITY_MASK(HighPriority);
792 }
793
794 /* Sanity check and return the thread */
795 ASSERT((Thread == NULL) ||
796 (Thread->BasePriority == 0) ||
797 (Thread->Priority != 0));
798 return Thread;
799 }
800
801 //
802 // This routine computes the new priority for a thread. It is only valid for
803 // threads with priorities in the dynamic priority range.
804 //
805 SCHAR
806 FORCEINLINE
807 KiComputeNewPriority(IN PKTHREAD Thread)
808 {
809 SCHAR Priority;
810
811 /* Priority sanity checks */
812 ASSERT((Thread->PriorityDecrement >= 0) &&
813 (Thread->PriorityDecrement <= Thread->Priority));
814 ASSERT((Thread->Priority < LOW_REALTIME_PRIORITY) ?
815 TRUE : (Thread->PriorityDecrement == 0));
816
817 /* Get the current priority */
818 Priority = Thread->Priority;
819 if (Priority < LOW_REALTIME_PRIORITY)
820 {
821 /* Decay the priority by the priority decrement plus one level */
822 Priority = Priority - Thread->PriorityDecrement - 1;
823
824 /* Don't go out of bounds */
825 if (Priority < Thread->BasePriority) Priority = Thread->BasePriority;
826
827 /* Reset the priority decrement */
828 Thread->PriorityDecrement = 0;
829 }
830
831 /* Sanity check */
832 ASSERT((Thread->BasePriority == 0) || (Priority != 0));
833
834 /* Return the new priority */
835 return Priority;
836 }
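
//
// Worked example of the formula above: a thread at dynamic priority 12 with a
// priority decrement of 2 decays to 12 - 2 - 1 = 9; if its base priority were
// 10, the result would be clamped up to 10. Real-time priorities (at or above
// LOW_REALTIME_PRIORITY) are returned unchanged.
//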
837