- Re-implement KiRequestApcInterrupt in ke_x.h. Make it work by CPU number (as docume...
[reactos.git] / reactos / ntoskrnl / include / internal / ke_x.h
/*
 * PROJECT: ReactOS Kernel
 * LICENSE: GPL - See COPYING in the top level directory
 * FILE: ntoskrnl/include/internal/ke_x.h
 * PURPOSE: Internal Inlined Functions for the Kernel
 * PROGRAMMERS: Alex Ionescu (alex.ionescu@reactos.org)
 */
//
// Enters a Guarded Region
//
// Disables special kernel APCs for the current thread by decrementing
// SpecialApcDisable. Must run at IRQL <= APC_LEVEL and be balanced by a
// later KeLeaveGuardedRegion(). The -32768 assert guards against the
// counter underflowing its 16-bit range.
//
#define KeEnterGuardedRegion()                                              \
{                                                                           \
    PKTHREAD Thread = KeGetCurrentThread();                                 \
                                                                            \
    /* Sanity checks */                                                     \
    ASSERT_IRQL_LESS_OR_EQUAL(APC_LEVEL);                                   \
    ASSERT(Thread == KeGetCurrentThread());                                 \
    ASSERT((Thread->SpecialApcDisable <= 0) &&                              \
           (Thread->SpecialApcDisable != -32768));                          \
                                                                            \
    /* Disable Special APCs */                                              \
    Thread->SpecialApcDisable--;                                            \
}
25
//
// Leaves a Guarded Region
//
// Re-enables special kernel APCs by incrementing SpecialApcDisable; once
// the count returns to zero, pending kernel-mode APCs (if any) are
// delivered via KiCheckForKernelApcDelivery().
//
#define KeLeaveGuardedRegion()                                              \
{                                                                           \
    PKTHREAD Thread = KeGetCurrentThread();                                 \
                                                                            \
    /* Sanity checks */                                                     \
    ASSERT_IRQL_LESS_OR_EQUAL(APC_LEVEL);                                   \
    ASSERT(Thread == KeGetCurrentThread());                                 \
    ASSERT(Thread->SpecialApcDisable < 0);                                  \
                                                                            \
    /* Leave region and check if APCs are OK now */                         \
    if (!(++Thread->SpecialApcDisable))                                     \
    {                                                                       \
        /* Check for Kernel APCs on the list */                             \
        if (!IsListEmpty(&Thread->ApcState.                                 \
                         ApcListHead[KernelMode]))                          \
        {                                                                   \
            /* Check for APC Delivery */                                    \
            KiCheckForKernelApcDelivery();                                  \
        }                                                                   \
    }                                                                       \
}
50
51 //
52 // TODO: Guarded Mutex Routines
53 //
54
//
// Enters a Critical Region
//
// Disables normal kernel APCs for the current thread by decrementing
// KernelApcDisable. If there is no current thread yet, this is a no-op.
// Balanced by KeLeaveCriticalRegion(). The -32768 assert guards against
// the counter underflowing its 16-bit range.
//
#define KeEnterCriticalRegion()                                             \
{                                                                           \
    PKTHREAD Thread = KeGetCurrentThread();                                 \
    if (Thread)                                                             \
    {                                                                       \
        /* Sanity checks */                                                 \
        ASSERT(Thread == KeGetCurrentThread());                             \
        ASSERT((Thread->KernelApcDisable <= 0) &&                           \
               (Thread->KernelApcDisable != -32768));                       \
                                                                            \
        /* Disable Kernel APCs */                                           \
        Thread->KernelApcDisable--;                                         \
    }                                                                       \
}
72
//
// Leaves a Critical Region
//
// Re-enables normal kernel APCs by incrementing KernelApcDisable; once
// the count returns to zero, pending kernel-mode APCs (if any) are
// delivered. If there is no current thread yet, this is a no-op, which
// mirrors the NULL check in KeEnterCriticalRegion.
//
// Note: the previous version re-tested KernelApcDisable inside a branch
// already guarded by it being zero; the redundant test was removed and
// the structure now matches KeLeaveGuardedRegion.
//
#define KeLeaveCriticalRegion()                                             \
{                                                                           \
    PKTHREAD Thread = KeGetCurrentThread();                                 \
    if (Thread)                                                             \
    {                                                                       \
        /* Sanity checks */                                                 \
        ASSERT(Thread == KeGetCurrentThread());                             \
        ASSERT(Thread->KernelApcDisable < 0);                               \
                                                                            \
        /* Enable Kernel APCs and check if they're now enabled */           \
        if (!(++Thread->KernelApcDisable))                                  \
        {                                                                   \
            /* Check for Kernel APCs on the list */                         \
            if (!IsListEmpty(&Thread->ApcState.                             \
                             ApcListHead[KernelMode]))                      \
            {                                                               \
                /* Check for APC Delivery */                                \
                KiCheckForKernelApcDelivery();                              \
            }                                                               \
        }                                                                   \
    }                                                                       \
}
101
//
// Satisfies the wait of any dispatcher object
//
// Mutants: decrement SignalState; on reaching 0 the waiter becomes the
// owner (APC disable adjusted by the mutant's ApcDisable, abandoned flag
// consumed into WaitStatus, mutant appended to the thread's mutant list).
// Synchronization events/timers: reset SignalState to 0.
// Semaphores: decrement SignalState only (they count multiple releases).
// Notification objects fall through unchanged.
//
#define KiSatisfyObjectWait(Object, Thread)                                 \
{                                                                           \
    /* Special case for Mutants */                                          \
    if ((Object)->Header.Type == MutantObject)                              \
    {                                                                       \
        /* Decrease the Signal State */                                     \
        (Object)->Header.SignalState--;                                     \
                                                                            \
        /* Check if it's now non-signaled */                                \
        if (!(Object)->Header.SignalState)                                  \
        {                                                                   \
            /* Set the Owner Thread */                                      \
            (Object)->OwnerThread = Thread;                                 \
                                                                            \
            /* Disable APCs if needed */                                    \
            Thread->KernelApcDisable -= (Object)->ApcDisable;               \
                                                                            \
            /* Check if it's abandoned */                                   \
            if ((Object)->Abandoned)                                        \
            {                                                               \
                /* Unabandon it */                                          \
                (Object)->Abandoned = FALSE;                                \
                                                                            \
                /* Return Status */                                         \
                Thread->WaitStatus = STATUS_ABANDONED;                      \
            }                                                               \
                                                                            \
            /* Insert it into the Mutant List */                            \
            InsertHeadList(Thread->MutantListHead.Blink,                    \
                           &(Object)->MutantListEntry);                     \
        }                                                                   \
    }                                                                       \
    else if (((Object)->Header.Type & TIMER_OR_EVENT_TYPE) ==               \
             EventSynchronizationObject)                                    \
    {                                                                       \
        /* Synchronization Timers and Events just get un-signaled */        \
        (Object)->Header.SignalState = 0;                                   \
    }                                                                       \
    else if ((Object)->Header.Type == SemaphoreObject)                      \
    {                                                                       \
        /* These ones can have multiple states, so we only decrease it */   \
        (Object)->Header.SignalState--;                                     \
    }                                                                       \
}
149
//
// Satisfies the wait of a mutant dispatcher object
//
// Specialized fast path of KiSatisfyObjectWait for a caller that already
// knows the object is a mutant: decrement SignalState and, when it hits
// 0, transfer ownership to the waiter (adjust APC disable, consume the
// abandoned flag into WaitStatus, link into the thread's mutant list).
//
#define KiSatisfyMutantWait(Object, Thread)                                 \
{                                                                           \
    /* Decrease the Signal State */                                         \
    (Object)->Header.SignalState--;                                         \
                                                                            \
    /* Check if it's now non-signaled */                                    \
    if (!(Object)->Header.SignalState)                                      \
    {                                                                       \
        /* Set the Owner Thread */                                          \
        (Object)->OwnerThread = Thread;                                     \
                                                                            \
        /* Disable APCs if needed */                                        \
        Thread->KernelApcDisable -= (Object)->ApcDisable;                   \
                                                                            \
        /* Check if it's abandoned */                                       \
        if ((Object)->Abandoned)                                            \
        {                                                                   \
            /* Unabandon it */                                              \
            (Object)->Abandoned = FALSE;                                    \
                                                                            \
            /* Return Status */                                             \
            Thread->WaitStatus = STATUS_ABANDONED;                          \
        }                                                                   \
                                                                            \
        /* Insert it into the Mutant List */                                \
        InsertHeadList(Thread->MutantListHead.Blink,                        \
                       &(Object)->MutantListEntry);                         \
    }                                                                       \
}
182
//
// Satisfies the wait of any nonmutant dispatcher object
//
// Synchronization events/timers are reset to non-signaled; semaphores are
// decremented; notification objects are left signaled (no branch taken).
//
#define KiSatisfyNonMutantWait(Object, Thread)                              \
{                                                                           \
    if (((Object)->Header.Type & TIMER_OR_EVENT_TYPE) ==                    \
        EventSynchronizationObject)                                         \
    {                                                                       \
        /* Synchronization Timers and Events just get un-signaled */        \
        (Object)->Header.SignalState = 0;                                   \
    }                                                                       \
    else if ((Object)->Header.Type == SemaphoreObject)                      \
    {                                                                       \
        /* These ones can have multiple states, so we only decrease it */   \
        (Object)->Header.SignalState--;                                     \
    }                                                                       \
}
200
201 //
202 // Recalculates the due time
203 //
204 PLARGE_INTEGER
205 FORCEINLINE
206 KiRecalculateDueTime(IN PLARGE_INTEGER OriginalDueTime,
207 IN PLARGE_INTEGER DueTime,
208 IN OUT PLARGE_INTEGER NewDueTime)
209 {
210 /* Don't do anything for absolute waits */
211 if (OriginalDueTime->QuadPart >= 0) return OriginalDueTime;
212
213 /* Otherwise, query the interrupt time and recalculate */
214 NewDueTime->QuadPart = KeQueryInterruptTime();
215 NewDueTime->QuadPart -= DueTime->QuadPart;
216 return NewDueTime;
217 }
218
//
// Determines whether a thread should be added to the wait list
//
// Swappable is set to TRUE only when every condition holds: a user-mode
// wait, stack swapping enabled on the thread, and the thread's priority
// at or above LOW_REALTIME_PRIORITY + 9; otherwise FALSE.
//
#define KiCheckThreadStackSwap(WaitMode, Thread, Swappable)                 \
{                                                                           \
    /* All conditions must hold for the stack to be swappable */            \
    Swappable = (((WaitMode) != KernelMode) &&                              \
                 (Thread->EnableStackSwap) &&                               \
                 (Thread->Priority >= (LOW_REALTIME_PRIORITY + 9))) ?       \
                TRUE : FALSE;                                               \
}
238
//
// Adds a thread to the wait list
//
// Only threads previously judged swappable (see KiCheckThreadStackSwap)
// are linked into the current PRCB's wait list; others are not tracked.
//
#define KiAddThreadToWaitList(Thread, Swappable)                            \
{                                                                           \
    /* Make sure it's swappable */                                          \
    if (Swappable)                                                          \
    {                                                                       \
        /* Insert it into the PRCB's List */                                \
        InsertTailList(&KeGetCurrentPrcb()->WaitListHead,                   \
                       &Thread->WaitListEntry);                             \
    }                                                                       \
}
252
//
// Rules for checking alertability:
//  - For Alertable waits ONLY:
//    * We don't wait and return STATUS_ALERTED if the thread is alerted
//      in EITHER the specified wait mode OR in Kernel Mode.
//  - For BOTH Alertable AND Non-Alertable waits:
//    * We don't wait and return STATUS_USER_APC if the User Mode APC list
//      is not empty AND the wait mode is User Mode.
//
// NOTE: this macro expects Alertable, WaitMode, CurrentThread and
// WaitStatus to be in scope at the expansion site, and uses `break`, so
// it must expand inside the caller's wait loop.
//
#define KiCheckAlertability()                                               \
{                                                                           \
    if (Alertable)                                                          \
    {                                                                       \
        if (CurrentThread->Alerted[(int)WaitMode])                          \
        {                                                                   \
            CurrentThread->Alerted[(int)WaitMode] = FALSE;                  \
            WaitStatus = STATUS_ALERTED;                                    \
            break;                                                          \
        }                                                                   \
        else if ((WaitMode != KernelMode) &&                                \
                 (!IsListEmpty(&CurrentThread->                             \
                               ApcState.ApcListHead[UserMode])))            \
        {                                                                   \
            CurrentThread->ApcState.UserApcPending = TRUE;                  \
            WaitStatus = STATUS_USER_APC;                                   \
            break;                                                          \
        }                                                                   \
        else if (CurrentThread->Alerted[KernelMode])                        \
        {                                                                   \
            CurrentThread->Alerted[KernelMode] = FALSE;                     \
            WaitStatus = STATUS_ALERTED;                                    \
            break;                                                          \
        }                                                                   \
    }                                                                       \
    else if ((WaitMode != KernelMode) &&                                    \
             (CurrentThread->ApcState.UserApcPending))                      \
    {                                                                       \
        WaitStatus = STATUS_USER_APC;                                       \
        break;                                                              \
    }                                                                       \
}
294
295 //
296 // Thread Scheduling Routines
297 //
298 #ifndef _CONFIG_SMP
KIRQL
FORCEINLINE
KiAcquireDispatcherLock(VOID)
{
    /* On UP there is no dispatcher spinlock: raising to DPC level is
       enough to lock out the dispatcher. Returns the previous IRQL. */
    return KeRaiseIrqlToDpcLevel();
}
306
VOID
FORCEINLINE
KiReleaseDispatcherLock(IN KIRQL OldIrql)
{
    /* No lock to release on UP; just exit the dispatcher, handing the
       saved IRQL to KiExitDispatcher */
    KiExitDispatcher(OldIrql);
}
314
315 VOID
316 FORCEINLINE
317 KiAcquireDispatcherLockAtDpcLevel(VOID)
318 {
319 /* This is a no-op at DPC Level for UP systems */
320 return;
321 }
322
323 VOID
324 FORCEINLINE
325 KiReleaseDispatcherLockFromDpcLevel(VOID)
326 {
327 /* This is a no-op at DPC Level for UP systems */
328 return;
329 }
330
//
// This routine makes the thread deferred ready on the boot CPU.
//
// UP variant: there is no per-CPU deferred list, so the thread is tagged
// for processor 0 and handed straight to KiDeferredReadyThread.
//
FORCEINLINE
VOID
KiInsertDeferredReadyList(IN PKTHREAD Thread)
{
    /* Set the thread to deferred state and boot CPU */
    Thread->State = DeferredReady;
    Thread->DeferredProcessor = 0;

    /* Make the thread ready immediately */
    KiDeferredReadyThread(Thread);
}
345
//
// UP stub: there is never another CPU to notify about a new thread.
//
FORCEINLINE
VOID
KiRescheduleThread(IN BOOLEAN NewThread,
                   IN ULONG Cpu)
{
    /* This is meaningless on UP systems */
    UNREFERENCED_PARAMETER(NewThread);
    UNREFERENCED_PARAMETER(Cpu);
}
355
//
// This routine protects against multiple CPU acquires, it's meaningless on UP.
//
FORCEINLINE
VOID
KiSetThreadSwapBusy(IN PKTHREAD Thread)
{
    /* No concurrent swapper exists on UP; nothing to mark */
    UNREFERENCED_PARAMETER(Thread);
}
365
//
// This routine protects against multiple CPU acquires, it's meaningless on UP.
//
FORCEINLINE
VOID
KiAcquirePrcbLock(IN PKPRCB Prcb)
{
    /* Single CPU: the PRCB cannot be contended, no lock needed */
    UNREFERENCED_PARAMETER(Prcb);
}
375
//
// This routine protects against multiple CPU acquires, it's meaningless on UP.
//
FORCEINLINE
VOID
KiReleasePrcbLock(IN PKPRCB Prcb)
{
    /* Single CPU: nothing was acquired, nothing to release */
    UNREFERENCED_PARAMETER(Prcb);
}
385
//
// This routine protects against multiple CPU acquires, it's meaningless on UP.
//
FORCEINLINE
VOID
KiAcquireThreadLock(IN PKTHREAD Thread)
{
    /* Single CPU: the thread cannot be contended, no lock needed */
    UNREFERENCED_PARAMETER(Thread);
}
395
//
// This routine protects against multiple CPU acquires, it's meaningless on UP.
//
FORCEINLINE
VOID
KiReleaseThreadLock(IN PKTHREAD Thread)
{
    /* Single CPU: nothing was acquired, nothing to release */
    UNREFERENCED_PARAMETER(Thread);
}
405
//
// UP stub: deferred ready threads are made ready immediately on UP
// (see KiInsertDeferredReadyList above), so there is no list to scan.
//
FORCEINLINE
VOID
KiCheckDeferredReadyList(IN PKPRCB Prcb)
{
    /* There are no deferred ready lists on UP systems */
    UNREFERENCED_PARAMETER(Prcb);
}
413
//
// Runs down per-CPU state owned by a dying thread. If the thread owns
// the FPU (it is the PRCB's NpxThread), drop that ownership and reset
// the x87 state with FNINIT so stale FP context can't leak.
//
FORCEINLINE
VOID
KiRundownThread(IN PKTHREAD Thread)
{
    /* Check if this is the NPX Thread */
    if (KeGetCurrentPrcb()->NpxThread == Thread)
    {
        /* Clear it */
        KeGetCurrentPrcb()->NpxThread = NULL;
#ifdef __GNUC__
        __asm__("fninit\n\t");
#else
        __asm fninit;
#endif
    }
}
430
431 FORCEINLINE
432 VOID
433 KiRequestApcInterrupt(IN UCHAR Processor)
434 {
435 /* Request a software interrupt */
436 HalRequestSoftwareInterrupt(APC_LEVEL);
437 }
438
439 #else
440
KIRQL
FORCEINLINE
KiAcquireDispatcherLock(VOID)
{
    /* Raise to synchronization level and acquire the dispatcher lock;
       the previous IRQL is returned for the matching release */
    return KeAcquireQueuedSpinLockRaiseToSynch(LockQueueDispatcherLock);
}
448
VOID
FORCEINLINE
KiReleaseDispatcherLock(IN KIRQL OldIrql)
{
    /* First release the queued dispatcher lock (IRQL stays raised) */
    KeReleaseQueuedSpinLockFromDpcLevel(&KeGetCurrentPrcb()->
                                        LockQueue[LockQueueDispatcherLock]);

    /* Then exit the dispatcher, passing along the IRQL to restore */
    KiExitDispatcher(OldIrql);
}
460
//
// This routine inserts a thread into the deferred ready list of the
// current CPU. The singly-linked list is drained later by
// KiProcessDeferredReadyList (see KiCheckDeferredReadyList below).
//
FORCEINLINE
VOID
KiInsertDeferredReadyList(IN PKTHREAD Thread)
{
    PKPRCB Prcb = KeGetCurrentPrcb();

    /* Set the thread to deferred state and CPU */
    Thread->State = DeferredReady;
    Thread->DeferredProcessor = Prcb->Number;

    /* Add it on the list */
    PushEntryList(&Prcb->DeferredReadyListHead, &Thread->SwapListEntry);
}
477
//
// Notifies another CPU that a new thread was scheduled for it. When the
// target CPU is not the current one, a DPC IPI is sent so it reschedules.
//
FORCEINLINE
VOID
KiRescheduleThread(IN BOOLEAN NewThread,
                   IN ULONG Cpu)
{
    /* Check if a new thread needs to be scheduled on a different CPU */
    if ((NewThread) && !(KeGetPcr()->Number == Cpu))
    {
        /* Send an IPI to request delivery */
        KiIpiSendRequest(AFFINITY_MASK(Cpu), IPI_DPC);
    }
}
490
//
// This routine sets the current thread in a swap busy state, which ensures that
// nobody else tries to swap it concurrently.
//
FORCEINLINE
VOID
KiSetThreadSwapBusy(IN PKTHREAD Thread)
{
    /* Make sure nobody already set it */
    ASSERT(Thread->SwapBusy == FALSE);

    /* Set it ourselves */
    Thread->SwapBusy = TRUE;
}
505
//
// This routine acquires the PRCB lock so that only one caller can touch
// volatile PRCB data.
//
// Since this is a simple optimized spin-lock, it must only be acquired
// at dispatcher level or higher!
//
FORCEINLINE
VOID
KiAcquirePrcbLock(IN PKPRCB Prcb)
{
    /* Make sure we're at a safe level to touch the PRCB lock */
    ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);

    /* Start acquire loop */
    for (;;)
    {
        /* Test-and-set: break out if we were the ones to set it */
        if (!InterlockedExchange(&Prcb->PrcbLock, 1)) break;

        /* Spin with plain reads until the other CPU releases it,
           yielding to the core's SMT sibling while we wait */
        do
        {
            /* Let the CPU know that this is a loop */
            YieldProcessor();
        } while (Prcb->PrcbLock);
    }
}
534
//
// This routine releases the PRCB lock so that other callers can touch
// volatile PRCB data.
//
// Since this is a simple optimized spin-lock, it must only be released
// by a caller who acquired it at dispatcher level or higher!
//
FORCEINLINE
VOID
KiReleasePrcbLock(IN PKPRCB Prcb)
{
    /* Make sure it's acquired! */
    ASSERT(Prcb->PrcbLock != 0);

    /* Release it (interlocked, so the store is globally visible) */
    InterlockedAnd(&Prcb->PrcbLock, 0);
}
552
//
// This routine acquires the thread lock so that only one caller can touch
// volatile thread data.
//
// Since this is a simple optimized spin-lock, it must only be acquired
// at dispatcher level or higher!
//
FORCEINLINE
VOID
KiAcquireThreadLock(IN PKTHREAD Thread)
{
    /* Make sure we're at a safe level to touch the thread lock */
    ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);

    /* Start acquire loop */
    for (;;)
    {
        /* Test-and-set: break out if we were the ones to set it */
        if (!InterlockedExchange(&Thread->ThreadLock, 1)) break;

        /* Spin with plain reads until the other CPU releases it,
           yielding to the core's SMT sibling while we wait */
        do
        {
            /* Let the CPU know that this is a loop */
            YieldProcessor();
        } while (Thread->ThreadLock);
    }
}
581
//
// This routine releases the thread lock so that other callers can touch
// volatile thread data.
//
// Since this is a simple optimized spin-lock, it must only be released
// by a caller who acquired it at dispatcher level or higher!
//
FORCEINLINE
VOID
KiReleaseThreadLock(IN PKTHREAD Thread)
{
    /* Release it (interlocked, so the store is globally visible) */
    InterlockedAnd(&Thread->ThreadLock, 0);
}
596
//
// Drains this CPU's deferred ready list (filled by
// KiInsertDeferredReadyList) if it is non-empty.
//
FORCEINLINE
VOID
KiCheckDeferredReadyList(IN PKPRCB Prcb)
{
    /* Scan the deferred ready lists if required */
    if (Prcb->DeferredReadyListHead.Next) KiProcessDeferredReadyList(Prcb);
}
604
605 FORCEINLINE
606 VOID
607 KiRequestApcInterrupt(IN UCHAR Processor)
608 {
609 /* Check if we're on the same CPU */
610 if (KeGetCurrentPrcb()->Number == Processor)
611 {
612 /* Request a software interrupt */
613 HalRequestSoftwareInterrupt(APC_LEVEL);
614 }
615 else
616 {
617 KiIpiSendRequest(KeGetCurrentPrcb()->SetMember, IPI_APC);
618 }
619 }
620
621 #endif
622
//
// Acquires the thread's APC queue lock, raising to synchronization level.
// Release with KiReleaseApcLock using the same lock-queue handle.
//
FORCEINLINE
VOID
KiAcquireApcLock(IN PKTHREAD Thread,
                 IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Acquire the lock and raise to synchronization level */
    KeAcquireInStackQueuedSpinLockRaiseToSynch(&Thread->ApcQueueLock, Handle);
}
631
//
// Acquires the thread's APC queue lock; caller is already at DPC level
// or higher. Release with KiReleaseApcLockFromDpcLevel.
//
FORCEINLINE
VOID
KiAcquireApcLockAtDpcLevel(IN PKTHREAD Thread,
                           IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Acquire the lock */
    KeAcquireInStackQueuedSpinLockAtDpcLevel(&Thread->ApcQueueLock, Handle);
}
640
//
// Releases an APC queue lock taken by KiAcquireApcLock, restoring IRQL.
//
FORCEINLINE
VOID
KiReleaseApcLock(IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Release the lock */
    KeReleaseInStackQueuedSpinLock(Handle);
}
648
//
// Releases an APC queue lock taken by KiAcquireApcLockAtDpcLevel,
// leaving IRQL unchanged.
//
FORCEINLINE
VOID
KiReleaseApcLockFromDpcLevel(IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Release the lock */
    KeReleaseInStackQueuedSpinLockFromDpcLevel(Handle);
}
656
//
// Acquires the process lock, raising to synchronization level. Release
// with KiReleaseProcessLock using the same lock-queue handle.
//
FORCEINLINE
VOID
KiAcquireProcessLock(IN PKPROCESS Process,
                     IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Acquire the lock and raise to synchronization level */
    KeAcquireInStackQueuedSpinLockRaiseToSynch(&Process->ProcessLock, Handle);
}
665
//
// Releases a process lock taken by KiAcquireProcessLock, restoring IRQL.
//
FORCEINLINE
VOID
KiReleaseProcessLock(IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Release the lock */
    KeReleaseInStackQueuedSpinLock(Handle);
}
673
//
// This routine queues a thread that is ready on the PRCB's ready lists.
// If this thread cannot currently run on this CPU, then the thread is
// added to the deferred ready list instead.
//
// This routine must be entered with the PRCB lock held and it will exit
// with the PRCB lock released!
//
FORCEINLINE
VOID
KxQueueReadyThread(IN PKTHREAD Thread,
                   IN PKPRCB Prcb)
{
    BOOLEAN Preempted;
    KPRIORITY Priority;

    /* Sanity checks */
    ASSERT(Prcb == KeGetCurrentPrcb());
    ASSERT(Thread->State == Running);
    ASSERT(Thread->NextProcessor == Prcb->Number);

    /* Check if this thread is allowed to run in this CPU */
#ifdef _CONFIG_SMP
    if ((Thread->Affinity) & (Prcb->SetMember))
#else
    if (TRUE)
#endif
    {
        /* Set thread ready for execution */
        Thread->State = Ready;

        /* Save current priority and if someone had pre-empted it */
        Priority = Thread->Priority;
        Preempted = Thread->Preempted;

        /* We're not pre-empting now, and set the wait time */
        Thread->Preempted = FALSE;
        Thread->WaitTime = KeTickCount.LowPart;

        /* Sanity check */
        ASSERT((Priority >= 0) && (Priority <= HIGH_PRIORITY));

        /* Insert this thread in the appropriate order: preempted threads
           go to the front of their priority list so they resume first */
        Preempted ? InsertHeadList(&Prcb->DispatcherReadyListHead[Priority],
                                   &Thread->WaitListEntry) :
                    InsertTailList(&Prcb->DispatcherReadyListHead[Priority],
                                   &Thread->WaitListEntry);

        /* Update the ready summary (one bit per non-empty priority list) */
        Prcb->ReadySummary |= PRIORITY_MASK(Priority);

        /* Sanity check */
        ASSERT(Priority == Thread->Priority);

        /* Release the PRCB lock */
        KiReleasePrcbLock(Prcb);
    }
    else
    {
        /* Otherwise, prepare this thread to be deferred */
        Thread->State = DeferredReady;
        Thread->DeferredProcessor = Prcb->Number;

        /* Release the lock and defer scheduling */
        KiReleasePrcbLock(Prcb);
        KiDeferredReadyThread(Thread);
    }
}
742
743 //
744 // This routine scans for an appropriate ready thread to select at the
745 // given priority and for the given CPU.
746 //
747 FORCEINLINE
748 PKTHREAD
749 KiSelectReadyThread(IN KPRIORITY Priority,
750 IN PKPRCB Prcb)
751 {
752 LONG PriorityMask, PrioritySet, HighPriority;
753 PLIST_ENTRY ListEntry;
754 PKTHREAD Thread;
755
756 /* Save the current mask and get the priority set for the CPU */
757 PriorityMask = Priority;
758 PrioritySet = Prcb->ReadySummary >> (UCHAR)Priority;
759 if (!PrioritySet) return NULL;
760
761 /* Get the highest priority possible */
762 BitScanReverse((PULONG)&HighPriority, PrioritySet);
763 ASSERT((PrioritySet & PRIORITY_MASK(HighPriority)) != 0);
764 HighPriority += PriorityMask;
765
766 /* Make sure the list isn't at highest priority */
767 ASSERT(IsListEmpty(&Prcb->DispatcherReadyListHead[HighPriority]) == FALSE);
768
769 /* Get the first thread on the list */
770 ListEntry = &Prcb->DispatcherReadyListHead[HighPriority];
771 Thread = CONTAINING_RECORD(ListEntry, KTHREAD, WaitListEntry);
772
773 /* Make sure this thread is here for a reason */
774 ASSERT(HighPriority == Thread->Priority);
775 ASSERT(Thread->Affinity & AFFINITY_MASK(Prcb->Number));
776 ASSERT(Thread->NextProcessor == Prcb->Number);
777
778 /* Remove it from the list */
779 RemoveEntryList(&Thread->WaitListEntry);
780 if (IsListEmpty(&Thread->WaitListEntry))
781 {
782 /* The list is empty now, reset the ready summary */
783 Prcb->ReadySummary ^= PRIORITY_MASK(HighPriority);
784 }
785
786 /* Sanity check and return the thread */
787 ASSERT((Thread == NULL) ||
788 (Thread->BasePriority == 0) ||
789 (Thread->Priority != 0));
790 return Thread;
791 }
792
793 //
794 // This routine computes the new priority for a thread. It is only valid for
795 // threads with priorities in the dynamic priority range.
796 //
797 SCHAR
798 FORCEINLINE
799 KiComputeNewPriority(IN PKTHREAD Thread)
800 {
801 SCHAR Priority;
802
803 /* Priority sanity checks */
804 ASSERT((Thread->PriorityDecrement >= 0) &&
805 (Thread->PriorityDecrement <= Thread->Priority));
806 ASSERT((Thread->Priority < LOW_REALTIME_PRIORITY) ?
807 TRUE : (Thread->PriorityDecrement == 0));
808
809 /* Get the current priority */
810 Priority = Thread->Priority;
811 if (Priority < LOW_REALTIME_PRIORITY)
812 {
813 /* Set the New Priority and add the Priority Decrement */
814 Priority += (Priority - Thread->PriorityDecrement - 1);
815
816 /* Don't go out of bounds */
817 if (Priority < Thread->BasePriority) Priority = Thread->BasePriority;
818
819 /* Reset the priority decrement */
820 Thread->PriorityDecrement = 0;
821 }
822
823 /* Sanity check */
824 ASSERT((Thread->BasePriority == 0) || (Priority != 0));
825
826 /* Return the new priority */
827 return Priority;
828 }
829