- Fix KeEnterCriticalRegion/KeLeaveCriticalRegion by moving to ke_x and adding ASSERT...
[reactos.git] / reactos / ntoskrnl / include / internal / ke_x.h
1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: GPL - See COPYING in the top level directory
 * FILE:            ntoskrnl/include/internal/ke_x.h
5 * PURPOSE: Internal Inlined Functions for the Kernel
6 * PROGRAMMERS: Alex Ionescu (alex.ionescu@reactos.org)
7 */
8
//
// Enters a Guarded Region
//
// Disables APC delivery to the current thread by decrementing its
// SpecialApcDisable count; balanced by KeLeaveGuardedRegion. The count is
// a negative "disable depth", so nesting is supported.
//
#define KeEnterGuardedRegion()                                              \
{                                                                           \
    PKTHREAD Thread = KeGetCurrentThread();                                 \
                                                                            \
    /* Sanity checks: IRQL <= APC_LEVEL, thread still current, and the   */ \
    /* (16-bit) disable count is negative-or-zero and not saturated      */ \
    ASSERT_IRQL_LESS_OR_EQUAL(APC_LEVEL);                                   \
    ASSERT(Thread == KeGetCurrentThread());                                 \
    ASSERT((Thread->SpecialApcDisable <= 0) &&                              \
           (Thread->SpecialApcDisable != -32768));                          \
                                                                            \
    /* Disable Special APCs */                                              \
    Thread->SpecialApcDisable--;                                            \
}
25
//
// Leaves a Guarded Region
//
// Increments SpecialApcDisable; when the count returns to zero, any kernel
// APCs that queued up while the region was held are delivered.
//
#define KeLeaveGuardedRegion()                                              \
{                                                                           \
    PKTHREAD Thread = KeGetCurrentThread();                                 \
                                                                            \
    /* Sanity checks: must actually be inside a guarded region */           \
    ASSERT_IRQL_LESS_OR_EQUAL(APC_LEVEL);                                   \
    ASSERT(Thread == KeGetCurrentThread());                                 \
    ASSERT(Thread->SpecialApcDisable < 0);                                  \
                                                                            \
    /* Leave region and check if APCs are OK now */                         \
    if (!(++Thread->SpecialApcDisable))                                     \
    {                                                                       \
        /* Check for Kernel APCs on the list */                             \
        if (!IsListEmpty(&Thread->ApcState.                                 \
                         ApcListHead[KernelMode]))                          \
        {                                                                   \
            /* Check for APC Delivery */                                    \
            KiCheckForKernelApcDelivery();                                  \
        }                                                                   \
    }                                                                       \
}
50
51 //
52 // TODO: Guarded Mutex Routines
53 //
54
//
// Enters a Critical Region
//
// Disables normal kernel APC delivery by decrementing the thread's
// KernelApcDisable count; balanced by KeLeaveCriticalRegion.
//
#define KeEnterCriticalRegion()                                             \
{                                                                           \
    PKTHREAD Thread = KeGetCurrentThread();                                 \
    /* NOTE(review): NULL guard presumably covers early boot, before a   */ \
    /* current thread exists -- confirm against callers                  */ \
    if (Thread)                                                             \
    {                                                                       \
        /* Sanity checks: count is a negative depth and not saturated */    \
        ASSERT(Thread == KeGetCurrentThread());                             \
        ASSERT((Thread->KernelApcDisable <= 0) &&                           \
               (Thread->KernelApcDisable != -32768));                       \
                                                                            \
        /* Disable Kernel APCs */                                           \
        Thread->KernelApcDisable--;                                         \
    }                                                                       \
}
72
//
// Leaves a Critical Region
//
// Increments KernelApcDisable; when the count returns to zero, pending
// kernel APCs (if any) are delivered. Mirrors KeLeaveGuardedRegion.
//
#define KeLeaveCriticalRegion()                                             \
{                                                                           \
    PKTHREAD Thread = KeGetCurrentThread();                                 \
    if (Thread)                                                             \
    {                                                                       \
        /* Sanity checks: must actually be inside a critical region */      \
        ASSERT(Thread == KeGetCurrentThread());                             \
        ASSERT(Thread->KernelApcDisable < 0);                               \
                                                                            \
        /* Enable Kernel APCs and check if they are now deliverable */      \
        if (!(++Thread->KernelApcDisable))                                  \
        {                                                                   \
            /* Deliver any kernel APCs queued while the region was held */  \
            if (!IsListEmpty(&Thread->ApcState.                             \
                             ApcListHead[KernelMode]))                      \
            {                                                               \
                /* Check for the right environment */                       \
                KiCheckForKernelApcDelivery();                              \
            }                                                               \
        }                                                                   \
    }                                                                       \
}
101
//
// Satisfies the wait of any dispatcher object
//
// Adjusts the object's signal state after a successful wait:
//  - Mutants: decrement; on the 1->0 transition, record Thread as owner,
//    apply the mutant's APC-disable delta, report abandonment via
//    Thread->WaitStatus, and link the mutant into the thread's owned list.
//  - Synchronization events/timers: auto-clear to non-signaled.
//  - Semaphores: decrement the count.
// Anything else (e.g. notification events) keeps its signal state as-is,
// since no branch below matches it.
//
#define KiSatisfyObjectWait(Object, Thread)                                 \
{                                                                           \
    /* Special case for Mutants */                                          \
    if ((Object)->Header.Type == MutantObject)                              \
    {                                                                       \
        /* Decrease the Signal State */                                     \
        (Object)->Header.SignalState--;                                     \
                                                                            \
        /* Check if it's now non-signaled */                                \
        if (!(Object)->Header.SignalState)                                  \
        {                                                                   \
            /* Set the Owner Thread */                                      \
            (Object)->OwnerThread = Thread;                                 \
                                                                            \
            /* Disable APCs if needed */                                    \
            /* NOTE(review): Thread is used unparenthesized here -- the  */ \
            /* macro argument must be a simple lvalue                    */ \
            Thread->KernelApcDisable -= (Object)->ApcDisable;               \
                                                                            \
            /* Check if it's abandoned */                                   \
            if ((Object)->Abandoned)                                        \
            {                                                               \
                /* Unabandon it */                                          \
                (Object)->Abandoned = FALSE;                                \
                                                                            \
                /* Return Status */                                         \
                Thread->WaitStatus = STATUS_ABANDONED;                      \
            }                                                               \
                                                                            \
            /* Insert at the tail of the thread's mutant list */            \
            InsertHeadList(Thread->MutantListHead.Blink,                    \
                           &(Object)->MutantListEntry);                     \
        }                                                                   \
    }                                                                       \
    else if (((Object)->Header.Type & TIMER_OR_EVENT_TYPE) ==               \
             EventSynchronizationObject)                                    \
    {                                                                       \
        /* Synchronization Timers and Events just get un-signaled */        \
        (Object)->Header.SignalState = 0;                                   \
    }                                                                       \
    else if ((Object)->Header.Type == SemaphoreObject)                      \
    {                                                                       \
        /* These ones can have multiple states, so we only decrease it */   \
        (Object)->Header.SignalState--;                                     \
    }                                                                       \
}
149
//
// Satisfies the wait of a mutant dispatcher object
//
// Specialized fast path of KiSatisfyObjectWait for a caller that already
// knows the object is a mutant; the ownership/abandonment handling below
// is identical to the mutant branch there.
//
#define KiSatisfyMutantWait(Object, Thread)                                 \
{                                                                           \
    /* Decrease the Signal State */                                         \
    (Object)->Header.SignalState--;                                         \
                                                                            \
    /* Check if it's now non-signaled */                                    \
    if (!(Object)->Header.SignalState)                                      \
    {                                                                       \
        /* Set the Owner Thread */                                          \
        (Object)->OwnerThread = Thread;                                     \
                                                                            \
        /* Disable APCs if needed */                                        \
        Thread->KernelApcDisable -= (Object)->ApcDisable;                   \
                                                                            \
        /* Check if it's abandoned */                                       \
        if ((Object)->Abandoned)                                            \
        {                                                                   \
            /* Unabandon it */                                              \
            (Object)->Abandoned = FALSE;                                    \
                                                                            \
            /* Return Status */                                             \
            Thread->WaitStatus = STATUS_ABANDONED;                          \
        }                                                                   \
                                                                            \
        /* Insert at the tail of the thread's mutant list */                \
        InsertHeadList(Thread->MutantListHead.Blink,                        \
                       &(Object)->MutantListEntry);                         \
    }                                                                       \
}
182
//
// Satisfies the wait of any nonmutant dispatcher object
//
// Synchronization events/timers auto-clear; semaphores decrement their
// count; all other object types keep their signal state unchanged.
//
#define KiSatisfyNonMutantWait(Object, Thread)                              \
{                                                                           \
    if (((Object)->Header.Type & TIMER_OR_EVENT_TYPE) ==                    \
        EventSynchronizationObject)                                         \
    {                                                                       \
        /* Synchronization Timers and Events just get un-signaled */        \
        (Object)->Header.SignalState = 0;                                   \
    }                                                                       \
    else if ((Object)->Header.Type == SemaphoreObject)                      \
    {                                                                       \
        /* These ones can have multiple states, so we only decrease it */   \
        (Object)->Header.SignalState--;                                     \
    }                                                                       \
}
200
201 //
202 // Recalculates the due time
203 //
204 PLARGE_INTEGER
205 FORCEINLINE
206 KiRecalculateDueTime(IN PLARGE_INTEGER OriginalDueTime,
207 IN PLARGE_INTEGER DueTime,
208 IN OUT PLARGE_INTEGER NewDueTime)
209 {
210 /* Don't do anything for absolute waits */
211 if (OriginalDueTime->QuadPart >= 0) return OriginalDueTime;
212
213 /* Otherwise, query the interrupt time and recalculate */
214 NewDueTime->QuadPart = KeQueryInterruptTime();
215 NewDueTime->QuadPart -= DueTime->QuadPart;
216 return NewDueTime;
217 }
218
//
// Determines whether a thread's stack may be swapped out during a wait
//
// Swapping is only allowed for user-mode waits on threads that have stack
// swap enabled AND run below the latency-critical realtime band: threads
// at or above LOW_REALTIME_PRIORITY + 9 must keep their stacks resident
// so wakeup latency stays bounded.
//
#define KiCheckThreadStackSwap(WaitMode, Thread, Swappable)                 \
{                                                                           \
    /* Check the required conditions */                                     \
    if ((WaitMode != KernelMode) &&                                         \
        (Thread->EnableStackSwap) &&                                        \
        (Thread->Priority < (LOW_REALTIME_PRIORITY + 9)))                   \
    {                                                                       \
        /* We are go for swap */                                            \
        Swappable = TRUE;                                                   \
    }                                                                       \
    else                                                                    \
    {                                                                       \
        /* Don't swap the thread */                                         \
        Swappable = FALSE;                                                  \
    }                                                                       \
}
238
//
// Adds a thread to the wait list
//
// Only threads marked swappable (see KiCheckThreadStackSwap) go onto the
// current PRCB's wait list; non-swappable threads are simply skipped.
//
#define KiAddThreadToWaitList(Thread, Swappable)                            \
{                                                                           \
    /* Make sure it's swappable */                                          \
    if (Swappable)                                                          \
    {                                                                       \
        /* Insert it into the PRCB's List */                                \
        InsertTailList(&KeGetCurrentPrcb()->WaitListHead,                   \
                       &Thread->WaitListEntry);                             \
    }                                                                       \
}
252
//
// Rules for checking alertability:
// - For Alertable waits ONLY:
//   * We don't wait and return STATUS_ALERTED if the thread is alerted
//     in EITHER the specified wait mode OR in Kernel Mode.
// - For BOTH Alertable AND Non-Alertable waits:
//   * We don't wait and return STATUS_USER_APC if the User Mode APC list
//     is not empty AND the wait mode is User Mode.
//
// NOTE: this macro must be expanded inside a wait loop -- it uses `break`
// and relies on the enclosing function's locals: CurrentThread, WaitMode,
// Alertable, and WaitStatus.
//
#define KiCheckAlertability()                                               \
{                                                                           \
    if (Alertable)                                                          \
    {                                                                       \
        /* Alerted in the requested mode? Consume the alert and bail */     \
        if (CurrentThread->Alerted[(int)WaitMode])                          \
        {                                                                   \
            CurrentThread->Alerted[(int)WaitMode] = FALSE;                  \
            WaitStatus = STATUS_ALERTED;                                    \
            break;                                                          \
        }                                                                   \
        /* User-mode wait with queued user APCs? Mark one pending */        \
        else if ((WaitMode != KernelMode) &&                                \
                 (!IsListEmpty(&CurrentThread->                             \
                               ApcState.ApcListHead[UserMode])))            \
        {                                                                   \
            CurrentThread->ApcState.UserApcPending = TRUE;                  \
            WaitStatus = STATUS_USER_APC;                                   \
            break;                                                          \
        }                                                                   \
        /* Kernel-mode alert also aborts an alertable wait */               \
        else if (CurrentThread->Alerted[KernelMode])                        \
        {                                                                   \
            CurrentThread->Alerted[KernelMode] = FALSE;                     \
            WaitStatus = STATUS_ALERTED;                                    \
            break;                                                          \
        }                                                                   \
    }                                                                       \
    /* Even non-alertable user waits are broken by a pending user APC */    \
    else if ((WaitMode != KernelMode) &&                                    \
             (CurrentThread->ApcState.UserApcPending))                      \
    {                                                                       \
        WaitStatus = STATUS_USER_APC;                                       \
        break;                                                              \
    }                                                                       \
}
294
295 //
296 // Thread Scheduling Routines
297 //
298 #ifndef _CONFIG_SMP
KIRQL
FORCEINLINE
KiAcquireDispatcherLock(VOID)
{
    /* On UP there is no lock to take: raising to DPC level is enough to
       keep other dispatcher users off the CPU. Returns the previous IRQL
       for the matching KiReleaseDispatcherLock. */
    return KeRaiseIrqlToDpcLevel();
}
306
VOID
FORCEINLINE
KiReleaseDispatcherLock(IN KIRQL OldIrql)
{
    /* No lock to drop on UP; exiting the dispatcher handles any pending
       rescheduling and lowers back to OldIrql */
    KiExitDispatcher(OldIrql);
}
314
315 VOID
316 FORCEINLINE
317 KiAcquireDispatcherLockAtDpcLevel(VOID)
318 {
319 /* This is a no-op at DPC Level for UP systems */
320 return;
321 }
322
323 VOID
324 FORCEINLINE
325 KiReleaseDispatcherLockFromDpcLevel(VOID)
326 {
327 /* This is a no-op at DPC Level for UP systems */
328 return;
329 }
330
//
// This routine makes the thread deferred ready on the boot CPU.
//
// On UP there is no per-CPU deferred list, so the thread is handed to
// KiDeferredReadyThread immediately instead of being queued.
//
FORCEINLINE
VOID
KiInsertDeferredReadyList(IN PKTHREAD Thread)
{
    /* Set the thread to deferred state and boot CPU (always CPU 0) */
    Thread->State = DeferredReady;
    Thread->DeferredProcessor = 0;

    /* Make the thread ready immediately */
    KiDeferredReadyThread(Thread);
}
345
346 FORCEINLINE
347 VOID
348 KiRescheduleThread(IN BOOLEAN NewThread,
349 IN ULONG Cpu)
350 {
351 /* This is meaningless on UP systems */
352 UNREFERENCED_PARAMETER(NewThread);
353 UNREFERENCED_PARAMETER(Cpu);
354 }
355
356 //
357 // This routine protects against multiple CPU acquires, it's meaningless on UP.
358 //
359 FORCEINLINE
360 VOID
361 KiSetThreadSwapBusy(IN PKTHREAD Thread)
362 {
363 UNREFERENCED_PARAMETER(Thread);
364 }
365
366 //
367 // This routine protects against multiple CPU acquires, it's meaningless on UP.
368 //
369 FORCEINLINE
370 VOID
371 KiAcquirePrcbLock(IN PKPRCB Prcb)
372 {
373 UNREFERENCED_PARAMETER(Prcb);
374 }
375
376 //
377 // This routine protects against multiple CPU acquires, it's meaningless on UP.
378 //
379 FORCEINLINE
380 VOID
381 KiReleasePrcbLock(IN PKPRCB Prcb)
382 {
383 UNREFERENCED_PARAMETER(Prcb);
384 }
385
386 //
387 // This routine protects against multiple CPU acquires, it's meaningless on UP.
388 //
389 FORCEINLINE
390 VOID
391 KiAcquireThreadLock(IN PKTHREAD Thread)
392 {
393 UNREFERENCED_PARAMETER(Thread);
394 }
395
396 //
397 // This routine protects against multiple CPU acquires, it's meaningless on UP.
398 //
399 FORCEINLINE
400 VOID
401 KiReleaseThreadLock(IN PKTHREAD Thread)
402 {
403 UNREFERENCED_PARAMETER(Thread);
404 }
405
406 FORCEINLINE
407 VOID
408 KiCheckDeferredReadyList(IN PKPRCB Prcb)
409 {
410 /* There are no deferred ready lists on UP systems */
411 UNREFERENCED_PARAMETER(Prcb);
412 }
413
FORCEINLINE
VOID
KiRundownThread(IN PKTHREAD Thread)
{
    /* Tears down per-CPU references to a dying thread: if this thread owns
       the FPU (NPX) state on the current processor, drop the reference and
       reinitialize the FPU so stale state can't leak to the next owner */
    if (KeGetCurrentPrcb()->NpxThread == Thread)
    {
        /* Clear it */
        KeGetCurrentPrcb()->NpxThread = NULL;
#ifdef __GNUC__
        __asm__("fninit\n\t");
#else
        __asm fninit;
#endif
    }
}
430
431 #else
432
KIRQL
FORCEINLINE
KiAcquireDispatcherLock(VOID)
{
    /* SMP: take the global queued dispatcher spin-lock, raising to
       SYNCH_LEVEL; returns the previous IRQL for the matching release */
    return KeAcquireQueuedSpinLockRaiseToSynch(LockQueueDispatcherLock);
}
440
VOID
FORCEINLINE
KiReleaseDispatcherLock(IN KIRQL OldIrql)
{
    /* First release the queued lock while still at elevated IRQL */
    KeReleaseQueuedSpinLockFromDpcLevel(&KeGetCurrentPrcb()->
                                        LockQueue[LockQueueDispatcherLock]);

    /* Then exit the dispatcher, which lowers IRQL back to OldIrql */
    KiExitDispatcher(OldIrql);
}
452
//
// This routine inserts a thread into the deferred ready list of the given CPU
//
// SMP: the thread is pushed onto the *current* CPU's singly-linked deferred
// list and processed later by KiCheckDeferredReadyList.
//
FORCEINLINE
VOID
KiInsertDeferredReadyList(IN PKTHREAD Thread)
{
    PKPRCB Prcb = KeGetCurrentPrcb();

    /* Set the thread to deferred state and CPU */
    Thread->State = DeferredReady;
    Thread->DeferredProcessor = Prcb->Number;

    /* Add it on the list */
    PushEntryList(&Prcb->DeferredReadyListHead, &Thread->SwapListEntry);
}
469
470 FORCEINLINE
471 VOID
472 KiRescheduleThread(IN BOOLEAN NewThread,
473 IN ULONG Cpu)
474 {
475 /* Check if a new thread needs to be scheduled on a different CPU */
476 if ((NewThread) && !(KeGetPcr()->Number == Cpu))
477 {
478 /* Send an IPI to request delivery */
479 KiIpiSendRequest(AFFINITY_MASK(Cpu), IPI_DPC);
480 }
481 }
482
//
// This routine sets the current thread in a swap busy state, which ensures
// that nobody else tries to swap it concurrently.
//
FORCEINLINE
VOID
KiSetThreadSwapBusy(IN PKTHREAD Thread)
{
    /* Make sure nobody already set it */
    ASSERT(Thread->SwapBusy == FALSE);

    /* Set it ourselves */
    /* NOTE(review): plain store, not interlocked -- presumably callers   */
    /* hold the relevant lock; confirm the synchronization contract       */
    Thread->SwapBusy = TRUE;
}
497
//
// This routine acquires the PRCB lock so that only one caller can touch
// volatile PRCB data.
//
// Since this is a simple optimized spin-lock, it must only be acquired
// at dispatcher level or higher!
//
FORCEINLINE
VOID
KiAcquirePrcbLock(IN PKPRCB Prcb)
{
    /* Make sure we're at a safe level to touch the PRCB lock */
    ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);

    /* Start acquire loop */
    for (;;)
    {
        /* Test-and-set; break out if we got the lock */
        if (!InterlockedExchange(&Prcb->PrcbLock, 1)) break;

        /* Spin on plain reads (cheaper than interlocked ops) until free */
        do
        {
            /* Let the CPU know that this is a loop */
            YieldProcessor();
        } while (Prcb->PrcbLock);
    }
}
526
//
// This routine releases the PRCB lock so that other callers can touch
// volatile PRCB data.
//
// Since this is a simple optimized spin-lock, it must only be released
// at dispatcher level or higher!
//
FORCEINLINE
VOID
KiReleasePrcbLock(IN PKPRCB Prcb)
{
    /* Make sure it's acquired! */
    ASSERT(Prcb->PrcbLock != 0);

    /* Release with an interlocked (serializing) store of zero */
    InterlockedAnd(&Prcb->PrcbLock, 0);
}
544
//
// This routine acquires the thread lock so that only one caller can touch
// volatile thread data.
//
// Since this is a simple optimized spin-lock, it must only be acquired
// at dispatcher level or higher!
//
FORCEINLINE
VOID
KiAcquireThreadLock(IN PKTHREAD Thread)
{
    /* Make sure we're at a safe level to touch the thread lock */
    ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);

    /* Start acquire loop */
    for (;;)
    {
        /* Test-and-set; break out if we got the lock */
        if (!InterlockedExchange(&Thread->ThreadLock, 1)) break;

        /* Spin on plain reads (cheaper than interlocked ops) until free */
        do
        {
            /* Let the CPU know that this is a loop */
            YieldProcessor();
        } while (Thread->ThreadLock);
    }
}
573
574 //
575 // This routine releases the thread lock so that other callers can touch
576 // volatile thread data.
577 //
578 // Since this is a simple optimized spin-lock, it must be be only acquired
579 // at dispatcher level or higher!
580 //
581 FORCEINLINE
582 VOID
583 KiReleaseThreadLock(IN PKTHREAD Thread)
584 {
585 /* Release it */
586 InterlockedAnd(&Thread->ThreadLock, 0);
587 }
588
589 FORCEINLINE
590 VOID
591 KiCheckDeferredReadyList(IN PKPRCB Prcb)
592 {
593 /* Scan the deferred ready lists if required */
594 if (Prcb->DeferredReadyListHead.Next) KiProcessDeferredReadyList(Prcb);
595 }
596
597 #endif
598
FORCEINLINE
VOID
KiAcquireApcLock(IN PKTHREAD Thread,
                 IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Take the thread's APC queue lock, raising to SYNCH_LEVEL; Handle
       records the queue state for the matching KiReleaseApcLock */
    KeAcquireInStackQueuedSpinLockRaiseToSynch(&Thread->ApcQueueLock, Handle);
}
607
FORCEINLINE
VOID
KiAcquireApcLockAtDpcLevel(IN PKTHREAD Thread,
                           IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Take the thread's APC queue lock; caller is already at or above
       DISPATCH_LEVEL, so no IRQL raise is performed */
    KeAcquireInStackQueuedSpinLockAtDpcLevel(&Thread->ApcQueueLock, Handle);
}
616
FORCEINLINE
VOID
KiReleaseApcLock(IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Release the APC queue lock and restore the IRQL saved in Handle */
    KeReleaseInStackQueuedSpinLock(Handle);
}
624
FORCEINLINE
VOID
KiReleaseApcLockFromDpcLevel(IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Release the APC queue lock without lowering IRQL */
    KeReleaseInStackQueuedSpinLockFromDpcLevel(Handle);
}
632
FORCEINLINE
VOID
KiAcquireProcessLock(IN PKPROCESS Process,
                     IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Take the process lock, raising to SYNCH_LEVEL; Handle records the
       queue state for the matching KiReleaseProcessLock */
    KeAcquireInStackQueuedSpinLockRaiseToSynch(&Process->ProcessLock, Handle);
}
641
FORCEINLINE
VOID
KiReleaseProcessLock(IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Release the process lock and restore the IRQL saved in Handle */
    KeReleaseInStackQueuedSpinLock(Handle);
}
649
//
// This routine queues a thread that is ready on the PRCB's ready lists.
// If this thread cannot currently run on this CPU, then the thread is
// added to the deferred ready list instead.
//
// This routine must be entered with the PRCB lock held and it will exit
// with the PRCB lock released!
//
FORCEINLINE
VOID
KxQueueReadyThread(IN PKTHREAD Thread,
                   IN PKPRCB Prcb)
{
    BOOLEAN Preempted;
    KPRIORITY Priority;

    /* Sanity checks */
    ASSERT(Prcb == KeGetCurrentPrcb());
    ASSERT(Thread->State == Running);
    ASSERT(Thread->NextProcessor == Prcb->Number);

    /* Check if this thread is allowed to run in this CPU */
#ifdef _CONFIG_SMP
    if ((Thread->Affinity) & (Prcb->SetMember))
#else
    if (TRUE)
#endif
    {
        /* Set thread ready for execution */
        Thread->State = Ready;

        /* Save current priority and if someone had pre-empted it */
        Priority = Thread->Priority;
        Preempted = Thread->Preempted;

        /* We're not pre-empting now, and set the wait time */
        Thread->Preempted = FALSE;
        Thread->WaitTime = KeTickCount.LowPart;

        /* Sanity check */
        ASSERT((Priority >= 0) && (Priority <= HIGH_PRIORITY));

        /* Preempted threads go to the front of their priority queue so
           they resume first; others queue at the back (round-robin) */
        Preempted ? InsertHeadList(&Prcb->DispatcherReadyListHead[Priority],
                                   &Thread->WaitListEntry) :
                    InsertTailList(&Prcb->DispatcherReadyListHead[Priority],
                                   &Thread->WaitListEntry);

        /* Mark this priority level as having a ready thread */
        Prcb->ReadySummary |= PRIORITY_MASK(Priority);

        /* Sanity check */
        ASSERT(Priority == Thread->Priority);

        /* Release the PRCB lock */
        KiReleasePrcbLock(Prcb);
    }
    else
    {
        /* Otherwise, prepare this thread to be deferred */
        Thread->State = DeferredReady;
        Thread->DeferredProcessor = Prcb->Number;

        /* Release the lock and defer scheduling */
        KiReleasePrcbLock(Prcb);
        KiDeferredReadyThread(Thread);
    }
}
718
719 //
720 // This routine scans for an appropriate ready thread to select at the
721 // given priority and for the given CPU.
722 //
723 FORCEINLINE
724 PKTHREAD
725 KiSelectReadyThread(IN KPRIORITY Priority,
726 IN PKPRCB Prcb)
727 {
728 LONG PriorityMask, PrioritySet, HighPriority;
729 PLIST_ENTRY ListEntry;
730 PKTHREAD Thread;
731
732 /* Save the current mask and get the priority set for the CPU */
733 PriorityMask = Priority;
734 PrioritySet = Prcb->ReadySummary >> (UCHAR)Priority;
735 if (!PrioritySet) return NULL;
736
737 /* Get the highest priority possible */
738 BitScanReverse((PULONG)&HighPriority, PrioritySet);
739 ASSERT((PrioritySet & PRIORITY_MASK(HighPriority)) != 0);
740 HighPriority += PriorityMask;
741
742 /* Make sure the list isn't at highest priority */
743 ASSERT(IsListEmpty(&Prcb->DispatcherReadyListHead[HighPriority]) == FALSE);
744
745 /* Get the first thread on the list */
746 ListEntry = &Prcb->DispatcherReadyListHead[HighPriority];
747 Thread = CONTAINING_RECORD(ListEntry, KTHREAD, WaitListEntry);
748
749 /* Make sure this thread is here for a reason */
750 ASSERT(HighPriority == Thread->Priority);
751 ASSERT(Thread->Affinity & AFFINITY_MASK(Prcb->Number));
752 ASSERT(Thread->NextProcessor == Prcb->Number);
753
754 /* Remove it from the list */
755 RemoveEntryList(&Thread->WaitListEntry);
756 if (IsListEmpty(&Thread->WaitListEntry))
757 {
758 /* The list is empty now, reset the ready summary */
759 Prcb->ReadySummary ^= PRIORITY_MASK(HighPriority);
760 }
761
762 /* Sanity check and return the thread */
763 ASSERT((Thread == NULL) ||
764 (Thread->BasePriority == 0) ||
765 (Thread->Priority != 0));
766 return Thread;
767 }
768
769 //
770 // This routine computes the new priority for a thread. It is only valid for
771 // threads with priorities in the dynamic priority range.
772 //
773 SCHAR
774 FORCEINLINE
775 KiComputeNewPriority(IN PKTHREAD Thread)
776 {
777 SCHAR Priority;
778
779 /* Priority sanity checks */
780 ASSERT((Thread->PriorityDecrement >= 0) &&
781 (Thread->PriorityDecrement <= Thread->Priority));
782 ASSERT((Thread->Priority < LOW_REALTIME_PRIORITY) ?
783 TRUE : (Thread->PriorityDecrement == 0));
784
785 /* Get the current priority */
786 Priority = Thread->Priority;
787 if (Priority < LOW_REALTIME_PRIORITY)
788 {
789 /* Set the New Priority and add the Priority Decrement */
790 Priority += (Priority - Thread->PriorityDecrement - 1);
791
792 /* Don't go out of bounds */
793 if (Priority < Thread->BasePriority) Priority = Thread->BasePriority;
794
795 /* Reset the priority decrement */
796 Thread->PriorityDecrement = 0;
797 }
798
799 /* Sanity check */
800 ASSERT((Thread->BasePriority == 0) || (Priority != 0));
801
802 /* Return the new priority */
803 return Priority;
804 }
805