- Simplify KiWaitTest.
[reactos.git] / reactos / ntoskrnl / include / internal / ke_x.h
1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: GPL - See COPYING in the top level directory
4 * FILE:            ntoskrnl/include/internal/ke_x.h
5 * PURPOSE: Internal Inlined Functions for the Kernel
6 * PROGRAMMERS: Alex Ionescu (alex.ionescu@reactos.org)
7 */
8
//
// Thread Dispatcher Header DebugActive Mask
//
// DR_MASK builds a single-bit mask for debug register x. The argument and
// the whole expansion are parenthesized so the macro composes safely inside
// larger expressions (e.g. DR_MASK(n) + 1, or mixing with comparisons).
//
#define DR_MASK(x) (1 << (x))
#define DR_ACTIVE_MASK 0x10
#define DR_REG_MASK 0x4F
15
16 //
17 // Sanitizes a selector
18 //
19 FORCEINLINE
20 ULONG
21 Ke386SanitizeSeg(IN ULONG Cs,
22 IN KPROCESSOR_MODE Mode)
23 {
24 //
25 // Check if we're in kernel-mode, and force CPL 0 if so.
26 // Otherwise, force CPL 3.
27 //
28 return ((Mode == KernelMode) ?
29 (Cs & (0xFFFF & ~RPL_MASK)) :
30 (RPL_MASK | (Cs & 0xFFFF)));
31 }
32
33 //
34 // Sanitizes EFLAGS
35 //
36 FORCEINLINE
37 ULONG
38 Ke386SanitizeFlags(IN ULONG Eflags,
39 IN KPROCESSOR_MODE Mode)
40 {
41 //
42 // Check if we're in kernel-mode, and sanitize EFLAGS if so.
43 // Otherwise, also force interrupt mask on.
44 //
45 return ((Mode == KernelMode) ?
46 (Eflags & (EFLAGS_USER_SANITIZE | EFLAGS_INTERRUPT_MASK)) :
47 (EFLAGS_INTERRUPT_MASK | (Eflags & EFLAGS_USER_SANITIZE)));
48 }
49
50 //
51 // Gets a DR register from a CONTEXT structure
52 //
53 FORCEINLINE
54 PVOID
55 KiDrFromContext(IN ULONG Dr,
56 IN PCONTEXT Context)
57 {
58 return *(PVOID*)((ULONG_PTR)Context + KiDebugRegisterContextOffsets[Dr]);
59 }
60
61 //
62 // Gets a DR register from a KTRAP_FRAME structure
63 //
64 FORCEINLINE
65 PVOID*
66 KiDrFromTrapFrame(IN ULONG Dr,
67 IN PKTRAP_FRAME TrapFrame)
68 {
69 return (PVOID*)((ULONG_PTR)TrapFrame + KiDebugRegisterTrapOffsets[Dr]);
70 }
71
72 //
73 //
74 //
75 FORCEINLINE
76 PVOID
77 Ke386SanitizeDr(IN PVOID DrAddress,
78 IN KPROCESSOR_MODE Mode)
79 {
80 //
81 // Check if we're in kernel-mode, and return the address directly if so.
82 // Otherwise, make sure it's not inside the kernel-mode address space.
83 // If it is, then clear the address.
84 //
85 return ((Mode == KernelMode) ? DrAddress :
86 (DrAddress <= MM_HIGHEST_USER_ADDRESS) ? DrAddress : 0);
87 }
88
//
// Enters a Guarded Region (disables special kernel APC delivery).
// Wrapped in do/while(0) so the macro behaves as a single statement
// (safe inside an unbraced if/else).
//
#define KeEnterGuardedRegion()                                              \
do                                                                          \
{                                                                           \
    PKTHREAD _Thread = KeGetCurrentThread();                                \
                                                                            \
    /* Sanity checks */                                                     \
    ASSERT_IRQL_LESS_OR_EQUAL(APC_LEVEL);                                   \
    ASSERT(_Thread == KeGetCurrentThread());                                \
    ASSERT((_Thread->SpecialApcDisable <= 0) &&                             \
           (_Thread->SpecialApcDisable != -32768));                         \
                                                                            \
    /* Disable Special APCs */                                              \
    _Thread->SpecialApcDisable--;                                           \
} while (0)
105
//
// Leaves a Guarded Region (re-enables special kernel APC delivery and
// delivers any queued kernel APCs once the disable count reaches zero).
// Wrapped in do/while(0) so the macro behaves as a single statement.
//
#define KeLeaveGuardedRegion()                                              \
do                                                                          \
{                                                                           \
    PKTHREAD _Thread = KeGetCurrentThread();                                \
                                                                            \
    /* Sanity checks */                                                     \
    ASSERT_IRQL_LESS_OR_EQUAL(APC_LEVEL);                                   \
    ASSERT(_Thread == KeGetCurrentThread());                                \
    ASSERT(_Thread->SpecialApcDisable < 0);                                 \
                                                                            \
    /* Leave region and check if APCs are OK now */                         \
    if (!(++_Thread->SpecialApcDisable))                                    \
    {                                                                       \
        /* Check for Kernel APCs on the list */                             \
        if (!IsListEmpty(&_Thread->ApcState.                                \
                         ApcListHead[KernelMode]))                          \
        {                                                                   \
            /* Check for APC Delivery */                                    \
            KiCheckForKernelApcDelivery();                                  \
        }                                                                   \
    }                                                                       \
} while (0)
130
131 //
132 // TODO: Guarded Mutex Routines
133 //
134
//
// Enters a Critical Region (disables normal kernel APC delivery).
// Wrapped in do/while(0) so the macro behaves as a single statement.
//
#define KeEnterCriticalRegion()                                             \
do                                                                          \
{                                                                           \
    PKTHREAD _Thread = KeGetCurrentThread();                                \
                                                                            \
    /* Sanity checks */                                                     \
    ASSERT(_Thread == KeGetCurrentThread());                                \
    ASSERT((_Thread->KernelApcDisable <= 0) &&                              \
           (_Thread->KernelApcDisable != -32768));                          \
                                                                            \
    /* Disable Kernel APCs */                                               \
    _Thread->KernelApcDisable--;                                            \
} while (0)
150
//
// Leaves a Critical Region (re-enables normal kernel APC delivery and
// delivers queued kernel APCs once no disable -- normal or special --
// remains). Wrapped in do/while(0) so the macro behaves as a statement.
//
#define KeLeaveCriticalRegion()                                             \
do                                                                          \
{                                                                           \
    PKTHREAD _Thread = KeGetCurrentThread();                                \
                                                                            \
    /* Sanity checks */                                                     \
    ASSERT(_Thread == KeGetCurrentThread());                                \
    ASSERT(_Thread->KernelApcDisable < 0);                                  \
                                                                            \
    /* Enable Kernel APCs */                                                \
    _Thread->KernelApcDisable++;                                            \
                                                                            \
    /* Check if Kernel APCs are now enabled */                              \
    if (!(_Thread->KernelApcDisable))                                       \
    {                                                                       \
        /* Check if we need to request an APC Delivery */                   \
        if (!(IsListEmpty(&_Thread->ApcState.ApcListHead[KernelMode])) &&   \
            !(_Thread->SpecialApcDisable))                                  \
        {                                                                   \
            /* Check for the right environment */                           \
            KiCheckForKernelApcDelivery();                                  \
        }                                                                   \
    }                                                                       \
} while (0)
177
//
// Satisfies the wait of any dispatcher object: mutants get ownership
// transferred to Thread, synchronization events/timers are auto-cleared,
// semaphores have their count decremented.
//
// NOTE(review): assumes the dispatcher database is locked and Object is
// signaled -- confirm at call sites; not verifiable from this header.
//
#define KiSatisfyObjectWait(Object, Thread)                                 \
{                                                                           \
    /* Special case for Mutants */                                          \
    if ((Object)->Header.Type == MutantObject)                              \
    {                                                                       \
        /* Decrease the Signal State */                                     \
        (Object)->Header.SignalState--;                                     \
                                                                            \
        /* Check if it's now non-signaled */                                \
        if (!(Object)->Header.SignalState)                                  \
        {                                                                   \
            /* Set the Owner Thread */                                      \
            (Object)->OwnerThread = Thread;                                 \
                                                                            \
            /* Disable APCs if needed */                                    \
            Thread->KernelApcDisable -= (Object)->ApcDisable;               \
                                                                            \
            /* Check if it's abandoned */                                   \
            if ((Object)->Abandoned)                                        \
            {                                                               \
                /* Unabandon it */                                          \
                (Object)->Abandoned = FALSE;                                \
                                                                            \
                /* Return Status */                                         \
                Thread->WaitStatus = STATUS_ABANDONED;                      \
            }                                                               \
                                                                            \
            /* Insert it into the Mutant List */                            \
            InsertHeadList(Thread->MutantListHead.Blink,                    \
                           &(Object)->MutantListEntry);                     \
        }                                                                   \
    }                                                                       \
    else if (((Object)->Header.Type & TIMER_OR_EVENT_TYPE) ==               \
             EventSynchronizationObject)                                    \
    {                                                                       \
        /* Synchronization Timers and Events just get un-signaled */        \
        (Object)->Header.SignalState = 0;                                   \
    }                                                                       \
    else if ((Object)->Header.Type == SemaphoreObject)                      \
    {                                                                       \
        /* These ones can have multiple states, so we only decrease it */   \
        (Object)->Header.SignalState--;                                     \
    }                                                                       \
}
225
//
// Satisfies the wait of a mutant dispatcher object: decrements the signal
// state and, on the 1 -> 0 transition, transfers ownership to Thread,
// applies the mutant's APC-disable delta, reports abandonment through
// Thread->WaitStatus, and links the mutant onto the thread's owned list.
//
#define KiSatisfyMutantWait(Object, Thread)                                 \
{                                                                           \
    /* Decrease the Signal State */                                         \
    (Object)->Header.SignalState--;                                         \
                                                                            \
    /* Check if it's now non-signaled */                                    \
    if (!(Object)->Header.SignalState)                                      \
    {                                                                       \
        /* Set the Owner Thread */                                          \
        (Object)->OwnerThread = Thread;                                     \
                                                                            \
        /* Disable APCs if needed */                                        \
        Thread->KernelApcDisable -= (Object)->ApcDisable;                   \
                                                                            \
        /* Check if it's abandoned */                                       \
        if ((Object)->Abandoned)                                            \
        {                                                                   \
            /* Unabandon it */                                              \
            (Object)->Abandoned = FALSE;                                    \
                                                                            \
            /* Return Status */                                             \
            Thread->WaitStatus = STATUS_ABANDONED;                          \
        }                                                                   \
                                                                            \
        /* Insert it into the Mutant List (at the tail, via head's Blink) */\
        InsertHeadList(Thread->MutantListHead.Blink,                        \
                       &(Object)->MutantListEntry);                         \
    }                                                                       \
}
258
//
// Satisfies the wait of any nonmutant dispatcher object: synchronization
// events/timers auto-clear, semaphores decrement; all other object types
// (notification events, processes, threads, ...) remain signaled.
//
#define KiSatisfyNonMutantWait(Object, Thread)                              \
{                                                                           \
    if (((Object)->Header.Type & TIMER_OR_EVENT_TYPE) ==                    \
         EventSynchronizationObject)                                        \
    {                                                                       \
        /* Synchronization Timers and Events just get un-signaled */        \
        (Object)->Header.SignalState = 0;                                   \
    }                                                                       \
    else if ((Object)->Header.Type == SemaphoreObject)                      \
    {                                                                       \
        /* These ones can have multiple states, so we only decrease it */   \
        (Object)->Header.SignalState--;                                     \
    }                                                                       \
}
276
277 //
278 // Recalculates the due time
279 //
280 PLARGE_INTEGER
281 FORCEINLINE
282 KiRecalculateDueTime(IN PLARGE_INTEGER OriginalDueTime,
283 IN PLARGE_INTEGER DueTime,
284 IN OUT PLARGE_INTEGER NewDueTime)
285 {
286 /* Don't do anything for absolute waits */
287 if (OriginalDueTime->QuadPart >= 0) return OriginalDueTime;
288
289 /* Otherwise, query the interrupt time and recalculate */
290 NewDueTime->QuadPart = KeQueryInterruptTime();
291 NewDueTime->QuadPart -= DueTime->QuadPart;
292 return NewDueTime;
293 }
294
295 //
296 // Determines wether a thread should be added to the wait list
297 //
298 FORCEINLINE
299 BOOLEAN
300 KiCheckThreadStackSwap(IN PKTHREAD Thread,
301 IN KPROCESSOR_MODE WaitMode)
302 {
303 /* Check the required conditions */
304 if ((WaitMode != KernelMode) &&
305 (Thread->EnableStackSwap) &&
306 (Thread->Priority >= (LOW_REALTIME_PRIORITY + 9)))
307 {
308 /* We are go for swap */
309 return TRUE;
310 }
311 else
312 {
313 /* Don't swap the thread */
314 return FALSE;
315 }
316 }
317
//
// Adds a thread to the PRCB wait list if its stack is swappable.
// Wrapped in do/while(0) so the macro behaves as a single statement.
//
#define KiAddThreadToWaitList(Thread, Swappable)                            \
do                                                                          \
{                                                                           \
    /* Make sure it's swappable */                                          \
    if (Swappable)                                                          \
    {                                                                       \
        /* Insert it into the PRCB's List */                                \
        InsertTailList(&KeGetCurrentPrcb()->WaitListHead,                   \
                       &Thread->WaitListEntry);                             \
    }                                                                       \
} while (0)
331
332 //
333 // Checks if a wait in progress should be interrupted by APCs or an alertable
334 // state.
335 //
336 FORCEINLINE
337 NTSTATUS
338 KiCheckAlertability(IN PKTHREAD Thread,
339 IN BOOLEAN Alertable,
340 IN KPROCESSOR_MODE WaitMode)
341 {
342 /* Check if the wait is alertable */
343 if (Alertable)
344 {
345 /* It is, first check if the thread is alerted in this mode */
346 if (Thread->Alerted[WaitMode])
347 {
348 /* It is, so bail out of the wait */
349 Thread->Alerted[WaitMode] = FALSE;
350 return STATUS_ALERTED;
351 }
352 else if ((WaitMode != KernelMode) &&
353 (!IsListEmpty(&Thread->ApcState.ApcListHead[UserMode])))
354 {
355 /* It's isn't, but this is a user wait with queued user APCs */
356 Thread->ApcState.UserApcPending = TRUE;
357 return STATUS_USER_APC;
358 }
359 else if (Thread->Alerted[KernelMode])
360 {
361 /* It isn't that either, but we're alered in kernel mode */
362 Thread->Alerted[KernelMode] = FALSE;
363 return STATUS_ALERTED;
364 }
365 }
366 else if ((WaitMode != KernelMode) && (Thread->ApcState.UserApcPending))
367 {
368 /* Not alertable, but this is a user wait with pending user APCs */
369 return STATUS_USER_APC;
370 }
371
372 /* Otherwise, we're fine */
373 return STATUS_WAIT_0;
374 }
375
//
// Prepares the current thread for a DelayExecution (sleep) wait using its
// built-in timer wait block. Returns TRUE if the thread's kernel stack may
// be swapped out for the duration of the wait.
//
FORCEINLINE
BOOLEAN
KxDelayThreadWait(IN PKTHREAD Thread,
                  IN BOOLEAN Alertable,
                  IN KPROCESSOR_MODE WaitMode)
{
    BOOLEAN Swappable;
    PKWAIT_BLOCK TimerBlock = &Thread->WaitBlock[TIMER_WAIT_BLOCK];

    /* Setup the Wait Block (the timer block is the only one; it links
       to itself to close the circular wait block list) */
    Thread->WaitBlockList = TimerBlock;
    TimerBlock->NextWaitBlock = TimerBlock;

    /* Link the timer to this Wait Block */
    Thread->Timer.Header.WaitListHead.Flink = &TimerBlock->WaitListEntry;
    Thread->Timer.Header.WaitListHead.Blink = &TimerBlock->WaitListEntry;

    /* Clear wait status */
    Thread->WaitStatus = STATUS_WAIT_0;

    /* Setup wait fields */
    Thread->Alertable = Alertable;
    Thread->WaitReason = DelayExecution;
    Thread->WaitMode = WaitMode;

    /* Check if we can swap the thread's stack */
    Thread->WaitListEntry.Flink = NULL;
    Swappable = KiCheckThreadStackSwap(Thread, WaitMode);

    /* Set the wait time (low part of the tick count) */
    Thread->WaitTime = ((PLARGE_INTEGER)&KeTickCount)->LowPart;
    return Swappable;
}
409
//
// Prepares the current thread for a multiple-object wait using a
// caller-provided wait block array. Returns TRUE if the thread's kernel
// stack may be swapped out for the duration of the wait.
//
FORCEINLINE
BOOLEAN
KxMultiThreadWait(IN PKTHREAD Thread,
                  IN PKWAIT_BLOCK WaitBlock,
                  IN BOOLEAN Alertable,
                  IN KWAIT_REASON WaitReason,
                  IN KPROCESSOR_MODE WaitMode)
{
    BOOLEAN Swappable;
    PKTIMER ThreadTimer = &Thread->Timer;

    /* Set default wait status */
    Thread->WaitStatus = STATUS_WAIT_0;

    /* Link wait block array to the thread */
    Thread->WaitBlockList = WaitBlock;

    /* Initialize the timer list (empty until a timeout block is linked) */
    InitializeListHead(&ThreadTimer->Header.WaitListHead);

    /* Set wait settings */
    Thread->Alertable = Alertable;
    Thread->WaitMode = WaitMode;
    Thread->WaitReason = WaitReason;

    /* Check if we can swap the thread's stack */
    Thread->WaitListEntry.Flink = NULL;
    Swappable = KiCheckThreadStackSwap(Thread, WaitMode);

    /* Set the wait time (low part of the tick count) */
    Thread->WaitTime = ((PLARGE_INTEGER)&KeTickCount)->LowPart;
    return Swappable;
}
443
//
// Prepares the current thread for a single-object WaitAny wait, optionally
// chaining the built-in timer wait block when a timeout is supplied.
// Returns TRUE if the thread's kernel stack may be swapped out.
//
FORCEINLINE
BOOLEAN
KxSingleThreadWait(IN PKTHREAD Thread,
                   IN PKWAIT_BLOCK WaitBlock,
                   IN PVOID Object,
                   IN PLARGE_INTEGER Timeout,
                   IN BOOLEAN Alertable,
                   IN KWAIT_REASON WaitReason,
                   IN KPROCESSOR_MODE WaitMode)
{
    BOOLEAN Swappable;
    PKWAIT_BLOCK TimerBlock = &Thread->WaitBlock[TIMER_WAIT_BLOCK];

    /* Setup the Wait Block */
    Thread->WaitBlockList = WaitBlock;
    WaitBlock->WaitKey = STATUS_WAIT_0;
    WaitBlock->Object = Object;
    WaitBlock->WaitType = WaitAny;

    /* Clear wait status */
    Thread->WaitStatus = STATUS_WAIT_0;

    /* Check if we have a timer */
    if (Timeout)
    {
        /* Close the circular block list through the timer block */
        WaitBlock->NextWaitBlock = TimerBlock;
        TimerBlock->NextWaitBlock = WaitBlock;

        /* Link the timer to this Wait Block */
        Thread->Timer.Header.WaitListHead.Flink = &TimerBlock->WaitListEntry;
        Thread->Timer.Header.WaitListHead.Blink = &TimerBlock->WaitListEntry;
    }
    else
    {
        /* No timer block, just ourselves */
        WaitBlock->NextWaitBlock = WaitBlock;
    }

    /* Setup wait fields */
    Thread->Alertable = Alertable;
    Thread->WaitReason = WaitReason;
    Thread->WaitMode = WaitMode;

    /* Check if we can swap the thread's stack */
    Thread->WaitListEntry.Flink = NULL;
    Swappable = KiCheckThreadStackSwap(Thread, WaitMode);

    /* Set the wait time (low part of the tick count) */
    Thread->WaitTime = ((PLARGE_INTEGER)&KeTickCount)->LowPart;
    return Swappable;
}
496
//
// Unwaits every thread blocked on the given dispatcher object.
// WaitAny waiters wake with their wait block's key; WaitAll waiters wake
// with STATUS_KERNEL_APC so they re-evaluate their full wait.
//
FORCEINLINE
VOID
KxUnwaitThread(IN DISPATCHER_HEADER *Object,
               IN KPRIORITY Increment)
{
    PLIST_ENTRY WaitEntry, WaitList;
    PKWAIT_BLOCK WaitBlock;
    PKTHREAD WaitThread;
    ULONG WaitKey;

    /* Loop the Wait Entries */
    WaitList = &Object->WaitListHead;
    ASSERT(IsListEmpty(&Object->WaitListHead) == FALSE);
    WaitEntry = WaitList->Flink;
    do
    {
        /* Get the current wait block */
        WaitBlock = CONTAINING_RECORD(WaitEntry, KWAIT_BLOCK, WaitListEntry);

        /* Get the waiting thread */
        WaitThread = WaitBlock->Thread;

        /* Check the current Wait Mode */
        if (WaitBlock->WaitType == WaitAny)
        {
            /* Use the actual wait key */
            WaitKey = WaitBlock->WaitKey;
        }
        else
        {
            /* Otherwise, use STATUS_KERNEL_APC */
            WaitKey = STATUS_KERNEL_APC;
        }

        /* Unwait the thread */
        KiUnwaitThread(WaitThread, WaitKey, Increment);

        /* Re-read the head's Flink: KiUnwaitThread presumably unlinks the
           entry we just processed -- confirm against its implementation */
        WaitEntry = WaitList->Flink;
    } while (WaitEntry != WaitList);
}
541
//
// Unwaits threads blocked on an event. The first WaitAny waiter consumes
// the signal (the event is cleared) and the scan stops; WaitAll waiters
// encountered before it are woken with STATUS_KERNEL_APC to re-wait.
//
FORCEINLINE
VOID
KxUnwaitThreadForEvent(IN PKEVENT Event,
                       IN KPRIORITY Increment)
{
    PLIST_ENTRY WaitEntry, WaitList;
    PKWAIT_BLOCK WaitBlock;
    PKTHREAD WaitThread;

    /* Loop the Wait Entries */
    WaitList = &Event->Header.WaitListHead;
    ASSERT(IsListEmpty(&Event->Header.WaitListHead) == FALSE);
    WaitEntry = WaitList->Flink;
    do
    {
        /* Get the current wait block */
        WaitBlock = CONTAINING_RECORD(WaitEntry, KWAIT_BLOCK, WaitListEntry);

        /* Get the waiting thread */
        WaitThread = WaitBlock->Thread;

        /* Check the current Wait Mode */
        if (WaitBlock->WaitType == WaitAny)
        {
            /* Un-signal it */
            Event->Header.SignalState = 0;

            /* Un-signal the event and unwait the thread */
            KiUnwaitThread(WaitThread, WaitBlock->WaitKey, Increment);
            break;
        }

        /* Unwait the thread with STATUS_KERNEL_APC */
        KiUnwaitThread(WaitThread, STATUS_KERNEL_APC, Increment);

        /* Re-read the head's Flink: the woken entry is no longer linked */
        WaitEntry = WaitList->Flink;
    } while (WaitEntry != WaitList);
}
584
585 #ifndef _CONFIG_SMP
//
// Spinlock Acquire at IRQL >= DISPATCH_LEVEL
//
FORCEINLINE
VOID
KxAcquireSpinLock(IN PKSPIN_LOCK SpinLock)
{
    /* On UP builds, spinlocks don't exist at IRQL >= DISPATCH:
       being at DISPATCH_LEVEL on the only CPU already guarantees
       mutual exclusion */
    UNREFERENCED_PARAMETER(SpinLock);
}
596
//
// Spinlock Release at IRQL >= DISPATCH_LEVEL
//
FORCEINLINE
VOID
KxReleaseSpinLock(IN PKSPIN_LOCK SpinLock)
{
    /* On UP builds, spinlocks don't exist at IRQL >= DISPATCH:
       nothing was acquired, so there is nothing to release */
    UNREFERENCED_PARAMETER(SpinLock);
}
607
//
// This routine protects against multiple CPU acquires, it's meaningless on UP.
//
VOID
FORCEINLINE
KiAcquireDispatcherObject(IN DISPATCHER_HEADER* Object)
{
    /* Single CPU: no other processor can race on the object header */
    UNREFERENCED_PARAMETER(Object);
}
617
//
// This routine protects against multiple CPU acquires, it's meaningless on UP.
//
VOID
FORCEINLINE
KiReleaseDispatcherObject(IN DISPATCHER_HEADER* Object)
{
    /* Single CPU: the matching acquire did nothing, so neither does this */
    UNREFERENCED_PARAMETER(Object);
}
627
628 KIRQL
629 FORCEINLINE
630 KiAcquireDispatcherLock(VOID)
631 {
632 /* Raise to DPC level */
633 return KeRaiseIrqlToDpcLevel();
634 }
635
636 VOID
637 FORCEINLINE
638 KiReleaseDispatcherLock(IN KIRQL OldIrql)
639 {
640 /* Just exit the dispatcher */
641 KiExitDispatcher(OldIrql);
642 }
643
644 VOID
645 FORCEINLINE
646 KiAcquireDispatcherLockAtDpcLevel(VOID)
647 {
648 /* This is a no-op at DPC Level for UP systems */
649 return;
650 }
651
652 VOID
653 FORCEINLINE
654 KiReleaseDispatcherLockFromDpcLevel(VOID)
655 {
656 /* This is a no-op at DPC Level for UP systems */
657 return;
658 }
659
660 //
661 // This routine makes the thread deferred ready on the boot CPU.
662 //
663 FORCEINLINE
664 VOID
665 KiInsertDeferredReadyList(IN PKTHREAD Thread)
666 {
667 /* Set the thread to deferred state and boot CPU */
668 Thread->State = DeferredReady;
669 Thread->DeferredProcessor = 0;
670
671 /* Make the thread ready immediately */
672 KiDeferredReadyThread(Thread);
673 }
674
//
// Requests rescheduling on another CPU; meaningless with only one CPU.
//
FORCEINLINE
VOID
KiRescheduleThread(IN BOOLEAN NewThread,
                   IN ULONG Cpu)
{
    /* This is meaningless on UP systems */
    UNREFERENCED_PARAMETER(NewThread);
    UNREFERENCED_PARAMETER(Cpu);
}
684
//
// This routine protects against multiple CPU acquires, it's meaningless on UP.
//
FORCEINLINE
VOID
KiSetThreadSwapBusy(IN PKTHREAD Thread)
{
    /* No other CPU can attempt a concurrent swap on UP */
    UNREFERENCED_PARAMETER(Thread);
}
694
//
// This routine protects against multiple CPU acquires, it's meaningless on UP.
//
FORCEINLINE
VOID
KiAcquirePrcbLock(IN PKPRCB Prcb)
{
    /* Single CPU: the PRCB cannot be contended */
    UNREFERENCED_PARAMETER(Prcb);
}
704
//
// This routine protects against multiple CPU acquires, it's meaningless on UP.
//
FORCEINLINE
VOID
KiReleasePrcbLock(IN PKPRCB Prcb)
{
    /* Single CPU: the matching acquire did nothing */
    UNREFERENCED_PARAMETER(Prcb);
}
714
//
// This routine protects against multiple CPU acquires, it's meaningless on UP.
//
FORCEINLINE
VOID
KiAcquireThreadLock(IN PKTHREAD Thread)
{
    /* Single CPU: thread state cannot be touched concurrently */
    UNREFERENCED_PARAMETER(Thread);
}
724
//
// This routine protects against multiple CPU acquires, it's meaningless on UP.
//
FORCEINLINE
VOID
KiReleaseThreadLock(IN PKTHREAD Thread)
{
    /* Single CPU: the matching acquire did nothing */
    UNREFERENCED_PARAMETER(Thread);
}
734
//
// This routine protects against multiple CPU acquires, it's meaningless on UP.
//
FORCEINLINE
BOOLEAN
KiTryThreadLock(IN PKTHREAD Thread)
{
    /* No lock exists on UP; FALSE matches the SMP variant's result for an
       uncontended lock -- NOTE(review): confirm callers' interpretation */
    UNREFERENCED_PARAMETER(Thread);
    return FALSE;
}
745
//
// Processes the PRCB's deferred ready list; UP systems never defer.
//
FORCEINLINE
VOID
KiCheckDeferredReadyList(IN PKPRCB Prcb)
{
    /* There are no deferred ready lists on UP systems */
    UNREFERENCED_PARAMETER(Prcb);
}
753
754 FORCEINLINE
755 VOID
756 KiRundownThread(IN PKTHREAD Thread)
757 {
758 /* Check if this is the NPX Thread */
759 if (KeGetCurrentPrcb()->NpxThread == Thread)
760 {
761 /* Clear it */
762 KeGetCurrentPrcb()->NpxThread = NULL;
763 Ke386FnInit();
764 }
765 }
766
//
// Requests APC delivery on a target processor; not needed on UP.
//
FORCEINLINE
VOID
KiRequestApcInterrupt(IN BOOLEAN NeedApc,
                      IN UCHAR Processor)
{
    /* We deliver instantly on UP */
    UNREFERENCED_PARAMETER(NeedApc);
    UNREFERENCED_PARAMETER(Processor);
}
776
777 #else
778
779 //
780 // Spinlock Acquisition at IRQL >= DISPATCH_LEVEL
781 //
782 FORCEINLINE
783 VOID
784 KxAcquireSpinLock(IN PKSPIN_LOCK SpinLock)
785 {
786 for (;;)
787 {
788 /* Try to acquire it */
789 if (InterlockedBitTestAndSet((PLONG)SpinLock, 0))
790 {
791 /* Value changed... wait until it's locked */
792 while (*(volatile KSPIN_LOCK *)SpinLock == 1)
793 {
794 #ifdef DBG
795 /* On debug builds, we use a much slower but useful routine */
796 Kii386SpinOnSpinLock(SpinLock, 5);
797 #else
798 /* Otherwise, just yield and keep looping */
799 YieldProcessor();
800 #endif
801 }
802 }
803 else
804 {
805 #ifdef DBG
806 /* On debug builds, we OR in the KTHREAD */
807 *SpinLock = KeGetCurrentThread() | 1;
808 #endif
809 /* All is well, break out */
810 break;
811 }
812 }
813 }
814
815 //
816 // Spinlock Release at IRQL >= DISPATCH_LEVEL
817 //
818 FORCEINLINE
819 VOID
820 KxReleaseSpinLock(IN PKSPIN_LOCK SpinLock)
821 {
822 #ifdef DBG
823 /* Make sure that the threads match */
824 if ((KeGetCurrentThread() | 1) != *SpinLock)
825 {
826 /* They don't, bugcheck */
827 KeBugCheckEx(SPIN_LOCK_NOT_OWNED, SpinLock, 0, 0, 0);
828 }
829 #endif
830 /* Clear the lock */
831 InterlockedAnd(SpinLock, 0);
832 }
833
834 KIRQL
835 FORCEINLINE
836 KiAcquireDispatcherObject(IN DISPATCHER_HEADER* Object)
837 {
838 LONG OldValue, NewValue;
839
840 /* Make sure we're at a safe level to touch the lock */
841 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
842
843 /* Start acquire loop */
844 do
845 {
846 /* Loop until the other CPU releases it */
847 while ((UCHAR)Object->Lock & KOBJECT_LOCK_BIT)
848 {
849 /* Let the CPU know that this is a loop */
850 YieldProcessor();
851 };
852
853 /* Try acquiring the lock now */
854 NewValue = InterlockedCompareExchange(&Object->Lock,
855 OldValue | KOBJECT_LOCK_BIT,
856 OldValue);
857 } while (NewValue != OldValue);
858 }
859
860 KIRQL
861 FORCEINLINE
862 KiReleaseDispatcherObject(IN DISPATCHER_HEADER* Object)
863 {
864 /* Make sure we're at a safe level to touch the lock */
865 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
866
867 /* Release it */
868 InterlockedAnd(&Object->Lock, ~KOBJECT_LOCK_BIT);
869 }
870
871 KIRQL
872 FORCEINLINE
873 KiAcquireDispatcherLock(VOID)
874 {
875 /* Raise to synchronization level and acquire the dispatcher lock */
876 return KeAcquireQueuedSpinLockRaiseToSynch(LockQueueDispatcherLock);
877 }
878
879 VOID
880 FORCEINLINE
881 KiReleaseDispatcherLock(IN KIRQL OldIrql)
882 {
883 /* First release the lock */
884 KeReleaseQueuedSpinLockFromDpcLevel(&KeGetCurrentPrcb()->
885 LockQueue[LockQueueDispatcherLock]);
886
887 /* Then exit the dispatcher */
888 KiExitDispatcher(OldIrql);
889 }
890
//
// This routine inserts a thread into the deferred ready list of the
// current CPU; the list is drained later by KiCheckDeferredReadyList.
//
FORCEINLINE
VOID
KiInsertDeferredReadyList(IN PKTHREAD Thread)
{
    PKPRCB Prcb = KeGetCurrentPrcb();

    /* Set the thread to deferred state and CPU */
    Thread->State = DeferredReady;
    Thread->DeferredProcessor = Prcb->Number;

    /* Add it on the (singly-linked) list */
    PushEntryList(&Prcb->DeferredReadyListHead, &Thread->SwapListEntry);
}
907
908 FORCEINLINE
909 VOID
910 KiRescheduleThread(IN BOOLEAN NewThread,
911 IN ULONG Cpu)
912 {
913 /* Check if a new thread needs to be scheduled on a different CPU */
914 if ((NewThread) && !(KeGetPcr()->Number == Cpu))
915 {
916 /* Send an IPI to request delivery */
917 KiIpiSendRequest(AFFINITY_MASK(Cpu), IPI_DPC);
918 }
919 }
920
//
// This routine sets the current thread in a swap busy state, which ensures
// that nobody else tries to swap it concurrently.
//
FORCEINLINE
VOID
KiSetThreadSwapBusy(IN PKTHREAD Thread)
{
    /* Make sure nobody already set it */
    ASSERT(Thread->SwapBusy == FALSE);

    /* Set it ourselves */
    Thread->SwapBusy = TRUE;
}
935
//
// This routine acquires the PRCB lock so that only one caller can touch
// volatile PRCB data.
//
// Since this is a simple optimized spin-lock, it must only be acquired
// at dispatcher level or higher!
//
FORCEINLINE
VOID
KiAcquirePrcbLock(IN PKPRCB Prcb)
{
    /* Make sure we're at a safe level to touch the PRCB lock */
    ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);

    /* Start acquire loop */
    for (;;)
    {
        /* Acquire the lock and break out if we acquired it first */
        if (!InterlockedExchange(&Prcb->PrcbLock, 1)) break;

        /* Loop until the other CPU releases it (plain read while spinning
           avoids hammering the bus with interlocked operations) */
        do
        {
            /* Let the CPU know that this is a loop */
            YieldProcessor();
        } while (Prcb->PrcbLock);
    }
}
964
//
// This routine releases the PRCB lock so that other callers can touch
// volatile PRCB data.
//
// Since this is a simple optimized spin-lock, it must only be released
// at dispatcher level or higher!
//
FORCEINLINE
VOID
KiReleasePrcbLock(IN PKPRCB Prcb)
{
    /* Make sure it's acquired! */
    ASSERT(Prcb->PrcbLock != 0);

    /* Release it */
    InterlockedAnd(&Prcb->PrcbLock, 0);
}
982
//
// This routine acquires the thread lock so that only one caller can touch
// volatile thread data.
//
// Since this is a simple optimized spin-lock, it must only be acquired
// at dispatcher level or higher!
//
FORCEINLINE
VOID
KiAcquireThreadLock(IN PKTHREAD Thread)
{
    /* Make sure we're at a safe level to touch the thread lock */
    ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);

    /* Start acquire loop */
    for (;;)
    {
        /* Acquire the lock and break out if we acquired it first */
        if (!InterlockedExchange(&Thread->ThreadLock, 1)) break;

        /* Loop until the other CPU releases it (plain read while spinning
           avoids hammering the bus with interlocked operations) */
        do
        {
            /* Let the CPU know that this is a loop */
            YieldProcessor();
        } while (Thread->ThreadLock);
    }
}
1011
//
// This routine releases the thread lock so that other callers can touch
// volatile thread data.
//
// Since this is a simple optimized spin-lock, it must only be released
// at dispatcher level or higher!
//
// NOTE(review): unlike KiReleasePrcbLock, there is no held-assertion here.
//
FORCEINLINE
VOID
KiReleaseThreadLock(IN PKTHREAD Thread)
{
    /* Release it */
    InterlockedAnd(&Thread->ThreadLock, 0);
}
1026
1027 FORCEINLINE
1028 BOOLEAN
1029 KiTryThreadLock(IN PKTHREAD Thread)
1030 {
1031 LONG Value;
1032
1033 /* If the lock isn't acquired, return false */
1034 if (!Thread->ThreadLock) return FALSE;
1035
1036 /* Otherwise, try to acquire it and check the result */
1037 Value = 1;
1038 Value = InterlockedExchange(&Thread->ThreadLock, &Value);
1039
1040 /* Return the lock state */
1041 return (Value == TRUE);
1042 }
1043
//
// Drains this CPU's deferred ready list, if it has any entries.
//
FORCEINLINE
VOID
KiCheckDeferredReadyList(IN PKPRCB Prcb)
{
    /* Scan the deferred ready lists if required */
    if (Prcb->DeferredReadyListHead.Next) KiProcessDeferredReadyList(Prcb);
}
1051
1052 FORCEINLINE
1053 VOID
1054 KiRequestApcInterrupt(IN BOOLEAN NeedApc,
1055 IN UCHAR Processor)
1056 {
1057 /* Check if we need to request APC delivery */
1058 if (NeedApc)
1059 {
1060 /* Check if it's on another CPU */
1061 if (KeGetPcr()->Number != Cpu)
1062 {
1063 /* Send an IPI to request delivery */
1064 KiIpiSendRequest(AFFINITY_MASK(Cpu), IPI_DPC);
1065 }
1066 else
1067 {
1068 /* Request a software interrupt */
1069 HalRequestSoftwareInterrupt(APC_LEVEL);
1070 }
1071 }
1072 }
1073
1074 #endif
1075
//
// Acquires a thread's APC queue lock, raising to synchronization level.
//
FORCEINLINE
VOID
KiAcquireApcLock(IN PKTHREAD Thread,
                 IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Acquire the lock and raise to synchronization level */
    KeAcquireInStackQueuedSpinLockRaiseToSynch(&Thread->ApcQueueLock, Handle);
}
1084
//
// Acquires a thread's APC queue lock when already at/above DISPATCH_LEVEL.
//
FORCEINLINE
VOID
KiAcquireApcLockAtDpcLevel(IN PKTHREAD Thread,
                           IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Acquire the lock without changing IRQL */
    KeAcquireInStackQueuedSpinLockAtDpcLevel(&Thread->ApcQueueLock, Handle);
}
1093
//
// Acquires a thread's APC queue lock from APC level.
// NOTE(review): this calls the plain in-stack acquire (which raises to
// DISPATCH_LEVEL) rather than a synch-level acquire -- confirm intended.
//
FORCEINLINE
VOID
KiAcquireApcLockAtApcLevel(IN PKTHREAD Thread,
                           IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Acquire the lock */
    KeAcquireInStackQueuedSpinLock(&Thread->ApcQueueLock, Handle);
}
1102
//
// Releases a thread's APC queue lock, restoring the saved IRQL.
//
FORCEINLINE
VOID
KiReleaseApcLock(IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Release the lock */
    KeReleaseInStackQueuedSpinLock(Handle);
}
1110
//
// Releases a thread's APC queue lock without lowering IRQL.
//
FORCEINLINE
VOID
KiReleaseApcLockFromDpcLevel(IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Release the lock */
    KeReleaseInStackQueuedSpinLockFromDpcLevel(Handle);
}
1118
//
// Acquires a process's lock, raising to synchronization level.
//
FORCEINLINE
VOID
KiAcquireProcessLock(IN PKPROCESS Process,
                     IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Acquire the lock and raise to synchronization level */
    KeAcquireInStackQueuedSpinLockRaiseToSynch(&Process->ProcessLock, Handle);
}
1127
//
// Releases a process's lock, restoring the saved IRQL.
//
FORCEINLINE
VOID
KiReleaseProcessLock(IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Release the lock */
    KeReleaseInStackQueuedSpinLock(Handle);
}
1135
//
// Releases a process's lock without lowering IRQL.
//
FORCEINLINE
VOID
KiReleaseProcessLockFromDpcLevel(IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Release the lock */
    KeReleaseInStackQueuedSpinLockFromDpcLevel(Handle);
}
1143
//
// Acquires a device queue's lock, choosing the IRQL-correct variant
// depending on whether we run in a threaded DPC (below DISPATCH_LEVEL)
// or an ordinary DPC (at DISPATCH_LEVEL).
//
FORCEINLINE
VOID
KiAcquireDeviceQueueLock(IN PKDEVICE_QUEUE DeviceQueue,
                         IN PKLOCK_QUEUE_HANDLE DeviceLock)
{
    /* Check if we were called from a threaded DPC */
    if (KeGetCurrentPrcb()->DpcThreadActive)
    {
        /* Lock the Queue, we're not at DPC level */
        KeAcquireInStackQueuedSpinLock(&DeviceQueue->Lock, DeviceLock);
    }
    else
    {
        /* We must be at DPC level, acquire the lock safely */
        ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
        KeAcquireInStackQueuedSpinLockAtDpcLevel(&DeviceQueue->Lock,
                                                 DeviceLock);
    }
}
1163
//
// Releases a device queue's lock, matching the variant chosen by
// KiAcquireDeviceQueueLock.
//
FORCEINLINE
VOID
KiReleaseDeviceQueueLock(IN PKLOCK_QUEUE_HANDLE DeviceLock)
{
    /* Check if we were called from a threaded DPC */
    if (KeGetCurrentPrcb()->DpcThreadActive)
    {
        /* Unlock the Queue, we're not at DPC level */
        KeReleaseInStackQueuedSpinLock(DeviceLock);
    }
    else
    {
        /* We must be at DPC level, release the lock safely */
        ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
        KeReleaseInStackQueuedSpinLockFromDpcLevel(DeviceLock);
    }
}
1181
//
// This routine queues a thread that is ready on the PRCB's ready lists.
// If this thread cannot currently run on this CPU, then the thread is
// added to the deferred ready list instead.
//
// This routine must be entered with the PRCB lock held and it will exit
// with the PRCB lock released!
//
FORCEINLINE
VOID
KxQueueReadyThread(IN PKTHREAD Thread,
                   IN PKPRCB Prcb)
{
    BOOLEAN Preempted;
    KPRIORITY Priority;

    /* Sanity checks */
    ASSERT(Prcb == KeGetCurrentPrcb());
    ASSERT(Thread->State == Running);
    ASSERT(Thread->NextProcessor == Prcb->Number);

    /* Check if this thread is allowed to run in this CPU */
#ifdef _CONFIG_SMP
    if ((Thread->Affinity) & (Prcb->SetMember))
#else
    if (TRUE)
#endif
    {
        /* Set thread ready for execution */
        Thread->State = Ready;

        /* Save current priority and if someone had pre-empted it */
        Priority = Thread->Priority;
        Preempted = Thread->Preempted;

        /* We're not pre-empting now, and set the wait time */
        Thread->Preempted = FALSE;
        Thread->WaitTime = KeTickCount.LowPart;

        /* Sanity check */
        ASSERT((Priority >= 0) && (Priority <= HIGH_PRIORITY));

        /* Insert this thread in the appropriate order: preempted threads
           go to the head of their priority list, others to the tail */
        Preempted ? InsertHeadList(&Prcb->DispatcherReadyListHead[Priority],
                                   &Thread->WaitListEntry) :
                    InsertTailList(&Prcb->DispatcherReadyListHead[Priority],
                                   &Thread->WaitListEntry);

        /* Update the ready summary */
        Prcb->ReadySummary |= PRIORITY_MASK(Priority);

        /* Sanity check */
        ASSERT(Priority == Thread->Priority);

        /* Release the PRCB lock */
        KiReleasePrcbLock(Prcb);
    }
    else
    {
        /* Otherwise, prepare this thread to be deferred */
        Thread->State = DeferredReady;
        Thread->DeferredProcessor = Prcb->Number;

        /* Release the lock and defer scheduling */
        KiReleasePrcbLock(Prcb);
        KiDeferredReadyThread(Thread);
    }
}
1250
1251 //
1252 // This routine scans for an appropriate ready thread to select at the
1253 // given priority and for the given CPU.
1254 //
1255 FORCEINLINE
1256 PKTHREAD
1257 KiSelectReadyThread(IN KPRIORITY Priority,
1258 IN PKPRCB Prcb)
1259 {
1260 LONG PriorityMask, PrioritySet, HighPriority;
1261 PLIST_ENTRY ListEntry;
1262 PKTHREAD Thread;
1263
1264 /* Save the current mask and get the priority set for the CPU */
1265 PriorityMask = Priority;
1266 PrioritySet = Prcb->ReadySummary >> (UCHAR)Priority;
1267 if (!PrioritySet) return NULL;
1268
1269 /* Get the highest priority possible */
1270 BitScanReverse((PULONG)&HighPriority, PrioritySet);
1271 ASSERT((PrioritySet & PRIORITY_MASK(HighPriority)) != 0);
1272 HighPriority += PriorityMask;
1273
1274 /* Make sure the list isn't at highest priority */
1275 ASSERT(IsListEmpty(&Prcb->DispatcherReadyListHead[HighPriority]) == FALSE);
1276
1277 /* Get the first thread on the list */
1278 ListEntry = &Prcb->DispatcherReadyListHead[HighPriority];
1279 Thread = CONTAINING_RECORD(ListEntry, KTHREAD, WaitListEntry);
1280
1281 /* Make sure this thread is here for a reason */
1282 ASSERT(HighPriority == Thread->Priority);
1283 ASSERT(Thread->Affinity & AFFINITY_MASK(Prcb->Number));
1284 ASSERT(Thread->NextProcessor == Prcb->Number);
1285
1286 /* Remove it from the list */
1287 RemoveEntryList(&Thread->WaitListEntry);
1288 if (IsListEmpty(&Thread->WaitListEntry))
1289 {
1290 /* The list is empty now, reset the ready summary */
1291 Prcb->ReadySummary ^= PRIORITY_MASK(HighPriority);
1292 }
1293
1294 /* Sanity check and return the thread */
1295 ASSERT((Thread == NULL) ||
1296 (Thread->BasePriority == 0) ||
1297 (Thread->Priority != 0));
1298 return Thread;
1299 }
1300
//
// This routine computes the new priority for a thread. It is only valid
// for threads with priorities in the dynamic priority range (below
// LOW_REALTIME_PRIORITY); realtime priorities pass through unchanged.
//
SCHAR
FORCEINLINE
KiComputeNewPriority(IN PKTHREAD Thread)
{
    SCHAR Priority;

    /* Priority sanity checks */
    ASSERT((Thread->PriorityDecrement >= 0) &&
           (Thread->PriorityDecrement <= Thread->Priority));
    ASSERT((Thread->Priority < LOW_REALTIME_PRIORITY) ?
            TRUE : (Thread->PriorityDecrement == 0));

    /* Get the current priority */
    Priority = Thread->Priority;
    if (Priority < LOW_REALTIME_PRIORITY)
    {
        /* Decrease priority by the accumulated decrement, plus the usual
           one-step quantum-end decay */
        Priority -= (Thread->PriorityDecrement + 1);

        /* Don't go out of bounds (never drop below the base priority) */
        if (Priority < Thread->BasePriority) Priority = Thread->BasePriority;

        /* Reset the priority decrement */
        Thread->PriorityDecrement = 0;
    }

    /* Sanity check */
    ASSERT((Thread->BasePriority == 0) || (Priority != 0));

    /* Return the new priority */
    return Priority;
}
1337
1338 PRKTHREAD
1339 FORCEINLINE
1340 KeGetCurrentThread(VOID)
1341 {
1342 /* Return the current thread */
1343 return ((PKIPCR)KeGetPcr())->PrcbData.CurrentThread;
1344 }
1345
1346 UCHAR
1347 FORCEINLINE
1348 KeGetPreviousMode(VOID)
1349 {
1350 /* Return the current mode */
1351 return KeGetCurrentThread()->PreviousMode;
1352 }