- Fix a bug in KeLeaveCriticalRegion which was delivering APCs even if Special APCs...
[reactos.git] / reactos / ntoskrnl / include / internal / ke_x.h
1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: GPL - See COPYING in the top level directory
 * FILE:            ntoskrnl/include/internal/ke_x.h
5 * PURPOSE: Internal Inlined Functions for the Kernel
6 * PROGRAMMERS: Alex Ionescu (alex.ionescu@reactos.org)
7 */
8
//
// Thread Dispatcher Header DebugActive Mask
//
// DR_MASK builds the per-debug-register bit; argument and expansion are
// fully parenthesized so the macro composes safely inside larger
// expressions (e.g. arithmetic or ?: operands).
//
#define DR_MASK(x) (1 << (x))
#define DR_ACTIVE_MASK 0x10
#define DR_REG_MASK 0x4F
15
16 //
17 // Sanitizes a selector
18 //
19 FORCEINLINE
20 ULONG
21 Ke386SanitizeSeg(IN ULONG Cs,
22 IN KPROCESSOR_MODE Mode)
23 {
24 //
25 // Check if we're in kernel-mode, and force CPL 0 if so.
26 // Otherwise, force CPL 3.
27 //
28 return ((Mode == KernelMode) ?
29 (Cs & (0xFFFF & ~RPL_MASK)) :
30 (RPL_MASK | (Cs & 0xFFFF)));
31 }
32
33 //
34 // Sanitizes EFLAGS
35 //
36 FORCEINLINE
37 ULONG
38 Ke386SanitizeFlags(IN ULONG Eflags,
39 IN KPROCESSOR_MODE Mode)
40 {
41 //
42 // Check if we're in kernel-mode, and sanitize EFLAGS if so.
43 // Otherwise, also force interrupt mask on.
44 //
45 return ((Mode == KernelMode) ?
46 (Eflags & (EFLAGS_USER_SANITIZE | EFLAGS_INTERRUPT_MASK)) :
47 (EFLAGS_INTERRUPT_MASK | (Eflags & EFLAGS_USER_SANITIZE)));
48 }
49
//
// Gets a DR register from a CONTEXT structure
//
// Returns the VALUE of debug register Dr, read at its table-driven byte
// offset (KiDebugRegisterContextOffsets) inside the CONTEXT record.
//
FORCEINLINE
PVOID
KiDrFromContext(IN ULONG Dr,
                IN PCONTEXT Context)
{
    return *(PVOID*)((ULONG_PTR)Context + KiDebugRegisterContextOffsets[Dr]);
}
60
//
// Gets a DR register from a KTRAP_FRAME structure
//
// Unlike KiDrFromContext, this returns a POINTER into the trap frame
// (so the caller can read or write the slot), located via the
// KiDebugRegisterTrapOffsets table.
//
FORCEINLINE
PVOID*
KiDrFromTrapFrame(IN ULONG Dr,
                  IN PKTRAP_FRAME TrapFrame)
{
    return (PVOID*)((ULONG_PTR)TrapFrame + KiDebugRegisterTrapOffsets[Dr]);
}
71
72 //
73 //
74 //
75 FORCEINLINE
76 PVOID
77 Ke386SanitizeDr(IN PVOID DrAddress,
78 IN KPROCESSOR_MODE Mode)
79 {
80 //
81 // Check if we're in kernel-mode, and return the address directly if so.
82 // Otherwise, make sure it's not inside the kernel-mode address space.
83 // If it is, then clear the address.
84 //
85 return ((Mode == KernelMode) ? DrAddress :
86 (DrAddress <= MM_HIGHEST_USER_ADDRESS) ? DrAddress : 0);
87 }
88
//
// Enters a Guarded Region
//
// Disables special kernel APC delivery for the current thread by
// decrementing SpecialApcDisable. Must be called at IRQL <= APC_LEVEL and
// balanced by KeLeaveGuardedRegion; the -32768 assert guards against
// counter underflow from unbalanced calls.
//
#define KeEnterGuardedRegion() \
{ \
    PKTHREAD _Thread = KeGetCurrentThread(); \
\
    /* Sanity checks */ \
    ASSERT_IRQL_LESS_OR_EQUAL(APC_LEVEL); \
    ASSERT(_Thread == KeGetCurrentThread()); \
    ASSERT((_Thread->SpecialApcDisable <= 0) && \
           (_Thread->SpecialApcDisable != -32768)); \
\
    /* Disable Special APCs */ \
    _Thread->SpecialApcDisable-- ; \
}
105
//
// Leaves a Guarded Region
//
// Re-enables special kernel APC delivery by incrementing SpecialApcDisable.
// Once the count returns to zero, pending kernel-mode APCs (if any are
// queued) are delivered immediately via KiCheckForKernelApcDelivery.
//
#define KeLeaveGuardedRegion() \
{ \
    PKTHREAD _Thread = KeGetCurrentThread(); \
\
    /* Sanity checks */ \
    ASSERT_IRQL_LESS_OR_EQUAL(APC_LEVEL); \
    ASSERT(_Thread == KeGetCurrentThread()); \
    ASSERT(_Thread->SpecialApcDisable < 0); \
\
    /* Leave region and check if APCs are OK now */ \
    if (!(++_Thread->SpecialApcDisable)) \
    { \
        /* Check for Kernel APCs on the list */ \
        if (!IsListEmpty(&_Thread->ApcState. \
                         ApcListHead[KernelMode])) \
        { \
            /* Check for APC Delivery */ \
            KiCheckForKernelApcDelivery(); \
        } \
    } \
}
130
131 //
132 // TODO: Guarded Mutex Routines
133 //
134
//
// Enters a Critical Region
//
// Disables normal kernel APC delivery for the current thread by
// decrementing KernelApcDisable. Must be balanced by
// KeLeaveCriticalRegion; the -32768 assert guards against counter
// underflow from unbalanced calls.
//
#define KeEnterCriticalRegion() \
{ \
    PKTHREAD _Thread = KeGetCurrentThread(); \
\
    /* Sanity checks */ \
    ASSERT(_Thread == KeGetCurrentThread()); \
    ASSERT((_Thread->KernelApcDisable <= 0) && \
           (_Thread->KernelApcDisable != -32768)); \
\
    /* Disable Kernel APCs */ \
    _Thread->KernelApcDisable--; \
}
150
//
// Leaves a Critical Region
//
// Re-enables normal kernel APC delivery by incrementing KernelApcDisable.
// When the count reaches zero, delivery is requested only if kernel-mode
// APCs are queued AND special APCs are not disabled (i.e. we are not also
// inside a guarded region).
//
#define KeLeaveCriticalRegion() \
{ \
    PKTHREAD _Thread = KeGetCurrentThread(); \
\
    /* Sanity checks */ \
    ASSERT(_Thread == KeGetCurrentThread()); \
    ASSERT(_Thread->KernelApcDisable < 0); \
\
    /* Enable Kernel APCs */ \
    _Thread->KernelApcDisable++; \
\
    /* Check if Kernel APCs are now enabled */ \
    if (!(_Thread->KernelApcDisable)) \
    { \
        /* Check if we need to request an APC Delivery */ \
        if (!(IsListEmpty(&_Thread->ApcState.ApcListHead[KernelMode])) && \
            !(_Thread->SpecialApcDisable)) \
        { \
            /* Check for the right environment */ \
            KiCheckForKernelApcDelivery(); \
        } \
    } \
}
177
//
// Satisfies the wait of any dispatcher object
//
// Mutants: SignalState is decremented; when it hits zero the waiter becomes
// the owner, kernel APCs are adjusted by the mutant's ApcDisable, an
// abandoned mutant reports STATUS_ABANDONED through Thread->WaitStatus, and
// the mutant is linked onto the thread's mutant list. Synchronization
// events/timers auto-clear. Semaphores just decrement. Other object types
// are left untouched.
//
#define KiSatisfyObjectWait(Object, Thread) \
{ \
    /* Special case for Mutants */ \
    if ((Object)->Header.Type == MutantObject) \
    { \
        /* Decrease the Signal State */ \
        (Object)->Header.SignalState--; \
\
        /* Check if it's now non-signaled */ \
        if (!(Object)->Header.SignalState) \
        { \
            /* Set the Owner Thread */ \
            (Object)->OwnerThread = Thread; \
\
            /* Disable APCs if needed */ \
            Thread->KernelApcDisable -= (Object)->ApcDisable; \
\
            /* Check if it's abandoned */ \
            if ((Object)->Abandoned) \
            { \
                /* Unabandon it */ \
                (Object)->Abandoned = FALSE; \
\
                /* Return Status */ \
                Thread->WaitStatus = STATUS_ABANDONED; \
            } \
\
            /* Insert it into the Mutant List */ \
            InsertHeadList(Thread->MutantListHead.Blink, \
                           &(Object)->MutantListEntry); \
        } \
    } \
    else if (((Object)->Header.Type & TIMER_OR_EVENT_TYPE) == \
             EventSynchronizationObject) \
    { \
        /* Synchronization Timers and Events just get un-signaled */ \
        (Object)->Header.SignalState = 0; \
    } \
    else if ((Object)->Header.Type == SemaphoreObject) \
    { \
        /* These ones can have multiple states, so we only decrease it */ \
        (Object)->Header.SignalState--; \
    } \
}
225
//
// Satisfies the wait of a mutant dispatcher object
//
// Identical to the mutant arm of KiSatisfyObjectWait: decrement
// SignalState, and on reaching zero assign ownership, adjust
// KernelApcDisable, report abandonment via Thread->WaitStatus, and queue
// the mutant on the thread's mutant list.
//
#define KiSatisfyMutantWait(Object, Thread) \
{ \
    /* Decrease the Signal State */ \
    (Object)->Header.SignalState--; \
\
    /* Check if it's now non-signaled */ \
    if (!(Object)->Header.SignalState) \
    { \
        /* Set the Owner Thread */ \
        (Object)->OwnerThread = Thread; \
\
        /* Disable APCs if needed */ \
        Thread->KernelApcDisable -= (Object)->ApcDisable; \
\
        /* Check if it's abandoned */ \
        if ((Object)->Abandoned) \
        { \
            /* Unabandon it */ \
            (Object)->Abandoned = FALSE; \
\
            /* Return Status */ \
            Thread->WaitStatus = STATUS_ABANDONED; \
        } \
\
        /* Insert it into the Mutant List */ \
        InsertHeadList(Thread->MutantListHead.Blink, \
                       &(Object)->MutantListEntry); \
    } \
}
258
//
// Satisfies the wait of any nonmutant dispatcher object
//
// Synchronization events/timers auto-clear their SignalState; semaphores
// decrement theirs. Notification objects and all other types are left
// signaled, which is why no 'else' arm exists.
//
#define KiSatisfyNonMutantWait(Object, Thread) \
{ \
    if (((Object)->Header.Type & TIMER_OR_EVENT_TYPE) == \
        EventSynchronizationObject) \
    { \
        /* Synchronization Timers and Events just get un-signaled */ \
        (Object)->Header.SignalState = 0; \
    } \
    else if ((Object)->Header.Type == SemaphoreObject) \
    { \
        /* These ones can have multiple states, so we only decrease it */ \
        (Object)->Header.SignalState--; \
    } \
}
276
277 //
278 // Recalculates the due time
279 //
280 PLARGE_INTEGER
281 FORCEINLINE
282 KiRecalculateDueTime(IN PLARGE_INTEGER OriginalDueTime,
283 IN PLARGE_INTEGER DueTime,
284 IN OUT PLARGE_INTEGER NewDueTime)
285 {
286 /* Don't do anything for absolute waits */
287 if (OriginalDueTime->QuadPart >= 0) return OriginalDueTime;
288
289 /* Otherwise, query the interrupt time and recalculate */
290 NewDueTime->QuadPart = KeQueryInterruptTime();
291 NewDueTime->QuadPart -= DueTime->QuadPart;
292 return NewDueTime;
293 }
294
295 //
296 // Determines wether a thread should be added to the wait list
297 //
298 FORCEINLINE
299 BOOLEAN
300 KiCheckThreadStackSwap(IN PKTHREAD Thread,
301 IN KPROCESSOR_MODE WaitMode)
302 {
303 /* Check the required conditions */
304 if ((WaitMode != KernelMode) &&
305 (Thread->EnableStackSwap) &&
306 (Thread->Priority >= (LOW_REALTIME_PRIORITY + 9)))
307 {
308 /* We are go for swap */
309 return TRUE;
310 }
311 else
312 {
313 /* Don't swap the thread */
314 return FALSE;
315 }
316 }
317
//
// Adds a thread to the wait list
//
// Only swappable waiters are linked into the current PRCB's wait list;
// non-swappable waiters are left unlinked (callers clear
// Thread->WaitListEntry.Flink beforehand — see the Kx*ThreadWait helpers).
//
#define KiAddThreadToWaitList(Thread, Swappable) \
{ \
    /* Make sure it's swappable */ \
    if (Swappable) \
    { \
        /* Insert it into the PRCB's List */ \
        InsertTailList(&KeGetCurrentPrcb()->WaitListHead, \
                       &Thread->WaitListEntry); \
    } \
}
331
//
// Checks if a wait in progress should be interrupted by APCs or an alertable
// state.
//
// Returns STATUS_ALERTED, STATUS_USER_APC, or STATUS_WAIT_0 ("keep
// waiting"). Consuming an alert clears the corresponding Alerted flag.
//
FORCEINLINE
NTSTATUS
KiCheckAlertability(IN PKTHREAD Thread,
                    IN BOOLEAN Alertable,
                    IN KPROCESSOR_MODE WaitMode)
{
    /* Check if the wait is alertable */
    if (Alertable)
    {
        /* It is, first check if the thread is alerted in this mode */
        if (Thread->Alerted[WaitMode])
        {
            /* It is, so bail out of the wait */
            Thread->Alerted[WaitMode] = FALSE;
            return STATUS_ALERTED;
        }
        else if ((WaitMode != KernelMode) &&
                 (!IsListEmpty(&Thread->ApcState.ApcListHead[UserMode])))
        {
            /* It isn't, but this is a user wait with queued user APCs */
            Thread->ApcState.UserApcPending = TRUE;
            return STATUS_USER_APC;
        }
        else if (Thread->Alerted[KernelMode])
        {
            /* It isn't that either, but we're alerted in kernel mode */
            Thread->Alerted[KernelMode] = FALSE;
            return STATUS_ALERTED;
        }
    }
    else if ((WaitMode != KernelMode) && (Thread->ApcState.UserApcPending))
    {
        /* Not alertable, but this is a user wait with pending user APCs */
        return STATUS_USER_APC;
    }

    /* Otherwise, we're fine */
    return STATUS_WAIT_0;
}
375
//
// Prepares the current thread for a delay-execution wait (WaitReason =
// DelayExecution): links the built-in timer wait block as the only entry,
// records the wait parameters, and returns whether the kernel stack may be
// swapped out while waiting.
//
FORCEINLINE
BOOLEAN
KxDelayThreadWait(IN PKTHREAD Thread,
                  IN BOOLEAN Alertable,
                  IN KPROCESSOR_MODE WaitMode)
{
    BOOLEAN Swappable;
    PKWAIT_BLOCK TimerBlock = &Thread->WaitBlock[TIMER_WAIT_BLOCK];

    /* Setup the Wait Block (it points back to itself: single-entry chain) */
    Thread->WaitBlockList = TimerBlock;
    TimerBlock->NextWaitBlock = TimerBlock;

    /* Link the timer to this Wait Block */
    Thread->Timer.Header.WaitListHead.Flink = &TimerBlock->WaitListEntry;
    Thread->Timer.Header.WaitListHead.Blink = &TimerBlock->WaitListEntry;

    /* Clear wait status */
    Thread->WaitStatus = STATUS_WAIT_0;

    /* Setup wait fields */
    Thread->Alertable = Alertable;
    Thread->WaitReason = DelayExecution;
    Thread->WaitMode = WaitMode;

    /* Check if we can swap the thread's stack */
    Thread->WaitListEntry.Flink = NULL;
    Swappable = KiCheckThreadStackSwap(Thread, WaitMode);

    /* Set the wait time */
    Thread->WaitTime = ((PLARGE_INTEGER)&KeTickCount)->LowPart;
    return Swappable;
}
409
//
// Prepares the current thread for a multi-object wait: attaches the
// caller-supplied wait block array, resets the built-in timer's wait list,
// records the wait parameters, and returns whether the kernel stack may be
// swapped out while waiting.
//
FORCEINLINE
BOOLEAN
KxMultiThreadWait(IN PKTHREAD Thread,
                  IN PKWAIT_BLOCK WaitBlock,
                  IN BOOLEAN Alertable,
                  IN KWAIT_REASON WaitReason,
                  IN KPROCESSOR_MODE WaitMode)
{
    BOOLEAN Swappable;
    PKTIMER ThreadTimer = &Thread->Timer;

    /* Set default wait status */
    Thread->WaitStatus = STATUS_WAIT_0;

    /* Link wait block array to the thread */
    Thread->WaitBlockList = WaitBlock;

    /* Initialize the timer list */
    InitializeListHead(&ThreadTimer->Header.WaitListHead);

    /* Set wait settings */
    Thread->Alertable = Alertable;
    Thread->WaitMode = WaitMode;
    Thread->WaitReason = WaitReason;

    /* Check if we can swap the thread's stack */
    Thread->WaitListEntry.Flink = NULL;
    Swappable = KiCheckThreadStackSwap(Thread, WaitMode);

    /* Set the wait time */
    Thread->WaitTime = ((PLARGE_INTEGER)&KeTickCount)->LowPart;
    return Swappable;
}
443
//
// Prepares the current thread for a single-object WaitAny wait. When a
// timeout is given, the built-in timer wait block is chained after the
// object's wait block; otherwise the object block points back at itself.
// Returns whether the kernel stack may be swapped out while waiting.
//
FORCEINLINE
BOOLEAN
KxSingleThreadWait(IN PKTHREAD Thread,
                   IN PKWAIT_BLOCK WaitBlock,
                   IN PVOID Object,
                   IN PLARGE_INTEGER Timeout,
                   IN BOOLEAN Alertable,
                   IN KWAIT_REASON WaitReason,
                   IN KPROCESSOR_MODE WaitMode)
{
    BOOLEAN Swappable;
    PKWAIT_BLOCK TimerBlock = &Thread->WaitBlock[TIMER_WAIT_BLOCK];

    /* Setup the Wait Block */
    Thread->WaitBlockList = WaitBlock;
    WaitBlock->WaitKey = STATUS_WAIT_0;
    WaitBlock->Object = Object;
    WaitBlock->WaitType = WaitAny;

    /* Clear wait status */
    Thread->WaitStatus = STATUS_WAIT_0;

    /* Check if we have a timer */
    if (Timeout)
    {
        /* Pointer to timer block (object block -> timer block -> object) */
        WaitBlock->NextWaitBlock = TimerBlock;
        TimerBlock->NextWaitBlock = WaitBlock;

        /* Link the timer to this Wait Block */
        Thread->Timer.Header.WaitListHead.Flink = &TimerBlock->WaitListEntry;
        Thread->Timer.Header.WaitListHead.Blink = &TimerBlock->WaitListEntry;
    }
    else
    {
        /* No timer block, just ourselves */
        WaitBlock->NextWaitBlock = WaitBlock;
    }

    /* Setup wait fields */
    Thread->Alertable = Alertable;
    Thread->WaitReason = WaitReason;
    Thread->WaitMode = WaitMode;

    /* Check if we can swap the thread's stack */
    Thread->WaitListEntry.Flink = NULL;
    Swappable = KiCheckThreadStackSwap(Thread, WaitMode);

    /* Set the wait time */
    Thread->WaitTime = ((PLARGE_INTEGER)&KeTickCount)->LowPart;
    return Swappable;
}
496
//
// Unwaits a Thread
//
// Wakes every waiter of the given dispatcher object. WaitAny waiters
// complete with their wait block's WaitKey; other (WaitAll) waiters are
// woken with STATUS_KERNEL_APC so they can re-evaluate their wait.
// NOTE(review): the loop re-reads the head's Flink each iteration and
// assumes KiUnwaitThread unlinks the entry — otherwise it would not
// terminate; confirm against KiUnwaitThread's implementation.
//
FORCEINLINE
VOID
KxUnwaitThread(IN DISPATCHER_HEADER *Object,
               IN KPRIORITY Increment)
{
    PLIST_ENTRY WaitEntry, WaitList;
    PKWAIT_BLOCK CurrentWaitBlock;
    PKTHREAD WaitThread;
    ULONG WaitKey;

    /* Loop the Wait Entries */
    WaitList = &Object->WaitListHead;
    WaitEntry = WaitList->Flink;
    do
    {
        /* Get the current wait block */
        CurrentWaitBlock = CONTAINING_RECORD(WaitEntry,
                                             KWAIT_BLOCK,
                                             WaitListEntry);

        /* Get the waiting thread */
        WaitThread = CurrentWaitBlock->Thread;

        /* Check the current Wait Mode */
        if (CurrentWaitBlock->WaitType == WaitAny)
        {
            /* Use the actual wait key */
            WaitKey = CurrentWaitBlock->WaitKey;
        }
        else
        {
            /* Otherwise, use STATUS_KERNEL_APC */
            WaitKey = STATUS_KERNEL_APC;
        }

        /* Unwait the thread */
        KiUnwaitThread(WaitThread, WaitKey, Increment);

        /* Next entry */
        WaitEntry = WaitList->Flink;
    } while (WaitEntry != WaitList);
}
542
//
// Unwaits a Thread waiting on an event
//
// Walks the event's waiter list. WaitAll waiters are woken with
// STATUS_KERNEL_APC to re-evaluate their wait; the first WaitAny waiter
// consumes the event (SignalState is cleared) and the walk stops there.
// NOTE(review): like KxUnwaitThread, this assumes KiUnwaitThread unlinks
// each woken entry so that re-reading Flink advances the walk.
//
FORCEINLINE
VOID
KxUnwaitThreadForEvent(IN PKEVENT Event,
                       IN KPRIORITY Increment)
{
    PLIST_ENTRY WaitEntry, WaitList;
    PKWAIT_BLOCK CurrentWaitBlock;
    PKTHREAD WaitThread;

    /* Loop the Wait Entries */
    WaitList = &Event->Header.WaitListHead;
    WaitEntry = WaitList->Flink;
    do
    {
        /* Get the current wait block */
        CurrentWaitBlock = CONTAINING_RECORD(WaitEntry,
                                             KWAIT_BLOCK,
                                             WaitListEntry);

        /* Get the waiting thread */
        WaitThread = CurrentWaitBlock->Thread;

        /* Check the current Wait Mode */
        if (CurrentWaitBlock->WaitType == WaitAny)
        {
            /* Un-signal it */
            Event->Header.SignalState = 0;

            /* Un-signal the event and unwait the thread */
            KiUnwaitThread(WaitThread, CurrentWaitBlock->WaitKey, Increment);
            break;
        }
        else
        {
            /* Unwait the thread with STATUS_KERNEL_APC */
            KiUnwaitThread(WaitThread, STATUS_KERNEL_APC, Increment);
        }

        /* Next entry */
        WaitEntry = WaitList->Flink;
    } while (WaitEntry != WaitList);
}
588
589 #ifndef _CONFIG_SMP
590 //
591 // Spinlock Acquire at IRQL >= DISPATCH_LEVEL
592 //
593 FORCEINLINE
594 VOID
595 KxAcquireSpinLock(IN PKSPIN_LOCK SpinLock)
596 {
597 /* On UP builds, spinlocks don't exist at IRQL >= DISPATCH */
598 UNREFERENCED_PARAMETER(SpinLock);
599 }
600
601 //
602 // Spinlock Release at IRQL >= DISPATCH_LEVEL
603 //
604 FORCEINLINE
605 VOID
606 KxReleaseSpinLock(IN PKSPIN_LOCK SpinLock)
607 {
608 /* On UP builds, spinlocks don't exist at IRQL >= DISPATCH */
609 UNREFERENCED_PARAMETER(SpinLock);
610 }
611
612 //
613 // This routine protects against multiple CPU acquires, it's meaningless on UP.
614 //
615 VOID
616 FORCEINLINE
617 KiAcquireDispatcherObject(IN DISPATCHER_HEADER* Object)
618 {
619 UNREFERENCED_PARAMETER(Object);
620 }
621
622 //
623 // This routine protects against multiple CPU acquires, it's meaningless on UP.
624 //
625 VOID
626 FORCEINLINE
627 KiReleaseDispatcherObject(IN DISPATCHER_HEADER* Object)
628 {
629 UNREFERENCED_PARAMETER(Object);
630 }
631
632 KIRQL
633 FORCEINLINE
634 KiAcquireDispatcherLock(VOID)
635 {
636 /* Raise to DPC level */
637 return KeRaiseIrqlToDpcLevel();
638 }
639
640 VOID
641 FORCEINLINE
642 KiReleaseDispatcherLock(IN KIRQL OldIrql)
643 {
644 /* Just exit the dispatcher */
645 KiExitDispatcher(OldIrql);
646 }
647
648 VOID
649 FORCEINLINE
650 KiAcquireDispatcherLockAtDpcLevel(VOID)
651 {
652 /* This is a no-op at DPC Level for UP systems */
653 return;
654 }
655
656 VOID
657 FORCEINLINE
658 KiReleaseDispatcherLockFromDpcLevel(VOID)
659 {
660 /* This is a no-op at DPC Level for UP systems */
661 return;
662 }
663
//
// This routine makes the thread deferred ready on the boot CPU.
//
// On UP builds there is no deferred ready list: the thread is marked
// DeferredReady for processor 0 and handed straight to
// KiDeferredReadyThread.
//
FORCEINLINE
VOID
KiInsertDeferredReadyList(IN PKTHREAD Thread)
{
    /* Set the thread to deferred state and boot CPU */
    Thread->State = DeferredReady;
    Thread->DeferredProcessor = 0;

    /* Make the thread ready immediately */
    KiDeferredReadyThread(Thread);
}
678
679 FORCEINLINE
680 VOID
681 KiRescheduleThread(IN BOOLEAN NewThread,
682 IN ULONG Cpu)
683 {
684 /* This is meaningless on UP systems */
685 UNREFERENCED_PARAMETER(NewThread);
686 UNREFERENCED_PARAMETER(Cpu);
687 }
688
689 //
690 // This routine protects against multiple CPU acquires, it's meaningless on UP.
691 //
692 FORCEINLINE
693 VOID
694 KiSetThreadSwapBusy(IN PKTHREAD Thread)
695 {
696 UNREFERENCED_PARAMETER(Thread);
697 }
698
699 //
700 // This routine protects against multiple CPU acquires, it's meaningless on UP.
701 //
702 FORCEINLINE
703 VOID
704 KiAcquirePrcbLock(IN PKPRCB Prcb)
705 {
706 UNREFERENCED_PARAMETER(Prcb);
707 }
708
709 //
710 // This routine protects against multiple CPU acquires, it's meaningless on UP.
711 //
712 FORCEINLINE
713 VOID
714 KiReleasePrcbLock(IN PKPRCB Prcb)
715 {
716 UNREFERENCED_PARAMETER(Prcb);
717 }
718
719 //
720 // This routine protects against multiple CPU acquires, it's meaningless on UP.
721 //
722 FORCEINLINE
723 VOID
724 KiAcquireThreadLock(IN PKTHREAD Thread)
725 {
726 UNREFERENCED_PARAMETER(Thread);
727 }
728
729 //
730 // This routine protects against multiple CPU acquires, it's meaningless on UP.
731 //
732 FORCEINLINE
733 VOID
734 KiReleaseThreadLock(IN PKTHREAD Thread)
735 {
736 UNREFERENCED_PARAMETER(Thread);
737 }
738
739 //
740 // This routine protects against multiple CPU acquires, it's meaningless on UP.
741 //
742 FORCEINLINE
743 BOOLEAN
744 KiTryThreadLock(IN PKTHREAD Thread)
745 {
746 UNREFERENCED_PARAMETER(Thread);
747 return FALSE;
748 }
749
750 FORCEINLINE
751 VOID
752 KiCheckDeferredReadyList(IN PKPRCB Prcb)
753 {
754 /* There are no deferred ready lists on UP systems */
755 UNREFERENCED_PARAMETER(Prcb);
756 }
757
//
// Runs down per-CPU state owned by a dying thread (UP build): if the
// thread currently owns the FPU (it is the PRCB's NpxThread), drop that
// ownership and reinitialize the FPU.
//
FORCEINLINE
VOID
KiRundownThread(IN PKTHREAD Thread)
{
    /* Check if this is the NPX Thread */
    if (KeGetCurrentPrcb()->NpxThread == Thread)
    {
        /* Clear it */
        KeGetCurrentPrcb()->NpxThread = NULL;
        Ke386FnInit();
    }
}
770
771 FORCEINLINE
772 VOID
773 KiRequestApcInterrupt(IN BOOLEAN NeedApc,
774 IN UCHAR Processor)
775 {
776 /* We deliver instantly on UP */
777 UNREFERENCED_PARAMETER(NeedApc);
778 UNREFERENCED_PARAMETER(Processor);
779 }
780
781 #else
782
783 //
784 // Spinlock Acquisition at IRQL >= DISPATCH_LEVEL
785 //
786 FORCEINLINE
787 VOID
788 KxAcquireSpinLock(IN PKSPIN_LOCK SpinLock)
789 {
790 for (;;)
791 {
792 /* Try to acquire it */
793 if (InterlockedBitTestAndSet((PLONG)SpinLock, 0))
794 {
795 /* Value changed... wait until it's locked */
796 while (*(volatile KSPIN_LOCK *)SpinLock == 1)
797 {
798 #ifdef DBG
799 /* On debug builds, we use a much slower but useful routine */
800 Kii386SpinOnSpinLock(SpinLock, 5);
801 #else
802 /* Otherwise, just yield and keep looping */
803 YieldProcessor();
804 #endif
805 }
806 }
807 else
808 {
809 #ifdef DBG
810 /* On debug builds, we OR in the KTHREAD */
811 *SpinLock = KeGetCurrentThread() | 1;
812 #endif
813 /* All is well, break out */
814 break;
815 }
816 }
817 }
818
819 //
820 // Spinlock Release at IRQL >= DISPATCH_LEVEL
821 //
822 FORCEINLINE
823 VOID
824 KxReleaseSpinLock(IN PKSPIN_LOCK SpinLock)
825 {
826 #ifdef DBG
827 /* Make sure that the threads match */
828 if ((KeGetCurrentThread() | 1) != *SpinLock)
829 {
830 /* They don't, bugcheck */
831 KeBugCheckEx(SPIN_LOCK_NOT_OWNED, SpinLock, 0, 0, 0);
832 }
833 #endif
834 /* Clear the lock */
835 InterlockedAnd(SpinLock, 0);
836 }
837
838 KIRQL
839 FORCEINLINE
840 KiAcquireDispatcherObject(IN DISPATCHER_HEADER* Object)
841 {
842 LONG OldValue, NewValue;
843
844 /* Make sure we're at a safe level to touch the lock */
845 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
846
847 /* Start acquire loop */
848 do
849 {
850 /* Loop until the other CPU releases it */
851 while ((UCHAR)Object->Lock & KOBJECT_LOCK_BIT)
852 {
853 /* Let the CPU know that this is a loop */
854 YieldProcessor();
855 };
856
857 /* Try acquiring the lock now */
858 NewValue = InterlockedCompareExchange(&Object->Lock,
859 OldValue | KOBJECT_LOCK_BIT,
860 OldValue);
861 } while (NewValue != OldValue);
862 }
863
864 KIRQL
865 FORCEINLINE
866 KiReleaseDispatcherObject(IN DISPATCHER_HEADER* Object)
867 {
868 /* Make sure we're at a safe level to touch the lock */
869 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
870
871 /* Release it */
872 InterlockedAnd(&Object->Lock, ~KOBJECT_LOCK_BIT);
873 }
874
875 KIRQL
876 FORCEINLINE
877 KiAcquireDispatcherLock(VOID)
878 {
879 /* Raise to synchronization level and acquire the dispatcher lock */
880 return KeAcquireQueuedSpinLockRaiseToSynch(LockQueueDispatcherLock);
881 }
882
883 VOID
884 FORCEINLINE
885 KiReleaseDispatcherLock(IN KIRQL OldIrql)
886 {
887 /* First release the lock */
888 KeReleaseQueuedSpinLockFromDpcLevel(&KeGetCurrentPrcb()->
889 LockQueue[LockQueueDispatcherLock]);
890
891 /* Then exit the dispatcher */
892 KiExitDispatcher(OldIrql);
893 }
894
//
// This routine inserts a thread into the deferred ready list of the given CPU
//
// SMP build: the thread is marked DeferredReady for the current processor
// and pushed onto this PRCB's singly-linked deferred ready list, to be
// drained later by KiCheckDeferredReadyList.
//
FORCEINLINE
VOID
KiInsertDeferredReadyList(IN PKTHREAD Thread)
{
    PKPRCB Prcb = KeGetCurrentPrcb();

    /* Set the thread to deferred state and CPU */
    Thread->State = DeferredReady;
    Thread->DeferredProcessor = Prcb->Number;

    /* Add it on the list */
    PushEntryList(&Prcb->DeferredReadyListHead, &Thread->SwapListEntry);
}
911
912 FORCEINLINE
913 VOID
914 KiRescheduleThread(IN BOOLEAN NewThread,
915 IN ULONG Cpu)
916 {
917 /* Check if a new thread needs to be scheduled on a different CPU */
918 if ((NewThread) && !(KeGetPcr()->Number == Cpu))
919 {
920 /* Send an IPI to request delivery */
921 KiIpiSendRequest(AFFINITY_MASK(Cpu), IPI_DPC);
922 }
923 }
924
925 //
926 // This routine sets the current thread in a swap busy state, which ensure that
927 // nobody else tries to swap it concurrently.
928 //
929 FORCEINLINE
930 VOID
931 KiSetThreadSwapBusy(IN PKTHREAD Thread)
932 {
933 /* Make sure nobody already set it */
934 ASSERT(Thread->SwapBusy == FALSE);
935
936 /* Set it ourselves */
937 Thread->SwapBusy = TRUE;
938 }
939
940 //
941 // This routine acquires the PRCB lock so that only one caller can touch
942 // volatile PRCB data.
943 //
// Since this is a simple optimized spin-lock, it must only be acquired
945 // at dispatcher level or higher!
946 //
947 FORCEINLINE
948 VOID
949 KiAcquirePrcbLock(IN PKPRCB Prcb)
950 {
951 /* Make sure we're at a safe level to touch the PRCB lock */
952 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
953
954 /* Start acquire loop */
955 for (;;)
956 {
957 /* Acquire the lock and break out if we acquired it first */
958 if (!InterlockedExchange(&Prcb->PrcbLock, 1)) break;
959
960 /* Loop until the other CPU releases it */
961 do
962 {
963 /* Let the CPU know that this is a loop */
964 YieldProcessor();
965 } while (Prcb->PrcbLock);
966 }
967 }
968
969 //
970 // This routine releases the PRCB lock so that other callers can touch
971 // volatile PRCB data.
972 //
// Since this is a simple optimized spin-lock, it must only be acquired
974 // at dispatcher level or higher!
975 //
976 FORCEINLINE
977 VOID
978 KiReleasePrcbLock(IN PKPRCB Prcb)
979 {
980 /* Make sure it's acquired! */
981 ASSERT(Prcb->PrcbLock != 0);
982
983 /* Release it */
984 InterlockedAnd(&Prcb->PrcbLock, 0);
985 }
986
987 //
988 // This routine acquires the thread lock so that only one caller can touch
989 // volatile thread data.
990 //
// Since this is a simple optimized spin-lock, it must only be acquired
992 // at dispatcher level or higher!
993 //
994 FORCEINLINE
995 VOID
996 KiAcquireThreadLock(IN PKTHREAD Thread)
997 {
998 /* Make sure we're at a safe level to touch the thread lock */
999 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
1000
1001 /* Start acquire loop */
1002 for (;;)
1003 {
1004 /* Acquire the lock and break out if we acquired it first */
1005 if (!InterlockedExchange(&Thread->ThreadLock, 1)) break;
1006
1007 /* Loop until the other CPU releases it */
1008 do
1009 {
1010 /* Let the CPU know that this is a loop */
1011 YieldProcessor();
1012 } while (Thread->ThreadLock);
1013 }
1014 }
1015
1016 //
1017 // This routine releases the thread lock so that other callers can touch
1018 // volatile thread data.
1019 //
// Since this is a simple optimized spin-lock, it must only be acquired
1021 // at dispatcher level or higher!
1022 //
1023 FORCEINLINE
1024 VOID
1025 KiReleaseThreadLock(IN PKTHREAD Thread)
1026 {
1027 /* Release it */
1028 InterlockedAnd(&Thread->ThreadLock, 0);
1029 }
1030
1031 FORCEINLINE
1032 BOOLEAN
1033 KiTryThreadLock(IN PKTHREAD Thread)
1034 {
1035 LONG Value;
1036
1037 /* If the lock isn't acquired, return false */
1038 if (!Thread->ThreadLock) return FALSE;
1039
1040 /* Otherwise, try to acquire it and check the result */
1041 Value = 1;
1042 Value = InterlockedExchange(&Thread->ThreadLock, &Value);
1043
1044 /* Return the lock state */
1045 return (Value == TRUE);
1046 }
1047
1048 FORCEINLINE
1049 VOID
1050 KiCheckDeferredReadyList(IN PKPRCB Prcb)
1051 {
1052 /* Scan the deferred ready lists if required */
1053 if (Prcb->DeferredReadyListHead.Next) KiProcessDeferredReadyList(Prcb);
1054 }
1055
1056 FORCEINLINE
1057 VOID
1058 KiRequestApcInterrupt(IN BOOLEAN NeedApc,
1059 IN UCHAR Processor)
1060 {
1061 /* Check if we need to request APC delivery */
1062 if (NeedApc)
1063 {
1064 /* Check if it's on another CPU */
1065 if (KeGetPcr()->Number != Cpu)
1066 {
1067 /* Send an IPI to request delivery */
1068 KiIpiSendRequest(AFFINITY_MASK(Cpu), IPI_DPC);
1069 }
1070 else
1071 {
1072 /* Request a software interrupt */
1073 HalRequestSoftwareInterrupt(APC_LEVEL);
1074 }
1075 }
1076 }
1077
1078 #endif
1079
1080 FORCEINLINE
1081 VOID
1082 KiAcquireApcLock(IN PKTHREAD Thread,
1083 IN PKLOCK_QUEUE_HANDLE Handle)
1084 {
1085 /* Acquire the lock and raise to synchronization level */
1086 KeAcquireInStackQueuedSpinLockRaiseToSynch(&Thread->ApcQueueLock, Handle);
1087 }
1088
1089 FORCEINLINE
1090 VOID
1091 KiAcquireApcLockAtDpcLevel(IN PKTHREAD Thread,
1092 IN PKLOCK_QUEUE_HANDLE Handle)
1093 {
1094 /* Acquire the lock */
1095 KeAcquireInStackQueuedSpinLockAtDpcLevel(&Thread->ApcQueueLock, Handle);
1096 }
1097
1098 FORCEINLINE
1099 VOID
1100 KiAcquireApcLockAtApcLevel(IN PKTHREAD Thread,
1101 IN PKLOCK_QUEUE_HANDLE Handle)
1102 {
1103 /* Acquire the lock */
1104 KeAcquireInStackQueuedSpinLock(&Thread->ApcQueueLock, Handle);
1105 }
1106
1107 FORCEINLINE
1108 VOID
1109 KiReleaseApcLock(IN PKLOCK_QUEUE_HANDLE Handle)
1110 {
1111 /* Release the lock */
1112 KeReleaseInStackQueuedSpinLock(Handle);
1113 }
1114
1115 FORCEINLINE
1116 VOID
1117 KiReleaseApcLockFromDpcLevel(IN PKLOCK_QUEUE_HANDLE Handle)
1118 {
1119 /* Release the lock */
1120 KeReleaseInStackQueuedSpinLockFromDpcLevel(Handle);
1121 }
1122
1123 FORCEINLINE
1124 VOID
1125 KiAcquireProcessLock(IN PKPROCESS Process,
1126 IN PKLOCK_QUEUE_HANDLE Handle)
1127 {
1128 /* Acquire the lock and raise to synchronization level */
1129 KeAcquireInStackQueuedSpinLockRaiseToSynch(&Process->ProcessLock, Handle);
1130 }
1131
1132 FORCEINLINE
1133 VOID
1134 KiReleaseProcessLock(IN PKLOCK_QUEUE_HANDLE Handle)
1135 {
1136 /* Release the lock */
1137 KeReleaseInStackQueuedSpinLock(Handle);
1138 }
1139
1140 FORCEINLINE
1141 VOID
1142 KiReleaseProcessLockFromDpcLevel(IN PKLOCK_QUEUE_HANDLE Handle)
1143 {
1144 /* Release the lock */
1145 KeReleaseInStackQueuedSpinLockFromDpcLevel(Handle);
1146 }
1147
//
// Acquires a device queue lock. Threaded DPCs run below DISPATCH_LEVEL, so
// the lock must raise IRQL; ordinary DPCs are already at DISPATCH_LEVEL and
// can take the at-DPC-level fast path.
//
FORCEINLINE
VOID
KiAcquireDeviceQueueLock(IN PKDEVICE_QUEUE DeviceQueue,
                         IN PKLOCK_QUEUE_HANDLE DeviceLock)
{
    /* Check if we were called from a threaded DPC */
    if (KeGetCurrentPrcb()->DpcThreadActive)
    {
        /* Lock the Queue, we're not at DPC level */
        KeAcquireInStackQueuedSpinLock(&DeviceQueue->Lock, DeviceLock);
    }
    else
    {
        /* We must be at DPC level, acquire the lock safely */
        ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
        KeAcquireInStackQueuedSpinLockAtDpcLevel(&DeviceQueue->Lock,
                                                 DeviceLock);
    }
}
1167
1168 FORCEINLINE
1169 VOID
1170 KiReleaseDeviceQueueLock(IN PKLOCK_QUEUE_HANDLE DeviceLock)
1171 {
1172 /* Check if we were called from a threaded DPC */
1173 if (KeGetCurrentPrcb()->DpcThreadActive)
1174 {
1175 /* Unlock the Queue, we're not at DPC level */
1176 KeReleaseInStackQueuedSpinLock(DeviceLock);
1177 }
1178 else
1179 {
1180 /* We must be at DPC level, release the lock safely */
1181 ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
1182 KeReleaseInStackQueuedSpinLockFromDpcLevel(DeviceLock);
1183 }
1184 }
1185
//
// This routine queues a thread that is ready on the PRCB's ready lists.
// If this thread cannot currently run on this CPU, then the thread is
// added to the deferred ready list instead.
//
// This routine must be entered with the PRCB lock held and it will exit
// with the PRCB lock released!
//
FORCEINLINE
VOID
KxQueueReadyThread(IN PKTHREAD Thread,
                   IN PKPRCB Prcb)
{
    BOOLEAN Preempted;
    KPRIORITY Priority;

    /* Sanity checks */
    ASSERT(Prcb == KeGetCurrentPrcb());
    ASSERT(Thread->State == Running);
    ASSERT(Thread->NextProcessor == Prcb->Number);

    /* Check if this thread is allowed to run in this CPU */
#ifdef _CONFIG_SMP
    if ((Thread->Affinity) & (Prcb->SetMember))
#else
    if (TRUE)
#endif
    {
        /* Set thread ready for execution */
        Thread->State = Ready;

        /* Save current priority and if someone had pre-empted it */
        Priority = Thread->Priority;
        Preempted = Thread->Preempted;

        /* We're not pre-empting now, and set the wait time */
        Thread->Preempted = FALSE;
        Thread->WaitTime = KeTickCount.LowPart;

        /* Sanity check */
        ASSERT((Priority >= 0) && (Priority <= HIGH_PRIORITY));

        /* Insert this thread in the appropriate order: pre-empted threads
           go to the head of their priority list, others to the tail */
        Preempted ? InsertHeadList(&Prcb->DispatcherReadyListHead[Priority],
                                   &Thread->WaitListEntry) :
                    InsertTailList(&Prcb->DispatcherReadyListHead[Priority],
                                   &Thread->WaitListEntry);

        /* Update the ready summary */
        Prcb->ReadySummary |= PRIORITY_MASK(Priority);

        /* Sanity check */
        ASSERT(Priority == Thread->Priority);

        /* Release the PRCB lock */
        KiReleasePrcbLock(Prcb);
    }
    else
    {
        /* Otherwise, prepare this thread to be deferred */
        Thread->State = DeferredReady;
        Thread->DeferredProcessor = Prcb->Number;

        /* Release the lock and defer scheduling */
        KiReleasePrcbLock(Prcb);
        KiDeferredReadyThread(Thread);
    }
}
1254
1255 //
1256 // This routine scans for an appropriate ready thread to select at the
1257 // given priority and for the given CPU.
1258 //
1259 FORCEINLINE
1260 PKTHREAD
1261 KiSelectReadyThread(IN KPRIORITY Priority,
1262 IN PKPRCB Prcb)
1263 {
1264 LONG PriorityMask, PrioritySet, HighPriority;
1265 PLIST_ENTRY ListEntry;
1266 PKTHREAD Thread;
1267
1268 /* Save the current mask and get the priority set for the CPU */
1269 PriorityMask = Priority;
1270 PrioritySet = Prcb->ReadySummary >> (UCHAR)Priority;
1271 if (!PrioritySet) return NULL;
1272
1273 /* Get the highest priority possible */
1274 BitScanReverse((PULONG)&HighPriority, PrioritySet);
1275 ASSERT((PrioritySet & PRIORITY_MASK(HighPriority)) != 0);
1276 HighPriority += PriorityMask;
1277
1278 /* Make sure the list isn't at highest priority */
1279 ASSERT(IsListEmpty(&Prcb->DispatcherReadyListHead[HighPriority]) == FALSE);
1280
1281 /* Get the first thread on the list */
1282 ListEntry = &Prcb->DispatcherReadyListHead[HighPriority];
1283 Thread = CONTAINING_RECORD(ListEntry, KTHREAD, WaitListEntry);
1284
1285 /* Make sure this thread is here for a reason */
1286 ASSERT(HighPriority == Thread->Priority);
1287 ASSERT(Thread->Affinity & AFFINITY_MASK(Prcb->Number));
1288 ASSERT(Thread->NextProcessor == Prcb->Number);
1289
1290 /* Remove it from the list */
1291 RemoveEntryList(&Thread->WaitListEntry);
1292 if (IsListEmpty(&Thread->WaitListEntry))
1293 {
1294 /* The list is empty now, reset the ready summary */
1295 Prcb->ReadySummary ^= PRIORITY_MASK(HighPriority);
1296 }
1297
1298 /* Sanity check and return the thread */
1299 ASSERT((Thread == NULL) ||
1300 (Thread->BasePriority == 0) ||
1301 (Thread->Priority != 0));
1302 return Thread;
1303 }
1304
//
// This routine computes the new priority for a thread. It is only valid for
// threads with priorities in the dynamic priority range.
//
// Dynamic-range threads decay by PriorityDecrement + 1, clamped at the
// thread's base priority, and the decrement is consumed (reset to 0).
// Realtime-range priorities are returned unchanged.
//
SCHAR
FORCEINLINE
KiComputeNewPriority(IN PKTHREAD Thread)
{
    SCHAR Priority;

    /* Priority sanity checks */
    ASSERT((Thread->PriorityDecrement >= 0) &&
           (Thread->PriorityDecrement <= Thread->Priority));
    ASSERT((Thread->Priority < LOW_REALTIME_PRIORITY) ?
           TRUE : (Thread->PriorityDecrement == 0));

    /* Get the current priority */
    Priority = Thread->Priority;
    if (Priority < LOW_REALTIME_PRIORITY)
    {
        /* Decrease priority by the priority decrement */
        Priority -= (Thread->PriorityDecrement + 1);

        /* Don't go out of bounds */
        if (Priority < Thread->BasePriority) Priority = Thread->BasePriority;

        /* Reset the priority decrement */
        Thread->PriorityDecrement = 0;
    }

    /* Sanity check */
    ASSERT((Thread->BasePriority == 0) || (Priority != 0));

    /* Return the new priority */
    return Priority;
}
1341
//
// Returns the thread currently running on this processor, read straight
// from the per-processor control region's PRCB data.
//
PRKTHREAD
FORCEINLINE
KeGetCurrentThread(VOID)
{
    /* Return the current thread */
    return ((PKIPCR)KeGetPcr())->PrcbData.CurrentThread;
}
1349
//
// Returns the previous processor mode (kernel or user) recorded on the
// current thread, i.e. the mode the current trap/system call came from.
//
UCHAR
FORCEINLINE
KeGetPreviousMode(VOID)
{
    /* Return the current thread's previous mode */
    return KeGetCurrentThread()->PreviousMode;
}