[HAL]
[reactos.git] / reactos / ntoskrnl / include / internal / ke_x.h
1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: GPL - See COPYING in the top level directory
 * FILE:            ntoskrnl/include/internal/ke_x.h
5 * PURPOSE: Internal Inlined Functions for the Kernel
6 * PROGRAMMERS: Alex Ionescu (alex.ionescu@reactos.org)
7 */
8
9 #ifndef _M_ARM
10 FORCEINLINE
11 UCHAR
12 KeGetPreviousMode(VOID)
13 {
14 /* Return the current mode */
15 return KeGetCurrentThread()->PreviousMode;
16 }
17 #endif
18
//
// Enters a Guarded Region
//
// Disables delivery of both special and normal kernel APCs to the current
// thread by decrementing SpecialApcDisable. Must be called at or below
// APC_LEVEL and balanced by KeLeaveGuardedRegion. The -32768 assertion
// guards against underflow of the (16-bit) disable counter.
//
#define KeEnterGuardedRegion()                                              \
{                                                                           \
    PKTHREAD _Thread = KeGetCurrentThread();                                \
                                                                            \
    /* Sanity checks */                                                     \
    ASSERT(KeGetCurrentIrql() <= APC_LEVEL);                                \
    ASSERT(_Thread == KeGetCurrentThread());                                \
    ASSERT((_Thread->SpecialApcDisable <= 0) &&                             \
           (_Thread->SpecialApcDisable != -32768));                         \
                                                                            \
    /* Disable Special APCs */                                              \
    _Thread->SpecialApcDisable--;                                           \
}
35
//
// Leaves a Guarded Region
//
// Re-increments SpecialApcDisable; when the count returns to zero and
// kernel-mode APCs are queued, triggers their delivery via
// KiCheckForKernelApcDelivery. Must balance a KeEnterGuardedRegion call.
//
#define KeLeaveGuardedRegion()                                              \
{                                                                           \
    PKTHREAD _Thread = KeGetCurrentThread();                                \
                                                                            \
    /* Sanity checks */                                                     \
    ASSERT(KeGetCurrentIrql() <= APC_LEVEL);                                \
    ASSERT(_Thread == KeGetCurrentThread());                                \
    ASSERT(_Thread->SpecialApcDisable < 0);                                 \
                                                                            \
    /* Leave region and check if APCs are OK now */                         \
    if (!(++_Thread->SpecialApcDisable))                                    \
    {                                                                       \
        /* Check for Kernel APCs on the list */                             \
        if (!IsListEmpty(&_Thread->ApcState.                                \
                         ApcListHead[KernelMode]))                          \
        {                                                                   \
            /* Check for APC Delivery */                                    \
            KiCheckForKernelApcDelivery();                                  \
        }                                                                   \
    }                                                                       \
}
60
//
// Enters a Critical Region
//
// Disables delivery of normal kernel APCs (special kernel APCs stay
// enabled) by decrementing KernelApcDisable. Must be balanced by
// KeLeaveCriticalRegion. The -32768 assertion guards against underflow
// of the (16-bit) disable counter.
//
#define KeEnterCriticalRegion()                                             \
{                                                                           \
    PKTHREAD _Thread = KeGetCurrentThread();                                \
                                                                            \
    /* Sanity checks */                                                     \
    ASSERT(_Thread == KeGetCurrentThread());                                \
    ASSERT((_Thread->KernelApcDisable <= 0) &&                              \
           (_Thread->KernelApcDisable != -32768));                          \
                                                                            \
    /* Disable Kernel APCs */                                               \
    _Thread->KernelApcDisable--;                                            \
}
76
//
// Leaves a Critical Region
//
// Re-increments KernelApcDisable; when it reaches zero, pending kernel
// APCs are delivered -- unless special APCs are also disabled (a guarded
// region is still active), in which case delivery waits for that region
// to end. Must balance a KeEnterCriticalRegion call.
//
#define KeLeaveCriticalRegion()                                             \
{                                                                           \
    PKTHREAD _Thread = KeGetCurrentThread();                                \
                                                                            \
    /* Sanity checks */                                                     \
    ASSERT(_Thread == KeGetCurrentThread());                                \
    ASSERT(_Thread->KernelApcDisable < 0);                                  \
                                                                            \
    /* Enable Kernel APCs */                                                \
    _Thread->KernelApcDisable++;                                            \
                                                                            \
    /* Check if Kernel APCs are now enabled */                              \
    if (!(_Thread->KernelApcDisable))                                       \
    {                                                                       \
        /* Check if we need to request an APC Delivery */                   \
        if (!(IsListEmpty(&_Thread->ApcState.ApcListHead[KernelMode])) &&   \
            !(_Thread->SpecialApcDisable))                                  \
        {                                                                   \
            /* Check for the right environment */                           \
            KiCheckForKernelApcDelivery();                                  \
        }                                                                   \
    }                                                                       \
}
103
104 #ifndef CONFIG_SMP
105 //
106 // Spinlock Acquire at IRQL >= DISPATCH_LEVEL
107 //
108 FORCEINLINE
109 VOID
110 KxAcquireSpinLock(IN PKSPIN_LOCK SpinLock)
111 {
112 /* On UP builds, spinlocks don't exist at IRQL >= DISPATCH */
113 UNREFERENCED_PARAMETER(SpinLock);
114 }
115
116 //
117 // Spinlock Release at IRQL >= DISPATCH_LEVEL
118 //
119 FORCEINLINE
120 VOID
121 KxReleaseSpinLock(IN PKSPIN_LOCK SpinLock)
122 {
123 /* On UP builds, spinlocks don't exist at IRQL >= DISPATCH */
124 UNREFERENCED_PARAMETER(SpinLock);
125 }
126
127 //
128 // This routine protects against multiple CPU acquires, it's meaningless on UP.
129 //
130 FORCEINLINE
131 VOID
132 KiAcquireDispatcherObject(IN DISPATCHER_HEADER* Object)
133 {
134 UNREFERENCED_PARAMETER(Object);
135 }
136
137 //
138 // This routine protects against multiple CPU acquires, it's meaningless on UP.
139 //
140 FORCEINLINE
141 VOID
142 KiReleaseDispatcherObject(IN DISPATCHER_HEADER* Object)
143 {
144 UNREFERENCED_PARAMETER(Object);
145 }
146
147 FORCEINLINE
148 KIRQL
149 KiAcquireDispatcherLock(VOID)
150 {
151 /* Raise to DPC level */
152 return KeRaiseIrqlToDpcLevel();
153 }
154
FORCEINLINE
VOID
KiReleaseDispatcherLock(IN KIRQL OldIrql)
{
    /* No dispatcher lock exists on UP; exiting the dispatcher lowers IRQL
       back to OldIrql and handles pending rescheduling work */
    KiExitDispatcher(OldIrql);
}
162
163 FORCEINLINE
164 VOID
165 KiAcquireDispatcherLockAtDpcLevel(VOID)
166 {
167 /* This is a no-op at DPC Level for UP systems */
168 return;
169 }
170
171 FORCEINLINE
172 VOID
173 KiReleaseDispatcherLockFromDpcLevel(VOID)
174 {
175 /* This is a no-op at DPC Level for UP systems */
176 return;
177 }
178
179 //
180 // This routine makes the thread deferred ready on the boot CPU.
181 //
182 FORCEINLINE
183 VOID
184 KiInsertDeferredReadyList(IN PKTHREAD Thread)
185 {
186 /* Set the thread to deferred state and boot CPU */
187 Thread->State = DeferredReady;
188 Thread->DeferredProcessor = 0;
189
190 /* Make the thread ready immediately */
191 KiDeferredReadyThread(Thread);
192 }
193
194 FORCEINLINE
195 VOID
196 KiRescheduleThread(IN BOOLEAN NewThread,
197 IN ULONG Cpu)
198 {
199 /* This is meaningless on UP systems */
200 UNREFERENCED_PARAMETER(NewThread);
201 UNREFERENCED_PARAMETER(Cpu);
202 }
203
204 //
205 // This routine protects against multiple CPU acquires, it's meaningless on UP.
206 //
207 FORCEINLINE
208 VOID
209 KiSetThreadSwapBusy(IN PKTHREAD Thread)
210 {
211 UNREFERENCED_PARAMETER(Thread);
212 }
213
214 //
215 // This routine protects against multiple CPU acquires, it's meaningless on UP.
216 //
217 FORCEINLINE
218 VOID
219 KiAcquirePrcbLock(IN PKPRCB Prcb)
220 {
221 UNREFERENCED_PARAMETER(Prcb);
222 }
223
224 //
225 // This routine protects against multiple CPU acquires, it's meaningless on UP.
226 //
227 FORCEINLINE
228 VOID
229 KiReleasePrcbLock(IN PKPRCB Prcb)
230 {
231 UNREFERENCED_PARAMETER(Prcb);
232 }
233
234 //
235 // This routine protects against multiple CPU acquires, it's meaningless on UP.
236 //
237 FORCEINLINE
238 VOID
239 KiAcquireThreadLock(IN PKTHREAD Thread)
240 {
241 UNREFERENCED_PARAMETER(Thread);
242 }
243
244 //
245 // This routine protects against multiple CPU acquires, it's meaningless on UP.
246 //
247 FORCEINLINE
248 VOID
249 KiReleaseThreadLock(IN PKTHREAD Thread)
250 {
251 UNREFERENCED_PARAMETER(Thread);
252 }
253
254 //
255 // This routine protects against multiple CPU acquires, it's meaningless on UP.
256 //
257 FORCEINLINE
258 BOOLEAN
259 KiTryThreadLock(IN PKTHREAD Thread)
260 {
261 UNREFERENCED_PARAMETER(Thread);
262 return FALSE;
263 }
264
265 FORCEINLINE
266 VOID
267 KiCheckDeferredReadyList(IN PKPRCB Prcb)
268 {
269 /* There are no deferred ready lists on UP systems */
270 UNREFERENCED_PARAMETER(Prcb);
271 }
272
273 FORCEINLINE
274 VOID
275 KiRequestApcInterrupt(IN BOOLEAN NeedApc,
276 IN UCHAR Processor)
277 {
278 /* We deliver instantly on UP */
279 UNREFERENCED_PARAMETER(NeedApc);
280 UNREFERENCED_PARAMETER(Processor);
281 }
282
283 FORCEINLINE
284 PKSPIN_LOCK_QUEUE
285 KiAcquireTimerLock(IN ULONG Hand)
286 {
287 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
288
289 /* Nothing to do on UP */
290 UNREFERENCED_PARAMETER(Hand);
291 return NULL;
292 }
293
294 FORCEINLINE
295 VOID
296 KiReleaseTimerLock(IN PKSPIN_LOCK_QUEUE LockQueue)
297 {
298 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
299
300 /* Nothing to do on UP */
301 UNREFERENCED_PARAMETER(LockQueue);
302 }
303
#else

//
// Spinlock Acquisition at IRQL >= DISPATCH_LEVEL
//
// On checked (DBG) builds the owning KTHREAD pointer with the low bit set
// is stored in the lock, so double-acquire and wrong-owner release can be
// detected; on free builds the lock value is just 0 (free) or 1 (held).
//
FORCEINLINE
VOID
KxAcquireSpinLock(IN PKSPIN_LOCK SpinLock)
{
    /* Make sure that we don't own the lock already */
    if (((KSPIN_LOCK)KeGetCurrentThread() | 1) == *SpinLock)
    {
        /* We do, bugcheck! */
        KeBugCheckEx(SPIN_LOCK_ALREADY_OWNED, (ULONG_PTR)SpinLock, 0, 0, 0);
    }

    /* Start acquire loop */
    for (;;)
    {
        /* Try to atomically set the lock bit (bit 0) */
        if (InterlockedBitTestAndSet((PLONG)SpinLock, 0))
        {
            /* Bit was already set... spin-read until it's unlocked */
            while (*(volatile KSPIN_LOCK *)SpinLock == 1)
            {
#if DBG
                /* On debug builds, we use a much slower but useful routine */
                //Kii386SpinOnSpinLock(SpinLock, 5);

                /* FIXME: Do normal yield for now */
                YieldProcessor();
#else
                /* Otherwise, just yield and keep looping */
                YieldProcessor();
#endif
            }
        }
        else
        {
#if DBG
            /* On debug builds, we OR in the KTHREAD to record ownership */
            *SpinLock = (KSPIN_LOCK)KeGetCurrentThread() | 1;
#endif
            /* All is well, break out */
            break;
        }
    }
}
352
//
// Spinlock Release at IRQL >= DISPATCH_LEVEL
//
FORCEINLINE
VOID
KxReleaseSpinLock(IN PKSPIN_LOCK SpinLock)
{
#if DBG
    /* Make sure the releasing thread is the recorded owner */
    if (((KSPIN_LOCK)KeGetCurrentThread() | 1) != *SpinLock)
    {
        /* It isn't, bugcheck */
        KeBugCheckEx(SPIN_LOCK_NOT_OWNED, (ULONG_PTR)SpinLock, 0, 0, 0);
    }
#endif
    /* Clear the lock atomically (also clears the DBG owner pointer) */
    InterlockedAnd((PLONG)SpinLock, 0);
}
371
//
// Acquires the per-object lock bit in a dispatcher object's header via a
// spin + compare-exchange loop, serializing cross-CPU access to the object.
//
FORCEINLINE
VOID
KiAcquireDispatcherObject(IN DISPATCHER_HEADER* Object)
{
    LONG OldValue;

    /* Make sure we're at a safe level to touch the lock */
    ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);

    /* Start acquire loop */
    do
    {
        /* Loop until the other CPU releases it */
        while (TRUE)
        {
            /* Check if it got released */
            OldValue = Object->Lock;
            if ((OldValue & KOBJECT_LOCK_BIT) == 0) break;

            /* Let the CPU know that this is a loop */
            YieldProcessor();
        }

        /* Try acquiring the lock now; retry if another CPU beat us to it */
    } while (InterlockedCompareExchange(&Object->Lock,
                                        OldValue | KOBJECT_LOCK_BIT,
                                        OldValue) != OldValue);
}
400
//
// Releases the per-object lock bit taken by KiAcquireDispatcherObject.
//
FORCEINLINE
VOID
KiReleaseDispatcherObject(IN DISPATCHER_HEADER* Object)
{
    /* Make sure we're at a safe level to touch the lock */
    ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);

    /* Atomically clear the lock bit, preserving the other header bits */
    InterlockedAnd(&Object->Lock, ~KOBJECT_LOCK_BIT);
}
411
FORCEINLINE
KIRQL
KiAcquireDispatcherLock(VOID)
{
    /* Raise to synchronization level and acquire the queued dispatcher
       lock; returns the previous IRQL for KiReleaseDispatcherLock */
    return KeAcquireQueuedSpinLockRaiseToSynch(LockQueueDispatcherLock);
}
419
FORCEINLINE
VOID
KiReleaseDispatcherLock(IN KIRQL OldIrql)
{
    /* First release the queued dispatcher lock, staying at DPC level */
    KeReleaseQueuedSpinLockFromDpcLevel(&KeGetCurrentPrcb()->
                                        LockQueue[LockQueueDispatcherLock]);

    /* Then exit the dispatcher, which lowers IRQL back to OldIrql */
    KiExitDispatcher(OldIrql);
}
431
FORCEINLINE
VOID
KiAcquireDispatcherLockAtDpcLevel(VOID)
{
    /* Acquire the dispatcher lock; caller is already at DISPATCH_LEVEL */
    KeAcquireQueuedSpinLockAtDpcLevel(&KeGetCurrentPrcb()->
                                      LockQueue[LockQueueDispatcherLock]);
}
440
FORCEINLINE
VOID
KiReleaseDispatcherLockFromDpcLevel(VOID)
{
    /* Release the dispatcher lock without changing IRQL */
    KeReleaseQueuedSpinLockFromDpcLevel(&KeGetCurrentPrcb()->
                                        LockQueue[LockQueueDispatcherLock]);
}
449
//
// This routine inserts a thread into the deferred ready list of the current
// CPU, to be processed later by KiCheckDeferredReadyList.
//
FORCEINLINE
VOID
KiInsertDeferredReadyList(IN PKTHREAD Thread)
{
    PKPRCB Prcb = KeGetCurrentPrcb();

    /* Set the thread to deferred state and record the current CPU */
    Thread->State = DeferredReady;
    Thread->DeferredProcessor = Prcb->Number;

    /* Push it on the PRCB's singly-linked deferred ready list */
    PushEntryList(&Prcb->DeferredReadyListHead, &Thread->SwapListEntry);
}
466
467 FORCEINLINE
468 VOID
469 KiRescheduleThread(IN BOOLEAN NewThread,
470 IN ULONG Cpu)
471 {
472 /* Check if a new thread needs to be scheduled on a different CPU */
473 if ((NewThread) && !(KeGetPcr()->Number == Cpu))
474 {
475 /* Send an IPI to request delivery */
476 KiIpiSend(AFFINITY_MASK(Cpu), IPI_DPC);
477 }
478 }
479
//
// This routine sets the current thread in a swap busy state, which ensure that
// nobody else tries to swap it concurrently.
//
FORCEINLINE
VOID
KiSetThreadSwapBusy(IN PKTHREAD Thread)
{
    /* Make sure nobody already set it */
    ASSERT(Thread->SwapBusy == FALSE);

    /* Set it ourselves; cleared again once the context swap completes */
    Thread->SwapBusy = TRUE;
}
494
//
// This routine acquires the PRCB lock so that only one caller can touch
// volatile PRCB data.
//
// Since this is a simple optimized spin-lock, it must only be acquired
// at dispatcher level or higher!
//
FORCEINLINE
VOID
KiAcquirePrcbLock(IN PKPRCB Prcb)
{
    /* Make sure we're at a safe level to touch the PRCB lock */
    ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);

    /* Start acquire loop */
    for (;;)
    {
        /* Atomically set the lock; a zero return means we got it first */
        if (!InterlockedExchange((PLONG)&Prcb->PrcbLock, 1)) break;

        /* Spin with plain reads until the other CPU releases it, then
           retry the atomic exchange */
        do
        {
            /* Let the CPU know that this is a loop */
            YieldProcessor();
        } while (Prcb->PrcbLock);
    }
}
523
//
// This routine releases the PRCB lock so that other callers can touch
// volatile PRCB data.
//
// Since this is a simple optimized spin-lock, it must only be acquired
// at dispatcher level or higher!
//
FORCEINLINE
VOID
KiReleasePrcbLock(IN PKPRCB Prcb)
{
    /* Make sure we are above dispatch and the lock is acquired! */
    ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
    ASSERT(Prcb->PrcbLock != 0);

    /* Release it atomically */
    InterlockedAnd((PLONG)&Prcb->PrcbLock, 0);
}
542
//
// This routine acquires the thread lock so that only one caller can touch
// volatile thread data.
//
// Since this is a simple optimized spin-lock, it must only be acquired
// at dispatcher level or higher!
//
FORCEINLINE
VOID
KiAcquireThreadLock(IN PKTHREAD Thread)
{
    /* Make sure we're at a safe level to touch the thread lock */
    ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);

    /* Start acquire loop */
    for (;;)
    {
        /* Atomically set the lock; a zero return means we got it first */
        if (!InterlockedExchange((PLONG)&Thread->ThreadLock, 1)) break;

        /* Spin with plain reads until the other CPU releases it, then
           retry the atomic exchange */
        do
        {
            /* Let the CPU know that this is a loop */
            YieldProcessor();
        } while (Thread->ThreadLock);
    }
}
571
//
// This routine releases the thread lock so that other callers can touch
// volatile thread data.
//
// Since this is a simple optimized spin-lock, it must only be acquired
// at dispatcher level or higher!
//
FORCEINLINE
VOID
KiReleaseThreadLock(IN PKTHREAD Thread)
{
    /* Make sure we are still above dispatch */
    ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);

    /* Release it atomically */
    InterlockedAnd((PLONG)&Thread->ThreadLock, 0);
}
589
FORCEINLINE
BOOLEAN
KiTryThreadLock(IN PKTHREAD Thread)
{
    LONG Value;

    /* If the lock isn't held, report FALSE without touching it.
       NOTE(review): this "try" appears to return the *busy* state rather
       than acquisition success (FALSE == lock was free, matching the UP
       stub that always returns FALSE) -- verify against callers. */
    if (!Thread->ThreadLock) return FALSE;

    /* Otherwise, atomically set the lock and capture its previous value */
    Value = 1;
    Value = InterlockedExchange((PLONG)&Thread->ThreadLock, Value);

    /* TRUE if the lock was still held (1) at the moment of the exchange */
    return (Value == TRUE);
}
606
607 FORCEINLINE
608 VOID
609 KiCheckDeferredReadyList(IN PKPRCB Prcb)
610 {
611 /* Scan the deferred ready lists if required */
612 if (Prcb->DeferredReadyListHead.Next) KiProcessDeferredReadyList(Prcb);
613 }
614
FORCEINLINE
VOID
KiRequestApcInterrupt(IN BOOLEAN NeedApc,
                      IN UCHAR Processor)
{
    /* Check if we need to request APC delivery at all */
    if (NeedApc)
    {
        /* Check if the target thread is on another CPU */
        if (KeGetPcr()->Number != Processor)
        {
            /* Send an IPI so that CPU delivers the APC */
            KiIpiSend(AFFINITY_MASK(Processor), IPI_APC);
        }
        else
        {
            /* Local CPU: request an APC_LEVEL software interrupt */
            HalRequestSoftwareInterrupt(APC_LEVEL);
        }
    }
}
636
//
// Acquires the queued spinlock guarding the timer table bucket that the
// given hand falls into, and returns it for KiReleaseTimerLock.
//
FORCEINLINE
PKSPIN_LOCK_QUEUE
KiAcquireTimerLock(IN ULONG Hand)
{
    PKSPIN_LOCK_QUEUE LockQueue;
    ULONG LockIndex;
    ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);

    /* Map the hand to one of the LOCK_QUEUE_TIMER_TABLE_LOCKS locks */
    LockIndex = Hand >> LOCK_QUEUE_TIMER_LOCK_SHIFT;
    LockIndex &= (LOCK_QUEUE_TIMER_TABLE_LOCKS - 1);

    /* Now get the lock from this CPU's lock queue array */
    LockQueue = &KeGetCurrentPrcb()->LockQueue[LockQueueTimerTableLock + LockIndex];

    /* Acquire it and return it for the matching release */
    KeAcquireQueuedSpinLockAtDpcLevel(LockQueue);
    return LockQueue;
}
656
FORCEINLINE
VOID
KiReleaseTimerLock(IN PKSPIN_LOCK_QUEUE LockQueue)
{
    ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);

    /* Release the timer bucket lock obtained from KiAcquireTimerLock */
    KeReleaseQueuedSpinLockFromDpcLevel(LockQueue);
}
666
#endif

FORCEINLINE
VOID
KiAcquireApcLock(IN PKTHREAD Thread,
                 IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Acquire the thread's APC queue lock, raising to synch level;
       Handle receives the in-stack queue entry for the release */
    KeAcquireInStackQueuedSpinLockRaiseToSynch(&Thread->ApcQueueLock, Handle);
}
677
FORCEINLINE
VOID
KiAcquireApcLockAtDpcLevel(IN PKTHREAD Thread,
                           IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Acquire the APC queue lock; caller is already at DISPATCH_LEVEL */
    KeAcquireInStackQueuedSpinLockAtDpcLevel(&Thread->ApcQueueLock, Handle);
}
686
FORCEINLINE
VOID
KiAcquireApcLockAtApcLevel(IN PKTHREAD Thread,
                           IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Acquire the APC queue lock. NOTE(review): despite the AtApcLevel
       name, this uses the IRQL-raising acquire rather than the AtDpcLevel
       variant -- confirm this is intended for APC_LEVEL callers. */
    KeAcquireInStackQueuedSpinLock(&Thread->ApcQueueLock, Handle);
}
695
FORCEINLINE
VOID
KiReleaseApcLock(IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Release the APC queue lock and restore the IRQL saved in Handle */
    KeReleaseInStackQueuedSpinLock(Handle);
}
703
FORCEINLINE
VOID
KiReleaseApcLockFromDpcLevel(IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Release the APC queue lock without lowering IRQL */
    KeReleaseInStackQueuedSpinLockFromDpcLevel(Handle);
}
711
FORCEINLINE
VOID
KiAcquireProcessLock(IN PKPROCESS Process,
                     IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Acquire the process lock, raising to synch level; Handle receives
       the in-stack queue entry for the release */
    KeAcquireInStackQueuedSpinLockRaiseToSynch(&Process->ProcessLock, Handle);
}
720
FORCEINLINE
VOID
KiReleaseProcessLock(IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Release the process lock and restore the IRQL saved in Handle */
    KeReleaseInStackQueuedSpinLock(Handle);
}
728
FORCEINLINE
VOID
KiReleaseProcessLockFromDpcLevel(IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Release the process lock without lowering IRQL */
    KeReleaseInStackQueuedSpinLockFromDpcLevel(Handle);
}
736
//
// Acquires a device queue's lock, choosing the acquire variant based on
// whether we run in a threaded DPC (PASSIVE/APC context) or a real DPC.
//
FORCEINLINE
VOID
KiAcquireDeviceQueueLock(IN PKDEVICE_QUEUE DeviceQueue,
                         IN PKLOCK_QUEUE_HANDLE DeviceLock)
{
    /* Check if we were called from a threaded DPC */
    if (KeGetCurrentPrcb()->DpcThreadActive)
    {
        /* Threaded DPCs run below DISPATCH_LEVEL: use the raising acquire */
        KeAcquireInStackQueuedSpinLock(&DeviceQueue->Lock, DeviceLock);
    }
    else
    {
        /* We must be at DPC level, acquire the lock without raising */
        ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
        KeAcquireInStackQueuedSpinLockAtDpcLevel(&DeviceQueue->Lock,
                                                 DeviceLock);
    }
}
756
//
// Releases a device queue lock taken by KiAcquireDeviceQueueLock, using the
// matching release variant for threaded-DPC vs real-DPC context.
//
FORCEINLINE
VOID
KiReleaseDeviceQueueLock(IN PKLOCK_QUEUE_HANDLE DeviceLock)
{
    /* Check if we were called from a threaded DPC */
    if (KeGetCurrentPrcb()->DpcThreadActive)
    {
        /* Threaded DPC: release and restore the saved IRQL */
        KeReleaseInStackQueuedSpinLock(DeviceLock);
    }
    else
    {
        /* We must be at DPC level, release without lowering IRQL */
        ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
        KeReleaseInStackQueuedSpinLockFromDpcLevel(DeviceLock);
    }
}
774
//
// Satisfies the wait of any dispatcher object
//
// Mutants transfer ownership to the waiter (handling APC disable and the
// abandoned state); synchronization timers/events are auto-reset to
// non-signaled; semaphores have their count decremented. Notification
// objects fall through unchanged. Caller holds the dispatcher lock.
//
#define KiSatisfyObjectWait(Object, Thread)                                 \
{                                                                           \
    /* Special case for Mutants */                                          \
    if ((Object)->Header.Type == MutantObject)                              \
    {                                                                       \
        /* Decrease the Signal State */                                     \
        (Object)->Header.SignalState--;                                     \
                                                                            \
        /* Check if it's now non-signaled */                                \
        if (!(Object)->Header.SignalState)                                  \
        {                                                                   \
            /* Set the Owner Thread */                                      \
            (Object)->OwnerThread = Thread;                                 \
                                                                            \
            /* Disable APCs if needed */                                    \
            Thread->KernelApcDisable = Thread->KernelApcDisable -           \
                                       (Object)->ApcDisable;                \
                                                                            \
            /* Check if it's abandoned */                                   \
            if ((Object)->Abandoned)                                        \
            {                                                               \
                /* Unabandon it */                                          \
                (Object)->Abandoned = FALSE;                                \
                                                                            \
                /* Return Status */                                         \
                Thread->WaitStatus = STATUS_ABANDONED;                      \
            }                                                               \
                                                                            \
            /* Insert it into the Mutant List */                           \
            InsertHeadList(Thread->MutantListHead.Blink,                    \
                           &(Object)->MutantListEntry);                     \
        }                                                                   \
    }                                                                       \
    else if (((Object)->Header.Type & TIMER_OR_EVENT_TYPE) ==               \
             EventSynchronizationObject)                                    \
    {                                                                       \
        /* Synchronization Timers and Events just get un-signaled */        \
        (Object)->Header.SignalState = 0;                                   \
    }                                                                       \
    else if ((Object)->Header.Type == SemaphoreObject)                      \
    {                                                                       \
        /* These ones can have multiple states, so we only decrease it */   \
        (Object)->Header.SignalState--;                                     \
    }                                                                       \
}
823
//
// Satisfies the wait of a mutant dispatcher object
//
// Specialized copy of the mutant branch of KiSatisfyObjectWait for callers
// that already know the object is a mutant. Caller holds the dispatcher lock.
//
#define KiSatisfyMutantWait(Object, Thread)                                 \
{                                                                           \
    /* Decrease the Signal State */                                         \
    (Object)->Header.SignalState--;                                         \
                                                                            \
    /* Check if it's now non-signaled */                                    \
    if (!(Object)->Header.SignalState)                                      \
    {                                                                       \
        /* Set the Owner Thread */                                          \
        (Object)->OwnerThread = Thread;                                     \
                                                                            \
        /* Disable APCs if needed */                                        \
        Thread->KernelApcDisable = Thread->KernelApcDisable -               \
                                   (Object)->ApcDisable;                    \
                                                                            \
        /* Check if it's abandoned */                                       \
        if ((Object)->Abandoned)                                            \
        {                                                                   \
            /* Unabandon it */                                              \
            (Object)->Abandoned = FALSE;                                    \
                                                                            \
            /* Return Status */                                             \
            Thread->WaitStatus = STATUS_ABANDONED;                          \
        }                                                                   \
                                                                            \
        /* Insert it into the Mutant List */                                \
        InsertHeadList(Thread->MutantListHead.Blink,                        \
                       &(Object)->MutantListEntry);                         \
    }                                                                       \
}
857
//
// Satisfies the wait of any nonmutant dispatcher object
//
// Synchronization timers/events auto-reset to non-signaled; semaphore
// counts are decremented; notification objects are left signaled.
//
#define KiSatisfyNonMutantWait(Object)                                      \
{                                                                           \
    if (((Object)->Header.Type & TIMER_OR_EVENT_TYPE) ==                    \
        EventSynchronizationObject)                                         \
    {                                                                       \
        /* Synchronization Timers and Events just get un-signaled */        \
        (Object)->Header.SignalState = 0;                                   \
    }                                                                       \
    else if ((Object)->Header.Type == SemaphoreObject)                      \
    {                                                                       \
        /* These ones can have multiple states, so we only decrease it */   \
        (Object)->Header.SignalState--;                                     \
    }                                                                       \
}
875
//
// Recalculates the due time
//
// Absolute waits (non-negative original due time) are returned unchanged.
// For relative waits, recomputes the remaining time from the current
// interrupt time into NewDueTime and returns that instead.
//
FORCEINLINE
PLARGE_INTEGER
KiRecalculateDueTime(IN PLARGE_INTEGER OriginalDueTime,
                     IN PLARGE_INTEGER DueTime,
                     IN OUT PLARGE_INTEGER NewDueTime)
{
    /* Don't do anything for absolute waits */
    if (OriginalDueTime->QuadPart >= 0) return OriginalDueTime;

    /* Otherwise, query the interrupt time and recalculate the (still
       negative, i.e. relative) remaining due time */
    NewDueTime->QuadPart = KeQueryInterruptTime();
    NewDueTime->QuadPart -= DueTime->QuadPart;
    return NewDueTime;
}
893
894 //
895 // Determines whether a thread should be added to the wait list
896 //
897 FORCEINLINE
898 BOOLEAN
899 KiCheckThreadStackSwap(IN PKTHREAD Thread,
900 IN KPROCESSOR_MODE WaitMode)
901 {
902 /* Check the required conditions */
903 if ((WaitMode != KernelMode) &&
904 (Thread->EnableStackSwap) &&
905 (Thread->Priority >= (LOW_REALTIME_PRIORITY + 9)))
906 {
907 /* We are go for swap */
908 return TRUE;
909 }
910 else
911 {
912 /* Don't swap the thread */
913 return FALSE;
914 }
915 }
916
//
// Adds a thread to the wait list
//
// Only swappable waiters go on the PRCB's wait list (used to find threads
// whose kernel stacks may be outswapped while they wait).
//
#define KiAddThreadToWaitList(Thread, Swappable)                            \
{                                                                           \
    /* Make sure it's swappable */                                          \
    if (Swappable)                                                          \
    {                                                                       \
        /* Insert it into the PRCB's List */                                \
        InsertTailList(&KeGetCurrentPrcb()->WaitListHead,                   \
                       &Thread->WaitListEntry);                             \
    }                                                                       \
}
930
//
// Checks if a wait in progress should be interrupted by APCs or an alertable
// state.
//
// Returns STATUS_ALERTED, STATUS_USER_APC, or STATUS_WAIT_0 when the wait
// may proceed normally.
//
FORCEINLINE
NTSTATUS
KiCheckAlertability(IN PKTHREAD Thread,
                    IN BOOLEAN Alertable,
                    IN KPROCESSOR_MODE WaitMode)
{
    /* Check if the wait is alertable */
    if (Alertable)
    {
        /* It is, first check if the thread is alerted in this mode */
        if (Thread->Alerted[WaitMode])
        {
            /* It is, so bail out of the wait */
            Thread->Alerted[WaitMode] = FALSE;
            return STATUS_ALERTED;
        }
        else if ((WaitMode != KernelMode) &&
                 (!IsListEmpty(&Thread->ApcState.ApcListHead[UserMode])))
        {
            /* It isn't, but this is a user wait with queued user APCs */
            Thread->ApcState.UserApcPending = TRUE;
            return STATUS_USER_APC;
        }
        else if (Thread->Alerted[KernelMode])
        {
            /* It isn't that either, but we're alerted in kernel mode */
            Thread->Alerted[KernelMode] = FALSE;
            return STATUS_ALERTED;
        }
    }
    else if ((WaitMode != KernelMode) && (Thread->ApcState.UserApcPending))
    {
        /* Not alertable, but this is a user wait with pending user APCs */
        return STATUS_USER_APC;
    }

    /* Otherwise, we're fine */
    return STATUS_WAIT_0;
}
974
975 ULONG
976 FORCEINLINE
977 KiComputeTimerTableIndex(IN ULONGLONG DueTime)
978 {
979 return (DueTime / KeMaximumIncrement) & (TIMER_TABLE_SIZE - 1);
980 }
981
982 //
983 // Called from KiCompleteTimer, KiInsertTreeTimer, KeSetSystemTime
984 // to remove timer entries
985 // See Windows HPI blog for more information.
986 FORCEINLINE
987 VOID
988 KiRemoveEntryTimer(IN PKTIMER Timer)
989 {
990 ULONG Hand;
991 PKTIMER_TABLE_ENTRY TableEntry;
992
993 /* Remove the timer from the timer list and check if it's empty */
994 Hand = Timer->Header.Hand;
995 if (RemoveEntryList(&Timer->TimerListEntry))
996 {
997 /* Get the respective timer table entry */
998 TableEntry = &KiTimerTableListHead[Hand];
999 if (&TableEntry->Entry == TableEntry->Entry.Flink)
1000 {
1001 /* Set the entry to an infinite absolute time */
1002 TableEntry->Time.HighPart = 0xFFFFFFFF;
1003 }
1004 }
1005
1006 /* Clear the list entries on dbg builds so we can tell the timer is gone */
1007 #if DBG
1008 Timer->TimerListEntry.Flink = NULL;
1009 Timer->TimerListEntry.Blink = NULL;
1010 #endif
1011 }
1012
//
// Called by Wait and Queue code to insert a timer for dispatching.
// Also called by KeSetTimerEx to insert a timer from the caller.
//
// Caller holds the dispatcher lock at DPC level; this routine swaps it for
// the timer bucket lock and completes the timer immediately if the table
// insert reports it already expired.
//
FORCEINLINE
VOID
KxInsertTimer(IN PKTIMER Timer,
              IN ULONG Hand)
{
    PKSPIN_LOCK_QUEUE LockQueue;

    /* Acquire the bucket lock, then release the dispatcher lock */
    LockQueue = KiAcquireTimerLock(Hand);
    KiReleaseDispatcherLockFromDpcLevel();

    /* Try to insert the timer */
    if (KiInsertTimerTable(Timer, Hand))
    {
        /* Already expired: complete it (consumes the bucket lock) */
        KiCompleteTimer(Timer, LockQueue);
    }
    else
    {
        /* Inserted fine, just release the bucket lock */
        KiReleaseTimerLock(LockQueue);
    }
}
1040
//
// Called by KeSetTimerEx and KiInsertTreeTimer to calculate Due Time
// See the Windows HPI Blog for more information
//
// DueTime >= 0 is an absolute system time; DueTime < 0 is relative.
// Returns FALSE (with the timer signaled) when an absolute time has
// already passed; otherwise fills in the timer's interrupt-time due time,
// the table hand and the Inserted flag, and returns TRUE.
//
FORCEINLINE
BOOLEAN
KiComputeDueTime(IN PKTIMER Timer,
                 IN LARGE_INTEGER DueTime,
                 OUT PULONG Hand)
{
    LARGE_INTEGER InterruptTime, SystemTime, DifferenceTime;

    /* Convert to relative time if needed */
    Timer->Header.Absolute = FALSE;
    if (DueTime.HighPart >= 0)
    {
        /* Get System Time */
        KeQuerySystemTime(&SystemTime);

        /* Do the conversion (negative result == still in the future) */
        DifferenceTime.QuadPart = SystemTime.QuadPart - DueTime.QuadPart;

        /* Make sure it hasn't already expired */
        Timer->Header.Absolute = TRUE;
        if (DifferenceTime.HighPart >= 0)
        {
            /* Already past due: signal the timer and bail out */
            Timer->Header.SignalState = TRUE;
            Timer->Header.Hand = 0;
            Timer->DueTime.QuadPart = 0;
            *Hand = 0;
            return FALSE;
        }

        /* Continue with the relative difference as the due time */
        DueTime = DifferenceTime;
    }

    /* Get the Interrupt Time */
    InterruptTime.QuadPart = KeQueryInterruptTime();

    /* Recalculate due time (DueTime is negative, so this adds the delay) */
    Timer->DueTime.QuadPart = InterruptTime.QuadPart - DueTime.QuadPart;

    /* Get the handle */
    *Hand = KiComputeTimerTableIndex(Timer->DueTime.QuadPart);
    Timer->Header.Hand = (UCHAR)*Hand;
    Timer->Header.Inserted = TRUE;
    return TRUE;
}
1091
//
// Called from Unlink and Queue Insert Code.
// Also called by timer code when canceling an inserted timer.
// Removes a timer from it's tree.
//
FORCEINLINE
VOID
KxRemoveTreeTimer(IN PKTIMER Timer)
{
    ULONG Hand = Timer->Header.Hand;
    PKSPIN_LOCK_QUEUE LockQueue;
    PKTIMER_TABLE_ENTRY TimerEntry;

    /* Acquire the lock for this timer's bucket */
    LockQueue = KiAcquireTimerLock(Hand);

    /* Set the timer as non-inserted */
    Timer->Header.Inserted = FALSE;

    /* Remove it from the bucket list; TRUE means the list may be empty */
    if (RemoveEntryList(&Timer->TimerListEntry))
    {
        /* Get the entry and check if it's empty */
        TimerEntry = &KiTimerTableListHead[Hand];
        if (IsListEmpty(&TimerEntry->Entry))
        {
            /* Park the empty bucket at an infinite absolute time */
            TimerEntry->Time.HighPart = 0xFFFFFFFF;
        }
    }

    /* Release the timer lock */
    KiReleaseTimerLock(LockQueue);
}
1126
//
// Arms a wait timer for a waiting thread. Interval >= 0 is an absolute
// system time (converted to relative here); Interval < 0 is relative.
// On an already-expired absolute time, the due time and hand are zeroed
// so the caller can detect immediate timeout.
//
FORCEINLINE
VOID
KxSetTimerForThreadWait(IN PKTIMER Timer,
                        IN LARGE_INTEGER Interval,
                        OUT PULONG Hand)
{
    ULONGLONG DueTime;
    LARGE_INTEGER InterruptTime, SystemTime, TimeDifference;

    /* Check the timer's interval to see if it's absolute */
    Timer->Header.Absolute = FALSE;
    if (Interval.HighPart >= 0)
    {
        /* Get the system time and calculate the relative time */
        KeQuerySystemTime(&SystemTime);
        TimeDifference.QuadPart = SystemTime.QuadPart - Interval.QuadPart;
        Timer->Header.Absolute = TRUE;

        /* Check if we've already expired */
        if (TimeDifference.HighPart >= 0)
        {
            /* Reset everything */
            Timer->DueTime.QuadPart = 0;
            *Hand = 0;
            Timer->Header.Hand = 0;
            return;
        }
        else
        {
            /* Update the interval with the (negative) relative time */
            Interval = TimeDifference;
        }
    }

    /* Calculate the due time (Interval is negative, so this adds it) */
    InterruptTime.QuadPart = KeQueryInterruptTime();
    DueTime = InterruptTime.QuadPart - Interval.QuadPart;
    Timer->DueTime.QuadPart = DueTime;

    /* Calculate the timer handle */
    *Hand = KiComputeTimerTableIndex(DueTime);
    Timer->Header.Hand = (UCHAR)*Hand;
}
1170
//
// Sets up the current thread for a delay (sleep) wait: links the thread to
// the single timer wait block, arms the wait timer, and records the wait
// reason, mode and alertability. Expects Thread, Timer, TimerBlock,
// Interval, Hand, DueTime, Alertable, WaitMode and Swappable to be in
// scope at the expansion site.
//
#define KxDelayThreadWait()                                                 \
                                                                            \
    /* Setup the Wait Block */                                              \
    Thread->WaitBlockList = TimerBlock;                                     \
                                                                            \
    /* Setup the timer */                                                   \
    KxSetTimerForThreadWait(Timer, *Interval, &Hand);                       \
                                                                            \
    /* Save the due time for the caller */                                  \
    DueTime.QuadPart = Timer->DueTime.QuadPart;                             \
                                                                            \
    /* Link the timer to this Wait Block */                                 \
    TimerBlock->NextWaitBlock = TimerBlock;                                 \
    Timer->Header.WaitListHead.Flink = &TimerBlock->WaitListEntry;          \
    Timer->Header.WaitListHead.Blink = &TimerBlock->WaitListEntry;          \
                                                                            \
    /* Clear wait status */                                                 \
    Thread->WaitStatus = STATUS_SUCCESS;                                    \
                                                                            \
    /* Setup wait fields */                                                 \
    Thread->Alertable = Alertable;                                          \
    Thread->WaitReason = DelayExecution;                                    \
    Thread->WaitMode = WaitMode;                                            \
                                                                            \
    /* Check if we can swap the thread's stack */                           \
    Thread->WaitListEntry.Flink = NULL;                                     \
    Swappable = KiCheckThreadStackSwap(Thread, WaitMode);                   \
                                                                            \
    /* Set the wait time */                                                 \
    Thread->WaitTime = KeTickCount.LowPart;
1201
//
// Sets up the current thread for a multi-object wait: fills one wait block
// per object into a circular list, optionally arms the timeout timer, and
// records the wait reason, mode and alertability. Expects Thread,
// WaitBlockArray, WaitBlock, Object, Index, Count, WaitType, Timeout,
// Timer, TimerBlock, Hand, DueTime, Alertable, WaitMode, WaitReason and
// Swappable to be in scope at the expansion site.
//
#define KxMultiThreadWait()                                                 \
    /* Link wait block array to the thread */                               \
    Thread->WaitBlockList = WaitBlockArray;                                 \
                                                                            \
    /* Reset the index */                                                   \
    Index = 0;                                                              \
                                                                            \
    /* Loop wait blocks */                                                  \
    do                                                                      \
    {                                                                       \
        /* Fill out the wait block */                                       \
        WaitBlock = &WaitBlockArray[Index];                                 \
        WaitBlock->Object = Object[Index];                                  \
        WaitBlock->WaitKey = (USHORT)Index;                                 \
        WaitBlock->WaitType = WaitType;                                     \
        WaitBlock->Thread = Thread;                                         \
                                                                            \
        /* Link to next block */                                            \
        WaitBlock->NextWaitBlock = &WaitBlockArray[Index + 1];              \
        Index++;                                                            \
    } while (Index < Count);                                                \
                                                                            \
    /* Link the last block */                                               \
    WaitBlock->NextWaitBlock = WaitBlockArray;                              \
                                                                            \
    /* Set default wait status */                                           \
    Thread->WaitStatus = STATUS_WAIT_0;                                     \
                                                                            \
    /* Check if we have a timer */                                          \
    if (Timeout)                                                            \
    {                                                                       \
        /* Link to the block */                                             \
        TimerBlock->NextWaitBlock = WaitBlockArray;                         \
                                                                            \
        /* Setup the timer */                                               \
        KxSetTimerForThreadWait(Timer, *Timeout, &Hand);                    \
                                                                            \
        /* Save the due time for the caller */                              \
        DueTime.QuadPart = Timer->DueTime.QuadPart;                         \
                                                                            \
        /* Initialize the list */                                           \
        InitializeListHead(&Timer->Header.WaitListHead);                    \
    }                                                                       \
                                                                            \
    /* Set wait settings */                                                 \
    Thread->Alertable = Alertable;                                          \
    Thread->WaitMode = WaitMode;                                            \
    Thread->WaitReason = WaitReason;                                        \
                                                                            \
    /* Check if we can swap the thread's stack */                           \
    Thread->WaitListEntry.Flink = NULL;                                     \
    Swappable = KiCheckThreadStackSwap(Thread, WaitMode);                   \
                                                                            \
    /* Set the wait time */                                                 \
    Thread->WaitTime = KeTickCount.LowPart;
1257
//
// Sets up the current thread and a single caller-provided wait block for a
// WaitAny wait on one dispatcher object, optionally arming a timeout timer.
// NOTE(review): statement-pasting macro -- relies on Thread, WaitBlock,
// Object, Timeout, Timer, TimerBlock, DueTime, Hand, Alertable, WaitMode,
// WaitReason and Swappable being declared in the caller's scope. Unlike
// KxMultiThreadWait, WaitBlock->Thread is not written here -- presumably
// the thread's builtin wait block already carries it; confirm at callers.
//
#define KxSingleThreadWait()                                                \
    /* Setup the Wait Block */                                              \
    Thread->WaitBlockList = WaitBlock;                                      \
    WaitBlock->WaitKey = STATUS_SUCCESS;                                    \
    WaitBlock->Object = Object;                                             \
    WaitBlock->WaitType = WaitAny;                                          \
                                                                            \
    /* Clear wait status */                                                 \
    Thread->WaitStatus = STATUS_SUCCESS;                                    \
                                                                            \
    /* Check if the caller gave a timeout */                                \
    if (Timeout)                                                            \
    {                                                                       \
        /* Arm the timer for this wait */                                   \
        KxSetTimerForThreadWait(Timer, *Timeout, &Hand);                    \
                                                                            \
        /* Save the due time for the caller */                              \
        DueTime.QuadPart = Timer->DueTime.QuadPart;                         \
                                                                            \
        /* Chain: wait block -> timer block -> wait block */                \
        WaitBlock->NextWaitBlock = TimerBlock;                              \
        TimerBlock->NextWaitBlock = WaitBlock;                              \
                                                                            \
        /* Make the timer's wait list contain exactly the timer block */    \
        Timer->Header.WaitListHead.Flink = &TimerBlock->WaitListEntry;      \
        Timer->Header.WaitListHead.Blink = &TimerBlock->WaitListEntry;      \
    }                                                                       \
    else                                                                    \
    {                                                                       \
        /* No timer block, the chain is just the single wait block */       \
        WaitBlock->NextWaitBlock = WaitBlock;                               \
    }                                                                       \
                                                                            \
    /* Record how and why the thread is waiting */                          \
    Thread->Alertable = Alertable;                                          \
    Thread->WaitMode = WaitMode;                                            \
    Thread->WaitReason = WaitReason;                                        \
                                                                            \
    /* Not on a wait list yet; ask if the kernel stack may be swapped out */ \
    Thread->WaitListEntry.Flink = NULL;                                     \
    Swappable = KiCheckThreadStackSwap(Thread, WaitMode);                   \
                                                                            \
    /* Timestamp the start of the wait */                                   \
    Thread->WaitTime = KeTickCount.LowPart;
1302
//
// Sets up the current thread for a non-alertable WrQueue wait on a kernel
// queue object, optionally arming a timeout timer.
// NOTE(review): statement-pasting macro -- relies on Thread, WaitBlock,
// Queue, Timeout, Timer, TimerBlock, DueTime, Hand, WaitMode and Swappable
// being declared in the caller's scope.
//
#define KxQueueThreadWait()                                                 \
    /* Setup the Wait Block */                                              \
    Thread->WaitBlockList = WaitBlock;                                      \
    WaitBlock->WaitKey = STATUS_SUCCESS;                                    \
    WaitBlock->Object = Queue;                                              \
    WaitBlock->WaitType = WaitAny;                                          \
    WaitBlock->Thread = Thread;                                             \
                                                                            \
    /* Clear wait status */                                                 \
    Thread->WaitStatus = STATUS_SUCCESS;                                    \
                                                                            \
    /* Check if the caller gave a timeout */                                \
    if (Timeout)                                                            \
    {                                                                       \
        /* Arm the timer for this wait */                                   \
        KxSetTimerForThreadWait(Timer, *Timeout, &Hand);                    \
                                                                            \
        /* Save the due time for the caller */                              \
        DueTime.QuadPart = Timer->DueTime.QuadPart;                         \
                                                                            \
        /* Chain: wait block -> timer block -> wait block */                \
        WaitBlock->NextWaitBlock = TimerBlock;                              \
        TimerBlock->NextWaitBlock = WaitBlock;                              \
                                                                            \
        /* Make the timer's wait list contain exactly the timer block */    \
        Timer->Header.WaitListHead.Flink = &TimerBlock->WaitListEntry;      \
        Timer->Header.WaitListHead.Blink = &TimerBlock->WaitListEntry;      \
    }                                                                       \
    else                                                                    \
    {                                                                       \
        /* No timer block, the chain is just the single wait block */       \
        WaitBlock->NextWaitBlock = WaitBlock;                               \
    }                                                                       \
                                                                            \
    /* Queue waits are never alertable and always use the WrQueue reason */ \
    Thread->Alertable = FALSE;                                              \
    Thread->WaitMode = WaitMode;                                            \
    Thread->WaitReason = WrQueue;                                           \
                                                                            \
    /* Not on a wait list yet; ask if the kernel stack may be swapped out */ \
    Thread->WaitListEntry.Flink = NULL;                                     \
    Swappable = KiCheckThreadStackSwap(Thread, WaitMode);                   \
                                                                            \
    /* Timestamp the start of the wait */                                   \
    Thread->WaitTime = KeTickCount.LowPart;
1348
1349 //
1350 // Unwaits a Thread
1351 //
FORCEINLINE
VOID
KxUnwaitThread(IN DISPATCHER_HEADER *Object,
               IN KPRIORITY Increment)
{
    /* Wakes every thread blocked on the given dispatcher object. Wait-any
       waiters are woken with their wait block's key; any other wait type is
       woken with STATUS_KERNEL_APC so the thread re-evaluates its wait.
       Increment is the priority boost passed through to KiUnwaitThread. */
    PLIST_ENTRY WaitEntry, WaitList;
    PKWAIT_BLOCK WaitBlock;
    PKTHREAD WaitThread;
    ULONG WaitKey;

    /* Loop the Wait Entries; the caller guarantees there is at least one */
    WaitList = &Object->WaitListHead;
    ASSERT(IsListEmpty(&Object->WaitListHead) == FALSE);
    WaitEntry = WaitList->Flink;
    do
    {
        /* Get the current wait block */
        WaitBlock = CONTAINING_RECORD(WaitEntry, KWAIT_BLOCK, WaitListEntry);

        /* Get the waiting thread */
        WaitThread = WaitBlock->Thread;

        /* Check the wait type of this block (comment previously said
           "Wait Mode" -- it is the WaitType that is tested) */
        if (WaitBlock->WaitType == WaitAny)
        {
            /* Wait-any: the block's key becomes the thread's wait status */
            WaitKey = WaitBlock->WaitKey;
        }
        else
        {
            /* Wait-all: wake with STATUS_KERNEL_APC so the thread goes
               back and re-checks whether all its objects are signaled */
            WaitKey = STATUS_KERNEL_APC;
        }

        /* Unwait the thread */
        KiUnwaitThread(WaitThread, WaitKey, Increment);

        /* Re-read the head rather than following WaitEntry->Flink:
           KiUnwaitThread presumably unlinks the woken thread's wait
           entries, so Flink now names the next remaining waiter --
           TODO(review): confirm against KiUnwaitThread */
        WaitEntry = WaitList->Flink;
    } while (WaitEntry != WaitList);
}
1393
1394 //
1395 // Unwaits a Thread waiting on an event
1396 //
FORCEINLINE
VOID
KxUnwaitThreadForEvent(IN PKEVENT Event,
                       IN KPRIORITY Increment)
{
    /* Wakes waiters of a signaled event. Non-wait-any waiters are woken
       with STATUS_KERNEL_APC (to re-evaluate their multi-object wait);
       the first wait-any waiter found consumes the signal -- the event is
       un-signaled, that one thread is woken, and the loop stops. */
    PLIST_ENTRY WaitEntry, WaitList;
    PKWAIT_BLOCK WaitBlock;
    PKTHREAD WaitThread;

    /* Loop the Wait Entries; the caller guarantees there is at least one */
    WaitList = &Event->Header.WaitListHead;
    ASSERT(IsListEmpty(&Event->Header.WaitListHead) == FALSE);
    WaitEntry = WaitList->Flink;
    do
    {
        /* Get the current wait block */
        WaitBlock = CONTAINING_RECORD(WaitEntry, KWAIT_BLOCK, WaitListEntry);

        /* Get the waiting thread */
        WaitThread = WaitBlock->Thread;

        /* Check the wait type of this block */
        if (WaitBlock->WaitType == WaitAny)
        {
            /* This waiter consumes the signal: un-signal the event */
            Event->Header.SignalState = 0;

            /* Wake exactly this thread and stop scanning */
            KiUnwaitThread(WaitThread, WaitBlock->WaitKey, Increment);
            break;
        }

        /* Wait-all waiter: wake it with STATUS_KERNEL_APC so it
           re-evaluates its whole wait; the event stays signaled */
        KiUnwaitThread(WaitThread, STATUS_KERNEL_APC, Increment);

        /* Re-read the head: KiUnwaitThread presumably unlinked the woken
           thread's entries -- TODO(review): confirm */
        WaitEntry = WaitList->Flink;
    } while (WaitEntry != WaitList);
}
1436
1437 //
1438 // This routine queues a thread that is ready on the PRCB's ready lists.
1439 // If this thread cannot currently run on this CPU, then the thread is
1440 // added to the deferred ready list instead.
1441 //
1442 // This routine must be entered with the PRCB lock held and it will exit
1443 // with the PRCB lock released!
1444 //
1445 FORCEINLINE
1446 VOID
1447 KxQueueReadyThread(IN PKTHREAD Thread,
1448 IN PKPRCB Prcb)
1449 {
1450 BOOLEAN Preempted;
1451 KPRIORITY Priority;
1452
1453 /* Sanity checks */
1454 ASSERT(Prcb == KeGetCurrentPrcb());
1455 ASSERT(Thread->State == Running);
1456 ASSERT(Thread->NextProcessor == Prcb->Number);
1457
1458 /* Check if this thread is allowed to run in this CPU */
1459 #ifdef CONFIG_SMP
1460 if ((Thread->Affinity) & (Prcb->SetMember))
1461 #else
1462 if (TRUE)
1463 #endif
1464 {
1465 /* Set thread ready for execution */
1466 Thread->State = Ready;
1467
1468 /* Save current priority and if someone had pre-empted it */
1469 Priority = Thread->Priority;
1470 Preempted = Thread->Preempted;
1471
1472 /* We're not pre-empting now, and set the wait time */
1473 Thread->Preempted = FALSE;
1474 Thread->WaitTime = KeTickCount.LowPart;
1475
1476 /* Sanity check */
1477 ASSERT((Priority >= 0) && (Priority <= HIGH_PRIORITY));
1478
1479 /* Insert this thread in the appropriate order */
1480 Preempted ? InsertHeadList(&Prcb->DispatcherReadyListHead[Priority],
1481 &Thread->WaitListEntry) :
1482 InsertTailList(&Prcb->DispatcherReadyListHead[Priority],
1483 &Thread->WaitListEntry);
1484
1485 /* Update the ready summary */
1486 Prcb->ReadySummary |= PRIORITY_MASK(Priority);
1487
1488 /* Sanity check */
1489 ASSERT(Priority == Thread->Priority);
1490
1491 /* Release the PRCB lock */
1492 KiReleasePrcbLock(Prcb);
1493 }
1494 else
1495 {
1496 /* Otherwise, prepare this thread to be deferred */
1497 Thread->State = DeferredReady;
1498 Thread->DeferredProcessor = Prcb->Number;
1499
1500 /* Release the lock and defer scheduling */
1501 KiReleasePrcbLock(Prcb);
1502 KiDeferredReadyThread(Thread);
1503 }
1504 }
1505
1506 //
1507 // This routine scans for an appropriate ready thread to select at the
1508 // given priority and for the given CPU.
1509 //
1510 FORCEINLINE
1511 PKTHREAD
1512 KiSelectReadyThread(IN KPRIORITY Priority,
1513 IN PKPRCB Prcb)
1514 {
1515 ULONG PrioritySet;
1516 LONG HighPriority;
1517 PLIST_ENTRY ListEntry;
1518 PKTHREAD Thread = NULL;
1519
1520 /* Save the current mask and get the priority set for the CPU */
1521 PrioritySet = Prcb->ReadySummary >> Priority;
1522 if (!PrioritySet) goto Quickie;
1523
1524 /* Get the highest priority possible */
1525 BitScanReverse((PULONG)&HighPriority, PrioritySet);
1526 ASSERT((PrioritySet & PRIORITY_MASK(HighPriority)) != 0);
1527 HighPriority += Priority;
1528
1529 /* Make sure the list isn't empty at the highest priority */
1530 ASSERT(IsListEmpty(&Prcb->DispatcherReadyListHead[HighPriority]) == FALSE);
1531
1532 /* Get the first thread on the list */
1533 ListEntry = Prcb->DispatcherReadyListHead[HighPriority].Flink;
1534 Thread = CONTAINING_RECORD(ListEntry, KTHREAD, WaitListEntry);
1535
1536 /* Make sure this thread is here for a reason */
1537 ASSERT(HighPriority == Thread->Priority);
1538 ASSERT(Thread->Affinity & AFFINITY_MASK(Prcb->Number));
1539 ASSERT(Thread->NextProcessor == Prcb->Number);
1540
1541 /* Remove it from the list */
1542 if (RemoveEntryList(&Thread->WaitListEntry))
1543 {
1544 /* The list is empty now, reset the ready summary */
1545 Prcb->ReadySummary ^= PRIORITY_MASK(HighPriority);
1546 }
1547
1548 /* Sanity check and return the thread */
1549 Quickie:
1550 ASSERT((Thread == NULL) ||
1551 (Thread->BasePriority == 0) ||
1552 (Thread->Priority != 0));
1553 return Thread;
1554 }
1555
1556 //
1557 // This routine computes the new priority for a thread. It is only valid for
1558 // threads with priorities in the dynamic priority range.
1559 //
FORCEINLINE
SCHAR
KiComputeNewPriority(IN PKTHREAD Thread,
                     IN SCHAR Adjustment)
{
    /* Computes the thread's decayed priority: current priority minus the
       pending decrement and the caller's Adjustment, clamped to the base
       priority. Real-time priorities are returned unchanged. Consumes
       (zeroes) Thread->PriorityDecrement as a side effect. */
    SCHAR Priority;

    /* Priority sanity checks */
    ASSERT((Thread->PriorityDecrement >= 0) &&
           (Thread->PriorityDecrement <= Thread->Priority));
    ASSERT((Thread->Priority < LOW_REALTIME_PRIORITY) ?
            TRUE : (Thread->PriorityDecrement == 0));

    /* Start from the current priority */
    Priority = Thread->Priority;
    if (Priority < LOW_REALTIME_PRIORITY)
    {
        /* Apply the pending decrement plus the caller's adjustment */
        Priority -= (Thread->PriorityDecrement + Adjustment);

        /* Never decay below the thread's base priority */
        if (Priority < Thread->BasePriority) Priority = Thread->BasePriority;

        /* The decrement is one-shot: consumed once applied */
        Thread->PriorityDecrement = 0;
    }

    /* Sanity check */
    ASSERT((Thread->BasePriority == 0) || (Priority != 0));

    /* Return the new priority */
    return Priority;
}
1593
1594 //
1595 // Guarded Mutex Routines
1596 //
1597 FORCEINLINE
1598 VOID
1599 _KeInitializeGuardedMutex(OUT PKGUARDED_MUTEX GuardedMutex)
1600 {
1601 /* Setup the Initial Data */
1602 GuardedMutex->Count = GM_LOCK_BIT;
1603 GuardedMutex->Owner = NULL;
1604 GuardedMutex->Contention = 0;
1605
1606 /* Initialize the Wait Gate */
1607 KeInitializeGate(&GuardedMutex->Gate);
1608 }
1609
1610 FORCEINLINE
1611 VOID
1612 _KeAcquireGuardedMutexUnsafe(IN OUT PKGUARDED_MUTEX GuardedMutex)
1613 {
1614 PKTHREAD Thread = KeGetCurrentThread();
1615
1616 /* Sanity checks */
1617 ASSERT((KeGetCurrentIrql() == APC_LEVEL) ||
1618 (Thread->SpecialApcDisable < 0) ||
1619 (Thread->Teb == NULL) ||
1620 (Thread->Teb >= (PTEB)MM_SYSTEM_RANGE_START));
1621 ASSERT(GuardedMutex->Owner != Thread);
1622
1623 /* Remove the lock */
1624 if (!InterlockedBitTestAndReset(&GuardedMutex->Count, GM_LOCK_BIT_V))
1625 {
1626 /* The Guarded Mutex was already locked, enter contented case */
1627 KiAcquireGuardedMutex(GuardedMutex);
1628 }
1629
1630 /* Set the Owner */
1631 GuardedMutex->Owner = Thread;
1632 }
1633
FORCEINLINE
VOID
_KeReleaseGuardedMutexUnsafe(IN OUT PKGUARDED_MUTEX GuardedMutex)
{
    /* Releases a guarded mutex acquired via the "Unsafe" path and, if a
       waiter is queued but not yet marked woken, wakes one through the
       gate. NOTE(review): the CAS/wake sequence below is order-sensitive
       lock-free code -- kept byte-identical. */
    LONG OldValue, NewValue;

    /* Sanity checks */
    ASSERT((KeGetCurrentIrql() == APC_LEVEL) ||
           (KeGetCurrentThread()->SpecialApcDisable < 0) ||
           (KeGetCurrentThread()->Teb == NULL) ||
           (KeGetCurrentThread()->Teb >= (PTEB)MM_SYSTEM_RANGE_START));
    ASSERT(GuardedMutex->Owner == KeGetCurrentThread());

    /* Destroy the Owner */
    GuardedMutex->Owner = NULL;

    /* Set the lock bit (mutex available); it must not already be set,
       since we held the mutex */
    OldValue = InterlockedExchangeAdd(&GuardedMutex->Count, GM_LOCK_BIT);
    ASSERT((OldValue & GM_LOCK_BIT) == 0);

    /* Check if there are waiters (non-zero count) and none of them has
       already been marked as woken */
    if ((OldValue) && !(OldValue & GM_LOCK_WAITER_WOKEN))
    {
        /* Account for the lock bit we just added, so OldValue matches
           the current contents of Count for the CAS below */
        OldValue += GM_LOCK_BIT;

        /* Target state: one waiter marked woken, one fewer waiting */
        NewValue = OldValue + GM_LOCK_WAITER_WOKEN -
                   GM_LOCK_WAITER_INC;

        /* Publish the woken state; if nobody raced us in between... */
        if (InterlockedCompareExchange(&GuardedMutex->Count,
                                       NewValue,
                                       OldValue) == OldValue)
        {
            /* ...wake one waiter through the gate with a priority boost */
            KeSignalGateBoostPriority(&GuardedMutex->Gate);
        }
    }
}
1674
1675 FORCEINLINE
1676 VOID
1677 _KeAcquireGuardedMutex(IN PKGUARDED_MUTEX GuardedMutex)
1678 {
1679 PKTHREAD Thread = KeGetCurrentThread();
1680
1681 /* Sanity checks */
1682 ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
1683 ASSERT(GuardedMutex->Owner != Thread);
1684
1685 /* Disable Special APCs */
1686 KeEnterGuardedRegion();
1687
1688 /* Remove the lock */
1689 if (!InterlockedBitTestAndReset(&GuardedMutex->Count, GM_LOCK_BIT_V))
1690 {
1691 /* The Guarded Mutex was already locked, enter contented case */
1692 KiAcquireGuardedMutex(GuardedMutex);
1693 }
1694
1695 /* Set the Owner and Special APC Disable state */
1696 GuardedMutex->Owner = Thread;
1697 GuardedMutex->SpecialApcDisable = Thread->SpecialApcDisable;
1698 }
1699
FORCEINLINE
VOID
_KeReleaseGuardedMutex(IN OUT PKGUARDED_MUTEX GuardedMutex)
{
    /* Releases a guarded mutex taken with _KeAcquireGuardedMutex, waking
       one queued waiter if needed, then leaves the guarded region entered
       at acquire time. NOTE(review): the CAS/wake sequence is
       order-sensitive lock-free code -- kept byte-identical. */
    LONG OldValue, NewValue;

    /* Sanity checks */
    ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
    ASSERT(GuardedMutex->Owner == KeGetCurrentThread());
    ASSERT(KeGetCurrentThread()->SpecialApcDisable ==
           GuardedMutex->SpecialApcDisable);

    /* Destroy the Owner */
    GuardedMutex->Owner = NULL;

    /* Set the lock bit (mutex available); it must not already be set,
       since we held the mutex */
    OldValue = InterlockedExchangeAdd(&GuardedMutex->Count, GM_LOCK_BIT);
    ASSERT((OldValue & GM_LOCK_BIT) == 0);

    /* Check if there are waiters (non-zero count) and none of them has
       already been marked as woken */
    if ((OldValue) && !(OldValue & GM_LOCK_WAITER_WOKEN))
    {
        /* Account for the lock bit we just added, so OldValue matches
           the current contents of Count for the CAS below */
        OldValue += GM_LOCK_BIT;

        /* Target state: one waiter marked woken, one fewer waiting */
        NewValue = OldValue + GM_LOCK_WAITER_WOKEN -
                   GM_LOCK_WAITER_INC;

        /* Publish the woken state; if nobody raced us in between... */
        if (InterlockedCompareExchange(&GuardedMutex->Count,
                                       NewValue,
                                       OldValue) == OldValue)
        {
            /* ...wake one waiter through the gate with a priority boost */
            KeSignalGateBoostPriority(&GuardedMutex->Gate);
        }
    }

    /* Leave the guarded region entered when the mutex was acquired */
    KeLeaveGuardedRegion();
}
1742
1743 FORCEINLINE
1744 BOOLEAN
1745 _KeTryToAcquireGuardedMutex(IN OUT PKGUARDED_MUTEX GuardedMutex)
1746 {
1747 PKTHREAD Thread = KeGetCurrentThread();
1748
1749 /* Block APCs */
1750 KeEnterGuardedRegion();
1751
1752 /* Remove the lock */
1753 if (!InterlockedBitTestAndReset(&GuardedMutex->Count, GM_LOCK_BIT_V))
1754 {
1755 /* Re-enable APCs */
1756 KeLeaveGuardedRegion();
1757 YieldProcessor();
1758
1759 /* Return failure */
1760 return FALSE;
1761 }
1762
1763 /* Set the Owner and APC State */
1764 GuardedMutex->Owner = Thread;
1765 GuardedMutex->SpecialApcDisable = Thread->SpecialApcDisable;
1766 return TRUE;
1767 }
1768
1769
//
// Acquires the spinlock protecting the NMI callback list; the previous
// IRQL is returned through OldIrql for the matching KiReleaseNmiListLock.
//
FORCEINLINE
VOID
KiAcquireNmiListLock(OUT PKIRQL OldIrql)
{
    KeAcquireSpinLock(&KiNmiCallbackListLock, OldIrql);
}
1776
//
// Releases the NMI callback list spinlock and restores the IRQL saved by
// the matching KiAcquireNmiListLock.
//
FORCEINLINE
VOID
KiReleaseNmiListLock(IN KIRQL OldIrql)
{
    KeReleaseSpinLock(&KiNmiCallbackListLock, OldIrql);
}