1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: GPL - See COPYING in the top level directory
4 * FILE: ntoskrnl/include/internal/ke_x.h
5 * PURPOSE: Internal Inlined Functions for the Kernel
6 * PROGRAMMERS: Alex Ionescu (alex.ionescu@reactos.org)
7 */
8
9 #ifndef _M_ARM
10 FORCEINLINE
11 PRKTHREAD
12 KeGetCurrentThread(VOID)
13 {
14 #ifdef _M_IX86
15 /* Return the current thread */
16 return ((PKIPCR)KeGetPcr())->PrcbData.CurrentThread;
17 #elif defined (_M_AMD64)
18 return (PRKTHREAD)__readgsqword(FIELD_OFFSET(KIPCR, Prcb.CurrentThread));
19 #else
20 PKPRCB Prcb = KeGetCurrentPrcb();
21 return Prcb->CurrentThread;
22 #endif
23 }
24
25 FORCEINLINE
26 UCHAR
27 KeGetPreviousMode(VOID)
28 {
29 /* Return the current mode */
30 return KeGetCurrentThread()->PreviousMode;
31 }
32 #endif
33
34 FORCEINLINE
35 VOID
36 KeFlushProcessTb(VOID)
37 {
38 /* Flush the TLB by resetting CR3 */
39 #ifdef _M_PPC
40 __asm__("sync\n\tisync\n\t");
41 #elif defined(_M_ARM)
42 //
43 // We need to implement this!
44 //
45 ASSERTMSG("Need ARM flush routine\n", FALSE);
46 #else
47 __writecr3(__readcr3());
48 #endif
49 }
50
51 //
52 // Enters a Guarded Region
53 //
54 #define KeEnterGuardedRegion() \
55 { \
56 PKTHREAD _Thread = KeGetCurrentThread(); \
57 \
58 /* Sanity checks */ \
59 ASSERT(KeGetCurrentIrql() <= APC_LEVEL); \
60 ASSERT(_Thread == KeGetCurrentThread()); \
61 ASSERT((_Thread->SpecialApcDisable <= 0) && \
62 (_Thread->SpecialApcDisable != -32768)); \
63 \
64 /* Disable Special APCs */ \
65 _Thread->SpecialApcDisable--; \
66 }
67
68 //
69 // Leaves a Guarded Region
70 //
71 #define KeLeaveGuardedRegion() \
72 { \
73 PKTHREAD _Thread = KeGetCurrentThread(); \
74 \
75 /* Sanity checks */ \
76 ASSERT(KeGetCurrentIrql() <= APC_LEVEL); \
77 ASSERT(_Thread == KeGetCurrentThread()); \
78 ASSERT(_Thread->SpecialApcDisable < 0); \
79 \
80 /* Leave region and check if APCs are OK now */ \
81 if (!(++_Thread->SpecialApcDisable)) \
82 { \
83 /* Check for Kernel APCs on the list */ \
84 if (!IsListEmpty(&_Thread->ApcState. \
85 ApcListHead[KernelMode])) \
86 { \
87 /* Check for APC Delivery */ \
88 KiCheckForKernelApcDelivery(); \
89 } \
90 } \
91 }
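
//
// Usage sketch (illustrative only; the caller code below is assumed): guarded
// regions nest by counting SpecialApcDisable, so every enter must be paired
// with a leave on the same thread. A typical caller brackets code that must
// not be interrupted by any kernel APC, including special kernel APCs:
//
//     KeEnterGuardedRegion();
//     /* ... touch state that a special kernel APC could also touch ... */
//     KeLeaveGuardedRegion();
//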
92
93 //
94 // Enters a Critical Region
95 //
96 #define KeEnterCriticalRegion() \
97 { \
98 PKTHREAD _Thread = KeGetCurrentThread(); \
99 \
100 /* Sanity checks */ \
101 ASSERT(_Thread == KeGetCurrentThread()); \
102 ASSERT((_Thread->KernelApcDisable <= 0) && \
103 (_Thread->KernelApcDisable != -32768)); \
104 \
105 /* Disable Kernel APCs */ \
106 _Thread->KernelApcDisable--; \
107 }
108
109 //
110 // Leaves a Critical Region
111 //
112 #define KeLeaveCriticalRegion() \
113 { \
114 PKTHREAD _Thread = KeGetCurrentThread(); \
115 \
116 /* Sanity checks */ \
117 ASSERT(_Thread == KeGetCurrentThread()); \
118 ASSERT(_Thread->KernelApcDisable < 0); \
119 \
120 /* Enable Kernel APCs */ \
121 _Thread->KernelApcDisable++; \
122 \
123 /* Check if Kernel APCs are now enabled */ \
124 if (!(_Thread->KernelApcDisable)) \
125 { \
126 /* Check if we need to request an APC Delivery */ \
127 if (!(IsListEmpty(&_Thread->ApcState.ApcListHead[KernelMode])) && \
128 !(_Thread->SpecialApcDisable)) \
129 { \
130 /* Check for the right environment */ \
131 KiCheckForKernelApcDelivery(); \
132 } \
133 } \
134 }
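
//
// Illustrative contrast with the guarded region above (caller code assumed):
// a critical region only holds off normal kernel APCs via KernelApcDisable,
// while special kernel APCs can still be delivered. Code that only needs to
// keep normal kernel APCs out pairs the calls the same way:
//
//     KeEnterCriticalRegion();
//     /* ... normal kernel APCs are held off here ... */
//     KeLeaveCriticalRegion();
//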
135
136 #ifndef CONFIG_SMP
137 //
138 // Spinlock Acquire at IRQL >= DISPATCH_LEVEL
139 //
140 FORCEINLINE
141 VOID
142 KxAcquireSpinLock(IN PKSPIN_LOCK SpinLock)
143 {
144 /* On UP builds, spinlocks don't exist at IRQL >= DISPATCH */
145 UNREFERENCED_PARAMETER(SpinLock);
146 }
147
148 //
149 // Spinlock Release at IRQL >= DISPATCH_LEVEL
150 //
151 FORCEINLINE
152 VOID
153 KxReleaseSpinLock(IN PKSPIN_LOCK SpinLock)
154 {
155 /* On UP builds, spinlocks don't exist at IRQL >= DISPATCH */
156 UNREFERENCED_PARAMETER(SpinLock);
157 }
158
159 //
160 // This routine protects against multiple CPU acquires; it's meaningless on UP.
161 //
162 FORCEINLINE
163 VOID
164 KiAcquireDispatcherObject(IN DISPATCHER_HEADER* Object)
165 {
166 UNREFERENCED_PARAMETER(Object);
167 }
168
169 //
170 // This routine protects against multiple CPU acquires; it's meaningless on UP.
171 //
172 FORCEINLINE
173 VOID
174 KiReleaseDispatcherObject(IN DISPATCHER_HEADER* Object)
175 {
176 UNREFERENCED_PARAMETER(Object);
177 }
178
179 FORCEINLINE
180 KIRQL
181 KiAcquireDispatcherLock(VOID)
182 {
183 /* Raise to DPC level */
184 return KeRaiseIrqlToDpcLevel();
185 }
186
187 FORCEINLINE
188 VOID
189 KiReleaseDispatcherLock(IN KIRQL OldIrql)
190 {
191 /* Just exit the dispatcher */
192 KiExitDispatcher(OldIrql);
193 }
194
195 FORCEINLINE
196 VOID
197 KiAcquireDispatcherLockAtDpcLevel(VOID)
198 {
199 /* This is a no-op at DPC Level for UP systems */
200 return;
201 }
202
203 FORCEINLINE
204 VOID
205 KiReleaseDispatcherLockFromDpcLevel(VOID)
206 {
207 /* This is a no-op at DPC Level for UP systems */
208 return;
209 }
210
211 //
212 // This routine makes the thread deferred ready on the boot CPU.
213 //
214 FORCEINLINE
215 VOID
216 KiInsertDeferredReadyList(IN PKTHREAD Thread)
217 {
218 /* Set the thread to deferred state and boot CPU */
219 Thread->State = DeferredReady;
220 Thread->DeferredProcessor = 0;
221
222 /* Make the thread ready immediately */
223 KiDeferredReadyThread(Thread);
224 }
225
226 FORCEINLINE
227 VOID
228 KiRescheduleThread(IN BOOLEAN NewThread,
229 IN ULONG Cpu)
230 {
231 /* This is meaningless on UP systems */
232 UNREFERENCED_PARAMETER(NewThread);
233 UNREFERENCED_PARAMETER(Cpu);
234 }
235
236 //
237 // This routine protects against multiple CPU acquires; it's meaningless on UP.
238 //
239 FORCEINLINE
240 VOID
241 KiSetThreadSwapBusy(IN PKTHREAD Thread)
242 {
243 UNREFERENCED_PARAMETER(Thread);
244 }
245
246 //
247 // This routine protects against multiple CPU acquires; it's meaningless on UP.
248 //
249 FORCEINLINE
250 VOID
251 KiAcquirePrcbLock(IN PKPRCB Prcb)
252 {
253 UNREFERENCED_PARAMETER(Prcb);
254 }
255
256 //
257 // This routine protects against multiple CPU acquires; it's meaningless on UP.
258 //
259 FORCEINLINE
260 VOID
261 KiReleasePrcbLock(IN PKPRCB Prcb)
262 {
263 UNREFERENCED_PARAMETER(Prcb);
264 }
265
266 //
267 // This routine protects against multiple CPU acquires; it's meaningless on UP.
268 //
269 FORCEINLINE
270 VOID
271 KiAcquireThreadLock(IN PKTHREAD Thread)
272 {
273 UNREFERENCED_PARAMETER(Thread);
274 }
275
276 //
277 // This routine protects against multiple CPU acquires; it's meaningless on UP.
278 //
279 FORCEINLINE
280 VOID
281 KiReleaseThreadLock(IN PKTHREAD Thread)
282 {
283 UNREFERENCED_PARAMETER(Thread);
284 }
285
286 //
287 // This routine protects against multiple CPU acquires; it's meaningless on UP.
288 //
289 FORCEINLINE
290 BOOLEAN
291 KiTryThreadLock(IN PKTHREAD Thread)
292 {
293 UNREFERENCED_PARAMETER(Thread);
294 return FALSE;
295 }
296
297 FORCEINLINE
298 VOID
299 KiCheckDeferredReadyList(IN PKPRCB Prcb)
300 {
301 /* There are no deferred ready lists on UP systems */
302 UNREFERENCED_PARAMETER(Prcb);
303 }
304
305 FORCEINLINE
306 VOID
307 KiRundownThread(IN PKTHREAD Thread)
308 {
309 #if defined(_M_IX86)
310 /* Check if this is the NPX Thread */
311 if (KeGetCurrentPrcb()->NpxThread == Thread)
312 {
313 /* Clear it */
314 KeGetCurrentPrcb()->NpxThread = NULL;
315 Ke386FnInit();
316 }
317 #endif
318 }
319
320 FORCEINLINE
321 VOID
322 KiRequestApcInterrupt(IN BOOLEAN NeedApc,
323 IN UCHAR Processor)
324 {
325 /* We deliver instantly on UP */
326 UNREFERENCED_PARAMETER(NeedApc);
327 UNREFERENCED_PARAMETER(Processor);
328 }
329
330 FORCEINLINE
331 PKSPIN_LOCK_QUEUE
332 KiAcquireTimerLock(IN ULONG Hand)
333 {
334 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
335
336 /* Nothing to do on UP */
337 UNREFERENCED_PARAMETER(Hand);
338 return NULL;
339 }
340
341 FORCEINLINE
342 VOID
343 KiReleaseTimerLock(IN PKSPIN_LOCK_QUEUE LockQueue)
344 {
345 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
346
347 /* Nothing to do on UP */
348 UNREFERENCED_PARAMETER(LockQueue);
349 }
350
351 #else
352
353 //
354 // Spinlock Acquisition at IRQL >= DISPATCH_LEVEL
355 //
356 FORCEINLINE
357 VOID
358 KxAcquireSpinLock(IN PKSPIN_LOCK SpinLock)
359 {
360 /* Make sure that we don't own the lock already */
361 if (((KSPIN_LOCK)KeGetCurrentThread() | 1) == *SpinLock)
362 {
363 /* We do, bugcheck! */
364 KeBugCheckEx(SPIN_LOCK_ALREADY_OWNED, (ULONG_PTR)SpinLock, 0, 0, 0);
365 }
366
367 /* Start acquire loop */
368 for (;;)
369 {
370 /* Try to acquire it */
371 if (InterlockedBitTestAndSet((PLONG)SpinLock, 0))
372 {
373 /* Value changed... wait until it's unlocked */
374 while (*(volatile KSPIN_LOCK *)SpinLock == 1)
375 {
376 #if DBG
377 /* On debug builds, we use a much slower but useful routine */
378 //Kii386SpinOnSpinLock(SpinLock, 5);
379
380 /* FIXME: Do normal yield for now */
381 YieldProcessor();
382 #else
383 /* Otherwise, just yield and keep looping */
384 YieldProcessor();
385 #endif
386 }
387 }
388 else
389 {
390 #if DBG
391 /* On debug builds, we OR in the KTHREAD */
392 *SpinLock = (KSPIN_LOCK)KeGetCurrentThread() | 1;
393 #endif
394 /* All is well, break out */
395 break;
396 }
397 }
398 }
399
400 //
401 // Spinlock Release at IRQL >= DISPATCH_LEVEL
402 //
403 FORCEINLINE
404 VOID
405 KxReleaseSpinLock(IN PKSPIN_LOCK SpinLock)
406 {
407 #if DBG
408 /* Make sure that the threads match */
409 if (((KSPIN_LOCK)KeGetCurrentThread() | 1) != *SpinLock)
410 {
411 /* They don't, bugcheck */
412 KeBugCheckEx(SPIN_LOCK_NOT_OWNED, (ULONG_PTR)SpinLock, 0, 0, 0);
413 }
414 #endif
415 /* Clear the lock */
416 InterlockedAnd((PLONG)SpinLock, 0);
417 }
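
//
// Usage sketch for the two helpers above (illustrative; SomeLock is an
// assumed caller-owned KSPIN_LOCK and the caller is assumed to have already
// raised to DISPATCH_LEVEL or higher, e.g. from a DPC or from the
// KeAcquireSpinLock* wrappers):
//
//     KxAcquireSpinLock(&SomeLock);
//     /* ... short critical section touching the data the lock guards ... */
//     KxReleaseSpinLock(&SomeLock);
//
// On DBG builds the acquire path stamps the owning KTHREAD into the lock, so
// releasing a lock the current thread does not own bugchecks with
// SPIN_LOCK_NOT_OWNED.
//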
418
419 FORCEINLINE
420 VOID
421 KiAcquireDispatcherObject(IN DISPATCHER_HEADER* Object)
422 {
423 LONG OldValue;
424
425 /* Make sure we're at a safe level to touch the lock */
426 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
427
428 /* Start acquire loop */
429 do
430 {
431 /* Loop until the other CPU releases it */
432 while (TRUE)
433 {
434 /* Check if it got released */
435 OldValue = Object->Lock;
436 if ((OldValue & KOBJECT_LOCK_BIT) == 0) break;
437
438 /* Let the CPU know that this is a loop */
439 YieldProcessor();
440 }
441
442 /* Try acquiring the lock now */
443 } while (InterlockedCompareExchange(&Object->Lock,
444 OldValue | KOBJECT_LOCK_BIT,
445 OldValue) != OldValue);
446 }
447
448 FORCEINLINE
449 VOID
450 KiReleaseDispatcherObject(IN DISPATCHER_HEADER* Object)
451 {
452 /* Make sure we're at a safe level to touch the lock */
453 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
454
455 /* Release it */
456 InterlockedAnd(&Object->Lock, ~KOBJECT_LOCK_BIT);
457 }
458
459 FORCEINLINE
460 KIRQL
461 KiAcquireDispatcherLock(VOID)
462 {
463 /* Raise to synchronization level and acquire the dispatcher lock */
464 return KeAcquireQueuedSpinLockRaiseToSynch(LockQueueDispatcherLock);
465 }
466
467 FORCEINLINE
468 VOID
469 KiReleaseDispatcherLock(IN KIRQL OldIrql)
470 {
471 /* First release the lock */
472 KeReleaseQueuedSpinLockFromDpcLevel(&KeGetCurrentPrcb()->
473 LockQueue[LockQueueDispatcherLock]);
474
475 /* Then exit the dispatcher */
476 KiExitDispatcher(OldIrql);
477 }
478
479 FORCEINLINE
480 VOID
481 KiAcquireDispatcherLockAtDpcLevel(VOID)
482 {
483 /* Acquire the dispatcher lock */
484 KeAcquireQueuedSpinLockAtDpcLevel(&KeGetCurrentPrcb()->
485 LockQueue[LockQueueDispatcherLock]);
486 }
487
488 FORCEINLINE
489 VOID
490 KiReleaseDispatcherLockFromDpcLevel(VOID)
491 {
492 /* Release the dispatcher lock */
493 KeReleaseQueuedSpinLockFromDpcLevel(&KeGetCurrentPrcb()->
494 LockQueue[LockQueueDispatcherLock]);
495 }
496
497 //
498 // This routine inserts a thread into the deferred ready list of the current CPU
499 //
500 FORCEINLINE
501 VOID
502 KiInsertDeferredReadyList(IN PKTHREAD Thread)
503 {
504 PKPRCB Prcb = KeGetCurrentPrcb();
505
506 /* Set the thread to deferred state and CPU */
507 Thread->State = DeferredReady;
508 Thread->DeferredProcessor = Prcb->Number;
509
510 /* Add it on the list */
511 PushEntryList(&Prcb->DeferredReadyListHead, &Thread->SwapListEntry);
512 }
513
514 FORCEINLINE
515 VOID
516 KiRescheduleThread(IN BOOLEAN NewThread,
517 IN ULONG Cpu)
518 {
519 /* Check if a new thread needs to be scheduled on a different CPU */
520 if ((NewThread) && !(KeGetPcr()->Number == Cpu))
521 {
522 /* Send an IPI to request delivery */
523 KiIpiSend(AFFINITY_MASK(Cpu), IPI_DPC);
524 }
525 }
526
527 //
528 // This routine sets the current thread in a swap busy state, which ensures that
529 // nobody else tries to swap it concurrently.
530 //
531 FORCEINLINE
532 VOID
533 KiSetThreadSwapBusy(IN PKTHREAD Thread)
534 {
535 /* Make sure nobody already set it */
536 ASSERT(Thread->SwapBusy == FALSE);
537
538 /* Set it ourselves */
539 Thread->SwapBusy = TRUE;
540 }
541
542 //
543 // This routine acquires the PRCB lock so that only one caller can touch
544 // volatile PRCB data.
545 //
546 // Since this is a simple optimized spin-lock, it must only be acquired
547 // at dispatcher level or higher!
548 //
549 FORCEINLINE
550 VOID
551 KiAcquirePrcbLock(IN PKPRCB Prcb)
552 {
553 /* Make sure we're at a safe level to touch the PRCB lock */
554 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
555
556 /* Start acquire loop */
557 for (;;)
558 {
559 /* Acquire the lock and break out if we acquired it first */
560 if (!InterlockedExchange((PLONG)&Prcb->PrcbLock, 1)) break;
561
562 /* Loop until the other CPU releases it */
563 do
564 {
565 /* Let the CPU know that this is a loop */
566 YieldProcessor();
567 } while (Prcb->PrcbLock);
568 }
569 }
570
571 //
572 // This routine releases the PRCB lock so that other callers can touch
573 // volatile PRCB data.
574 //
575 // Since this is a simple optimized spin-lock, it must only be acquired
576 // at dispatcher level or higher!
577 //
578 FORCEINLINE
579 VOID
580 KiReleasePrcbLock(IN PKPRCB Prcb)
581 {
582 /* Make sure we are above dispatch and the lock is acquired! */
583 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
584 ASSERT(Prcb->PrcbLock != 0);
585
586 /* Release it */
587 InterlockedAnd((PLONG)&Prcb->PrcbLock, 0);
588 }
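
//
// Usage sketch (illustrative; Prcb and NextThread are assumed caller
// variables): volatile PRCB fields are only touched while holding the PRCB
// lock, which the routines above require to be taken at DISPATCH_LEVEL or
// higher:
//
//     KiAcquirePrcbLock(Prcb);
//     Prcb->NextThread = NextThread;      /* update volatile PRCB data */
//     KiReleasePrcbLock(Prcb);
//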
589
590 //
591 // This routine acquires the thread lock so that only one caller can touch
592 // volatile thread data.
593 //
594 // Since this is a simple optimized spin-lock, it must only be acquired
595 // at dispatcher level or higher!
596 //
597 FORCEINLINE
598 VOID
599 KiAcquireThreadLock(IN PKTHREAD Thread)
600 {
601 /* Make sure we're at a safe level to touch the thread lock */
602 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
603
604 /* Start acquire loop */
605 for (;;)
606 {
607 /* Acquire the lock and break out if we acquired it first */
608 if (!InterlockedExchange((PLONG)&Thread->ThreadLock, 1)) break;
609
610 /* Loop until the other CPU releases it */
611 do
612 {
613 /* Let the CPU know that this is a loop */
614 YieldProcessor();
615 } while (Thread->ThreadLock);
616 }
617 }
618
619 //
620 // This routine releases the thread lock so that other callers can touch
621 // volatile thread data.
622 //
623 // Since this is a simple optimized spin-lock, it must only be acquired
624 // at dispatcher level or higher!
625 //
626 FORCEINLINE
627 VOID
628 KiReleaseThreadLock(IN PKTHREAD Thread)
629 {
630 /* Make sure we are still above dispatch */
631 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
632
633 /* Release it */
634 InterlockedAnd((PLONG)&Thread->ThreadLock, 0);
635 }
636
637 FORCEINLINE
638 BOOLEAN
639 KiTryThreadLock(IN PKTHREAD Thread)
640 {
641 LONG Value;
642
643 /* If the lock isn't acquired, return false */
644 if (!Thread->ThreadLock) return FALSE;
645
646 /* Otherwise, try to acquire it and check the result */
647 Value = 1;
648 Value = InterlockedExchange((PLONG)&Thread->ThreadLock, Value);
649
650 /* Return the lock state */
651 return (Value == TRUE);
652 }
653
654 FORCEINLINE
655 VOID
656 KiCheckDeferredReadyList(IN PKPRCB Prcb)
657 {
658 /* Scan the deferred ready lists if required */
659 if (Prcb->DeferredReadyListHead.Next) KiProcessDeferredReadyList(Prcb);
660 }
661
662 FORCEINLINE
663 VOID
664 KiRundownThread(IN PKTHREAD Thread)
665 {
666 /* Nothing to do */
667 return;
668 }
669
670 FORCEINLINE
671 VOID
672 KiRequestApcInterrupt(IN BOOLEAN NeedApc,
673 IN UCHAR Processor)
674 {
675 /* Check if we need to request APC delivery */
676 if (NeedApc)
677 {
678 /* Check if it's on another CPU */
679 if (KeGetPcr()->Number != Processor)
680 {
681 /* Send an IPI to request delivery */
682 KiIpiSend(AFFINITY_MASK(Processor), IPI_APC);
683 }
684 else
685 {
686 /* Request a software interrupt */
687 HalRequestSoftwareInterrupt(APC_LEVEL);
688 }
689 }
690 }
691
692 FORCEINLINE
693 PKSPIN_LOCK_QUEUE
694 KiAcquireTimerLock(IN ULONG Hand)
695 {
696 PKSPIN_LOCK_QUEUE LockQueue;
697 ULONG LockIndex;
698 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
699
700 /* Get the lock index */
701 LockIndex = Hand >> LOCK_QUEUE_TIMER_LOCK_SHIFT;
702 LockIndex &= (LOCK_QUEUE_TIMER_TABLE_LOCKS - 1);
703
704 /* Now get the lock */
705 LockQueue = &KeGetCurrentPrcb()->LockQueue[LockQueueTimerTableLock + LockIndex];
706
707 /* Acquire it and return */
708 KeAcquireQueuedSpinLockAtDpcLevel(LockQueue);
709 return LockQueue;
710 }
711
712 FORCEINLINE
713 VOID
714 KiReleaseTimerLock(IN PKSPIN_LOCK_QUEUE LockQueue)
715 {
716 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
717
718 /* Release the lock */
719 KeReleaseQueuedSpinLockFromDpcLevel(LockQueue);
720 }
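
//
// Worked example for the timer-lock hand mapping above (the values shown are
// illustrative; the real ones come from LOCK_QUEUE_TIMER_LOCK_SHIFT and
// LOCK_QUEUE_TIMER_TABLE_LOCKS): with a shift of 4 and 16 table locks, timer
// hand 0x37 selects lock index (0x37 >> 4) & 15 == 3, i.e. the queued lock
// LockQueue[LockQueueTimerTableLock + 3] on the current PRCB.
//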
721
722 #endif
723
724 FORCEINLINE
725 VOID
726 KiAcquireApcLock(IN PKTHREAD Thread,
727 IN PKLOCK_QUEUE_HANDLE Handle)
728 {
729 /* Acquire the lock and raise to synchronization level */
730 KeAcquireInStackQueuedSpinLockRaiseToSynch(&Thread->ApcQueueLock, Handle);
731 }
732
733 FORCEINLINE
734 VOID
735 KiAcquireApcLockAtDpcLevel(IN PKTHREAD Thread,
736 IN PKLOCK_QUEUE_HANDLE Handle)
737 {
738 /* Acquire the lock */
739 KeAcquireInStackQueuedSpinLockAtDpcLevel(&Thread->ApcQueueLock, Handle);
740 }
741
742 FORCEINLINE
743 VOID
744 KiAcquireApcLockAtApcLevel(IN PKTHREAD Thread,
745 IN PKLOCK_QUEUE_HANDLE Handle)
746 {
747 /* Acquire the lock */
748 KeAcquireInStackQueuedSpinLock(&Thread->ApcQueueLock, Handle);
749 }
750
751 FORCEINLINE
752 VOID
753 KiReleaseApcLock(IN PKLOCK_QUEUE_HANDLE Handle)
754 {
755 /* Release the lock */
756 KeReleaseInStackQueuedSpinLock(Handle);
757 }
758
759 FORCEINLINE
760 VOID
761 KiReleaseApcLockFromDpcLevel(IN PKLOCK_QUEUE_HANDLE Handle)
762 {
763 /* Release the lock */
764 KeReleaseInStackQueuedSpinLockFromDpcLevel(Handle);
765 }
766
767 FORCEINLINE
768 VOID
769 KiAcquireProcessLock(IN PKPROCESS Process,
770 IN PKLOCK_QUEUE_HANDLE Handle)
771 {
772 /* Acquire the lock and raise to synchronization level */
773 KeAcquireInStackQueuedSpinLockRaiseToSynch(&Process->ProcessLock, Handle);
774 }
775
776 FORCEINLINE
777 VOID
778 KiReleaseProcessLock(IN PKLOCK_QUEUE_HANDLE Handle)
779 {
780 /* Release the lock */
781 KeReleaseInStackQueuedSpinLock(Handle);
782 }
783
784 FORCEINLINE
785 VOID
786 KiReleaseProcessLockFromDpcLevel(IN PKLOCK_QUEUE_HANDLE Handle)
787 {
788 /* Release the lock */
789 KeReleaseInStackQueuedSpinLockFromDpcLevel(Handle);
790 }
791
792 FORCEINLINE
793 VOID
794 KiAcquireDeviceQueueLock(IN PKDEVICE_QUEUE DeviceQueue,
795 IN PKLOCK_QUEUE_HANDLE DeviceLock)
796 {
797 /* Check if we were called from a threaded DPC */
798 if (KeGetCurrentPrcb()->DpcThreadActive)
799 {
800 /* Lock the Queue, we're not at DPC level */
801 KeAcquireInStackQueuedSpinLock(&DeviceQueue->Lock, DeviceLock);
802 }
803 else
804 {
805 /* We must be at DPC level, acquire the lock safely */
806 ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
807 KeAcquireInStackQueuedSpinLockAtDpcLevel(&DeviceQueue->Lock,
808 DeviceLock);
809 }
810 }
811
812 FORCEINLINE
813 VOID
814 KiReleaseDeviceQueueLock(IN PKLOCK_QUEUE_HANDLE DeviceLock)
815 {
816 /* Check if we were called from a threaded DPC */
817 if (KeGetCurrentPrcb()->DpcThreadActive)
818 {
819 /* Unlock the Queue, we're not at DPC level */
820 KeReleaseInStackQueuedSpinLock(DeviceLock);
821 }
822 else
823 {
824 /* We must be at DPC level, release the lock safely */
825 ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
826 KeReleaseInStackQueuedSpinLockFromDpcLevel(DeviceLock);
827 }
828 }
829
830 //
831 // Satisfies the wait of any dispatcher object
832 //
833 #define KiSatisfyObjectWait(Object, Thread) \
834 { \
835 /* Special case for Mutants */ \
836 if ((Object)->Header.Type == MutantObject) \
837 { \
838 /* Decrease the Signal State */ \
839 (Object)->Header.SignalState--; \
840 \
841 /* Check if it's now non-signaled */ \
842 if (!(Object)->Header.SignalState) \
843 { \
844 /* Set the Owner Thread */ \
845 (Object)->OwnerThread = Thread; \
846 \
847 /* Disable APCs if needed */ \
848 Thread->KernelApcDisable = Thread->KernelApcDisable - \
849 (Object)->ApcDisable; \
850 \
851 /* Check if it's abandoned */ \
852 if ((Object)->Abandoned) \
853 { \
854 /* Unabandon it */ \
855 (Object)->Abandoned = FALSE; \
856 \
857 /* Return Status */ \
858 Thread->WaitStatus = STATUS_ABANDONED; \
859 } \
860 \
861 /* Insert it into the Mutant List */ \
862 InsertHeadList(Thread->MutantListHead.Blink, \
863 &(Object)->MutantListEntry); \
864 } \
865 } \
866 else if (((Object)->Header.Type & TIMER_OR_EVENT_TYPE) == \
867 EventSynchronizationObject) \
868 { \
869 /* Synchronization Timers and Events just get un-signaled */ \
870 (Object)->Header.SignalState = 0; \
871 } \
872 else if ((Object)->Header.Type == SemaphoreObject) \
873 { \
874 /* These ones can have multiple states, so we only decrease it */ \
875 (Object)->Header.SignalState--; \
876 } \
877 }
878
879 //
880 // Satisfies the wait of a mutant dispatcher object
881 //
882 #define KiSatisfyMutantWait(Object, Thread) \
883 { \
884 /* Decrease the Signal State */ \
885 (Object)->Header.SignalState--; \
886 \
887 /* Check if it's now non-signaled */ \
888 if (!(Object)->Header.SignalState) \
889 { \
890 /* Set the Owner Thread */ \
891 (Object)->OwnerThread = Thread; \
892 \
893 /* Disable APCs if needed */ \
894 Thread->KernelApcDisable = Thread->KernelApcDisable - \
895 (Object)->ApcDisable; \
896 \
897 /* Check if it's abandoned */ \
898 if ((Object)->Abandoned) \
899 { \
900 /* Unabandon it */ \
901 (Object)->Abandoned = FALSE; \
902 \
903 /* Return Status */ \
904 Thread->WaitStatus = STATUS_ABANDONED; \
905 } \
906 \
907 /* Insert it into the Mutant List */ \
908 InsertHeadList(Thread->MutantListHead.Blink, \
909 &(Object)->MutantListEntry); \
910 } \
911 }
912
913 //
914 // Satisfies the wait of any nonmutant dispatcher object
915 //
916 #define KiSatisfyNonMutantWait(Object) \
917 { \
918 if (((Object)->Header.Type & TIMER_OR_EVENT_TYPE) == \
919 EventSynchronizationObject) \
920 { \
921 /* Synchronization Timers and Events just get un-signaled */ \
922 (Object)->Header.SignalState = 0; \
923 } \
924 else if ((Object)->Header.Type == SemaphoreObject) \
925 { \
926 /* These ones can have multiple states, so we only decrease it */ \
927 (Object)->Header.SignalState--; \
928 } \
929 }
930
931 //
932 // Recalculates the due time
933 //
934 FORCEINLINE
935 PLARGE_INTEGER
936 KiRecalculateDueTime(IN PLARGE_INTEGER OriginalDueTime,
937 IN PLARGE_INTEGER DueTime,
938 IN OUT PLARGE_INTEGER NewDueTime)
939 {
940 /* Don't do anything for absolute waits */
941 if (OriginalDueTime->QuadPart >= 0) return OriginalDueTime;
942
943 /* Otherwise, query the interrupt time and recalculate */
944 NewDueTime->QuadPart = KeQueryInterruptTime();
945 NewDueTime->QuadPart -= DueTime->QuadPart;
946 return NewDueTime;
947 }
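
//
// Worked example (illustrative figures): a relative wait comes in with a
// negative OriginalDueTime, and DueTime holds the absolute interrupt-time
// deadline computed when the wait started. If that deadline is 5000000 and
// the current interrupt time is 2000000, the recalculated wait is
// 2000000 - 5000000 == -3000000, i.e. 0.3 seconds of relative wait (in 100ns
// units) still remaining.
//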
948
949 //
950 // Determines whether a thread should be added to the wait list
951 //
952 FORCEINLINE
953 BOOLEAN
954 KiCheckThreadStackSwap(IN PKTHREAD Thread,
955 IN KPROCESSOR_MODE WaitMode)
956 {
957 /* Check the required conditions */
958 if ((WaitMode != KernelMode) &&
959 (Thread->EnableStackSwap) &&
960 (Thread->Priority >= (LOW_REALTIME_PRIORITY + 9)))
961 {
962 /* We are go for swap */
963 return TRUE;
964 }
965 else
966 {
967 /* Don't swap the thread */
968 return FALSE;
969 }
970 }
971
972 //
973 // Adds a thread to the wait list
974 //
975 #define KiAddThreadToWaitList(Thread, Swappable) \
976 { \
977 /* Make sure it's swappable */ \
978 if (Swappable) \
979 { \
980 /* Insert it into the PRCB's List */ \
981 InsertTailList(&KeGetCurrentPrcb()->WaitListHead, \
982 &Thread->WaitListEntry); \
983 } \
984 }
985
986 //
987 // Checks if a wait in progress should be interrupted by APCs or an alertable
988 // state.
989 //
990 FORCEINLINE
991 NTSTATUS
992 KiCheckAlertability(IN PKTHREAD Thread,
993 IN BOOLEAN Alertable,
994 IN KPROCESSOR_MODE WaitMode)
995 {
996 /* Check if the wait is alertable */
997 if (Alertable)
998 {
999 /* It is, first check if the thread is alerted in this mode */
1000 if (Thread->Alerted[WaitMode])
1001 {
1002 /* It is, so bail out of the wait */
1003 Thread->Alerted[WaitMode] = FALSE;
1004 return STATUS_ALERTED;
1005 }
1006 else if ((WaitMode != KernelMode) &&
1007 (!IsListEmpty(&Thread->ApcState.ApcListHead[UserMode])))
1008 {
1009 /* It isn't, but this is a user wait with queued user APCs */
1010 Thread->ApcState.UserApcPending = TRUE;
1011 return STATUS_USER_APC;
1012 }
1013 else if (Thread->Alerted[KernelMode])
1014 {
1015 /* It isn't that either, but we're alerted in kernel mode */
1016 Thread->Alerted[KernelMode] = FALSE;
1017 return STATUS_ALERTED;
1018 }
1019 }
1020 else if ((WaitMode != KernelMode) && (Thread->ApcState.UserApcPending))
1021 {
1022 /* Not alertable, but this is a user wait with pending user APCs */
1023 return STATUS_USER_APC;
1024 }
1025
1026 /* Otherwise, we're fine */
1027 return STATUS_WAIT_0;
1028 }
1029
1030 //
1031 // Called from KiCompleteTimer, KiInsertTreeTimer, KeSetSystemTime
1032 // to remove timer entries
1033 // See Windows HPI blog for more information.
1034 FORCEINLINE
1035 VOID
1036 KiRemoveEntryTimer(IN PKTIMER Timer)
1037 {
1038 ULONG Hand;
1039 PKTIMER_TABLE_ENTRY TableEntry;
1040
1041 /* Remove the timer from the timer list and check if it's empty */
1042 Hand = Timer->Header.Hand;
1043 if (RemoveEntryList(&Timer->TimerListEntry))
1044 {
1045 /* Get the respective timer table entry */
1046 TableEntry = &KiTimerTableListHead[Hand];
1047 if (&TableEntry->Entry == TableEntry->Entry.Flink)
1048 {
1049 /* Set the entry to an infinite absolute time */
1050 TableEntry->Time.HighPart = 0xFFFFFFFF;
1051 }
1052 }
1053
1054 /* Clear the list entries on dbg builds so we can tell the timer is gone */
1055 #if DBG
1056 Timer->TimerListEntry.Flink = NULL;
1057 Timer->TimerListEntry.Blink = NULL;
1058 #endif
1059 }
1060
1061 //
1062 // Called by Wait and Queue code to insert a timer for dispatching.
1063 // Also called by KeSetTimerEx to insert a timer from the caller.
1064 //
1065 FORCEINLINE
1066 VOID
1067 KxInsertTimer(IN PKTIMER Timer,
1068 IN ULONG Hand)
1069 {
1070 PKSPIN_LOCK_QUEUE LockQueue;
1071
1072 /* Acquire the lock and release the dispatcher lock */
1073 LockQueue = KiAcquireTimerLock(Hand);
1074 KiReleaseDispatcherLockFromDpcLevel();
1075
1076 /* Try to insert the timer */
1077 if (KiInsertTimerTable(Timer, Hand))
1078 {
1079 /* Complete it */
1080 KiCompleteTimer(Timer, LockQueue);
1081 }
1082 else
1083 {
1084 /* Do nothing, just release the lock */
1085 KiReleaseTimerLock(LockQueue);
1086 }
1087 }
1088
1089 //
1090 // Called by KeSetTimerEx and KiInsertTreeTimer to calculate Due Time
1091 // See the Windows HPI Blog for more information
1092 //
1093 FORCEINLINE
1094 BOOLEAN
1095 KiComputeDueTime(IN PKTIMER Timer,
1096 IN LARGE_INTEGER DueTime,
1097 OUT PULONG Hand)
1098 {
1099 LARGE_INTEGER InterruptTime, SystemTime, DifferenceTime;
1100
1101 /* Convert to relative time if needed */
1102 Timer->Header.Absolute = FALSE;
1103 if (DueTime.HighPart >= 0)
1104 {
1105 /* Get System Time */
1106 KeQuerySystemTime(&SystemTime);
1107
1108 /* Do the conversion */
1109 DifferenceTime.QuadPart = SystemTime.QuadPart - DueTime.QuadPart;
1110
1111 /* Make sure it hasn't already expired */
1112 Timer->Header.Absolute = TRUE;
1113 if (DifferenceTime.HighPart >= 0)
1114 {
1115 /* Cancel everything */
1116 Timer->Header.SignalState = TRUE;
1117 Timer->Header.Hand = 0;
1118 Timer->DueTime.QuadPart = 0;
1119 *Hand = 0;
1120 return FALSE;
1121 }
1122
1123 /* Use the relative time from here on */
1124 DueTime = DifferenceTime;
1125 }
1126
1127 /* Get the Interrupt Time */
1128 InterruptTime.QuadPart = KeQueryInterruptTime();
1129
1130 /* Recalculate due time */
1131 Timer->DueTime.QuadPart = InterruptTime.QuadPart - DueTime.QuadPart;
1132
1133 /* Get the handle */
1134 *Hand = KiComputeTimerTableIndex(Timer->DueTime.QuadPart);
1135 Timer->Header.Hand = (UCHAR)*Hand;
1136 Timer->Header.Inserted = TRUE;
1137 return TRUE;
1138 }
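
//
// Worked example (illustrative figures): for an absolute DueTime two seconds
// in the future, SystemTime - DueTime yields -20000000, which is negative, so
// the timer has not expired and the interval becomes relative. The deadline
// is then InterruptTime - (-20000000) == InterruptTime + 20000000, and the
// hand is whatever KiComputeTimerTableIndex returns for that deadline.
//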
1139
1140 //
1141 // Called from Unlink and Queue Insert Code.
1142 // Also called by timer code when canceling an inserted timer.
1143 // Removes a timer from its tree.
1144 //
1145 FORCEINLINE
1146 VOID
1147 KxRemoveTreeTimer(IN PKTIMER Timer)
1148 {
1149 ULONG Hand = Timer->Header.Hand;
1150 PKSPIN_LOCK_QUEUE LockQueue;
1151 PKTIMER_TABLE_ENTRY TimerEntry;
1152
1153 /* Acquire timer lock */
1154 LockQueue = KiAcquireTimerLock(Hand);
1155
1156 /* Set the timer as non-inserted */
1157 Timer->Header.Inserted = FALSE;
1158
1159 /* Remove it from the timer list */
1160 if (RemoveEntryList(&Timer->TimerListEntry))
1161 {
1162 /* Get the entry and check if it's empty */
1163 TimerEntry = &KiTimerTableListHead[Hand];
1164 if (IsListEmpty(&TimerEntry->Entry))
1165 {
1166 /* Clear the time then */
1167 TimerEntry->Time.HighPart = 0xFFFFFFFF;
1168 }
1169 }
1170
1171 /* Release the timer lock */
1172 KiReleaseTimerLock(LockQueue);
1173 }
1174
1175 FORCEINLINE
1176 VOID
1177 KxSetTimerForThreadWait(IN PKTIMER Timer,
1178 IN LARGE_INTEGER Interval,
1179 OUT PULONG Hand)
1180 {
1181 ULONGLONG DueTime;
1182 LARGE_INTEGER InterruptTime, SystemTime, TimeDifference;
1183
1184 /* Check the timer's interval to see if it's absolute */
1185 Timer->Header.Absolute = FALSE;
1186 if (Interval.HighPart >= 0)
1187 {
1188 /* Get the system time and calculate the relative time */
1189 KeQuerySystemTime(&SystemTime);
1190 TimeDifference.QuadPart = SystemTime.QuadPart - Interval.QuadPart;
1191 Timer->Header.Absolute = TRUE;
1192
1193 /* Check if we've already expired */
1194 if (TimeDifference.HighPart >= 0)
1195 {
1196 /* Reset everything */
1197 Timer->DueTime.QuadPart = 0;
1198 *Hand = 0;
1199 Timer->Header.Hand = 0;
1200 return;
1201 }
1202 else
1203 {
1204 /* Update the interval */
1205 Interval = TimeDifference;
1206 }
1207 }
1208
1209 /* Calculate the due time */
1210 InterruptTime.QuadPart = KeQueryInterruptTime();
1211 DueTime = InterruptTime.QuadPart - Interval.QuadPart;
1212 Timer->DueTime.QuadPart = DueTime;
1213
1214 /* Calculate the timer handle */
1215 *Hand = KiComputeTimerTableIndex(DueTime);
1216 Timer->Header.Hand = (UCHAR)*Hand;
1217 }
1218
1219 #define KxDelayThreadWait() \
1220 \
1221 /* Setup the Wait Block */ \
1222 Thread->WaitBlockList = TimerBlock; \
1223 \
1224 /* Setup the timer */ \
1225 KxSetTimerForThreadWait(Timer, *Interval, &Hand); \
1226 \
1227 /* Save the due time for the caller */ \
1228 DueTime.QuadPart = Timer->DueTime.QuadPart; \
1229 \
1230 /* Link the timer to this Wait Block */ \
1231 TimerBlock->NextWaitBlock = TimerBlock; \
1232 Timer->Header.WaitListHead.Flink = &TimerBlock->WaitListEntry; \
1233 Timer->Header.WaitListHead.Blink = &TimerBlock->WaitListEntry; \
1234 \
1235 /* Clear wait status */ \
1236 Thread->WaitStatus = STATUS_SUCCESS; \
1237 \
1238 /* Setup wait fields */ \
1239 Thread->Alertable = Alertable; \
1240 Thread->WaitReason = DelayExecution; \
1241 Thread->WaitMode = WaitMode; \
1242 \
1243 /* Check if we can swap the thread's stack */ \
1244 Thread->WaitListEntry.Flink = NULL; \
1245 Swappable = KiCheckThreadStackSwap(Thread, WaitMode); \
1246 \
1247 /* Set the wait time */ \
1248 Thread->WaitTime = KeTickCount.LowPart;
1249
1250 #define KxMultiThreadWait() \
1251 /* Link wait block array to the thread */ \
1252 Thread->WaitBlockList = WaitBlockArray; \
1253 \
1254 /* Reset the index */ \
1255 Index = 0; \
1256 \
1257 /* Loop wait blocks */ \
1258 do \
1259 { \
1260 /* Fill out the wait block */ \
1261 WaitBlock = &WaitBlockArray[Index]; \
1262 WaitBlock->Object = Object[Index]; \
1263 WaitBlock->WaitKey = (USHORT)Index; \
1264 WaitBlock->WaitType = WaitType; \
1265 WaitBlock->Thread = Thread; \
1266 \
1267 /* Link to next block */ \
1268 WaitBlock->NextWaitBlock = &WaitBlockArray[Index + 1]; \
1269 Index++; \
1270 } while (Index < Count); \
1271 \
1272 /* Link the last block */ \
1273 WaitBlock->NextWaitBlock = WaitBlockArray; \
1274 \
1275 /* Set default wait status */ \
1276 Thread->WaitStatus = STATUS_WAIT_0; \
1277 \
1278 /* Check if we have a timer */ \
1279 if (Timeout) \
1280 { \
1281 /* Link to the block */ \
1282 TimerBlock->NextWaitBlock = WaitBlockArray; \
1283 \
1284 /* Setup the timer */ \
1285 KxSetTimerForThreadWait(Timer, *Timeout, &Hand); \
1286 \
1287 /* Save the due time for the caller */ \
1288 DueTime.QuadPart = Timer->DueTime.QuadPart; \
1289 \
1290 /* Initialize the list */ \
1291 InitializeListHead(&Timer->Header.WaitListHead); \
1292 } \
1293 \
1294 /* Set wait settings */ \
1295 Thread->Alertable = Alertable; \
1296 Thread->WaitMode = WaitMode; \
1297 Thread->WaitReason = WaitReason; \
1298 \
1299 /* Check if we can swap the thread's stack */ \
1300 Thread->WaitListEntry.Flink = NULL; \
1301 Swappable = KiCheckThreadStackSwap(Thread, WaitMode); \
1302 \
1303 /* Set the wait time */ \
1304 Thread->WaitTime = KeTickCount.LowPart;
1305
1306 #define KxSingleThreadWait() \
1307 /* Setup the Wait Block */ \
1308 Thread->WaitBlockList = WaitBlock; \
1309 WaitBlock->WaitKey = STATUS_SUCCESS; \
1310 WaitBlock->Object = Object; \
1311 WaitBlock->WaitType = WaitAny; \
1312 \
1313 /* Clear wait status */ \
1314 Thread->WaitStatus = STATUS_SUCCESS; \
1315 \
1316 /* Check if we have a timer */ \
1317 if (Timeout) \
1318 { \
1319 /* Setup the timer */ \
1320 KxSetTimerForThreadWait(Timer, *Timeout, &Hand); \
1321 \
1322 /* Save the due time for the caller */ \
1323 DueTime.QuadPart = Timer->DueTime.QuadPart; \
1324 \
1325 /* Pointer to timer block */ \
1326 WaitBlock->NextWaitBlock = TimerBlock; \
1327 TimerBlock->NextWaitBlock = WaitBlock; \
1328 \
1329 /* Link the timer to this Wait Block */ \
1330 Timer->Header.WaitListHead.Flink = &TimerBlock->WaitListEntry; \
1331 Timer->Header.WaitListHead.Blink = &TimerBlock->WaitListEntry; \
1332 } \
1333 else \
1334 { \
1335 /* No timer block, just ourselves */ \
1336 WaitBlock->NextWaitBlock = WaitBlock; \
1337 } \
1338 \
1339 /* Set wait settings */ \
1340 Thread->Alertable = Alertable; \
1341 Thread->WaitMode = WaitMode; \
1342 Thread->WaitReason = WaitReason; \
1343 \
1344 /* Check if we can swap the thread's stack */ \
1345 Thread->WaitListEntry.Flink = NULL; \
1346 Swappable = KiCheckThreadStackSwap(Thread, WaitMode); \
1347 \
1348 /* Set the wait time */ \
1349 Thread->WaitTime = KeTickCount.LowPart;
1350
1351 #define KxQueueThreadWait() \
1352 /* Setup the Wait Block */ \
1353 Thread->WaitBlockList = WaitBlock; \
1354 WaitBlock->WaitKey = STATUS_SUCCESS; \
1355 WaitBlock->Object = Queue; \
1356 WaitBlock->WaitType = WaitAny; \
1357 WaitBlock->Thread = Thread; \
1358 \
1359 /* Clear wait status */ \
1360 Thread->WaitStatus = STATUS_SUCCESS; \
1361 \
1362 /* Check if we have a timer */ \
1363 if (Timeout) \
1364 { \
1365 /* Setup the timer */ \
1366 KxSetTimerForThreadWait(Timer, *Timeout, &Hand); \
1367 \
1368 /* Save the due time for the caller */ \
1369 DueTime.QuadPart = Timer->DueTime.QuadPart; \
1370 \
1371 /* Pointer to timer block */ \
1372 WaitBlock->NextWaitBlock = TimerBlock; \
1373 TimerBlock->NextWaitBlock = WaitBlock; \
1374 \
1375 /* Link the timer to this Wait Block */ \
1376 Timer->Header.WaitListHead.Flink = &TimerBlock->WaitListEntry; \
1377 Timer->Header.WaitListHead.Blink = &TimerBlock->WaitListEntry; \
1378 } \
1379 else \
1380 { \
1381 /* No timer block, just ourselves */ \
1382 WaitBlock->NextWaitBlock = WaitBlock; \
1383 } \
1384 \
1385 /* Set wait settings */ \
1386 Thread->Alertable = FALSE; \
1387 Thread->WaitMode = WaitMode; \
1388 Thread->WaitReason = WrQueue; \
1389 \
1390 /* Check if we can swap the thread's stack */ \
1391 Thread->WaitListEntry.Flink = NULL; \
1392 Swappable = KiCheckThreadStackSwap(Thread, WaitMode); \
1393 \
1394 /* Set the wait time */ \
1395 Thread->WaitTime = KeTickCount.LowPart;
1396
1397 //
1398 // Unwaits a Thread
1399 //
1400 FORCEINLINE
1401 VOID
1402 KxUnwaitThread(IN DISPATCHER_HEADER *Object,
1403 IN KPRIORITY Increment)
1404 {
1405 PLIST_ENTRY WaitEntry, WaitList;
1406 PKWAIT_BLOCK WaitBlock;
1407 PKTHREAD WaitThread;
1408 ULONG WaitKey;
1409
1410 /* Loop the Wait Entries */
1411 WaitList = &Object->WaitListHead;
1412 ASSERT(IsListEmpty(&Object->WaitListHead) == FALSE);
1413 WaitEntry = WaitList->Flink;
1414 do
1415 {
1416 /* Get the current wait block */
1417 WaitBlock = CONTAINING_RECORD(WaitEntry, KWAIT_BLOCK, WaitListEntry);
1418
1419 /* Get the waiting thread */
1420 WaitThread = WaitBlock->Thread;
1421
1422 /* Check the current Wait Mode */
1423 if (WaitBlock->WaitType == WaitAny)
1424 {
1425 /* Use the actual wait key */
1426 WaitKey = WaitBlock->WaitKey;
1427 }
1428 else
1429 {
1430 /* Otherwise, use STATUS_KERNEL_APC */
1431 WaitKey = STATUS_KERNEL_APC;
1432 }
1433
1434 /* Unwait the thread */
1435 KiUnwaitThread(WaitThread, WaitKey, Increment);
1436
1437 /* Next entry */
1438 WaitEntry = WaitList->Flink;
1439 } while (WaitEntry != WaitList);
1440 }
1441
1442 //
1443 // Unwaits a Thread waiting on an event
1444 //
1445 FORCEINLINE
1446 VOID
1447 KxUnwaitThreadForEvent(IN PKEVENT Event,
1448 IN KPRIORITY Increment)
1449 {
1450 PLIST_ENTRY WaitEntry, WaitList;
1451 PKWAIT_BLOCK WaitBlock;
1452 PKTHREAD WaitThread;
1453
1454 /* Loop the Wait Entries */
1455 WaitList = &Event->Header.WaitListHead;
1456 ASSERT(IsListEmpty(&Event->Header.WaitListHead) == FALSE);
1457 WaitEntry = WaitList->Flink;
1458 do
1459 {
1460 /* Get the current wait block */
1461 WaitBlock = CONTAINING_RECORD(WaitEntry, KWAIT_BLOCK, WaitListEntry);
1462
1463 /* Get the waiting thread */
1464 WaitThread = WaitBlock->Thread;
1465
1466 /* Check the current Wait Mode */
1467 if (WaitBlock->WaitType == WaitAny)
1468 {
1469 /* Un-signal it */
1470 Event->Header.SignalState = 0;
1471
1472 /* Un-signal the event and unwait the thread */
1473 KiUnwaitThread(WaitThread, WaitBlock->WaitKey, Increment);
1474 break;
1475 }
1476
1477 /* Unwait the thread with STATUS_KERNEL_APC */
1478 KiUnwaitThread(WaitThread, STATUS_KERNEL_APC, Increment);
1479
1480 /* Next entry */
1481 WaitEntry = WaitList->Flink;
1482 } while (WaitEntry != WaitList);
1483 }
1484
1485 //
1486 // This routine queues a thread that is ready on the PRCB's ready lists.
1487 // If this thread cannot currently run on this CPU, then the thread is
1488 // added to the deferred ready list instead.
1489 //
1490 // This routine must be entered with the PRCB lock held and it will exit
1491 // with the PRCB lock released!
1492 //
1493 FORCEINLINE
1494 VOID
1495 KxQueueReadyThread(IN PKTHREAD Thread,
1496 IN PKPRCB Prcb)
1497 {
1498 BOOLEAN Preempted;
1499 KPRIORITY Priority;
1500
1501 /* Sanity checks */
1502 ASSERT(Prcb == KeGetCurrentPrcb());
1503 ASSERT(Thread->State == Running);
1504 ASSERT(Thread->NextProcessor == Prcb->Number);
1505
1506 /* Check if this thread is allowed to run in this CPU */
1507 #ifdef CONFIG_SMP
1508 if ((Thread->Affinity) & (Prcb->SetMember))
1509 #else
1510 if (TRUE)
1511 #endif
1512 {
1513 /* Set thread ready for execution */
1514 Thread->State = Ready;
1515
1516 /* Save current priority and if someone had pre-empted it */
1517 Priority = Thread->Priority;
1518 Preempted = Thread->Preempted;
1519
1520 /* Clear the pre-empted flag and set the wait time */
1521 Thread->Preempted = FALSE;
1522 Thread->WaitTime = KeTickCount.LowPart;
1523
1524 /* Sanity check */
1525 ASSERT((Priority >= 0) && (Priority <= HIGH_PRIORITY));
1526
1527 /* Insert this thread in the appropriate order */
1528 Preempted ? InsertHeadList(&Prcb->DispatcherReadyListHead[Priority],
1529 &Thread->WaitListEntry) :
1530 InsertTailList(&Prcb->DispatcherReadyListHead[Priority],
1531 &Thread->WaitListEntry);
1532
1533 /* Update the ready summary */
1534 Prcb->ReadySummary |= PRIORITY_MASK(Priority);
1535
1536 /* Sanity check */
1537 ASSERT(Priority == Thread->Priority);
1538
1539 /* Release the PRCB lock */
1540 KiReleasePrcbLock(Prcb);
1541 }
1542 else
1543 {
1544 /* Otherwise, prepare this thread to be deferred */
1545 Thread->State = DeferredReady;
1546 Thread->DeferredProcessor = Prcb->Number;
1547
1548 /* Release the lock and defer scheduling */
1549 KiReleasePrcbLock(Prcb);
1550 KiDeferredReadyThread(Thread);
1551 }
1552 }
1553
1554 //
1555 // This routine scans for an appropriate ready thread to select at the
1556 // given priority and for the given CPU.
1557 //
1558 FORCEINLINE
1559 PKTHREAD
1560 KiSelectReadyThread(IN KPRIORITY Priority,
1561 IN PKPRCB Prcb)
1562 {
1563 ULONG PrioritySet;
1564 LONG HighPriority;
1565 PLIST_ENTRY ListEntry;
1566 PKTHREAD Thread = NULL;
1567
1568 /* Get the ready summary shifted down to the given priority */
1569 PrioritySet = Prcb->ReadySummary >> Priority;
1570 if (!PrioritySet) goto Quickie;
1571
1572 /* Get the highest priority possible */
1573 BitScanReverse((PULONG)&HighPriority, PrioritySet);
1574 ASSERT((PrioritySet & PRIORITY_MASK(HighPriority)) != 0);
1575 HighPriority += Priority;
1576
1577 /* Make sure the list isn't empty at the highest priority */
1578 ASSERT(IsListEmpty(&Prcb->DispatcherReadyListHead[HighPriority]) == FALSE);
1579
1580 /* Get the first thread on the list */
1581 ListEntry = Prcb->DispatcherReadyListHead[HighPriority].Flink;
1582 Thread = CONTAINING_RECORD(ListEntry, KTHREAD, WaitListEntry);
1583
1584 /* Make sure this thread is here for a reason */
1585 ASSERT(HighPriority == Thread->Priority);
1586 ASSERT(Thread->Affinity & AFFINITY_MASK(Prcb->Number));
1587 ASSERT(Thread->NextProcessor == Prcb->Number);
1588
1589 /* Remove it from the list */
1590 if (RemoveEntryList(&Thread->WaitListEntry))
1591 {
1592 /* The list is empty now, reset the ready summary */
1593 Prcb->ReadySummary ^= PRIORITY_MASK(HighPriority);
1594 }
1595
1596 /* Sanity check and return the thread */
1597 Quickie:
1598 ASSERT((Thread == NULL) ||
1599 (Thread->BasePriority == 0) ||
1600 (Thread->Priority != 0));
1601 return Thread;
1602 }
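
//
// Worked example for the priority scan above (values are illustrative): with
// ReadySummary == 0x480 (ready threads at priorities 7 and 10) and a minimum
// Priority of 5, PrioritySet is 0x480 >> 5 == 0x24, BitScanReverse reports
// bit 5, and HighPriority becomes 5 + 5 == 10, so the priority-10 ready list
// is dequeued first.
//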
1603
1604 //
1605 // This routine computes the new priority for a thread. It is only valid for
1606 // threads with priorities in the dynamic priority range.
1607 //
1608 FORCEINLINE
1609 SCHAR
1610 KiComputeNewPriority(IN PKTHREAD Thread,
1611 IN SCHAR Adjustment)
1612 {
1613 SCHAR Priority;
1614
1615 /* Priority sanity checks */
1616 ASSERT((Thread->PriorityDecrement >= 0) &&
1617 (Thread->PriorityDecrement <= Thread->Priority));
1618 ASSERT((Thread->Priority < LOW_REALTIME_PRIORITY) ?
1619 TRUE : (Thread->PriorityDecrement == 0));
1620
1621 /* Get the current priority */
1622 Priority = Thread->Priority;
1623 if (Priority < LOW_REALTIME_PRIORITY)
1624 {
1625 /* Decrease priority by the priority decrement */
1626 Priority -= (Thread->PriorityDecrement + Adjustment);
1627
1628 /* Don't go out of bounds */
1629 if (Priority < Thread->BasePriority) Priority = Thread->BasePriority;
1630
1631 /* Reset the priority decrement */
1632 Thread->PriorityDecrement = 0;
1633 }
1634
1635 /* Sanity check */
1636 ASSERT((Thread->BasePriority == 0) || (Priority != 0));
1637
1638 /* Return the new priority */
1639 return Priority;
1640 }
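
//
// Worked example (illustrative numbers): a dynamic-priority thread currently
// at priority 14 with PriorityDecrement 3 and an Adjustment of 1 is lowered
// to 14 - (3 + 1) == 10, raised back to its BasePriority if that is higher,
// and its PriorityDecrement is reset to 0. Threads already in the real-time
// range (LOW_REALTIME_PRIORITY and above) are returned unchanged.
//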
1641
1642 //
1643 // Guarded Mutex Routines
1644 //
1645 FORCEINLINE
1646 VOID
1647 _KeInitializeGuardedMutex(OUT PKGUARDED_MUTEX GuardedMutex)
1648 {
1649 /* Setup the Initial Data */
1650 GuardedMutex->Count = GM_LOCK_BIT;
1651 GuardedMutex->Owner = NULL;
1652 GuardedMutex->Contention = 0;
1653
1654 /* Initialize the Wait Gate */
1655 KeInitializeGate(&GuardedMutex->Gate);
1656 }
1657
1658 FORCEINLINE
1659 VOID
1660 _KeAcquireGuardedMutexUnsafe(IN OUT PKGUARDED_MUTEX GuardedMutex)
1661 {
1662 PKTHREAD Thread = KeGetCurrentThread();
1663
1664 /* Sanity checks */
1665 ASSERT((KeGetCurrentIrql() == APC_LEVEL) ||
1666 (Thread->SpecialApcDisable < 0) ||
1667 (Thread->Teb == NULL) ||
1668 (Thread->Teb >= (PTEB)MM_SYSTEM_RANGE_START));
1669 ASSERT(GuardedMutex->Owner != Thread);
1670
1671 /* Remove the lock */
1672 if (!InterlockedBitTestAndReset(&GuardedMutex->Count, GM_LOCK_BIT_V))
1673 {
1674 /* The Guarded Mutex was already locked, enter contended case */
1675 KiAcquireGuardedMutex(GuardedMutex);
1676 }
1677
1678 /* Set the Owner */
1679 GuardedMutex->Owner = Thread;
1680 }
1681
1682 FORCEINLINE
1683 VOID
1684 _KeReleaseGuardedMutexUnsafe(IN OUT PKGUARDED_MUTEX GuardedMutex)
1685 {
1686 LONG OldValue, NewValue;
1687
1688 /* Sanity checks */
1689 ASSERT((KeGetCurrentIrql() == APC_LEVEL) ||
1690 (KeGetCurrentThread()->SpecialApcDisable < 0) ||
1691 (KeGetCurrentThread()->Teb == NULL) ||
1692 (KeGetCurrentThread()->Teb >= (PTEB)MM_SYSTEM_RANGE_START));
1693 ASSERT(GuardedMutex->Owner == KeGetCurrentThread());
1694
1695 /* Destroy the Owner */
1696 GuardedMutex->Owner = NULL;
1697
1698 /* Add the Lock Bit */
1699 OldValue = InterlockedExchangeAdd(&GuardedMutex->Count, GM_LOCK_BIT);
1700 ASSERT((OldValue & GM_LOCK_BIT) == 0);
1701
1702 /* Check if it was already locked, but not woken */
1703 if ((OldValue) && !(OldValue & GM_LOCK_WAITER_WOKEN))
1704 {
1705 /* Update the Oldvalue to what it should be now */
1706 OldValue += GM_LOCK_BIT;
1707
1708 /* The mutex will be woken, minus one waiter */
1709 NewValue = OldValue + GM_LOCK_WAITER_WOKEN -
1710 GM_LOCK_WAITER_INC;
1711
1712 /* Remove the Woken bit */
1713 if (InterlockedCompareExchange(&GuardedMutex->Count,
1714 NewValue,
1715 OldValue) == OldValue)
1716 {
1717 /* Signal the Gate */
1718 KeSignalGateBoostPriority(&GuardedMutex->Gate);
1719 }
1720 }
1721 }
1722
1723 FORCEINLINE
1724 VOID
1725 _KeAcquireGuardedMutex(IN PKGUARDED_MUTEX GuardedMutex)
1726 {
1727 PKTHREAD Thread = KeGetCurrentThread();
1728
1729 /* Sanity checks */
1730 ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
1731 ASSERT(GuardedMutex->Owner != Thread);
1732
1733 /* Disable Special APCs */
1734 KeEnterGuardedRegion();
1735
1736 /* Remove the lock */
1737 if (!InterlockedBitTestAndReset(&GuardedMutex->Count, GM_LOCK_BIT_V))
1738 {
1739 /* The Guarded Mutex was already locked, enter contended case */
1740 KiAcquireGuardedMutex(GuardedMutex);
1741 }
1742
1743 /* Set the Owner and Special APC Disable state */
1744 GuardedMutex->Owner = Thread;
1745 GuardedMutex->SpecialApcDisable = Thread->SpecialApcDisable;
1746 }
1747
1748 FORCEINLINE
1749 VOID
1750 _KeReleaseGuardedMutex(IN OUT PKGUARDED_MUTEX GuardedMutex)
1751 {
1752 LONG OldValue, NewValue;
1753
1754 /* Sanity checks */
1755 ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
1756 ASSERT(GuardedMutex->Owner == KeGetCurrentThread());
1757 ASSERT(KeGetCurrentThread()->SpecialApcDisable ==
1758 GuardedMutex->SpecialApcDisable);
1759
1760 /* Destroy the Owner */
1761 GuardedMutex->Owner = NULL;
1762
1763 /* Add the Lock Bit */
1764 OldValue = InterlockedExchangeAdd(&GuardedMutex->Count, GM_LOCK_BIT);
1765 ASSERT((OldValue & GM_LOCK_BIT) == 0);
1766
1767 /* Check if it was already locked, but not woken */
1768 if ((OldValue) && !(OldValue & GM_LOCK_WAITER_WOKEN))
1769 {
1770 /* Update the Oldvalue to what it should be now */
1771 OldValue += GM_LOCK_BIT;
1772
1773 /* The mutex will be woken, minus one waiter */
1774 NewValue = OldValue + GM_LOCK_WAITER_WOKEN -
1775 GM_LOCK_WAITER_INC;
1776
1777 /* Remove the Woken bit */
1778 if (InterlockedCompareExchange(&GuardedMutex->Count,
1779 NewValue,
1780 OldValue) == OldValue)
1781 {
1782 /* Signal the Gate */
1783 KeSignalGateBoostPriority(&GuardedMutex->Gate);
1784 }
1785 }
1786
1787 /* Re-enable APCs */
1788 KeLeaveGuardedRegion();
1789 }
1790
1791 FORCEINLINE
1792 BOOLEAN
1793 _KeTryToAcquireGuardedMutex(IN OUT PKGUARDED_MUTEX GuardedMutex)
1794 {
1795 PKTHREAD Thread = KeGetCurrentThread();
1796
1797 /* Block APCs */
1798 KeEnterGuardedRegion();
1799
1800 /* Remove the lock */
1801 if (!InterlockedBitTestAndReset(&GuardedMutex->Count, GM_LOCK_BIT_V))
1802 {
1803 /* Re-enable APCs */
1804 KeLeaveGuardedRegion();
1805 YieldProcessor();
1806
1807 /* Return failure */
1808 return FALSE;
1809 }
1810
1811 /* Set the Owner and APC State */
1812 GuardedMutex->Owner = Thread;
1813 GuardedMutex->SpecialApcDisable = Thread->SpecialApcDisable;
1814 return TRUE;
1815 }
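
//
// Usage sketch for the guarded mutex helpers above (illustrative; Mutex is an
// assumed caller-owned KGUARDED_MUTEX initialized with
// _KeInitializeGuardedMutex). The non-Unsafe variants enter and leave a
// guarded region themselves, so they are called at IRQL <= APC_LEVEL with no
// extra protection:
//
//     _KeAcquireGuardedMutex(&Mutex);
//     /* ... exclusive access to the data the mutex guards ... */
//     _KeReleaseGuardedMutex(&Mutex);
//
//     if (_KeTryToAcquireGuardedMutex(&Mutex))
//     {
//         /* ... acquired without blocking ... */
//         _KeReleaseGuardedMutex(&Mutex);
//     }
//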