1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: GPL - See COPYING in the top level directory
4 * FILE: ntoskrnl/include/internal/ke_x.h
5 * PURPOSE: Internal Inlined Functions for the Kernel
6 * PROGRAMMERS: Alex Ionescu (alex.ionescu@reactos.org)
7 */
8
9 //
10 // Thread Dispatcher Header DebugActive Mask
11 //
12 #define DR_MASK(x) (1 << (x))
13 #define DR_ACTIVE_MASK 0x10
14 #define DR_REG_MASK 0x4F
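//
// Illustrative note (hypothetical values): DR_MASK(n) selects the DebugActive
// bit for debug register index n. For example, a thread tracking DR0 and DR3
// would carry (DR_MASK(0) | DR_MASK(3)) == 0x09 in its mask.
//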
15
16 #ifdef _M_IX86
17 //
18 // Sanitizes a selector
19 //
20 FORCEINLINE
21 ULONG
22 Ke386SanitizeSeg(IN ULONG Cs,
23 IN KPROCESSOR_MODE Mode)
24 {
25 //
26 // Check if we're in kernel-mode, and force CPL 0 if so.
27 // Otherwise, force CPL 3.
28 //
29 return ((Mode == KernelMode) ?
30 (Cs & (0xFFFF & ~RPL_MASK)) :
31 (RPL_MASK | (Cs & 0xFFFF)));
32 }
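//
// Illustrative example (selector values hypothetical, RPL_MASK == 3):
// sanitizing for KernelMode forces RPL 0, sanitizing for UserMode forces RPL 3.
//
//     ASSERT(Ke386SanitizeSeg(0x1B, KernelMode) == 0x18);
//     ASSERT(Ke386SanitizeSeg(0x08, UserMode)   == 0x0B);
//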
33
34 //
35 // Sanitizes EFLAGS
36 //
37 FORCEINLINE
38 ULONG
39 Ke386SanitizeFlags(IN ULONG Eflags,
40 IN KPROCESSOR_MODE Mode)
41 {
42 //
43 // Check if we're in kernel-mode, and sanitize EFLAGS if so.
44 // Otherwise, also force interrupt mask on.
45 //
46 return ((Mode == KernelMode) ?
47 (Eflags & (EFLAGS_USER_SANITIZE | EFLAGS_INTERRUPT_MASK)) :
48 (EFLAGS_INTERRUPT_MASK | (Eflags & EFLAGS_USER_SANITIZE)));
49 }
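//
// Illustrative note: for a user-mode frame the result always has the interrupt
// flag forced on and keeps only bits permitted by EFLAGS_USER_SANITIZE, so
// e.g. Ke386SanitizeFlags(0, UserMode) still yields EFLAGS_INTERRUPT_MASK
// (0x200).
//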
50
51 //
52 // Gets a DR register from a CONTEXT structure
53 //
54 FORCEINLINE
55 PVOID
56 KiDrFromContext(IN ULONG Dr,
57 IN PCONTEXT Context)
58 {
59 return *(PVOID*)((ULONG_PTR)Context + KiDebugRegisterContextOffsets[Dr]);
60 }
61
62 //
63 // Gets a DR register from a KTRAP_FRAME structure
64 //
65 FORCEINLINE
66 PVOID*
67 KiDrFromTrapFrame(IN ULONG Dr,
68 IN PKTRAP_FRAME TrapFrame)
69 {
70 return (PVOID*)((ULONG_PTR)TrapFrame + KiDebugRegisterTrapOffsets[Dr]);
71 }
72
73 //
74 // Sanitizes a Debug Register address
75 //
76 FORCEINLINE
77 PVOID
78 Ke386SanitizeDr(IN PVOID DrAddress,
79 IN KPROCESSOR_MODE Mode)
80 {
81 //
82 // Check if we're in kernel-mode, and return the address directly if so.
83 // Otherwise, make sure it's not inside the kernel-mode address space.
84 // If it is, then clear the address.
85 //
86 return ((Mode == KernelMode) ? DrAddress :
87 (DrAddress <= MM_HIGHEST_USER_ADDRESS) ? DrAddress : 0);
88 }
89 #endif /* _M_IX86 */
90
91 #ifndef _M_ARM
92 FORCEINLINE
93 PRKTHREAD
94 KeGetCurrentThread(VOID)
95 {
96 #ifdef _M_IX86
97 /* Return the current thread */
98 return ((PKIPCR)KeGetPcr())->PrcbData.CurrentThread;
99 #elif defined (_M_AMD64)
100 return (PRKTHREAD)__readgsqword(FIELD_OFFSET(KIPCR, Prcb.CurrentThread));
101 #else
102 PKPRCB Prcb = KeGetCurrentPrcb();
103 return Prcb->CurrentThread;
104 #endif
105 }
106
107 FORCEINLINE
108 UCHAR
109 KeGetPreviousMode(VOID)
110 {
111 /* Return the current mode */
112 return KeGetCurrentThread()->PreviousMode;
113 }
114 #endif
115
116 FORCEINLINE
117 VOID
118 KeFlushProcessTb(VOID)
119 {
120 /* Flush the TLB by resetting CR3 */
121 #ifdef _M_PPC
122 __asm__("sync\n\tisync\n\t");
123 #elif _M_ARM
124 //
125 // We need to implement this!
126 //
127 ASSERTMSG("Need ARM flush routine\n", FALSE);
128 #else
129 __writecr3(__readcr3());
130 #endif
131 }
132
133 //
134 // Enters a Guarded Region
135 //
136 #define KeEnterGuardedRegion() \
137 { \
138 PKTHREAD _Thread = KeGetCurrentThread(); \
139 \
140 /* Sanity checks */ \
141 ASSERT(KeGetCurrentIrql() <= APC_LEVEL); \
142 ASSERT(_Thread == KeGetCurrentThread()); \
143 ASSERT((_Thread->SpecialApcDisable <= 0) && \
144 (_Thread->SpecialApcDisable != -32768)); \
145 \
146 /* Disable Special APCs */ \
147 _Thread->SpecialApcDisable--; \
148 }
149
150 //
151 // Leaves a Guarded Region
152 //
153 #define KeLeaveGuardedRegion() \
154 { \
155 PKTHREAD _Thread = KeGetCurrentThread(); \
156 \
157 /* Sanity checks */ \
158 ASSERT(KeGetCurrentIrql() <= APC_LEVEL); \
159 ASSERT(_Thread == KeGetCurrentThread()); \
160 ASSERT(_Thread->SpecialApcDisable < 0); \
161 \
162 /* Leave region and check if APCs are OK now */ \
163 if (!(++_Thread->SpecialApcDisable)) \
164 { \
165 /* Check for Kernel APCs on the list */ \
166 if (!IsListEmpty(&_Thread->ApcState. \
167 ApcListHead[KernelMode])) \
168 { \
169 /* Check for APC Delivery */ \
170 KiCheckForKernelApcDelivery(); \
171 } \
172 } \
173 }
174
175 //
176 // Enters a Critical Region
177 //
178 #define KeEnterCriticalRegion() \
179 { \
180 PKTHREAD _Thread = KeGetCurrentThread(); \
181 \
182 /* Sanity checks */ \
183 ASSERT(_Thread == KeGetCurrentThread()); \
184 ASSERT((_Thread->KernelApcDisable <= 0) && \
185 (_Thread->KernelApcDisable != -32768)); \
186 \
187 /* Disable Kernel APCs */ \
188 _Thread->KernelApcDisable--; \
189 }
190
191 //
192 // Leaves a Critical Region
193 //
194 #define KeLeaveCriticalRegion() \
195 { \
196 PKTHREAD _Thread = KeGetCurrentThread(); \
197 \
198 /* Sanity checks */ \
199 ASSERT(_Thread == KeGetCurrentThread()); \
200 ASSERT(_Thread->KernelApcDisable < 0); \
201 \
202 /* Enable Kernel APCs */ \
203 _Thread->KernelApcDisable++; \
204 \
205 /* Check if Kernel APCs are now enabled */ \
206 if (!(_Thread->KernelApcDisable)) \
207 { \
208 /* Check if we need to request an APC Delivery */ \
209 if (!(IsListEmpty(&_Thread->ApcState.ApcListHead[KernelMode])) && \
210 !(_Thread->SpecialApcDisable)) \
211 { \
212 /* Check for the right environment */ \
213 KiCheckForKernelApcDelivery(); \
214 } \
215 } \
216 }
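//
// Illustrative usage sketch: both pairs are used as strictly nested brackets
// around the protected work. A critical region only disables normal kernel
// APCs (KernelApcDisable), while a guarded region also disables special
// kernel APCs (SpecialApcDisable).
//
//     KeEnterCriticalRegion();
//     /* ... code that must not be interrupted by normal kernel APCs ... */
//     KeLeaveCriticalRegion();
//
//     KeEnterGuardedRegion();
//     /* ... code that not even special kernel APCs may interrupt ... */
//     KeLeaveGuardedRegion();
//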
217
218 #ifndef CONFIG_SMP
219 //
220 // Spinlock Acquire at IRQL >= DISPATCH_LEVEL
221 //
222 FORCEINLINE
223 VOID
224 KxAcquireSpinLock(IN PKSPIN_LOCK SpinLock)
225 {
226 /* On UP builds, spinlocks don't exist at IRQL >= DISPATCH */
227 UNREFERENCED_PARAMETER(SpinLock);
228 }
229
230 //
231 // Spinlock Release at IRQL >= DISPATCH_LEVEL
232 //
233 FORCEINLINE
234 VOID
235 KxReleaseSpinLock(IN PKSPIN_LOCK SpinLock)
236 {
237 /* On UP builds, spinlocks don't exist at IRQL >= DISPATCH */
238 UNREFERENCED_PARAMETER(SpinLock);
239 }
240
241 //
242 // This routine protects against multiple CPU acquires; it's meaningless on UP.
243 //
244 FORCEINLINE
245 VOID
246 KiAcquireDispatcherObject(IN DISPATCHER_HEADER* Object)
247 {
248 UNREFERENCED_PARAMETER(Object);
249 }
250
251 //
252 // This routine protects against multiple CPU acquires; it's meaningless on UP.
253 //
254 FORCEINLINE
255 VOID
256 KiReleaseDispatcherObject(IN DISPATCHER_HEADER* Object)
257 {
258 UNREFERENCED_PARAMETER(Object);
259 }
260
261 FORCEINLINE
262 KIRQL
263 KiAcquireDispatcherLock(VOID)
264 {
265 /* Raise to DPC level */
266 return KeRaiseIrqlToDpcLevel();
267 }
268
269 FORCEINLINE
270 VOID
271 KiReleaseDispatcherLock(IN KIRQL OldIrql)
272 {
273 /* Just exit the dispatcher */
274 KiExitDispatcher(OldIrql);
275 }
276
277 FORCEINLINE
278 VOID
279 KiAcquireDispatcherLockAtDpcLevel(VOID)
280 {
281 /* This is a no-op at DPC Level for UP systems */
282 return;
283 }
284
285 FORCEINLINE
286 VOID
287 KiReleaseDispatcherLockFromDpcLevel(VOID)
288 {
289 /* This is a no-op at DPC Level for UP systems */
290 return;
291 }
292
293 //
294 // This routine makes the thread deferred ready on the boot CPU.
295 //
296 FORCEINLINE
297 VOID
298 KiInsertDeferredReadyList(IN PKTHREAD Thread)
299 {
300 /* Set the thread to deferred state and boot CPU */
301 Thread->State = DeferredReady;
302 Thread->DeferredProcessor = 0;
303
304 /* Make the thread ready immediately */
305 KiDeferredReadyThread(Thread);
306 }
307
308 FORCEINLINE
309 VOID
310 KiRescheduleThread(IN BOOLEAN NewThread,
311 IN ULONG Cpu)
312 {
313 /* This is meaningless on UP systems */
314 UNREFERENCED_PARAMETER(NewThread);
315 UNREFERENCED_PARAMETER(Cpu);
316 }
317
318 //
319 // This routine protects against multiple CPU acquires; it's meaningless on UP.
320 //
321 FORCEINLINE
322 VOID
323 KiSetThreadSwapBusy(IN PKTHREAD Thread)
324 {
325 UNREFERENCED_PARAMETER(Thread);
326 }
327
328 //
329 // This routine protects against multiple CPU acquires; it's meaningless on UP.
330 //
331 FORCEINLINE
332 VOID
333 KiAcquirePrcbLock(IN PKPRCB Prcb)
334 {
335 UNREFERENCED_PARAMETER(Prcb);
336 }
337
338 //
339 // This routine protects against multiple CPU acquires; it's meaningless on UP.
340 //
341 FORCEINLINE
342 VOID
343 KiReleasePrcbLock(IN PKPRCB Prcb)
344 {
345 UNREFERENCED_PARAMETER(Prcb);
346 }
347
348 //
349 // This routine protects against multiple CPU acquires; it's meaningless on UP.
350 //
351 FORCEINLINE
352 VOID
353 KiAcquireThreadLock(IN PKTHREAD Thread)
354 {
355 UNREFERENCED_PARAMETER(Thread);
356 }
357
358 //
359 // This routine protects against multiple CPU acquires; it's meaningless on UP.
360 //
361 FORCEINLINE
362 VOID
363 KiReleaseThreadLock(IN PKTHREAD Thread)
364 {
365 UNREFERENCED_PARAMETER(Thread);
366 }
367
368 //
369 // This routine protects against multiple CPU acquires; it's meaningless on UP.
370 //
371 FORCEINLINE
372 BOOLEAN
373 KiTryThreadLock(IN PKTHREAD Thread)
374 {
375 UNREFERENCED_PARAMETER(Thread);
376 return FALSE;
377 }
378
379 FORCEINLINE
380 VOID
381 KiCheckDeferredReadyList(IN PKPRCB Prcb)
382 {
383 /* There are no deferred ready lists on UP systems */
384 UNREFERENCED_PARAMETER(Prcb);
385 }
386
387 FORCEINLINE
388 VOID
389 KiRundownThread(IN PKTHREAD Thread)
390 {
391 #if defined(_M_IX86)
392 /* Check if this is the NPX Thread */
393 if (KeGetCurrentPrcb()->NpxThread == Thread)
394 {
395 /* Clear it */
396 KeGetCurrentPrcb()->NpxThread = NULL;
397 Ke386FnInit();
398 }
399 #endif
400 }
401
402 FORCEINLINE
403 VOID
404 KiRequestApcInterrupt(IN BOOLEAN NeedApc,
405 IN UCHAR Processor)
406 {
407 /* We deliver instantly on UP */
408 UNREFERENCED_PARAMETER(NeedApc);
409 UNREFERENCED_PARAMETER(Processor);
410 }
411
412 FORCEINLINE
413 PKSPIN_LOCK_QUEUE
414 KiAcquireTimerLock(IN ULONG Hand)
415 {
416 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
417
418 /* Nothing to do on UP */
419 UNREFERENCED_PARAMETER(Hand);
420 return NULL;
421 }
422
423 FORCEINLINE
424 VOID
425 KiReleaseTimerLock(IN PKSPIN_LOCK_QUEUE LockQueue)
426 {
427 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
428
429 /* Nothing to do on UP */
430 UNREFERENCED_PARAMETER(LockQueue);
431 }
432
433 #else
434
435 //
436 // Spinlock Acquisition at IRQL >= DISPATCH_LEVEL
437 //
438 FORCEINLINE
439 VOID
440 KxAcquireSpinLock(IN PKSPIN_LOCK SpinLock)
441 {
442 /* Make sure that we don't own the lock already */
443 if (((KSPIN_LOCK)KeGetCurrentThread() | 1) == *SpinLock)
444 {
445 /* We do, bugcheck! */
446 KeBugCheckEx(SPIN_LOCK_ALREADY_OWNED, (ULONG_PTR)SpinLock, 0, 0, 0);
447 }
448
449 /* Start acquire loop */
450 for (;;)
451 {
452 /* Try to acquire it */
453 if (InterlockedBitTestAndSet((PLONG)SpinLock, 0))
454 {
455 /* Value changed... wait until it's unlocked */
456 while (*(volatile KSPIN_LOCK *)SpinLock == 1)
457 {
458 #if DBG
459 /* On debug builds, we use a much slower but useful routine */
460 //Kii386SpinOnSpinLock(SpinLock, 5);
461
462 /* FIXME: Do normal yield for now */
463 YieldProcessor();
464 #else
465 /* Otherwise, just yield and keep looping */
466 YieldProcessor();
467 #endif
468 }
469 }
470 else
471 {
472 #if DBG
473 /* On debug builds, we OR in the KTHREAD */
474 *SpinLock = (KSPIN_LOCK)KeGetCurrentThread() | 1;
475 #endif
476 /* All is well, break out */
477 break;
478 }
479 }
480 }
481
482 //
483 // Spinlock Release at IRQL >= DISPATCH_LEVEL
484 //
485 FORCEINLINE
486 VOID
487 KxReleaseSpinLock(IN PKSPIN_LOCK SpinLock)
488 {
489 #if DBG
490 /* Make sure that the threads match */
491 if (((KSPIN_LOCK)KeGetCurrentThread() | 1) != *SpinLock)
492 {
493 /* They don't, bugcheck */
494 KeBugCheckEx(SPIN_LOCK_NOT_OWNED, (ULONG_PTR)SpinLock, 0, 0, 0);
495 }
496 #endif
497 /* Clear the lock */
498 InterlockedAnd((PLONG)SpinLock, 0);
499 }
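//
// Illustrative note on the lock word: while held on DBG builds the spinlock
// stores the owning KTHREAD pointer with the low bit set (only the low bit is
// used on free builds); when free it is 0. A hypothetical check on a DBG
// build while holding the lock would be:
//
//     ASSERT((*SpinLock & 1) != 0);
//     ASSERT((PKTHREAD)(*SpinLock & ~1) == KeGetCurrentThread()); /* DBG only */
//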
500
501 FORCEINLINE
502 VOID
503 KiAcquireDispatcherObject(IN DISPATCHER_HEADER* Object)
504 {
505 LONG OldValue;
506
507 /* Make sure we're at a safe level to touch the lock */
508 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
509
510 /* Start acquire loop */
511 do
512 {
513 /* Loop until the other CPU releases it */
514 while (TRUE)
515 {
516 /* Check if it got released */
517 OldValue = Object->Lock;
518 if ((OldValue & KOBJECT_LOCK_BIT) == 0) break;
519
520 /* Let the CPU know that this is a loop */
521 YieldProcessor();
522 }
523
524 /* Try acquiring the lock now */
525 } while (InterlockedCompareExchange(&Object->Lock,
526 OldValue | KOBJECT_LOCK_BIT,
527 OldValue) != OldValue);
528 }
529
530 FORCEINLINE
531 VOID
532 KiReleaseDispatcherObject(IN DISPATCHER_HEADER* Object)
533 {
534 /* Make sure we're at a safe level to touch the lock */
535 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
536
537 /* Release it */
538 InterlockedAnd(&Object->Lock, ~KOBJECT_LOCK_BIT);
539 }
540
541 FORCEINLINE
542 KIRQL
543 KiAcquireDispatcherLock(VOID)
544 {
545 /* Raise to synchronization level and acquire the dispatcher lock */
546 return KeAcquireQueuedSpinLockRaiseToSynch(LockQueueDispatcherLock);
547 }
548
549 FORCEINLINE
550 VOID
551 KiReleaseDispatcherLock(IN KIRQL OldIrql)
552 {
553 /* First release the lock */
554 KeReleaseQueuedSpinLockFromDpcLevel(&KeGetCurrentPrcb()->
555 LockQueue[LockQueueDispatcherLock]);
556
557 /* Then exit the dispatcher */
558 KiExitDispatcher(OldIrql);
559 }
560
561 FORCEINLINE
562 VOID
563 KiAcquireDispatcherLockAtDpcLevel(VOID)
564 {
565 /* Acquire the dispatcher lock */
566 KeAcquireQueuedSpinLockAtDpcLevel(&KeGetCurrentPrcb()->
567 LockQueue[LockQueueDispatcherLock]);
568 }
569
570 FORCEINLINE
571 VOID
572 KiReleaseDispatcherLockFromDpcLevel(VOID)
573 {
574 /* Release the dispatcher lock */
575 KeReleaseQueuedSpinLockFromDpcLevel(&KeGetCurrentPrcb()->
576 LockQueue[LockQueueDispatcherLock]);
577 }
578
579 //
580 // This routine inserts a thread into the deferred ready list of the current CPU
581 //
582 FORCEINLINE
583 VOID
584 KiInsertDeferredReadyList(IN PKTHREAD Thread)
585 {
586 PKPRCB Prcb = KeGetCurrentPrcb();
587
588 /* Set the thread to deferred state and CPU */
589 Thread->State = DeferredReady;
590 Thread->DeferredProcessor = Prcb->Number;
591
592 /* Add it on the list */
593 PushEntryList(&Prcb->DeferredReadyListHead, &Thread->SwapListEntry);
594 }
595
596 FORCEINLINE
597 VOID
598 KiRescheduleThread(IN BOOLEAN NewThread,
599 IN ULONG Cpu)
600 {
601 /* Check if a new thread needs to be scheduled on a different CPU */
602 if ((NewThread) && !(KeGetPcr()->Number == Cpu))
603 {
604 /* Send an IPI to request delivery */
605 KiIpiSend(AFFINITY_MASK(Cpu), IPI_DPC);
606 }
607 }
608
609 //
610 // This routine sets the current thread in a swap busy state, which ensures that
611 // nobody else tries to swap it concurrently.
612 //
613 FORCEINLINE
614 VOID
615 KiSetThreadSwapBusy(IN PKTHREAD Thread)
616 {
617 /* Make sure nobody already set it */
618 ASSERT(Thread->SwapBusy == FALSE);
619
620 /* Set it ourselves */
621 Thread->SwapBusy = TRUE;
622 }
623
624 //
625 // This routine acquires the PRCB lock so that only one caller can touch
626 // volatile PRCB data.
627 //
628 // Since this is a simple optimized spin-lock, it must only be acquired
629 // at dispatcher level or higher!
630 //
631 FORCEINLINE
632 VOID
633 KiAcquirePrcbLock(IN PKPRCB Prcb)
634 {
635 /* Make sure we're at a safe level to touch the PRCB lock */
636 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
637
638 /* Start acquire loop */
639 for (;;)
640 {
641 /* Acquire the lock and break out if we acquired it first */
642 if (!InterlockedExchange((PLONG)&Prcb->PrcbLock, 1)) break;
643
644 /* Loop until the other CPU releases it */
645 do
646 {
647 /* Let the CPU know that this is a loop */
648 YieldProcessor();
649 } while (Prcb->PrcbLock);
650 }
651 }
652
653 //
654 // This routine releases the PRCB lock so that other callers can touch
655 // volatile PRCB data.
656 //
657 // Since this is a simple optimized spin-lock, it must only be acquired
658 // at dispatcher level or higher!
659 //
660 FORCEINLINE
661 VOID
662 KiReleasePrcbLock(IN PKPRCB Prcb)
663 {
664 /* Make sure we are above dispatch and the lock is acquired! */
665 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
666 ASSERT(Prcb->PrcbLock != 0);
667
668 /* Release it */
669 InterlockedAnd((PLONG)&Prcb->PrcbLock, 0);
670 }
671
672 //
673 // This routine acquires the thread lock so that only one caller can touch
674 // volatile thread data.
675 //
676 // Since this is a simple optimized spin-lock, it must only be acquired
677 // at dispatcher level or higher!
678 //
679 FORCEINLINE
680 VOID
681 KiAcquireThreadLock(IN PKTHREAD Thread)
682 {
683 /* Make sure we're at a safe level to touch the thread lock */
684 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
685
686 /* Start acquire loop */
687 for (;;)
688 {
689 /* Acquire the lock and break out if we acquired it first */
690 if (!InterlockedExchange((PLONG)&Thread->ThreadLock, 1)) break;
691
692 /* Loop until the other CPU releases it */
693 do
694 {
695 /* Let the CPU know that this is a loop */
696 YieldProcessor();
697 } while (Thread->ThreadLock);
698 }
699 }
700
701 //
702 // This routine releases the thread lock so that other callers can touch
703 // volatile thread data.
704 //
705 // Since this is a simple optimized spin-lock, it must only be acquired
706 // at dispatcher level or higher!
707 //
708 FORCEINLINE
709 VOID
710 KiReleaseThreadLock(IN PKTHREAD Thread)
711 {
712 /* Make sure we are still above dispatch */
713 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
714
715 /* Release it */
716 InterlockedAnd((PLONG)&Thread->ThreadLock, 0);
717 }
718
719 FORCEINLINE
720 BOOLEAN
721 KiTryThreadLock(IN PKTHREAD Thread)
722 {
723 LONG Value;
724
725 /* If the lock isn't acquired, return false */
726 if (!Thread->ThreadLock) return FALSE;
727
728 /* Otherwise, try to acquire it and check the result */
729 Value = 1;
730 Value = InterlockedExchange((PLONG)&Thread->ThreadLock, Value);
731
732 /* Return the lock state */
733 return (Value == TRUE);
734 }
735
736 FORCEINLINE
737 VOID
738 KiCheckDeferredReadyList(IN PKPRCB Prcb)
739 {
740 /* Scan the deferred ready lists if required */
741 if (Prcb->DeferredReadyListHead.Next) KiProcessDeferredReadyList(Prcb);
742 }
743
744 FORCEINLINE
745 VOID
746 KiRundownThread(IN PKTHREAD Thread)
747 {
748 /* Nothing to do */
749 return;
750 }
751
752 FORCEINLINE
753 VOID
754 KiRequestApcInterrupt(IN BOOLEAN NeedApc,
755 IN UCHAR Processor)
756 {
757 /* Check if we need to request APC delivery */
758 if (NeedApc)
759 {
760 /* Check if it's on another CPU */
761 if (KeGetPcr()->Number != Processor)
762 {
763 /* Send an IPI to request delivery */
764 KiIpiSend(AFFINITY_MASK(Processor), IPI_APC);
765 }
766 else
767 {
768 /* Request a software interrupt */
769 HalRequestSoftwareInterrupt(APC_LEVEL);
770 }
771 }
772 }
773
774 FORCEINLINE
775 PKSPIN_LOCK_QUEUE
776 KiAcquireTimerLock(IN ULONG Hand)
777 {
778 PKSPIN_LOCK_QUEUE LockQueue;
779 ULONG LockIndex;
780 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
781
782 /* Get the lock index */
783 LockIndex = Hand >> LOCK_QUEUE_TIMER_LOCK_SHIFT;
784 LockIndex &= (LOCK_QUEUE_TIMER_TABLE_LOCKS - 1);
785
786 /* Now get the lock */
787 LockQueue = &KeGetCurrentPrcb()->LockQueue[LockQueueTimerTableLock + LockIndex];
788
789 /* Acquire it and return */
790 KeAcquireQueuedSpinLockAtDpcLevel(LockQueue);
791 return LockQueue;
792 }
793
794 FORCEINLINE
795 VOID
796 KiReleaseTimerLock(IN PKSPIN_LOCK_QUEUE LockQueue)
797 {
798 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
799
800 /* Release the lock */
801 KeReleaseQueuedSpinLockFromDpcLevel(LockQueue);
802 }
803
804 #endif
805
806 FORCEINLINE
807 VOID
808 KiAcquireApcLock(IN PKTHREAD Thread,
809 IN PKLOCK_QUEUE_HANDLE Handle)
810 {
811 /* Acquire the lock and raise to synchronization level */
812 KeAcquireInStackQueuedSpinLockRaiseToSynch(&Thread->ApcQueueLock, Handle);
813 }
814
815 FORCEINLINE
816 VOID
817 KiAcquireApcLockAtDpcLevel(IN PKTHREAD Thread,
818 IN PKLOCK_QUEUE_HANDLE Handle)
819 {
820 /* Acquire the lock */
821 KeAcquireInStackQueuedSpinLockAtDpcLevel(&Thread->ApcQueueLock, Handle);
822 }
823
824 FORCEINLINE
825 VOID
826 KiAcquireApcLockAtApcLevel(IN PKTHREAD Thread,
827 IN PKLOCK_QUEUE_HANDLE Handle)
828 {
829 /* Acquire the lock */
830 KeAcquireInStackQueuedSpinLock(&Thread->ApcQueueLock, Handle);
831 }
832
833 FORCEINLINE
834 VOID
835 KiReleaseApcLock(IN PKLOCK_QUEUE_HANDLE Handle)
836 {
837 /* Release the lock */
838 KeReleaseInStackQueuedSpinLock(Handle);
839 }
840
841 FORCEINLINE
842 VOID
843 KiReleaseApcLockFromDpcLevel(IN PKLOCK_QUEUE_HANDLE Handle)
844 {
845 /* Release the lock */
846 KeReleaseInStackQueuedSpinLockFromDpcLevel(Handle);
847 }
848
849 FORCEINLINE
850 VOID
851 KiAcquireProcessLock(IN PKPROCESS Process,
852 IN PKLOCK_QUEUE_HANDLE Handle)
853 {
854 /* Acquire the lock and raise to synchronization level */
855 KeAcquireInStackQueuedSpinLockRaiseToSynch(&Process->ProcessLock, Handle);
856 }
857
858 FORCEINLINE
859 VOID
860 KiReleaseProcessLock(IN PKLOCK_QUEUE_HANDLE Handle)
861 {
862 /* Release the lock */
863 KeReleaseInStackQueuedSpinLock(Handle);
864 }
865
866 FORCEINLINE
867 VOID
868 KiReleaseProcessLockFromDpcLevel(IN PKLOCK_QUEUE_HANDLE Handle)
869 {
870 /* Release the lock */
871 KeReleaseInStackQueuedSpinLockFromDpcLevel(Handle);
872 }
873
874 FORCEINLINE
875 VOID
876 KiAcquireDeviceQueueLock(IN PKDEVICE_QUEUE DeviceQueue,
877 IN PKLOCK_QUEUE_HANDLE DeviceLock)
878 {
879 /* Check if we were called from a threaded DPC */
880 if (KeGetCurrentPrcb()->DpcThreadActive)
881 {
882 /* Lock the Queue, we're not at DPC level */
883 KeAcquireInStackQueuedSpinLock(&DeviceQueue->Lock, DeviceLock);
884 }
885 else
886 {
887 /* We must be at DPC level, acquire the lock safely */
888 ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
889 KeAcquireInStackQueuedSpinLockAtDpcLevel(&DeviceQueue->Lock,
890 DeviceLock);
891 }
892 }
893
894 FORCEINLINE
895 VOID
896 KiReleaseDeviceQueueLock(IN PKLOCK_QUEUE_HANDLE DeviceLock)
897 {
898 /* Check if we were called from a threaded DPC */
899 if (KeGetCurrentPrcb()->DpcThreadActive)
900 {
901 /* Unlock the Queue, we're not at DPC level */
902 KeReleaseInStackQueuedSpinLock(DeviceLock);
903 }
904 else
905 {
906 /* We must be at DPC level, release the lock safely */
907 ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
908 KeReleaseInStackQueuedSpinLockFromDpcLevel(DeviceLock);
909 }
910 }
911
912 //
913 // Satisfies the wait of any dispatcher object
914 //
915 #define KiSatisfyObjectWait(Object, Thread) \
916 { \
917 /* Special case for Mutants */ \
918 if ((Object)->Header.Type == MutantObject) \
919 { \
920 /* Decrease the Signal State */ \
921 (Object)->Header.SignalState--; \
922 \
923 /* Check if it's now non-signaled */ \
924 if (!(Object)->Header.SignalState) \
925 { \
926 /* Set the Owner Thread */ \
927 (Object)->OwnerThread = Thread; \
928 \
929 /* Disable APCs if needed */ \
930 Thread->KernelApcDisable = Thread->KernelApcDisable - \
931 (Object)->ApcDisable; \
932 \
933 /* Check if it's abandoned */ \
934 if ((Object)->Abandoned) \
935 { \
936 /* Unabandon it */ \
937 (Object)->Abandoned = FALSE; \
938 \
939 /* Return Status */ \
940 Thread->WaitStatus = STATUS_ABANDONED; \
941 } \
942 \
943 /* Insert it into the Mutant List */ \
944 InsertHeadList(Thread->MutantListHead.Blink, \
945 &(Object)->MutantListEntry); \
946 } \
947 } \
948 else if (((Object)->Header.Type & TIMER_OR_EVENT_TYPE) == \
949 EventSynchronizationObject) \
950 { \
951 /* Synchronization Timers and Events just get un-signaled */ \
952 (Object)->Header.SignalState = 0; \
953 } \
954 else if ((Object)->Header.Type == SemaphoreObject) \
955 { \
956 /* These ones can have multiple states, so we only decrease it */ \
957 (Object)->Header.SignalState--; \
958 } \
959 }
960
961 //
962 // Satisfies the wait of a mutant dispatcher object
963 //
964 #define KiSatisfyMutantWait(Object, Thread) \
965 { \
966 /* Decrease the Signal State */ \
967 (Object)->Header.SignalState--; \
968 \
969 /* Check if it's now non-signaled */ \
970 if (!(Object)->Header.SignalState) \
971 { \
972 /* Set the Owner Thread */ \
973 (Object)->OwnerThread = Thread; \
974 \
975 /* Disable APCs if needed */ \
976 Thread->KernelApcDisable = Thread->KernelApcDisable - \
977 (Object)->ApcDisable; \
978 \
979 /* Check if it's abandoned */ \
980 if ((Object)->Abandoned) \
981 { \
982 /* Unabandon it */ \
983 (Object)->Abandoned = FALSE; \
984 \
985 /* Return Status */ \
986 Thread->WaitStatus = STATUS_ABANDONED; \
987 } \
988 \
989 /* Insert it into the Mutant List */ \
990 InsertHeadList(Thread->MutantListHead.Blink, \
991 &(Object)->MutantListEntry); \
992 } \
993 }
994
995 //
996 // Satisfies the wait of any nonmutant dispatcher object
997 //
998 #define KiSatisfyNonMutantWait(Object) \
999 { \
1000 if (((Object)->Header.Type & TIMER_OR_EVENT_TYPE) == \
1001 EventSynchronizationObject) \
1002 { \
1003 /* Synchronization Timers and Events just get un-signaled */ \
1004 (Object)->Header.SignalState = 0; \
1005 } \
1006 else if ((Object)->Header.Type == SemaphoreObject) \
1007 { \
1008 /* These ones can have multiple states, so we only decrease it */ \
1009 (Object)->Header.SignalState--; \
1010 } \
1011 }
1012
1013 //
1014 // Recalculates the due time
1015 //
1016 FORCEINLINE
1017 PLARGE_INTEGER
1018 KiRecalculateDueTime(IN PLARGE_INTEGER OriginalDueTime,
1019 IN PLARGE_INTEGER DueTime,
1020 IN OUT PLARGE_INTEGER NewDueTime)
1021 {
1022 /* Don't do anything for absolute waits */
1023 if (OriginalDueTime->QuadPart >= 0) return OriginalDueTime;
1024
1025 /* Otherwise, query the interrupt time and recalculate */
1026 NewDueTime->QuadPart = KeQueryInterruptTime();
1027 NewDueTime->QuadPart -= DueTime->QuadPart;
1028 return NewDueTime;
1029 }
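//
// Worked example (values hypothetical, 100ns units): a relative wait stores
// the absolute interrupt-time expiration in DueTime. If DueTime is 5,000,000
// and the current interrupt time is 4,000,000, the recomputed value is
// 4,000,000 - 5,000,000 = -1,000,000, i.e. a relative timeout of 100ms still
// remaining.
//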
1030
1031 //
1032 // Determines whether a thread should be added to the wait list
1033 //
1034 FORCEINLINE
1035 BOOLEAN
1036 KiCheckThreadStackSwap(IN PKTHREAD Thread,
1037 IN KPROCESSOR_MODE WaitMode)
1038 {
1039 /* Check the required conditions */
1040 if ((WaitMode != KernelMode) &&
1041 (Thread->EnableStackSwap) &&
1042 (Thread->Priority >= (LOW_REALTIME_PRIORITY + 9)))
1043 {
1044 /* We are go for swap */
1045 return TRUE;
1046 }
1047 else
1048 {
1049 /* Don't swap the thread */
1050 return FALSE;
1051 }
1052 }
1053
1054 //
1055 // Adds a thread to the wait list
1056 //
1057 #define KiAddThreadToWaitList(Thread, Swappable) \
1058 { \
1059 /* Make sure it's swappable */ \
1060 if (Swappable) \
1061 { \
1062 /* Insert it into the PRCB's List */ \
1063 InsertTailList(&KeGetCurrentPrcb()->WaitListHead, \
1064 &Thread->WaitListEntry); \
1065 } \
1066 }
1067
1068 //
1069 // Checks if a wait in progress should be interrupted by APCs or an alertable
1070 // state.
1071 //
1072 FORCEINLINE
1073 NTSTATUS
1074 KiCheckAlertability(IN PKTHREAD Thread,
1075 IN BOOLEAN Alertable,
1076 IN KPROCESSOR_MODE WaitMode)
1077 {
1078 /* Check if the wait is alertable */
1079 if (Alertable)
1080 {
1081 /* It is, first check if the thread is alerted in this mode */
1082 if (Thread->Alerted[WaitMode])
1083 {
1084 /* It is, so bail out of the wait */
1085 Thread->Alerted[WaitMode] = FALSE;
1086 return STATUS_ALERTED;
1087 }
1088 else if ((WaitMode != KernelMode) &&
1089 (!IsListEmpty(&Thread->ApcState.ApcListHead[UserMode])))
1090 {
1091 /* It isn't, but this is a user wait with queued user APCs */
1092 Thread->ApcState.UserApcPending = TRUE;
1093 return STATUS_USER_APC;
1094 }
1095 else if (Thread->Alerted[KernelMode])
1096 {
1097 /* It isn't that either, but we're alerted in kernel mode */
1098 Thread->Alerted[KernelMode] = FALSE;
1099 return STATUS_ALERTED;
1100 }
1101 }
1102 else if ((WaitMode != KernelMode) && (Thread->ApcState.UserApcPending))
1103 {
1104 /* Not alertable, but this is a user wait with pending user APCs */
1105 return STATUS_USER_APC;
1106 }
1107
1108 /* Otherwise, we're fine */
1109 return STATUS_WAIT_0;
1110 }
1111
1112 //
1113 // Called from KiCompleteTimer, KiInsertTreeTimer, KeSetSystemTime
1114 // to remove timer entries
1115 // See Windows HPI blog for more information.
1116 FORCEINLINE
1117 VOID
1118 KiRemoveEntryTimer(IN PKTIMER Timer)
1119 {
1120 ULONG Hand;
1121 PKTIMER_TABLE_ENTRY TableEntry;
1122
1123 /* Remove the timer from the timer list and check if it's empty */
1124 Hand = Timer->Header.Hand;
1125 if (RemoveEntryList(&Timer->TimerListEntry))
1126 {
1127 /* Get the respective timer table entry */
1128 TableEntry = &KiTimerTableListHead[Hand];
1129 if (&TableEntry->Entry == TableEntry->Entry.Flink)
1130 {
1131 /* Set the entry to an infinite absolute time */
1132 TableEntry->Time.HighPart = 0xFFFFFFFF;
1133 }
1134 }
1135
1136 /* Clear the list entries on dbg builds so we can tell the timer is gone */
1137 #if DBG
1138 Timer->TimerListEntry.Flink = NULL;
1139 Timer->TimerListEntry.Blink = NULL;
1140 #endif
1141 }
1142
1143 //
1144 // Called by Wait and Queue code to insert a timer for dispatching.
1145 // Also called by KeSetTimerEx to insert a timer from the caller.
1146 //
1147 FORCEINLINE
1148 VOID
1149 KxInsertTimer(IN PKTIMER Timer,
1150 IN ULONG Hand)
1151 {
1152 PKSPIN_LOCK_QUEUE LockQueue;
1153
1154 /* Acquire the lock and release the dispatcher lock */
1155 LockQueue = KiAcquireTimerLock(Hand);
1156 KiReleaseDispatcherLockFromDpcLevel();
1157
1158 /* Try to insert the timer */
1159 if (KiInsertTimerTable(Timer, Hand))
1160 {
1161 /* Complete it */
1162 KiCompleteTimer(Timer, LockQueue);
1163 }
1164 else
1165 {
1166 /* Do nothing, just release the lock */
1167 KiReleaseTimerLock(LockQueue);
1168 }
1169 }
1170
1171 //
1172 // Called by KeSetTimerEx and KiInsertTreeTimer to calculate Due Time
1173 // See the Windows HPI Blog for more information
1174 //
1175 FORCEINLINE
1176 BOOLEAN
1177 KiComputeDueTime(IN PKTIMER Timer,
1178 IN LARGE_INTEGER DueTime,
1179 OUT PULONG Hand)
1180 {
1181 LARGE_INTEGER InterruptTime, SystemTime, DifferenceTime;
1182
1183 /* Convert to relative time if needed */
1184 Timer->Header.Absolute = FALSE;
1185 if (DueTime.HighPart >= 0)
1186 {
1187 /* Get System Time */
1188 KeQuerySystemTime(&SystemTime);
1189
1190 /* Do the conversion */
1191 DifferenceTime.QuadPart = SystemTime.QuadPart - DueTime.QuadPart;
1192
1193 /* Make sure it hasn't already expired */
1194 Timer->Header.Absolute = TRUE;
1195 if (DifferenceTime.HighPart >= 0)
1196 {
1197 /* Cancel everything */
1198 Timer->Header.SignalState = TRUE;
1199 Timer->Header.Hand = 0;
1200 Timer->DueTime.QuadPart = 0;
1201 *Hand = 0;
1202 return FALSE;
1203 }
1204
1205 /* Use the converted relative time from now on */
1206 DueTime = DifferenceTime;
1207 }
1208
1209 /* Get the Interrupt Time */
1210 InterruptTime.QuadPart = KeQueryInterruptTime();
1211
1212 /* Recalculate due time */
1213 Timer->DueTime.QuadPart = InterruptTime.QuadPart - DueTime.QuadPart;
1214
1215 /* Get the handle */
1216 *Hand = KiComputeTimerTableIndex(Timer->DueTime.QuadPart);
1217 Timer->Header.Hand = (UCHAR)*Hand;
1218 Timer->Header.Inserted = TRUE;
1219 return TRUE;
1220 }
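//
// Illustrative note: a non-negative DueTime.HighPart means the caller passed
// an absolute system time. If that time has already passed, the difference is
// non-negative, so the timer is signaled immediately (SignalState = TRUE,
// Hand = 0) and FALSE tells the caller not to insert it.
//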
1221
1222 //
1223 // Called from Unlink and Queue Insert Code.
1224 // Also called by timer code when canceling an inserted timer.
1225 // Removes a timer from its tree.
1226 //
1227 FORCEINLINE
1228 VOID
1229 KxRemoveTreeTimer(IN PKTIMER Timer)
1230 {
1231 ULONG Hand = Timer->Header.Hand;
1232 PKSPIN_LOCK_QUEUE LockQueue;
1233 PKTIMER_TABLE_ENTRY TimerEntry;
1234
1235 /* Acquire timer lock */
1236 LockQueue = KiAcquireTimerLock(Hand);
1237
1238 /* Set the timer as non-inserted */
1239 Timer->Header.Inserted = FALSE;
1240
1241 /* Remove it from the timer list */
1242 if (RemoveEntryList(&Timer->TimerListEntry))
1243 {
1244 /* Get the entry and check if it's empty */
1245 TimerEntry = &KiTimerTableListHead[Hand];
1246 if (IsListEmpty(&TimerEntry->Entry))
1247 {
1248 /* Clear the time then */
1249 TimerEntry->Time.HighPart = 0xFFFFFFFF;
1250 }
1251 }
1252
1253 /* Release the timer lock */
1254 KiReleaseTimerLock(LockQueue);
1255 }
1256
1257 FORCEINLINE
1258 VOID
1259 KxSetTimerForThreadWait(IN PKTIMER Timer,
1260 IN LARGE_INTEGER Interval,
1261 OUT PULONG Hand)
1262 {
1263 ULONGLONG DueTime;
1264 LARGE_INTEGER InterruptTime, SystemTime, TimeDifference;
1265
1266 /* Check the timer's interval to see if it's absolute */
1267 Timer->Header.Absolute = FALSE;
1268 if (Interval.HighPart >= 0)
1269 {
1270 /* Get the system time and calculate the relative time */
1271 KeQuerySystemTime(&SystemTime);
1272 TimeDifference.QuadPart = SystemTime.QuadPart - Interval.QuadPart;
1273 Timer->Header.Absolute = TRUE;
1274
1275 /* Check if we've already expired */
1276 if (TimeDifference.HighPart >= 0)
1277 {
1278 /* Reset everything */
1279 Timer->DueTime.QuadPart = 0;
1280 *Hand = 0;
1281 Timer->Header.Hand = 0;
1282 return;
1283 }
1284 else
1285 {
1286 /* Update the interval */
1287 Interval = TimeDifference;
1288 }
1289 }
1290
1291 /* Calculate the due time */
1292 InterruptTime.QuadPart = KeQueryInterruptTime();
1293 DueTime = InterruptTime.QuadPart - Interval.QuadPart;
1294 Timer->DueTime.QuadPart = DueTime;
1295
1296 /* Calculate the timer handle */
1297 *Hand = KiComputeTimerTableIndex(DueTime);
1298 Timer->Header.Hand = (UCHAR)*Hand;
1299 }
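//
// Worked example (values hypothetical, 100ns units): a relative wait of one
// second arrives as Interval = -10,000,000. With an interrupt time of
// 70,000,000 the due time becomes 70,000,000 - (-10,000,000) = 80,000,000,
// and the returned hand is simply KiComputeTimerTableIndex(80000000), the
// timer table bucket covering that expiration.
//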
1300
1301 #define KxDelayThreadWait() \
1302 \
1303 /* Setup the Wait Block */ \
1304 Thread->WaitBlockList = TimerBlock; \
1305 \
1306 /* Setup the timer */ \
1307 KxSetTimerForThreadWait(Timer, *Interval, &Hand); \
1308 \
1309 /* Save the due time for the caller */ \
1310 DueTime.QuadPart = Timer->DueTime.QuadPart; \
1311 \
1312 /* Link the timer to this Wait Block */ \
1313 TimerBlock->NextWaitBlock = TimerBlock; \
1314 Timer->Header.WaitListHead.Flink = &TimerBlock->WaitListEntry; \
1315 Timer->Header.WaitListHead.Blink = &TimerBlock->WaitListEntry; \
1316 \
1317 /* Clear wait status */ \
1318 Thread->WaitStatus = STATUS_SUCCESS; \
1319 \
1320 /* Setup wait fields */ \
1321 Thread->Alertable = Alertable; \
1322 Thread->WaitReason = DelayExecution; \
1323 Thread->WaitMode = WaitMode; \
1324 \
1325 /* Check if we can swap the thread's stack */ \
1326 Thread->WaitListEntry.Flink = NULL; \
1327 Swappable = KiCheckThreadStackSwap(Thread, WaitMode); \
1328 \
1329 /* Set the wait time */ \
1330 Thread->WaitTime = KeTickCount.LowPart;
1331
1332 #define KxMultiThreadWait() \
1333 /* Link wait block array to the thread */ \
1334 Thread->WaitBlockList = WaitBlockArray; \
1335 \
1336 /* Reset the index */ \
1337 Index = 0; \
1338 \
1339 /* Loop wait blocks */ \
1340 do \
1341 { \
1342 /* Fill out the wait block */ \
1343 WaitBlock = &WaitBlockArray[Index]; \
1344 WaitBlock->Object = Object[Index]; \
1345 WaitBlock->WaitKey = (USHORT)Index; \
1346 WaitBlock->WaitType = WaitType; \
1347 WaitBlock->Thread = Thread; \
1348 \
1349 /* Link to next block */ \
1350 WaitBlock->NextWaitBlock = &WaitBlockArray[Index + 1]; \
1351 Index++; \
1352 } while (Index < Count); \
1353 \
1354 /* Link the last block */ \
1355 WaitBlock->NextWaitBlock = WaitBlockArray; \
1356 \
1357 /* Set default wait status */ \
1358 Thread->WaitStatus = STATUS_WAIT_0; \
1359 \
1360 /* Check if we have a timer */ \
1361 if (Timeout) \
1362 { \
1363 /* Link to the block */ \
1364 TimerBlock->NextWaitBlock = WaitBlockArray; \
1365 \
1366 /* Setup the timer */ \
1367 KxSetTimerForThreadWait(Timer, *Timeout, &Hand); \
1368 \
1369 /* Save the due time for the caller */ \
1370 DueTime.QuadPart = Timer->DueTime.QuadPart; \
1371 \
1372 /* Initialize the list */ \
1373 InitializeListHead(&Timer->Header.WaitListHead); \
1374 } \
1375 \
1376 /* Set wait settings */ \
1377 Thread->Alertable = Alertable; \
1378 Thread->WaitMode = WaitMode; \
1379 Thread->WaitReason = WaitReason; \
1380 \
1381 /* Check if we can swap the thread's stack */ \
1382 Thread->WaitListEntry.Flink = NULL; \
1383 Swappable = KiCheckThreadStackSwap(Thread, WaitMode); \
1384 \
1385 /* Set the wait time */ \
1386 Thread->WaitTime = KeTickCount.LowPart;
1387
1388 #define KxSingleThreadWait() \
1389 /* Setup the Wait Block */ \
1390 Thread->WaitBlockList = WaitBlock; \
1391 WaitBlock->WaitKey = STATUS_SUCCESS; \
1392 WaitBlock->Object = Object; \
1393 WaitBlock->WaitType = WaitAny; \
1394 \
1395 /* Clear wait status */ \
1396 Thread->WaitStatus = STATUS_SUCCESS; \
1397 \
1398 /* Check if we have a timer */ \
1399 if (Timeout) \
1400 { \
1401 /* Setup the timer */ \
1402 KxSetTimerForThreadWait(Timer, *Timeout, &Hand); \
1403 \
1404 /* Save the due time for the caller */ \
1405 DueTime.QuadPart = Timer->DueTime.QuadPart; \
1406 \
1407 /* Pointer to timer block */ \
1408 WaitBlock->NextWaitBlock = TimerBlock; \
1409 TimerBlock->NextWaitBlock = WaitBlock; \
1410 \
1411 /* Link the timer to this Wait Block */ \
1412 Timer->Header.WaitListHead.Flink = &TimerBlock->WaitListEntry; \
1413 Timer->Header.WaitListHead.Blink = &TimerBlock->WaitListEntry; \
1414 } \
1415 else \
1416 { \
1417 /* No timer block, just ourselves */ \
1418 WaitBlock->NextWaitBlock = WaitBlock; \
1419 } \
1420 \
1421 /* Set wait settings */ \
1422 Thread->Alertable = Alertable; \
1423 Thread->WaitMode = WaitMode; \
1424 Thread->WaitReason = WaitReason; \
1425 \
1426 /* Check if we can swap the thread's stack */ \
1427 Thread->WaitListEntry.Flink = NULL; \
1428 Swappable = KiCheckThreadStackSwap(Thread, WaitMode); \
1429 \
1430 /* Set the wait time */ \
1431 Thread->WaitTime = KeTickCount.LowPart;
1432
1433 #define KxQueueThreadWait() \
1434 /* Setup the Wait Block */ \
1435 Thread->WaitBlockList = WaitBlock; \
1436 WaitBlock->WaitKey = STATUS_SUCCESS; \
1437 WaitBlock->Object = Queue; \
1438 WaitBlock->WaitType = WaitAny; \
1439 WaitBlock->Thread = Thread; \
1440 \
1441 /* Clear wait status */ \
1442 Thread->WaitStatus = STATUS_SUCCESS; \
1443 \
1444 /* Check if we have a timer */ \
1445 if (Timeout) \
1446 { \
1447 /* Setup the timer */ \
1448 KxSetTimerForThreadWait(Timer, *Timeout, &Hand); \
1449 \
1450 /* Save the due time for the caller */ \
1451 DueTime.QuadPart = Timer->DueTime.QuadPart; \
1452 \
1453 /* Pointer to timer block */ \
1454 WaitBlock->NextWaitBlock = TimerBlock; \
1455 TimerBlock->NextWaitBlock = WaitBlock; \
1456 \
1457 /* Link the timer to this Wait Block */ \
1458 Timer->Header.WaitListHead.Flink = &TimerBlock->WaitListEntry; \
1459 Timer->Header.WaitListHead.Blink = &TimerBlock->WaitListEntry; \
1460 } \
1461 else \
1462 { \
1463 /* No timer block, just ourselves */ \
1464 WaitBlock->NextWaitBlock = WaitBlock; \
1465 } \
1466 \
1467 /* Set wait settings */ \
1468 Thread->Alertable = FALSE; \
1469 Thread->WaitMode = WaitMode; \
1470 Thread->WaitReason = WrQueue; \
1471 \
1472 /* Check if we can swap the thread's stack */ \
1473 Thread->WaitListEntry.Flink = NULL; \
1474 Swappable = KiCheckThreadStackSwap(Thread, WaitMode); \
1475 \
1476 /* Set the wait time */ \
1477 Thread->WaitTime = KeTickCount.LowPart;
1478
1479 //
1480 // Unwaits a Thread
1481 //
1482 FORCEINLINE
1483 VOID
1484 KxUnwaitThread(IN DISPATCHER_HEADER *Object,
1485 IN KPRIORITY Increment)
1486 {
1487 PLIST_ENTRY WaitEntry, WaitList;
1488 PKWAIT_BLOCK WaitBlock;
1489 PKTHREAD WaitThread;
1490 ULONG WaitKey;
1491
1492 /* Loop the Wait Entries */
1493 WaitList = &Object->WaitListHead;
1494 ASSERT(IsListEmpty(&Object->WaitListHead) == FALSE);
1495 WaitEntry = WaitList->Flink;
1496 do
1497 {
1498 /* Get the current wait block */
1499 WaitBlock = CONTAINING_RECORD(WaitEntry, KWAIT_BLOCK, WaitListEntry);
1500
1501 /* Get the waiting thread */
1502 WaitThread = WaitBlock->Thread;
1503
1504 /* Check the current Wait Type */
1505 if (WaitBlock->WaitType == WaitAny)
1506 {
1507 /* Use the actual wait key */
1508 WaitKey = WaitBlock->WaitKey;
1509 }
1510 else
1511 {
1512 /* Otherwise, use STATUS_KERNEL_APC */
1513 WaitKey = STATUS_KERNEL_APC;
1514 }
1515
1516 /* Unwait the thread */
1517 KiUnwaitThread(WaitThread, WaitKey, Increment);
1518
1519 /* Next entry */
1520 WaitEntry = WaitList->Flink;
1521 } while (WaitEntry != WaitList);
1522 }
1523
1524 //
1525 // Unwaits a Thread waiting on an event
1526 //
1527 FORCEINLINE
1528 VOID
1529 KxUnwaitThreadForEvent(IN PKEVENT Event,
1530 IN KPRIORITY Increment)
1531 {
1532 PLIST_ENTRY WaitEntry, WaitList;
1533 PKWAIT_BLOCK WaitBlock;
1534 PKTHREAD WaitThread;
1535
1536 /* Loop the Wait Entries */
1537 WaitList = &Event->Header.WaitListHead;
1538 ASSERT(IsListEmpty(&Event->Header.WaitListHead) == FALSE);
1539 WaitEntry = WaitList->Flink;
1540 do
1541 {
1542 /* Get the current wait block */
1543 WaitBlock = CONTAINING_RECORD(WaitEntry, KWAIT_BLOCK, WaitListEntry);
1544
1545 /* Get the waiting thread */
1546 WaitThread = WaitBlock->Thread;
1547
1548 /* Check the current Wait Type */
1549 if (WaitBlock->WaitType == WaitAny)
1550 {
1551 /* Un-signal it */
1552 Event->Header.SignalState = 0;
1553
1554 /* Un-signal the event and unwait the thread */
1555 KiUnwaitThread(WaitThread, WaitBlock->WaitKey, Increment);
1556 break;
1557 }
1558
1559 /* Unwait the thread with STATUS_KERNEL_APC */
1560 KiUnwaitThread(WaitThread, STATUS_KERNEL_APC, Increment);
1561
1562 /* Next entry */
1563 WaitEntry = WaitList->Flink;
1564 } while (WaitEntry != WaitList);
1565 }
1566
1567 //
1568 // This routine queues a thread that is ready on the PRCB's ready lists.
1569 // If this thread cannot currently run on this CPU, then the thread is
1570 // added to the deferred ready list instead.
1571 //
1572 // This routine must be entered with the PRCB lock held and it will exit
1573 // with the PRCB lock released!
1574 //
1575 FORCEINLINE
1576 VOID
1577 KxQueueReadyThread(IN PKTHREAD Thread,
1578 IN PKPRCB Prcb)
1579 {
1580 BOOLEAN Preempted;
1581 KPRIORITY Priority;
1582
1583 /* Sanity checks */
1584 ASSERT(Prcb == KeGetCurrentPrcb());
1585 ASSERT(Thread->State == Running);
1586 ASSERT(Thread->NextProcessor == Prcb->Number);
1587
1588 /* Check if this thread is allowed to run in this CPU */
1589 #ifdef CONFIG_SMP
1590 if ((Thread->Affinity) & (Prcb->SetMember))
1591 #else
1592 if (TRUE)
1593 #endif
1594 {
1595 /* Set thread ready for execution */
1596 Thread->State = Ready;
1597
1598 /* Save the current priority and whether the thread had been preempted */
1599 Priority = Thread->Priority;
1600 Preempted = Thread->Preempted;
1601
1602 /* Clear the preempted flag and set the wait time */
1603 Thread->Preempted = FALSE;
1604 Thread->WaitTime = KeTickCount.LowPart;
1605
1606 /* Sanity check */
1607 ASSERT((Priority >= 0) && (Priority <= HIGH_PRIORITY));
1608
1609 /* Insert this thread in the appropriate order */
1610 Preempted ? InsertHeadList(&Prcb->DispatcherReadyListHead[Priority],
1611 &Thread->WaitListEntry) :
1612 InsertTailList(&Prcb->DispatcherReadyListHead[Priority],
1613 &Thread->WaitListEntry);
1614
1615 /* Update the ready summary */
1616 Prcb->ReadySummary |= PRIORITY_MASK(Priority);
1617
1618 /* Sanity check */
1619 ASSERT(Priority == Thread->Priority);
1620
1621 /* Release the PRCB lock */
1622 KiReleasePrcbLock(Prcb);
1623 }
1624 else
1625 {
1626 /* Otherwise, prepare this thread to be deferred */
1627 Thread->State = DeferredReady;
1628 Thread->DeferredProcessor = Prcb->Number;
1629
1630 /* Release the lock and defer scheduling */
1631 KiReleasePrcbLock(Prcb);
1632 KiDeferredReadyThread(Thread);
1633 }
1634 }
1635
1636 //
1637 // This routine scans for an appropriate ready thread to select at the
1638 // given priority and for the given CPU.
1639 //
1640 FORCEINLINE
1641 PKTHREAD
1642 KiSelectReadyThread(IN KPRIORITY Priority,
1643 IN PKPRCB Prcb)
1644 {
1645 ULONG PrioritySet;
1646 LONG HighPriority;
1647 PLIST_ENTRY ListEntry;
1648 PKTHREAD Thread = NULL;
1649
1650 /* Save the current mask and get the priority set for the CPU */
1651 PrioritySet = Prcb->ReadySummary >> Priority;
1652 if (!PrioritySet) goto Quickie;
1653
1654 /* Get the highest priority possible */
1655 BitScanReverse((PULONG)&HighPriority, PrioritySet);
1656 ASSERT((PrioritySet & PRIORITY_MASK(HighPriority)) != 0);
1657 HighPriority += Priority;
1658
1659 /* Make sure the list isn't empty at the highest priority */
1660 ASSERT(IsListEmpty(&Prcb->DispatcherReadyListHead[HighPriority]) == FALSE);
1661
1662 /* Get the first thread on the list */
1663 ListEntry = Prcb->DispatcherReadyListHead[HighPriority].Flink;
1664 Thread = CONTAINING_RECORD(ListEntry, KTHREAD, WaitListEntry);
1665
1666 /* Make sure this thread is here for a reason */
1667 ASSERT(HighPriority == Thread->Priority);
1668 ASSERT(Thread->Affinity & AFFINITY_MASK(Prcb->Number));
1669 ASSERT(Thread->NextProcessor == Prcb->Number);
1670
1671 /* Remove it from the list */
1672 if (RemoveEntryList(&Thread->WaitListEntry))
1673 {
1674 /* The list is empty now, reset the ready summary */
1675 Prcb->ReadySummary ^= PRIORITY_MASK(HighPriority);
1676 }
1677
1678 /* Sanity check and return the thread */
1679 Quickie:
1680 ASSERT((Thread == NULL) ||
1681 (Thread->BasePriority == 0) ||
1682 (Thread->Priority != 0));
1683 return Thread;
1684 }
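//
// Worked example (values hypothetical): with ReadySummary = 0x104 (ready
// threads at priorities 2 and 8) and a minimum Priority of 1,
// PrioritySet = 0x104 >> 1 = 0x82, BitScanReverse returns 7, and the selected
// queue is 7 + 1 = 8, the highest ready priority at or above the request.
//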
1685
1686 //
1687 // This routine computes the new priority for a thread. It is only valid for
1688 // threads with priorities in the dynamic priority range.
1689 //
1690 FORCEINLINE
1691 SCHAR
1692 KiComputeNewPriority(IN PKTHREAD Thread,
1693 IN SCHAR Adjustment)
1694 {
1695 SCHAR Priority;
1696
1697 /* Priority sanity checks */
1698 ASSERT((Thread->PriorityDecrement >= 0) &&
1699 (Thread->PriorityDecrement <= Thread->Priority));
1700 ASSERT((Thread->Priority < LOW_REALTIME_PRIORITY) ?
1701 TRUE : (Thread->PriorityDecrement == 0));
1702
1703 /* Get the current priority */
1704 Priority = Thread->Priority;
1705 if (Priority < LOW_REALTIME_PRIORITY)
1706 {
1707 /* Decrease priority by the priority decrement */
1708 Priority -= (Thread->PriorityDecrement + Adjustment);
1709
1710 /* Don't go out of bounds */
1711 if (Priority < Thread->BasePriority) Priority = Thread->BasePriority;
1712
1713 /* Reset the priority decrement */
1714 Thread->PriorityDecrement = 0;
1715 }
1716
1717 /* Sanity check */
1718 ASSERT((Thread->BasePriority == 0) || (Priority != 0));
1719
1720 /* Return the new priority */
1721 return Priority;
1722 }
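//
// Worked example (values hypothetical): a dynamic thread at Priority 10 with
// PriorityDecrement 2 and Adjustment 1 drops to 10 - (2 + 1) = 7; if its
// BasePriority were 8 the result is clamped to 8 instead. Real-time
// priorities (>= LOW_REALTIME_PRIORITY) are returned unchanged.
//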
1723
1724 //
1725 // Guarded Mutex Routines
1726 //
1727 FORCEINLINE
1728 VOID
1729 _KeInitializeGuardedMutex(OUT PKGUARDED_MUTEX GuardedMutex)
1730 {
1731 /* Setup the Initial Data */
1732 GuardedMutex->Count = GM_LOCK_BIT;
1733 GuardedMutex->Owner = NULL;
1734 GuardedMutex->Contention = 0;
1735
1736 /* Initialize the Wait Gate */
1737 KeInitializeGate(&GuardedMutex->Gate);
1738 }
1739
1740 FORCEINLINE
1741 VOID
1742 _KeAcquireGuardedMutexUnsafe(IN OUT PKGUARDED_MUTEX GuardedMutex)
1743 {
1744 PKTHREAD Thread = KeGetCurrentThread();
1745
1746 /* Sanity checks */
1747 ASSERT((KeGetCurrentIrql() == APC_LEVEL) ||
1748 (Thread->SpecialApcDisable < 0) ||
1749 (Thread->Teb == NULL) ||
1750 (Thread->Teb >= (PTEB)MM_SYSTEM_RANGE_START));
1751 ASSERT(GuardedMutex->Owner != Thread);
1752
1753 /* Remove the lock */
1754 if (!InterlockedBitTestAndReset(&GuardedMutex->Count, GM_LOCK_BIT_V))
1755 {
1756 /* The Guarded Mutex was already locked, enter contended case */
1757 KiAcquireGuardedMutex(GuardedMutex);
1758 }
1759
1760 /* Set the Owner */
1761 GuardedMutex->Owner = Thread;
1762 }
1763
1764 FORCEINLINE
1765 VOID
1766 _KeReleaseGuardedMutexUnsafe(IN OUT PKGUARDED_MUTEX GuardedMutex)
1767 {
1768 LONG OldValue, NewValue;
1769
1770 /* Sanity checks */
1771 ASSERT((KeGetCurrentIrql() == APC_LEVEL) ||
1772 (KeGetCurrentThread()->SpecialApcDisable < 0) ||
1773 (KeGetCurrentThread()->Teb == NULL) ||
1774 (KeGetCurrentThread()->Teb >= (PTEB)MM_SYSTEM_RANGE_START));
1775 ASSERT(GuardedMutex->Owner == KeGetCurrentThread());
1776
1777 /* Destroy the Owner */
1778 GuardedMutex->Owner = NULL;
1779
1780 /* Add the Lock Bit */
1781 OldValue = InterlockedExchangeAdd(&GuardedMutex->Count, GM_LOCK_BIT);
1782 ASSERT((OldValue & GM_LOCK_BIT) == 0);
1783
1784 /* Check if it was already locked, but not woken */
1785 if ((OldValue) && !(OldValue & GM_LOCK_WAITER_WOKEN))
1786 {
1787 /* Update OldValue to what it should be now */
1788 OldValue += GM_LOCK_BIT;
1789
1790 /* The mutex will be woken, minus one waiter */
1791 NewValue = OldValue + GM_LOCK_WAITER_WOKEN -
1792 GM_LOCK_WAITER_INC;
1793
1794 /* Remove the Woken bit */
1795 if (InterlockedCompareExchange(&GuardedMutex->Count,
1796 NewValue,
1797 OldValue) == OldValue)
1798 {
1799 /* Signal the Gate */
1800 KeSignalGateBoostPriority(&GuardedMutex->Gate);
1801 }
1802 }
1803 }
1804
1805 FORCEINLINE
1806 VOID
1807 _KeAcquireGuardedMutex(IN PKGUARDED_MUTEX GuardedMutex)
1808 {
1809 PKTHREAD Thread = KeGetCurrentThread();
1810
1811 /* Sanity checks */
1812 ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
1813 ASSERT(GuardedMutex->Owner != Thread);
1814
1815 /* Disable Special APCs */
1816 KeEnterGuardedRegion();
1817
1818 /* Remove the lock */
1819 if (!InterlockedBitTestAndReset(&GuardedMutex->Count, GM_LOCK_BIT_V))
1820 {
1821 /* The Guarded Mutex was already locked, enter contended case */
1822 KiAcquireGuardedMutex(GuardedMutex);
1823 }
1824
1825 /* Set the Owner and Special APC Disable state */
1826 GuardedMutex->Owner = Thread;
1827 GuardedMutex->SpecialApcDisable = Thread->SpecialApcDisable;
1828 }
1829
1830 FORCEINLINE
1831 VOID
1832 _KeReleaseGuardedMutex(IN OUT PKGUARDED_MUTEX GuardedMutex)
1833 {
1834 LONG OldValue, NewValue;
1835
1836 /* Sanity checks */
1837 ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
1838 ASSERT(GuardedMutex->Owner == KeGetCurrentThread());
1839 ASSERT(KeGetCurrentThread()->SpecialApcDisable ==
1840 GuardedMutex->SpecialApcDisable);
1841
1842 /* Destroy the Owner */
1843 GuardedMutex->Owner = NULL;
1844
1845 /* Add the Lock Bit */
1846 OldValue = InterlockedExchangeAdd(&GuardedMutex->Count, GM_LOCK_BIT);
1847 ASSERT((OldValue & GM_LOCK_BIT) == 0);
1848
1849 /* Check if it was already locked, but not woken */
1850 if ((OldValue) && !(OldValue & GM_LOCK_WAITER_WOKEN))
1851 {
1852 /* Update OldValue to what it should be now */
1853 OldValue += GM_LOCK_BIT;
1854
1855 /* The mutex will be woken, minus one waiter */
1856 NewValue = OldValue + GM_LOCK_WAITER_WOKEN -
1857 GM_LOCK_WAITER_INC;
1858
1859 /* Remove the Woken bit */
1860 if (InterlockedCompareExchange(&GuardedMutex->Count,
1861 NewValue,
1862 OldValue) == OldValue)
1863 {
1864 /* Signal the Gate */
1865 KeSignalGateBoostPriority(&GuardedMutex->Gate);
1866 }
1867 }
1868
1869 /* Re-enable APCs */
1870 KeLeaveGuardedRegion();
1871 }
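//
// Illustrative usage sketch: the inlines above are meant to be used as a
// strictly paired acquire/release around the data the mutex guards.
//
//     KGUARDED_MUTEX Mutex;
//
//     _KeInitializeGuardedMutex(&Mutex);
//     ...
//     _KeAcquireGuardedMutex(&Mutex);
//     /* ... touch the data protected by Mutex ... */
//     _KeReleaseGuardedMutex(&Mutex);
//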
1872
1873 FORCEINLINE
1874 BOOLEAN
1875 _KeTryToAcquireGuardedMutex(IN OUT PKGUARDED_MUTEX GuardedMutex)
1876 {
1877 PKTHREAD Thread = KeGetCurrentThread();
1878
1879 /* Block APCs */
1880 KeEnterGuardedRegion();
1881
1882 /* Remove the lock */
1883 if (!InterlockedBitTestAndReset(&GuardedMutex->Count, GM_LOCK_BIT_V))
1884 {
1885 /* Re-enable APCs */
1886 KeLeaveGuardedRegion();
1887 YieldProcessor();
1888
1889 /* Return failure */
1890 return FALSE;
1891 }
1892
1893 /* Set the Owner and APC State */
1894 GuardedMutex->Owner = Thread;
1895 GuardedMutex->SpecialApcDisable = Thread->SpecialApcDisable;
1896 return TRUE;
1897 }