1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: GPL - See COPYING in the top level directory
4 * FILE: ntoskrnl/include/internal/ke_x.h
5 * PURPOSE: Internal Inlined Functions for the Kernel
6 * PROGRAMMERS: Alex Ionescu (alex.ionescu@reactos.org)
7 */
8
9 //
10 // Thread Dispatcher Header DebugActive Mask
11 //
12 #define DR_MASK(x) (1 << (x))
13 #define DR_ACTIVE_MASK 0x10
14 #define DR_REG_MASK 0x4F
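//
// Example (sketch, not from this file): DR_MASK() builds the per-register bit
// tested against the dispatcher header's DebugActive field, so checking
// whether DR3 is in use would look roughly like:
//
//     if (DebugActive & DR_MASK(3)) { /* hardware breakpoint 3 is armed */ }
//
// where DebugActive stands for the thread's DebugActive byte (illustrative only).
//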
15
16 #ifdef _M_IX86
17 //
18 // Sanitizes a selector
19 //
20 FORCEINLINE
21 ULONG
22 Ke386SanitizeSeg(IN ULONG Cs,
23 IN KPROCESSOR_MODE Mode)
24 {
25 //
26 // Check if we're in kernel-mode, and force CPL 0 if so.
27 // Otherwise, force CPL 3.
28 //
29 return ((Mode == KernelMode) ?
30 (Cs & (0xFFFF & ~RPL_MASK)) :
31 (RPL_MASK | (Cs & 0xFFFF)));
32 }
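//
// Example (sketch): when a user-supplied CONTEXT is copied back into a trap
// frame, the selector is sanitized first, along the lines of:
//
//     TrapFrame->SegCs = Ke386SanitizeSeg(Context->SegCs, PreviousMode);
//
// (caller and field names here are illustrative, not taken from this file)
//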
33
34 //
35 // Sanitizes EFLAGS
36 //
37 FORCEINLINE
38 ULONG
39 Ke386SanitizeFlags(IN ULONG Eflags,
40 IN KPROCESSOR_MODE Mode)
41 {
42 //
43 // Check if we're in kernel-mode, and sanitize EFLAGS if so.
44 // Otherwise, also force interrupt mask on.
45 //
46 return ((Mode == KernelMode) ?
47 (Eflags & (EFLAGS_USER_SANITIZE | EFLAGS_INTERRUPT_MASK)) :
48 (EFLAGS_INTERRUPT_MASK | (Eflags & EFLAGS_USER_SANITIZE)));
49 }
50
51 //
52 // Gets a DR register from a CONTEXT structure
53 //
54 FORCEINLINE
55 PVOID
56 KiDrFromContext(IN ULONG Dr,
57 IN PCONTEXT Context)
58 {
59 return *(PVOID*)((ULONG_PTR)Context + KiDebugRegisterContextOffsets[Dr]);
60 }
61
62 //
63 // Gets a DR register from a KTRAP_FRAME structure
64 //
65 FORCEINLINE
66 PVOID*
67 KiDrFromTrapFrame(IN ULONG Dr,
68 IN PKTRAP_FRAME TrapFrame)
69 {
70 return (PVOID*)((ULONG_PTR)TrapFrame + KiDebugRegisterTrapOffsets[Dr]);
71 }
72
73 //
74 // Sanitizes a Debug Register address
75 //
76 FORCEINLINE
77 PVOID
78 Ke386SanitizeDr(IN PVOID DrAddress,
79 IN KPROCESSOR_MODE Mode)
80 {
81 //
82 // Check if we're in kernel-mode, and return the address directly if so.
83 // Otherwise, make sure it's not inside the kernel-mode address space.
84 // If it is, then clear the address.
85 //
86 return ((Mode == KernelMode) ? DrAddress :
87 (DrAddress <= MM_HIGHEST_USER_ADDRESS) ? DrAddress : 0);
88 }
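//
// Example (sketch): restoring a debug register from a user-mode CONTEXT
// combines the helpers above, roughly:
//
//     *KiDrFromTrapFrame(3, TrapFrame) =
//         Ke386SanitizeDr(KiDrFromContext(3, Context), PreviousMode);
//
// so a user-supplied breakpoint address can never point into kernel space.
//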
89 #endif /* _M_IX86 */
90
91 #ifndef _M_ARM
92 FORCEINLINE
93 PRKTHREAD
94 KeGetCurrentThread(VOID)
95 {
96 #ifdef _M_IX86
97 /* Return the current thread */
98 return ((PKIPCR)KeGetPcr())->PrcbData.CurrentThread;
99 #elif defined (_M_AMD64)
100 return (PRKTHREAD)__readgsqword(FIELD_OFFSET(KIPCR, Prcb.CurrentThread));
101 #else
102 PKPRCB Prcb = KeGetCurrentPrcb();
103 return Prcb->CurrentThread;
104 #endif
105 }
106
107 FORCEINLINE
108 UCHAR
109 KeGetPreviousMode(VOID)
110 {
111 /* Return the current mode */
112 return KeGetCurrentThread()->PreviousMode;
113 }
114 #endif
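//
// Example (sketch, not from this file): KeGetPreviousMode() is typically used
// by system call paths to decide whether caller-supplied pointers need probing:
//
//     if (KeGetPreviousMode() != KernelMode)
//     {
//         ProbeForRead(UserBuffer, Length, sizeof(UCHAR));
//     }
//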
115
116 FORCEINLINE
117 VOID
118 KeFlushProcessTb(VOID)
119 {
120 /* Flush the TLB by resetting CR3 */
121 #ifdef _M_PPC
122 __asm__("sync\n\tisync\n\t");
122 #elif defined(_M_ARM)
124 //
125 // We need to implement this!
126 //
127 ASSERTMSG("Need ARM flush routine\n", FALSE);
128 #else
129 __writecr3(__readcr3());
130 #endif
131 }
132
133 //
134 // Enters a Guarded Region
135 //
136 #define KeEnterGuardedRegion() \
137 { \
138 PKTHREAD _Thread = KeGetCurrentThread(); \
139 \
140 /* Sanity checks */ \
141 ASSERT(KeGetCurrentIrql() <= APC_LEVEL); \
142 ASSERT(_Thread == KeGetCurrentThread()); \
143 ASSERT((_Thread->SpecialApcDisable <= 0) && \
144 (_Thread->SpecialApcDisable != -32768)); \
145 \
146 /* Disable Special APCs */ \
147 _Thread->SpecialApcDisable--; \
148 }
149
150 //
151 // Leaves a Guarded Region
152 //
153 #define KeLeaveGuardedRegion() \
154 { \
155 PKTHREAD _Thread = KeGetCurrentThread(); \
156 \
157 /* Sanity checks */ \
158 ASSERT(KeGetCurrentIrql() <= APC_LEVEL); \
159 ASSERT(_Thread == KeGetCurrentThread()); \
160 ASSERT(_Thread->SpecialApcDisable < 0); \
161 \
162 /* Leave region and check if APCs are OK now */ \
163 if (!(++_Thread->SpecialApcDisable)) \
164 { \
165 /* Check for Kernel APCs on the list */ \
166 if (!IsListEmpty(&_Thread->ApcState. \
167 ApcListHead[KernelMode])) \
168 { \
169 /* Check for APC Delivery */ \
170 KiCheckForKernelApcDelivery(); \
171 } \
172 } \
173 }
174
175 //
176 // Enters a Critical Region
177 //
178 #define KeEnterCriticalRegion() \
179 { \
180 PKTHREAD _Thread = KeGetCurrentThread(); \
181 \
182 /* Sanity checks */ \
183 ASSERT(_Thread == KeGetCurrentThread()); \
184 ASSERT((_Thread->KernelApcDisable <= 0) && \
185 (_Thread->KernelApcDisable != -32768)); \
186 \
187 /* Disable Kernel APCs */ \
188 _Thread->KernelApcDisable--; \
189 }
190
191 //
192 // Leaves a Critical Region
193 //
194 #define KeLeaveCriticalRegion() \
195 { \
196 PKTHREAD _Thread = KeGetCurrentThread(); \
197 \
198 /* Sanity checks */ \
199 ASSERT(_Thread == KeGetCurrentThread()); \
200 ASSERT(_Thread->KernelApcDisable < 0); \
201 \
202 /* Enable Kernel APCs */ \
203 _Thread->KernelApcDisable++; \
204 \
205 /* Check if Kernel APCs are now enabled */ \
206 if (!(_Thread->KernelApcDisable)) \
207 { \
208 /* Check if we need to request an APC Delivery */ \
209 if (!(IsListEmpty(&_Thread->ApcState.ApcListHead[KernelMode])) && \
210 !(_Thread->SpecialApcDisable)) \
211 { \
212 /* Check for the right environment */ \
213 KiCheckForKernelApcDelivery(); \
214 } \
215 } \
216 }
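//
// Example (sketch): these macros are used in matched pairs around code that
// must not be interrupted by normal kernel APCs (a guarded region additionally
// blocks special kernel APCs), for instance:
//
//     KeEnterCriticalRegion();
//     /* ... touch state that a kernel APC could otherwise re-enter ... */
//     KeLeaveCriticalRegion();
//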
217
218 #ifndef CONFIG_SMP
219 //
220 // Spinlock Acquire at IRQL >= DISPATCH_LEVEL
221 //
222 FORCEINLINE
223 VOID
224 KxAcquireSpinLock(IN PKSPIN_LOCK SpinLock)
225 {
226 /* On UP builds, spinlocks don't exist at IRQL >= DISPATCH */
227 UNREFERENCED_PARAMETER(SpinLock);
228 }
229
230 //
231 // Spinlock Release at IRQL >= DISPATCH_LEVEL
232 //
233 FORCEINLINE
234 VOID
235 KxReleaseSpinLock(IN PKSPIN_LOCK SpinLock)
236 {
237 /* On UP builds, spinlocks don't exist at IRQL >= DISPATCH */
238 UNREFERENCED_PARAMETER(SpinLock);
239 }
240
241 //
242 // This routine protects against acquires from multiple CPUs; it's meaningless on UP.
243 //
244 FORCEINLINE
245 VOID
246 KiAcquireDispatcherObject(IN DISPATCHER_HEADER* Object)
247 {
248 UNREFERENCED_PARAMETER(Object);
249 }
250
251 //
252 // This routine protects against acquires from multiple CPUs; it's meaningless on UP.
253 //
254 FORCEINLINE
255 VOID
256 KiReleaseDispatcherObject(IN DISPATCHER_HEADER* Object)
257 {
258 UNREFERENCED_PARAMETER(Object);
259 }
260
261 FORCEINLINE
262 KIRQL
263 KiAcquireDispatcherLock(VOID)
264 {
265 /* Raise to DPC level */
266 return KeRaiseIrqlToDpcLevel();
267 }
268
269 FORCEINLINE
270 VOID
271 KiReleaseDispatcherLock(IN KIRQL OldIrql)
272 {
273 /* Just exit the dispatcher */
274 KiExitDispatcher(OldIrql);
275 }
276
277 FORCEINLINE
278 VOID
279 KiAcquireDispatcherLockAtDpcLevel(VOID)
280 {
281 /* This is a no-op at DPC Level for UP systems */
282 return;
283 }
284
285 FORCEINLINE
286 VOID
287 KiReleaseDispatcherLockFromDpcLevel(VOID)
288 {
289 /* This is a no-op at DPC Level for UP systems */
290 return;
291 }
292
293 //
294 // This routine makes the thread deferred ready on the boot CPU.
295 //
296 FORCEINLINE
297 VOID
298 KiInsertDeferredReadyList(IN PKTHREAD Thread)
299 {
300 /* Set the thread to deferred state and boot CPU */
301 Thread->State = DeferredReady;
302 Thread->DeferredProcessor = 0;
303
304 /* Make the thread ready immediately */
305 KiDeferredReadyThread(Thread);
306 }
307
308 FORCEINLINE
309 VOID
310 KiRescheduleThread(IN BOOLEAN NewThread,
311 IN ULONG Cpu)
312 {
313 /* This is meaningless on UP systems */
314 UNREFERENCED_PARAMETER(NewThread);
315 UNREFERENCED_PARAMETER(Cpu);
316 }
317
318 //
319 // This routine protects against acquires from multiple CPUs; it's meaningless on UP.
320 //
321 FORCEINLINE
322 VOID
323 KiSetThreadSwapBusy(IN PKTHREAD Thread)
324 {
325 UNREFERENCED_PARAMETER(Thread);
326 }
327
328 //
329 // This routine protects against acquires from multiple CPUs; it's meaningless on UP.
330 //
331 FORCEINLINE
332 VOID
333 KiAcquirePrcbLock(IN PKPRCB Prcb)
334 {
335 UNREFERENCED_PARAMETER(Prcb);
336 }
337
338 //
339 // This routine protects against acquires from multiple CPUs; it's meaningless on UP.
340 //
341 FORCEINLINE
342 VOID
343 KiReleasePrcbLock(IN PKPRCB Prcb)
344 {
345 UNREFERENCED_PARAMETER(Prcb);
346 }
347
348 //
349 // This routine protects against acquires from multiple CPUs; it's meaningless on UP.
350 //
351 FORCEINLINE
352 VOID
353 KiAcquireThreadLock(IN PKTHREAD Thread)
354 {
355 UNREFERENCED_PARAMETER(Thread);
356 }
357
358 //
359 // This routine protects against acquires from multiple CPUs; it's meaningless on UP.
360 //
361 FORCEINLINE
362 VOID
363 KiReleaseThreadLock(IN PKTHREAD Thread)
364 {
365 UNREFERENCED_PARAMETER(Thread);
366 }
367
368 //
369 // This routine protects against acquires from multiple CPUs; it's meaningless on UP.
370 //
371 FORCEINLINE
372 BOOLEAN
373 KiTryThreadLock(IN PKTHREAD Thread)
374 {
375 UNREFERENCED_PARAMETER(Thread);
376 return FALSE;
377 }
378
379 FORCEINLINE
380 VOID
381 KiCheckDeferredReadyList(IN PKPRCB Prcb)
382 {
383 /* There are no deferred ready lists on UP systems */
384 UNREFERENCED_PARAMETER(Prcb);
385 }
386
387 FORCEINLINE
388 VOID
389 KiRundownThread(IN PKTHREAD Thread)
390 {
391 #if defined(_M_IX86)
392 /* Check if this is the NPX Thread */
393 if (KeGetCurrentPrcb()->NpxThread == Thread)
394 {
395 /* Clear it */
396 KeGetCurrentPrcb()->NpxThread = NULL;
397 KeArchFnInit();
398 }
399 #endif
400 }
401
402 FORCEINLINE
403 VOID
404 KiRequestApcInterrupt(IN BOOLEAN NeedApc,
405 IN UCHAR Processor)
406 {
407 /* We deliver instantly on UP */
408 UNREFERENCED_PARAMETER(NeedApc);
409 UNREFERENCED_PARAMETER(Processor);
410 }
411
412 FORCEINLINE
413 PKSPIN_LOCK_QUEUE
414 KiAcquireTimerLock(IN ULONG Hand)
415 {
416 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
417
418 /* Nothing to do on UP */
419 UNREFERENCED_PARAMETER(Hand);
420 return NULL;
421 }
422
423 FORCEINLINE
424 VOID
425 KiReleaseTimerLock(IN PKSPIN_LOCK_QUEUE LockQueue)
426 {
427 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
428
429 /* Nothing to do on UP */
430 UNREFERENCED_PARAMETER(LockQueue);
431 }
432
433 #else
434
435 //
436 // Spinlock Acquisition at IRQL >= DISPATCH_LEVEL
437 //
438 FORCEINLINE
439 VOID
440 KxAcquireSpinLock(IN PKSPIN_LOCK SpinLock)
441 {
442 for (;;)
443 {
444 /* Try to acquire it */
445 if (InterlockedBitTestAndSet((PLONG)SpinLock, 0))
446 {
447 /* The lock is owned, spin until it gets released */
448 while ((*(volatile KSPIN_LOCK *)SpinLock) & 1)
449 {
450 #ifdef DBG
451 /* On debug builds, we use a much slower but useful routine */
452 //Kii386SpinOnSpinLock(SpinLock, 5);
453
454 /* FIXME: Do normal yield for now */
455 YieldProcessor();
456 #else
457 /* Otherwise, just yield and keep looping */
458 YieldProcessor();
459 #endif
460 }
461 }
462 else
463 {
464 #ifdef DBG
465 /* On debug builds, we OR in the KTHREAD */
466 *SpinLock = (KSPIN_LOCK)KeGetCurrentThread() | 1;
467 #endif
468 /* All is well, break out */
469 break;
470 }
471 }
472 }
473
474 //
475 // Spinlock Release at IRQL >= DISPATCH_LEVEL
476 //
477 FORCEINLINE
478 VOID
479 KxReleaseSpinLock(IN PKSPIN_LOCK SpinLock)
480 {
481 #ifdef DBG
482 /* Make sure that the threads match */
483 if (((KSPIN_LOCK)KeGetCurrentThread() | 1) != *SpinLock)
484 {
485 /* They don't, bugcheck */
486 KeBugCheckEx(SPIN_LOCK_NOT_OWNED, (ULONG_PTR)SpinLock, 0, 0, 0);
487 }
488 #endif
489 /* Clear the lock */
490 InterlockedAnd((PLONG)SpinLock, 0);
491 }
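//
// Example (sketch): these helpers form the body of the public spinlock
// routines; a raise-and-acquire wrapper would look roughly like:
//
//     OldIrql = KfRaiseIrql(DISPATCH_LEVEL);
//     KxAcquireSpinLock(SpinLock);
//     /* ... critical section ... */
//     KxReleaseSpinLock(SpinLock);
//     KeLowerIrql(OldIrql);
//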
492
493 FORCEINLINE
494 VOID
495 KiAcquireDispatcherObject(IN DISPATCHER_HEADER* Object)
496 {
497 LONG OldValue;
498
499 /* Make sure we're at a safe level to touch the lock */
500 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
501
502 /* Start acquire loop */
503 do
504 {
505 /* Loop until the other CPU releases it */
506 while (TRUE)
507 {
508 /* Check if it got released */
509 OldValue = Object->Lock;
510 if ((OldValue & KOBJECT_LOCK_BIT) == 0) break;
511
512 /* Let the CPU know that this is a loop */
513 YieldProcessor();
514 }
515
516 /* Try acquiring the lock now */
517 } while (InterlockedCompareExchange(&Object->Lock,
518 OldValue | KOBJECT_LOCK_BIT,
519 OldValue) != OldValue);
520 }
521
522 FORCEINLINE
523 VOID
524 KiReleaseDispatcherObject(IN DISPATCHER_HEADER* Object)
525 {
526 /* Make sure we're at a safe level to touch the lock */
527 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
528
529 /* Release it */
530 InterlockedAnd(&Object->Lock, ~KOBJECT_LOCK_BIT);
531 }
532
533 FORCEINLINE
534 KIRQL
535 KiAcquireDispatcherLock(VOID)
536 {
537 /* Raise to synchronization level and acquire the dispatcher lock */
538 return KeAcquireQueuedSpinLockRaiseToSynch(LockQueueDispatcherLock);
539 }
540
541 FORCEINLINE
542 VOID
543 KiReleaseDispatcherLock(IN KIRQL OldIrql)
544 {
545 /* First release the lock */
546 KeReleaseQueuedSpinLockFromDpcLevel(&KeGetCurrentPrcb()->
547 LockQueue[LockQueueDispatcherLock]);
548
549 /* Then exit the dispatcher */
550 KiExitDispatcher(OldIrql);
551 }
552
553 FORCEINLINE
554 VOID
555 KiAcquireDispatcherLockAtDpcLevel(VOID)
556 {
557 /* Acquire the dispatcher lock */
558 KeAcquireQueuedSpinLockAtDpcLevel(LockQueueDispatcherLock);
559 }
560
561 FORCEINLINE
562 VOID
563 KiReleaseDispatcherLockFromDpcLevel(VOID)
564 {
565 /* Release the dispatcher lock */
566 KeReleaseQueuedSpinLockFromDpcLevel(LockQueueDispatcherLock);
567 }
568
569 //
570 // This routine inserts a thread into the deferred ready list of the given CPU
571 //
572 FORCEINLINE
573 VOID
574 KiInsertDeferredReadyList(IN PKTHREAD Thread)
575 {
576 PKPRCB Prcb = KeGetCurrentPrcb();
577
578 /* Set the thread to deferred state and CPU */
579 Thread->State = DeferredReady;
580 Thread->DeferredProcessor = Prcb->Number;
581
582 /* Add it on the list */
583 PushEntryList(&Prcb->DeferredReadyListHead, &Thread->SwapListEntry);
584 }
585
586 FORCEINLINE
587 VOID
588 KiRescheduleThread(IN BOOLEAN NewThread,
589 IN ULONG Cpu)
590 {
591 /* Check if a new thread needs to be scheduled on a different CPU */
592 if ((NewThread) && !(KeGetPcr()->Number == Cpu))
593 {
594 /* Send an IPI to request delivery */
595 KiIpiSend(AFFINITY_MASK(Cpu), IPI_DPC);
596 }
597 }
598
599 //
600 // This routine sets the current thread in a swap busy state, which ensures that
601 // nobody else tries to swap it concurrently.
602 //
603 FORCEINLINE
604 VOID
605 KiSetThreadSwapBusy(IN PKTHREAD Thread)
606 {
607 /* Make sure nobody already set it */
608 ASSERT(Thread->SwapBusy == FALSE);
609
610 /* Set it ourselves */
611 Thread->SwapBusy = TRUE;
612 }
613
614 //
615 // This routine acquires the PRCB lock so that only one caller can touch
616 // volatile PRCB data.
617 //
618 // Since this is a simple optimized spin-lock, it must only be acquired
619 // at dispatcher level or higher!
620 //
621 FORCEINLINE
622 VOID
623 KiAcquirePrcbLock(IN PKPRCB Prcb)
624 {
625 /* Make sure we're at a safe level to touch the PRCB lock */
626 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
627
628 /* Start acquire loop */
629 for (;;)
630 {
631 /* Acquire the lock and break out if we acquired it first */
632 if (!InterlockedExchange((PLONG)&Prcb->PrcbLock, 1)) break;
633
634 /* Loop until the other CPU releases it */
635 do
636 {
637 /* Let the CPU know that this is a loop */
638 YieldProcessor();
639 } while (Prcb->PrcbLock);
640 }
641 }
642
643 //
644 // This routine releases the PRCB lock so that other callers can touch
645 // volatile PRCB data.
646 //
647 // Since this is a simple optimized spin-lock, it must only be acquired
648 // at dispatcher level or higher!
649 //
650 FORCEINLINE
651 VOID
652 KiReleasePrcbLock(IN PKPRCB Prcb)
653 {
654 /* Make sure it's acquired! */
655 ASSERT(Prcb->PrcbLock != 0);
656
657 /* Release it */
658 InterlockedAnd((PLONG)&Prcb->PrcbLock, 0);
659 }
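//
// Example (sketch): the PRCB lock brackets updates of volatile per-CPU
// scheduling state, e.g. selecting the next thread to run:
//
//     KiAcquirePrcbLock(Prcb);
//     Prcb->NextThread = NextThread;
//     KiReleasePrcbLock(Prcb);
//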
660
661 //
662 // This routine acquires the thread lock so that only one caller can touch
663 // volatile thread data.
664 //
665 // Since this is a simple optimized spin-lock, it must only be acquired
666 // at dispatcher level or higher!
667 //
668 FORCEINLINE
669 VOID
670 KiAcquireThreadLock(IN PKTHREAD Thread)
671 {
672 /* Make sure we're at a safe level to touch the thread lock */
673 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
674
675 /* Start acquire loop */
676 for (;;)
677 {
678 /* Acquire the lock and break out if we acquired it first */
679 if (!InterlockedExchange((PLONG)&Thread->ThreadLock, 1)) break;
680
681 /* Loop until the other CPU releases it */
682 do
683 {
684 /* Let the CPU know that this is a loop */
685 YieldProcessor();
686 } while (Thread->ThreadLock);
687 }
688 }
689
690 //
691 // This routine releases the thread lock so that other callers can touch
692 // volatile thread data.
693 //
694 // Since this is a simple optimized spin-lock, it must only be acquired
695 // at dispatcher level or higher!
696 //
697 FORCEINLINE
698 VOID
699 KiReleaseThreadLock(IN PKTHREAD Thread)
700 {
701 /* Release it */
702 InterlockedAnd((PLONG)&Thread->ThreadLock, 0);
703 }
704
705 FORCEINLINE
706 BOOLEAN
707 KiTryThreadLock(IN PKTHREAD Thread)
708 {
709 LONG Value;
710
711 /* If the lock isn't acquired, return false */
712 if (!Thread->ThreadLock) return FALSE;
713
714 /* Otherwise, try to acquire it and check the result */
715 Value = 1;
716 Value = InterlockedExchange((PLONG)&Thread->ThreadLock, Value);
717
718 /* Return the lock state */
719 return (Value == TRUE);
720 }
721
722 FORCEINLINE
723 VOID
724 KiCheckDeferredReadyList(IN PKPRCB Prcb)
725 {
726 /* Scan the deferred ready lists if required */
727 if (Prcb->DeferredReadyListHead.Next) KiProcessDeferredReadyList(Prcb);
728 }
729
730 FORCEINLINE
731 VOID
732 KiRundownThread(IN PKTHREAD Thread)
733 {
734 #if defined(_M_IX86) || defined(_M_AMD64)
735 /* FIXME: TODO */
736 ASSERTMSG("Not yet implemented\n", FALSE);
737 #endif
738 }
739
740 FORCEINLINE
741 VOID
742 KiRequestApcInterrupt(IN BOOLEAN NeedApc,
743 IN UCHAR Processor)
744 {
745 /* Check if we need to request APC delivery */
746 if (NeedApc)
747 {
748 /* Check if it's on another CPU */
749 if (KeGetPcr()->Number != Processor)
750 {
751 /* Send an IPI to request delivery */
752 KiIpiSend(AFFINITY_MASK(Processor), IPI_APC);
753 }
754 else
755 {
756 /* Request a software interrupt */
757 HalRequestSoftwareInterrupt(APC_LEVEL);
758 }
759 }
760 }
761
762 FORCEINLINE
763 PKSPIN_LOCK_QUEUE
764 KiAcquireTimerLock(IN ULONG Hand)
765 {
766 PKSPIN_LOCK_QUEUE LockQueue;
767 ULONG LockIndex;
768 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
769
770 /* Get the lock index */
771 LockIndex = Hand >> LOCK_QUEUE_TIMER_LOCK_SHIFT;
772 LockIndex &= (LOCK_QUEUE_TIMER_TABLE_LOCKS - 1);
773
774 /* Now get the lock */
775 LockQueue = &KeGetCurrentPrcb()->LockQueue[LockQueueTimerTableLock + LockIndex];
776
777 /* Acquire it and return */
778 KeAcquireQueuedSpinLockAtDpcLevel(LockQueue);
779 return LockQueue;
780 }
781
782 FORCEINLINE
783 VOID
784 KiReleaseTimerLock(IN PKSPIN_LOCK_QUEUE LockQueue)
785 {
786 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
787
788 /* Release the lock */
789 KeReleaseQueuedSpinLockFromDpcLevel(LockQueue);
790 }
791
792 #endif
793
794 FORCEINLINE
795 VOID
796 KiAcquireApcLock(IN PKTHREAD Thread,
797 IN PKLOCK_QUEUE_HANDLE Handle)
798 {
799 /* Acquire the lock and raise to synchronization level */
800 KeAcquireInStackQueuedSpinLockRaiseToSynch(&Thread->ApcQueueLock, Handle);
801 }
802
803 FORCEINLINE
804 VOID
805 KiAcquireApcLockAtDpcLevel(IN PKTHREAD Thread,
806 IN PKLOCK_QUEUE_HANDLE Handle)
807 {
808 /* Acquire the lock */
809 KeAcquireInStackQueuedSpinLockAtDpcLevel(&Thread->ApcQueueLock, Handle);
810 }
811
812 FORCEINLINE
813 VOID
814 KiAcquireApcLockAtApcLevel(IN PKTHREAD Thread,
815 IN PKLOCK_QUEUE_HANDLE Handle)
816 {
817 /* Acquire the lock */
818 KeAcquireInStackQueuedSpinLock(&Thread->ApcQueueLock, Handle);
819 }
820
821 FORCEINLINE
822 VOID
823 KiReleaseApcLock(IN PKLOCK_QUEUE_HANDLE Handle)
824 {
825 /* Release the lock */
826 KeReleaseInStackQueuedSpinLock(Handle);
827 }
828
829 FORCEINLINE
830 VOID
831 KiReleaseApcLockFromDpcLevel(IN PKLOCK_QUEUE_HANDLE Handle)
832 {
833 /* Release the lock */
834 KeReleaseInStackQueuedSpinLockFromDpcLevel(Handle);
835 }
836
837 FORCEINLINE
838 VOID
839 KiAcquireProcessLock(IN PKPROCESS Process,
840 IN PKLOCK_QUEUE_HANDLE Handle)
841 {
842 /* Acquire the lock and raise to synchronization level */
843 KeAcquireInStackQueuedSpinLockRaiseToSynch(&Process->ProcessLock, Handle);
844 }
845
846 FORCEINLINE
847 VOID
848 KiReleaseProcessLock(IN PKLOCK_QUEUE_HANDLE Handle)
849 {
850 /* Release the lock */
851 KeReleaseInStackQueuedSpinLock(Handle);
852 }
853
854 FORCEINLINE
855 VOID
856 KiReleaseProcessLockFromDpcLevel(IN PKLOCK_QUEUE_HANDLE Handle)
857 {
858 /* Release the lock */
859 KeReleaseInStackQueuedSpinLockFromDpcLevel(Handle);
860 }
861
862 FORCEINLINE
863 VOID
864 KiAcquireDeviceQueueLock(IN PKDEVICE_QUEUE DeviceQueue,
865 IN PKLOCK_QUEUE_HANDLE DeviceLock)
866 {
867 /* Check if we were called from a threaded DPC */
868 if (KeGetCurrentPrcb()->DpcThreadActive)
869 {
870 /* Lock the Queue, we're not at DPC level */
871 KeAcquireInStackQueuedSpinLock(&DeviceQueue->Lock, DeviceLock);
872 }
873 else
874 {
875 /* We must be at DPC level, acquire the lock safely */
876 ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
877 KeAcquireInStackQueuedSpinLockAtDpcLevel(&DeviceQueue->Lock,
878 DeviceLock);
879 }
880 }
881
882 FORCEINLINE
883 VOID
884 KiReleaseDeviceQueueLock(IN PKLOCK_QUEUE_HANDLE DeviceLock)
885 {
886 /* Check if we were called from a threaded DPC */
887 if (KeGetCurrentPrcb()->DpcThreadActive)
888 {
889 /* Unlock the Queue, we're not at DPC level */
890 KeReleaseInStackQueuedSpinLock(DeviceLock);
891 }
892 else
893 {
894 /* We must be at DPC level, release the lock safely */
895 ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
896 KeReleaseInStackQueuedSpinLockFromDpcLevel(DeviceLock);
897 }
898 }
899
900 //
901 // Satisfies the wait of any dispatcher object
902 //
903 #define KiSatisfyObjectWait(Object, Thread) \
904 { \
905 /* Special case for Mutants */ \
906 if ((Object)->Header.Type == MutantObject) \
907 { \
908 /* Decrease the Signal State */ \
909 (Object)->Header.SignalState--; \
910 \
911 /* Check if it's now non-signaled */ \
912 if (!(Object)->Header.SignalState) \
913 { \
914 /* Set the Owner Thread */ \
915 (Object)->OwnerThread = Thread; \
916 \
917 /* Disable APCs if needed */ \
918 Thread->KernelApcDisable = Thread->KernelApcDisable - \
919 (Object)->ApcDisable; \
920 \
921 /* Check if it's abandoned */ \
922 if ((Object)->Abandoned) \
923 { \
924 /* Unabandon it */ \
925 (Object)->Abandoned = FALSE; \
926 \
927 /* Return Status */ \
928 Thread->WaitStatus = STATUS_ABANDONED; \
929 } \
930 \
931 /* Insert it into the Mutant List */ \
932 InsertHeadList(Thread->MutantListHead.Blink, \
933 &(Object)->MutantListEntry); \
934 } \
935 } \
936 else if (((Object)->Header.Type & TIMER_OR_EVENT_TYPE) == \
937 EventSynchronizationObject) \
938 { \
939 /* Synchronization Timers and Events just get un-signaled */ \
940 (Object)->Header.SignalState = 0; \
941 } \
942 else if ((Object)->Header.Type == SemaphoreObject) \
943 { \
944 /* These ones can have multiple states, so we only decrease it */ \
945 (Object)->Header.SignalState--; \
946 } \
947 }
948
949 //
950 // Satisfies the wait of a mutant dispatcher object
951 //
952 #define KiSatisfyMutantWait(Object, Thread) \
953 { \
954 /* Decrease the Signal State */ \
955 (Object)->Header.SignalState--; \
956 \
957 /* Check if it's now non-signaled */ \
958 if (!(Object)->Header.SignalState) \
959 { \
960 /* Set the Owner Thread */ \
961 (Object)->OwnerThread = Thread; \
962 \
963 /* Disable APCs if needed */ \
964 Thread->KernelApcDisable = Thread->KernelApcDisable - \
965 (Object)->ApcDisable; \
966 \
967 /* Check if it's abandoned */ \
968 if ((Object)->Abandoned) \
969 { \
970 /* Unabandon it */ \
971 (Object)->Abandoned = FALSE; \
972 \
973 /* Return Status */ \
974 Thread->WaitStatus = STATUS_ABANDONED; \
975 } \
976 \
977 /* Insert it into the Mutant List */ \
978 InsertHeadList(Thread->MutantListHead.Blink, \
979 &(Object)->MutantListEntry); \
980 } \
981 }
982
983 //
984 // Satisfies the wait of any nonmutant dispatcher object
985 //
986 #define KiSatisfyNonMutantWait(Object) \
987 { \
988 if (((Object)->Header.Type & TIMER_OR_EVENT_TYPE) == \
989 EventSynchronizationObject) \
990 { \
991 /* Synchronization Timers and Events just get un-signaled */ \
992 (Object)->Header.SignalState = 0; \
993 } \
994 else if ((Object)->Header.Type == SemaphoreObject) \
995 { \
996 /* These ones can have multiple states, so we only decrease it */ \
997 (Object)->Header.SignalState--; \
998 } \
999 }
1000
1001 //
1002 // Recalculates the due time
1003 //
1004 FORCEINLINE
1005 PLARGE_INTEGER
1006 KiRecalculateDueTime(IN PLARGE_INTEGER OriginalDueTime,
1007 IN PLARGE_INTEGER DueTime,
1008 IN OUT PLARGE_INTEGER NewDueTime)
1009 {
1010 /* Don't do anything for absolute waits */
1011 if (OriginalDueTime->QuadPart >= 0) return OriginalDueTime;
1012
1013 /* Otherwise, query the interrupt time and recalculate */
1014 NewDueTime->QuadPart = KeQueryInterruptTime();
1015 NewDueTime->QuadPart -= DueTime->QuadPart;
1016 return NewDueTime;
1017 }
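//
// Example (sketch): a wait that gets interrupted (alert, APC) and restarted
// passes its original timeout back through this helper so that only the time
// not yet elapsed is waited again; absolute due times are returned unchanged:
//
//     Interval = KiRecalculateDueTime(OriginalDueTime, &DueTime, &NewDueTime);
//
// (variable names above are illustrative)
//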
1018
1019 //
1020 // Determines whether a waiting thread's kernel stack may be swapped out
1021 //
1022 FORCEINLINE
1023 BOOLEAN
1024 KiCheckThreadStackSwap(IN PKTHREAD Thread,
1025 IN KPROCESSOR_MODE WaitMode)
1026 {
1027 /* Check the required conditions */
1028 if ((WaitMode != KernelMode) &&
1029 (Thread->EnableStackSwap) &&
1030 (Thread->Priority >= (LOW_REALTIME_PRIORITY + 9)))
1031 {
1032 /* We are go for swap */
1033 return TRUE;
1034 }
1035 else
1036 {
1037 /* Don't swap the thread */
1038 return FALSE;
1039 }
1040 }
1041
1042 //
1043 // Adds a thread to the wait list
1044 //
1045 #define KiAddThreadToWaitList(Thread, Swappable) \
1046 { \
1047 /* Make sure it's swappable */ \
1048 if (Swappable) \
1049 { \
1050 /* Insert it into the PRCB's List */ \
1051 InsertTailList(&KeGetCurrentPrcb()->WaitListHead, \
1052 &Thread->WaitListEntry); \
1053 } \
1054 }
1055
1056 //
1057 // Checks if a wait in progress should be interrupted by APCs or an alertable
1058 // state.
1059 //
1060 FORCEINLINE
1061 NTSTATUS
1062 KiCheckAlertability(IN PKTHREAD Thread,
1063 IN BOOLEAN Alertable,
1064 IN KPROCESSOR_MODE WaitMode)
1065 {
1066 /* Check if the wait is alertable */
1067 if (Alertable)
1068 {
1069 /* It is, first check if the thread is alerted in this mode */
1070 if (Thread->Alerted[WaitMode])
1071 {
1072 /* It is, so bail out of the wait */
1073 Thread->Alerted[WaitMode] = FALSE;
1074 return STATUS_ALERTED;
1075 }
1076 else if ((WaitMode != KernelMode) &&
1077 (!IsListEmpty(&Thread->ApcState.ApcListHead[UserMode])))
1078 {
1079 /* It isn't, but this is a user wait with queued user APCs */
1080 Thread->ApcState.UserApcPending = TRUE;
1081 return STATUS_USER_APC;
1082 }
1083 else if (Thread->Alerted[KernelMode])
1084 {
1085 /* It isn't that either, but we're alerted in kernel mode */
1086 Thread->Alerted[KernelMode] = FALSE;
1087 return STATUS_ALERTED;
1088 }
1089 }
1090 else if ((WaitMode != KernelMode) && (Thread->ApcState.UserApcPending))
1091 {
1092 /* Not alertable, but this is a user wait with pending user APCs */
1093 return STATUS_USER_APC;
1094 }
1095
1096 /* Otherwise, we're fine */
1097 return STATUS_WAIT_0;
1098 }
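//
// Example (sketch): the wait routines call this before committing to a wait
// and bail out with the returned status when it is not STATUS_WAIT_0:
//
//     WaitStatus = KiCheckAlertability(Thread, Alertable, WaitMode);
//     if (WaitStatus != STATUS_WAIT_0) return WaitStatus;
//
// (the real callers also drop their locks before returning)
//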
1099
1100 //
1101 // Called from KiCompleteTimer, KiInsertTreeTimer, KeSetSystemTime
1102 // to remove timer entries
1103 // See Windows HPI blog for more information.
1104 FORCEINLINE
1105 VOID
1106 KiRemoveEntryTimer(IN PKTIMER Timer)
1107 {
1108 ULONG Hand;
1109 PKTIMER_TABLE_ENTRY TableEntry;
1110
1111 /* Remove the timer from the timer list and check if it's empty */
1112 Hand = Timer->Header.Hand;
1113 if (RemoveEntryList(&Timer->TimerListEntry))
1114 {
1115 /* Get the respective timer table entry */
1116 TableEntry = &KiTimerTableListHead[Hand];
1117 if (&TableEntry->Entry == TableEntry->Entry.Flink)
1118 {
1119 /* Set the entry to an infinite absolute time */
1120 TableEntry->Time.HighPart = 0xFFFFFFFF;
1121 }
1122 }
1123
1124 /* Clear the list entries on dbg builds so we can tell the timer is gone */
1125 #if DBG
1126 Timer->TimerListEntry.Flink = NULL;
1127 Timer->TimerListEntry.Blink = NULL;
1128 #endif
1129 }
1130
1131 //
1132 // Called by Wait and Queue code to insert a timer for dispatching.
1133 // Also called by KeSetTimerEx to insert a timer from the caller.
1134 //
1135 FORCEINLINE
1136 VOID
1137 KxInsertTimer(IN PKTIMER Timer,
1138 IN ULONG Hand)
1139 {
1140 PKSPIN_LOCK_QUEUE LockQueue;
1141
1142 /* Acquire the lock and release the dispatcher lock */
1143 LockQueue = KiAcquireTimerLock(Hand);
1144 KiReleaseDispatcherLockFromDpcLevel();
1145
1146 /* Try to insert the timer */
1147 if (KiInsertTimerTable(Timer, Hand))
1148 {
1149 /* Complete it */
1150 KiCompleteTimer(Timer, LockQueue);
1151 }
1152 else
1153 {
1154 /* Do nothing, just release the lock */
1155 KiReleaseTimerLock(LockQueue);
1156 }
1157 }
1158
1159 //
1160 // Called by KeSetTimerEx and KiInsertTreeTimer to calculate Due Time
1161 // See the Windows HPI Blog for more information
1162 //
1163 FORCEINLINE
1164 BOOLEAN
1165 KiComputeDueTime(IN PKTIMER Timer,
1166 IN LARGE_INTEGER DueTime,
1167 OUT PULONG Hand)
1168 {
1169 LARGE_INTEGER InterruptTime, SystemTime, DifferenceTime;
1170
1171 /* Convert to relative time if needed */
1172 Timer->Header.Absolute = FALSE;
1173 if (DueTime.HighPart >= 0)
1174 {
1175 /* Get System Time */
1176 KeQuerySystemTime(&SystemTime);
1177
1178 /* Do the conversion */
1179 DifferenceTime.QuadPart = SystemTime.QuadPart - DueTime.QuadPart;
1180
1181 /* Make sure it hasn't already expired */
1182 Timer->Header.Absolute = TRUE;
1183 if (DifferenceTime.HighPart >= 0)
1184 {
1185 /* Cancel everything */
1186 Timer->Header.SignalState = TRUE;
1187 Timer->Header.Hand = 0;
1188 Timer->DueTime.QuadPart = 0;
1189 *Hand = 0;
1190 return FALSE;
1191 }
1192
1193 /* Use the relative difference as the due time */
1194 DueTime = DifferenceTime;
1195 }
1196
1197 /* Get the Interrupt Time */
1198 InterruptTime.QuadPart = KeQueryInterruptTime();
1199
1200 /* Recalculate due time */
1201 Timer->DueTime.QuadPart = InterruptTime.QuadPart - DueTime.QuadPart;
1202
1203 /* Get the handle */
1204 *Hand = KiComputeTimerTableIndex(Timer->DueTime.QuadPart);
1205 Timer->Header.Hand = (UCHAR)*Hand;
1206 Timer->Header.Inserted = TRUE;
1207 return TRUE;
1208 }
1209
1210 //
1211 // Called from Unlink and Queue Insert Code.
1212 // Also called by timer code when canceling an inserted timer.
1213 // Removes a timer from its tree.
1214 //
1215 FORCEINLINE
1216 VOID
1217 KxRemoveTreeTimer(IN PKTIMER Timer)
1218 {
1219 ULONG Hand = Timer->Header.Hand;
1220 PKSPIN_LOCK_QUEUE LockQueue;
1221 PKTIMER_TABLE_ENTRY TimerEntry;
1222
1223 /* Acquire timer lock */
1224 LockQueue = KiAcquireTimerLock(Hand);
1225
1226 /* Set the timer as non-inserted */
1227 Timer->Header.Inserted = FALSE;
1228
1229 /* Remove it from the timer list */
1230 if (RemoveEntryList(&Timer->TimerListEntry))
1231 {
1232 /* Get the entry and check if it's empty */
1233 TimerEntry = &KiTimerTableListHead[Hand];
1234 if (IsListEmpty(&TimerEntry->Entry))
1235 {
1236 /* Clear the time then */
1237 TimerEntry->Time.HighPart = 0xFFFFFFFF;
1238 }
1239 }
1240
1241 /* Release the timer lock */
1242 KiReleaseTimerLock(LockQueue);
1243 }
1244
1245 FORCEINLINE
1246 VOID
1247 KxSetTimerForThreadWait(IN PKTIMER Timer,
1248 IN LARGE_INTEGER Interval,
1249 OUT PULONG Hand)
1250 {
1251 ULONGLONG DueTime;
1252 LARGE_INTEGER InterruptTime, SystemTime, TimeDifference;
1253
1254 /* Check the timer's interval to see if it's absolute */
1255 Timer->Header.Absolute = FALSE;
1256 if (Interval.HighPart >= 0)
1257 {
1258 /* Get the system time and calculate the relative time */
1259 KeQuerySystemTime(&SystemTime);
1260 TimeDifference.QuadPart = SystemTime.QuadPart - Interval.QuadPart;
1261 Timer->Header.Absolute = TRUE;
1262
1263 /* Check if we've already expired */
1264 if (TimeDifference.HighPart >= 0)
1265 {
1266 /* Reset everything */
1267 Timer->DueTime.QuadPart = 0;
1268 *Hand = 0;
1269 Timer->Header.Hand = 0;
1270 return;
1271 }
1272 else
1273 {
1274 /* Update the interval */
1275 Interval = TimeDifference;
1276 }
1277 }
1278
1279 /* Calculate the due time */
1280 InterruptTime.QuadPart = KeQueryInterruptTime();
1281 DueTime = InterruptTime.QuadPart - Interval.QuadPart;
1282 Timer->DueTime.QuadPart = DueTime;
1283
1284 /* Calculate the timer handle */
1285 *Hand = KiComputeTimerTableIndex(DueTime);
1286 Timer->Header.Hand = (UCHAR)*Hand;
1287 }
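//
// Worked example (illustrative): relative timeouts are negative counts of
// 100ns units, so a one-second relative wait passes Interval = -10000000.
// With the current interrupt time T, the code above computes
//
//     DueTime = T - (-10000000) = T + 10000000
//
// i.e. "one second from now" in interrupt time, which is then hashed into a
// timer table slot by KiComputeTimerTableIndex().
//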
1288
1289 #define KxDelayThreadWait() \
1290 \
1291 /* Setup the Wait Block */ \
1292 Thread->WaitBlockList = TimerBlock; \
1293 \
1294 /* Setup the timer */ \
1295 KxSetTimerForThreadWait(Timer, *Interval, &Hand); \
1296 \
1297 /* Save the due time for the caller */ \
1298 DueTime.QuadPart = Timer->DueTime.QuadPart; \
1299 \
1300 /* Link the timer to this Wait Block */ \
1301 TimerBlock->NextWaitBlock = TimerBlock; \
1302 Timer->Header.WaitListHead.Flink = &TimerBlock->WaitListEntry; \
1303 Timer->Header.WaitListHead.Blink = &TimerBlock->WaitListEntry; \
1304 \
1305 /* Clear wait status */ \
1306 Thread->WaitStatus = STATUS_SUCCESS; \
1307 \
1308 /* Setup wait fields */ \
1309 Thread->Alertable = Alertable; \
1310 Thread->WaitReason = DelayExecution; \
1311 Thread->WaitMode = WaitMode; \
1312 \
1313 /* Check if we can swap the thread's stack */ \
1314 Thread->WaitListEntry.Flink = NULL; \
1315 Swappable = KiCheckThreadStackSwap(Thread, WaitMode); \
1316 \
1317 /* Set the wait time */ \
1318 Thread->WaitTime = KeTickCount.LowPart;
1319
1320 #define KxMultiThreadWait() \
1321 /* Link wait block array to the thread */ \
1322 Thread->WaitBlockList = WaitBlockArray; \
1323 \
1324 /* Reset the index */ \
1325 Index = 0; \
1326 \
1327 /* Loop wait blocks */ \
1328 do \
1329 { \
1330 /* Fill out the wait block */ \
1331 WaitBlock = &WaitBlockArray[Index]; \
1332 WaitBlock->Object = Object[Index]; \
1333 WaitBlock->WaitKey = (USHORT)Index; \
1334 WaitBlock->WaitType = WaitType; \
1335 WaitBlock->Thread = Thread; \
1336 \
1337 /* Link to next block */ \
1338 WaitBlock->NextWaitBlock = &WaitBlockArray[Index + 1]; \
1339 Index++; \
1340 } while (Index < Count); \
1341 \
1342 /* Link the last block */ \
1343 WaitBlock->NextWaitBlock = WaitBlockArray; \
1344 \
1345 /* Set default wait status */ \
1346 Thread->WaitStatus = STATUS_WAIT_0; \
1347 \
1348 /* Check if we have a timer */ \
1349 if (Timeout) \
1350 { \
1351 /* Link to the block */ \
1352 TimerBlock->NextWaitBlock = WaitBlockArray; \
1353 \
1354 /* Setup the timer */ \
1355 KxSetTimerForThreadWait(Timer, *Timeout, &Hand); \
1356 \
1357 /* Save the due time for the caller */ \
1358 DueTime.QuadPart = Timer->DueTime.QuadPart; \
1359 \
1360 /* Initialize the list */ \
1361 InitializeListHead(&Timer->Header.WaitListHead); \
1362 } \
1363 \
1364 /* Set wait settings */ \
1365 Thread->Alertable = Alertable; \
1366 Thread->WaitMode = WaitMode; \
1367 Thread->WaitReason = WaitReason; \
1368 \
1369 /* Check if we can swap the thread's stack */ \
1370 Thread->WaitListEntry.Flink = NULL; \
1371 Swappable = KiCheckThreadStackSwap(Thread, WaitMode); \
1372 \
1373 /* Set the wait time */ \
1374 Thread->WaitTime = KeTickCount.LowPart;
1375
1376 #define KxSingleThreadWait() \
1377 /* Setup the Wait Block */ \
1378 Thread->WaitBlockList = WaitBlock; \
1379 WaitBlock->WaitKey = STATUS_SUCCESS; \
1380 WaitBlock->Object = Object; \
1381 WaitBlock->WaitType = WaitAny; \
1382 \
1383 /* Clear wait status */ \
1384 Thread->WaitStatus = STATUS_SUCCESS; \
1385 \
1386 /* Check if we have a timer */ \
1387 if (Timeout) \
1388 { \
1389 /* Setup the timer */ \
1390 KxSetTimerForThreadWait(Timer, *Timeout, &Hand); \
1391 \
1392 /* Save the due time for the caller */ \
1393 DueTime.QuadPart = Timer->DueTime.QuadPart; \
1394 \
1395 /* Pointer to timer block */ \
1396 WaitBlock->NextWaitBlock = TimerBlock; \
1397 TimerBlock->NextWaitBlock = WaitBlock; \
1398 \
1399 /* Link the timer to this Wait Block */ \
1400 Timer->Header.WaitListHead.Flink = &TimerBlock->WaitListEntry; \
1401 Timer->Header.WaitListHead.Blink = &TimerBlock->WaitListEntry; \
1402 } \
1403 else \
1404 { \
1405 /* No timer block, just ourselves */ \
1406 WaitBlock->NextWaitBlock = WaitBlock; \
1407 } \
1408 \
1409 /* Set wait settings */ \
1410 Thread->Alertable = Alertable; \
1411 Thread->WaitMode = WaitMode; \
1412 Thread->WaitReason = WaitReason; \
1413 \
1414 /* Check if we can swap the thread's stack */ \
1415 Thread->WaitListEntry.Flink = NULL; \
1416 Swappable = KiCheckThreadStackSwap(Thread, WaitMode); \
1417 \
1418 /* Set the wait time */ \
1419 Thread->WaitTime = KeTickCount.LowPart;
1420
1421 #define KxQueueThreadWait() \
1422 /* Setup the Wait Block */ \
1423 Thread->WaitBlockList = WaitBlock; \
1424 WaitBlock->WaitKey = STATUS_SUCCESS; \
1425 WaitBlock->Object = Queue; \
1426 WaitBlock->WaitType = WaitAny; \
1427 WaitBlock->Thread = Thread; \
1428 \
1429 /* Clear wait status */ \
1430 Thread->WaitStatus = STATUS_SUCCESS; \
1431 \
1432 /* Check if we have a timer */ \
1433 if (Timeout) \
1434 { \
1435 /* Setup the timer */ \
1436 KxSetTimerForThreadWait(Timer, *Timeout, &Hand); \
1437 \
1438 /* Save the due time for the caller */ \
1439 DueTime.QuadPart = Timer->DueTime.QuadPart; \
1440 \
1441 /* Pointer to timer block */ \
1442 WaitBlock->NextWaitBlock = TimerBlock; \
1443 TimerBlock->NextWaitBlock = WaitBlock; \
1444 \
1445 /* Link the timer to this Wait Block */ \
1446 Timer->Header.WaitListHead.Flink = &TimerBlock->WaitListEntry; \
1447 Timer->Header.WaitListHead.Blink = &TimerBlock->WaitListEntry; \
1448 } \
1449 else \
1450 { \
1451 /* No timer block, just ourselves */ \
1452 WaitBlock->NextWaitBlock = WaitBlock; \
1453 } \
1454 \
1455 /* Set wait settings */ \
1456 Thread->Alertable = FALSE; \
1457 Thread->WaitMode = WaitMode; \
1458 Thread->WaitReason = WrQueue; \
1459 \
1460 /* Check if we can swap the thread's stack */ \
1461 Thread->WaitListEntry.Flink = NULL; \
1462 Swappable = KiCheckThreadStackSwap(Thread, WaitMode); \
1463 \
1464 /* Set the wait time */ \
1465 Thread->WaitTime = KeTickCount.LowPart;
1466
1467 //
1468 // Unwaits a Thread
1469 //
1470 FORCEINLINE
1471 VOID
1472 KxUnwaitThread(IN DISPATCHER_HEADER *Object,
1473 IN KPRIORITY Increment)
1474 {
1475 PLIST_ENTRY WaitEntry, WaitList;
1476 PKWAIT_BLOCK WaitBlock;
1477 PKTHREAD WaitThread;
1478 ULONG WaitKey;
1479
1480 /* Loop the Wait Entries */
1481 WaitList = &Object->WaitListHead;
1482 ASSERT(IsListEmpty(&Object->WaitListHead) == FALSE);
1483 WaitEntry = WaitList->Flink;
1484 do
1485 {
1486 /* Get the current wait block */
1487 WaitBlock = CONTAINING_RECORD(WaitEntry, KWAIT_BLOCK, WaitListEntry);
1488
1489 /* Get the waiting thread */
1490 WaitThread = WaitBlock->Thread;
1491
1492 /* Check the current Wait Mode */
1493 if (WaitBlock->WaitType == WaitAny)
1494 {
1495 /* Use the actual wait key */
1496 WaitKey = WaitBlock->WaitKey;
1497 }
1498 else
1499 {
1500 /* Otherwise, use STATUS_KERNEL_APC */
1501 WaitKey = STATUS_KERNEL_APC;
1502 }
1503
1504 /* Unwait the thread */
1505 KiUnwaitThread(WaitThread, WaitKey, Increment);
1506
1507 /* Next entry */
1508 WaitEntry = WaitList->Flink;
1509 } while (WaitEntry != WaitList);
1510 }
1511
1512 //
1513 // Unwaits a Thread waiting on an event
1514 //
1515 FORCEINLINE
1516 VOID
1517 KxUnwaitThreadForEvent(IN PKEVENT Event,
1518 IN KPRIORITY Increment)
1519 {
1520 PLIST_ENTRY WaitEntry, WaitList;
1521 PKWAIT_BLOCK WaitBlock;
1522 PKTHREAD WaitThread;
1523
1524 /* Loop the Wait Entries */
1525 WaitList = &Event->Header.WaitListHead;
1526 ASSERT(IsListEmpty(&Event->Header.WaitListHead) == FALSE);
1527 WaitEntry = WaitList->Flink;
1528 do
1529 {
1530 /* Get the current wait block */
1531 WaitBlock = CONTAINING_RECORD(WaitEntry, KWAIT_BLOCK, WaitListEntry);
1532
1533 /* Get the waiting thread */
1534 WaitThread = WaitBlock->Thread;
1535
1536 /* Check the current Wait Mode */
1537 if (WaitBlock->WaitType == WaitAny)
1538 {
1539 /* Un-signal it */
1540 Event->Header.SignalState = 0;
1541
1542 /* Un-signal the event and unwait the thread */
1543 KiUnwaitThread(WaitThread, WaitBlock->WaitKey, Increment);
1544 break;
1545 }
1546
1547 /* Unwait the thread with STATUS_KERNEL_APC */
1548 KiUnwaitThread(WaitThread, STATUS_KERNEL_APC, Increment);
1549
1550 /* Next entry */
1551 WaitEntry = WaitList->Flink;
1552 } while (WaitEntry != WaitList);
1553 }
1554
1555 //
1556 // This routine queues a thread that is ready on the PRCB's ready lists.
1557 // If this thread cannot currently run on this CPU, then the thread is
1558 // added to the deferred ready list instead.
1559 //
1560 // This routine must be entered with the PRCB lock held and it will exit
1561 // with the PRCB lock released!
1562 //
1563 FORCEINLINE
1564 VOID
1565 KxQueueReadyThread(IN PKTHREAD Thread,
1566 IN PKPRCB Prcb)
1567 {
1568 BOOLEAN Preempted;
1569 KPRIORITY Priority;
1570
1571 /* Sanity checks */
1572 ASSERT(Prcb == KeGetCurrentPrcb());
1573 ASSERT(Thread->State == Running);
1574 ASSERT(Thread->NextProcessor == Prcb->Number);
1575
1576 /* Check if this thread is allowed to run in this CPU */
1577 #ifdef CONFIG_SMP
1578 if ((Thread->Affinity) & (Prcb->SetMember))
1579 #else
1580 if (TRUE)
1581 #endif
1582 {
1583 /* Set thread ready for execution */
1584 Thread->State = Ready;
1585
1586 /* Save current priority and if someone had pre-empted it */
1587 Priority = Thread->Priority;
1588 Preempted = Thread->Preempted;
1589
1590 /* Clear the preempted flag and record the wait time */
1591 Thread->Preempted = FALSE;
1592 Thread->WaitTime = KeTickCount.LowPart;
1593
1594 /* Sanity check */
1595 ASSERT((Priority >= 0) && (Priority <= HIGH_PRIORITY));
1596
1597 /* Insert this thread in the appropriate order */
1598 Preempted ? InsertHeadList(&Prcb->DispatcherReadyListHead[Priority],
1599 &Thread->WaitListEntry) :
1600 InsertTailList(&Prcb->DispatcherReadyListHead[Priority],
1601 &Thread->WaitListEntry);
1602
1603 /* Update the ready summary */
1604 Prcb->ReadySummary |= PRIORITY_MASK(Priority);
1605
1606 /* Sanity check */
1607 ASSERT(Priority == Thread->Priority);
1608
1609 /* Release the PRCB lock */
1610 KiReleasePrcbLock(Prcb);
1611 }
1612 else
1613 {
1614 /* Otherwise, prepare this thread to be deferred */
1615 Thread->State = DeferredReady;
1616 Thread->DeferredProcessor = Prcb->Number;
1617
1618 /* Release the lock and defer scheduling */
1619 KiReleasePrcbLock(Prcb);
1620 KiDeferredReadyThread(Thread);
1621 }
1622 }
1623
1624 //
1625 // This routine scans for an appropriate ready thread to select at the
1626 // given priority and for the given CPU.
1627 //
1628 FORCEINLINE
1629 PKTHREAD
1630 KiSelectReadyThread(IN KPRIORITY Priority,
1631 IN PKPRCB Prcb)
1632 {
1633 ULONG PrioritySet;
1634 LONG HighPriority;
1635 PLIST_ENTRY ListEntry;
1636 PKTHREAD Thread = NULL;
1637
1638 /* Save the current mask and get the priority set for the CPU */
1639 PrioritySet = Prcb->ReadySummary >> Priority;
1640 if (!PrioritySet) goto Quickie;
1641
1642 /* Get the highest priority possible */
1643 BitScanReverse((PULONG)&HighPriority, PrioritySet);
1644 ASSERT((PrioritySet & PRIORITY_MASK(HighPriority)) != 0);
1645 HighPriority += Priority;
1646
1647 /* Make sure the list isn't empty at the highest priority */
1648 ASSERT(IsListEmpty(&Prcb->DispatcherReadyListHead[HighPriority]) == FALSE);
1649
1650 /* Get the first thread on the list */
1651 ListEntry = Prcb->DispatcherReadyListHead[HighPriority].Flink;
1652 Thread = CONTAINING_RECORD(ListEntry, KTHREAD, WaitListEntry);
1653
1654 /* Make sure this thread is here for a reason */
1655 ASSERT(HighPriority == Thread->Priority);
1656 ASSERT(Thread->Affinity & AFFINITY_MASK(Prcb->Number));
1657 ASSERT(Thread->NextProcessor == Prcb->Number);
1658
1659 /* Remove it from the list */
1660 if (RemoveEntryList(&Thread->WaitListEntry))
1661 {
1662 /* The list is empty now, reset the ready summary */
1663 Prcb->ReadySummary ^= PRIORITY_MASK(HighPriority);
1664 }
1665
1666 /* Sanity check and return the thread */
1667 Quickie:
1668 ASSERT((Thread == NULL) ||
1669 (Thread->BasePriority == 0) ||
1670 (Thread->Priority != 0));
1671 return Thread;
1672 }
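//
// Worked example (illustrative): with ReadySummary = 0x104 (threads ready at
// priorities 2 and 8) and a minimum Priority of 3, PrioritySet is
// 0x104 >> 3 = 0x20, BitScanReverse() finds bit 5, and 5 + 3 = 8 is the
// highest eligible ready queue that gets scanned.
//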
1673
1674 //
1675 // This routine computes the new priority for a thread. It is only valid for
1676 // threads with priorities in the dynamic priority range.
1677 //
1678 FORCEINLINE
1679 SCHAR
1680 KiComputeNewPriority(IN PKTHREAD Thread,
1681 IN SCHAR Adjustment)
1682 {
1683 SCHAR Priority;
1684
1685 /* Priority sanity checks */
1686 ASSERT((Thread->PriorityDecrement >= 0) &&
1687 (Thread->PriorityDecrement <= Thread->Priority));
1688 ASSERT((Thread->Priority < LOW_REALTIME_PRIORITY) ?
1689 TRUE : (Thread->PriorityDecrement == 0));
1690
1691 /* Get the current priority */
1692 Priority = Thread->Priority;
1693 if (Priority < LOW_REALTIME_PRIORITY)
1694 {
1695 /* Decrease priority by the priority decrement */
1696 Priority -= (Thread->PriorityDecrement + Adjustment);
1697
1698 /* Don't go out of bounds */
1699 if (Priority < Thread->BasePriority) Priority = Thread->BasePriority;
1700
1701 /* Reset the priority decrement */
1702 Thread->PriorityDecrement = 0;
1703 }
1704
1705 /* Sanity check */
1706 ASSERT((Thread->BasePriority == 0) || (Priority != 0));
1707
1708 /* Return the new priority */
1709 return Priority;
1710 }
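//
// Worked example (illustrative): a dynamic-priority thread at Priority 10 with
// PriorityDecrement 2 and Adjustment 1 drops to 10 - (2 + 1) = 7; if its
// BasePriority is 8, the result is clamped back up to 8 and the decrement is
// reset. Real-time priorities (>= LOW_REALTIME_PRIORITY) are returned as-is.
//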
1711
1712 //
1713 // Guarded Mutex Routines
1714 //
1715 FORCEINLINE
1716 VOID
1717 _KeInitializeGuardedMutex(OUT PKGUARDED_MUTEX GuardedMutex)
1718 {
1719 /* Setup the Initial Data */
1720 GuardedMutex->Count = GM_LOCK_BIT;
1721 GuardedMutex->Owner = NULL;
1722 GuardedMutex->Contention = 0;
1723
1724 /* Initialize the Wait Gate */
1725 KeInitializeGate(&GuardedMutex->Gate);
1726 }
1727
1728 FORCEINLINE
1729 VOID
1730 _KeAcquireGuardedMutexUnsafe(IN OUT PKGUARDED_MUTEX GuardedMutex)
1731 {
1732 PKTHREAD Thread = KeGetCurrentThread();
1733
1734 /* Sanity checks */
1735 ASSERT((KeGetCurrentIrql() == APC_LEVEL) ||
1736 (Thread->SpecialApcDisable < 0) ||
1737 (Thread->Teb == NULL) ||
1738 (Thread->Teb >= (PTEB)MM_SYSTEM_RANGE_START));
1739 ASSERT(GuardedMutex->Owner != Thread);
1740
1741 /* Remove the lock */
1742 if (!InterlockedBitTestAndReset(&GuardedMutex->Count, GM_LOCK_BIT_V))
1743 {
1744 /* The Guarded Mutex was already locked, enter the contended case */
1745 KiAcquireGuardedMutex(GuardedMutex);
1746 }
1747
1748 /* Set the Owner */
1749 GuardedMutex->Owner = Thread;
1750 }
1751
1752 FORCEINLINE
1753 VOID
1754 _KeReleaseGuardedMutexUnsafe(IN OUT PKGUARDED_MUTEX GuardedMutex)
1755 {
1756 LONG OldValue, NewValue;
1757
1758 /* Sanity checks */
1759 ASSERT((KeGetCurrentIrql() == APC_LEVEL) ||
1760 (KeGetCurrentThread()->SpecialApcDisable < 0) ||
1761 (KeGetCurrentThread()->Teb == NULL) ||
1762 (KeGetCurrentThread()->Teb >= (PTEB)MM_SYSTEM_RANGE_START));
1763 ASSERT(GuardedMutex->Owner == KeGetCurrentThread());
1764
1765 /* Destroy the Owner */
1766 GuardedMutex->Owner = NULL;
1767
1768 /* Add the Lock Bit */
1769 OldValue = InterlockedExchangeAdd(&GuardedMutex->Count, GM_LOCK_BIT);
1770 ASSERT((OldValue & GM_LOCK_BIT) == 0);
1771
1772 /* Check if it was already locked, but not woken */
1773 if ((OldValue) && !(OldValue & GM_LOCK_WAITER_WOKEN))
1774 {
1775 /* Update OldValue to what it should be now */
1776 OldValue += GM_LOCK_BIT;
1777
1778 /* The mutex will be woken, minus one waiter */
1779 NewValue = OldValue + GM_LOCK_WAITER_WOKEN -
1780 GM_LOCK_WAITER_INC;
1781
1782 /* Remove the Woken bit */
1783 if (InterlockedCompareExchange(&GuardedMutex->Count,
1784 NewValue,
1785 OldValue) == OldValue)
1786 {
1787 /* Signal the Gate */
1788 KeSignalGateBoostPriority(&GuardedMutex->Gate);
1789 }
1790 }
1791 }
1792
1793 FORCEINLINE
1794 VOID
1795 _KeAcquireGuardedMutex(IN PKGUARDED_MUTEX GuardedMutex)
1796 {
1797 PKTHREAD Thread = KeGetCurrentThread();
1798
1799 /* Sanity checks */
1800 ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
1801 ASSERT(GuardedMutex->Owner != Thread);
1802
1803 /* Disable Special APCs */
1804 KeEnterGuardedRegion();
1805
1806 /* Remove the lock */
1807 if (!InterlockedBitTestAndReset(&GuardedMutex->Count, GM_LOCK_BIT_V))
1808 {
1809 /* The Guarded Mutex was already locked, enter the contended case */
1810 KiAcquireGuardedMutex(GuardedMutex);
1811 }
1812
1813 /* Set the Owner and Special APC Disable state */
1814 GuardedMutex->Owner = Thread;
1815 GuardedMutex->SpecialApcDisable = Thread->SpecialApcDisable;
1816 }
1817
1818 FORCEINLINE
1819 VOID
1820 _KeReleaseGuardedMutex(IN OUT PKGUARDED_MUTEX GuardedMutex)
1821 {
1822 LONG OldValue, NewValue;
1823
1824 /* Sanity checks */
1825 ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
1826 ASSERT(GuardedMutex->Owner == KeGetCurrentThread());
1827 ASSERT(KeGetCurrentThread()->SpecialApcDisable ==
1828 GuardedMutex->SpecialApcDisable);
1829
1830 /* Destroy the Owner */
1831 GuardedMutex->Owner = NULL;
1832
1833 /* Add the Lock Bit */
1834 OldValue = InterlockedExchangeAdd(&GuardedMutex->Count, GM_LOCK_BIT);
1835 ASSERT((OldValue & GM_LOCK_BIT) == 0);
1836
1837 /* Check if it was already locked, but not woken */
1838 if ((OldValue) && !(OldValue & GM_LOCK_WAITER_WOKEN))
1839 {
1840 /* Update OldValue to what it should be now */
1841 OldValue += GM_LOCK_BIT;
1842
1843 /* The mutex will be woken, minus one waiter */
1844 NewValue = OldValue + GM_LOCK_WAITER_WOKEN -
1845 GM_LOCK_WAITER_INC;
1846
1847 /* Remove the Woken bit */
1848 if (InterlockedCompareExchange(&GuardedMutex->Count,
1849 NewValue,
1850 OldValue) == OldValue)
1851 {
1852 /* Signal the Gate */
1853 KeSignalGateBoostPriority(&GuardedMutex->Gate);
1854 }
1855 }
1856
1857 /* Re-enable APCs */
1858 KeLeaveGuardedRegion();
1859 }
1860
1861 FORCEINLINE
1862 BOOLEAN
1863 _KeTryToAcquireGuardedMutex(IN OUT PKGUARDED_MUTEX GuardedMutex)
1864 {
1865 PKTHREAD Thread = KeGetCurrentThread();
1866
1867 /* Block APCs */
1868 KeEnterGuardedRegion();
1869
1870 /* Remove the lock */
1871 if (!InterlockedBitTestAndReset(&GuardedMutex->Count, GM_LOCK_BIT_V))
1872 {
1873 /* Re-enable APCs */
1874 KeLeaveGuardedRegion();
1875 YieldProcessor();
1876
1877 /* Return failure */
1878 return FALSE;
1879 }
1880
1881 /* Set the Owner and APC State */
1882 GuardedMutex->Owner = Thread;
1883 GuardedMutex->SpecialApcDisable = Thread->SpecialApcDisable;
1884 return TRUE;
1885 }
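//
// Example (sketch): the inlines above back the exported guarded mutex API,
// which is used much like a fast mutex:
//
//     KeAcquireGuardedMutex(&SomeGuardedMutex);
//     /* ... access the data the mutex protects ... */
//     KeReleaseGuardedMutex(&SomeGuardedMutex);
//
// Acquisition implicitly enters a guarded region (all APCs disabled) and
// release leaves it again; SomeGuardedMutex is a hypothetical name.
//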