1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: GPL - See COPYING in the top level directory
4 * FILE: ntoskrnl/include/internal/ke_x.h
5 * PURPOSE: Internal Inlined Functions for the Kernel
6 * PROGRAMMERS: Alex Ionescu (alex.ionescu@reactos.org)
7 */
8
9 #ifndef _M_ARM
10 FORCEINLINE
11 KPROCESSOR_MODE
12 KeGetPreviousMode(VOID)
13 {
14 /* Return the current mode */
15 return KeGetCurrentThread()->PreviousMode;
16 }
17 #endif
18
19 //
20 // Enters a Guarded Region
21 //
22 #define KeEnterGuardedRegionThread(_Thread) \
23 { \
24 /* Sanity checks */ \
25 ASSERT(KeGetCurrentIrql() <= APC_LEVEL); \
26 ASSERT(_Thread == KeGetCurrentThread()); \
27 ASSERT((_Thread->SpecialApcDisable <= 0) && \
28 (_Thread->SpecialApcDisable != -32768)); \
29 \
30 /* Disable Special APCs */ \
31 _Thread->SpecialApcDisable--; \
32 }
33
34 #define KeEnterGuardedRegion() \
35 { \
36 PKTHREAD _Thread = KeGetCurrentThread(); \
37 KeEnterGuardedRegionThread(_Thread); \
38 }
39
40 //
41 // Leaves a Guarded Region
42 //
43 #define KeLeaveGuardedRegionThread(_Thread) \
44 { \
45 /* Sanity checks */ \
46 ASSERT(KeGetCurrentIrql() <= APC_LEVEL); \
47 ASSERT(_Thread == KeGetCurrentThread()); \
48 ASSERT(_Thread->SpecialApcDisable < 0); \
49 \
50 /* Leave region and check if APCs are OK now */ \
51 if (!(++_Thread->SpecialApcDisable)) \
52 { \
53 /* Check for Kernel APCs on the list */ \
54 if (!IsListEmpty(&_Thread->ApcState. \
55 ApcListHead[KernelMode])) \
56 { \
57 /* Check for APC Delivery */ \
58 KiCheckForKernelApcDelivery(); \
59 } \
60 } \
61 }
62
63 #define KeLeaveGuardedRegion() \
64 { \
65 PKTHREAD _Thread = KeGetCurrentThread(); \
66 KeLeaveGuardedRegionThread(_Thread); \
67 }
68
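//
// A minimal usage sketch, assuming a caller running at or below APC_LEVEL:
//
//     KeEnterGuardedRegion();   /* SpecialApcDisable--, blocks all kernel APCs */
//     /* ... work that not even special kernel APCs may interrupt ... */
//     KeLeaveGuardedRegion();   /* delivers queued kernel APCs once re-enabled */
//
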
69 //
70 // Enters a Critical Region
71 //
72 #define KeEnterCriticalRegionThread(_Thread) \
73 { \
74 /* Sanity checks */ \
75 ASSERT(_Thread == KeGetCurrentThread()); \
76 ASSERT((_Thread->KernelApcDisable <= 0) && \
77 (_Thread->KernelApcDisable != -32768)); \
78 \
79 /* Disable Kernel APCs */ \
80 _Thread->KernelApcDisable--; \
81 }
82
83 #define KeEnterCriticalRegion() \
84 { \
85 PKTHREAD _Thread = KeGetCurrentThread(); \
86 KeEnterCriticalRegionThread(_Thread); \
87 }
88
89 //
90 // Leaves a Critical Region
91 //
92 #define KeLeaveCriticalRegionThread(_Thread) \
93 { \
94 /* Sanity checks */ \
95 ASSERT(_Thread == KeGetCurrentThread()); \
96 ASSERT(_Thread->KernelApcDisable < 0); \
97 \
98 /* Enable Kernel APCs */ \
99 _Thread->KernelApcDisable++; \
100 \
101 /* Check if Kernel APCs are now enabled */ \
102 if (!(_Thread->KernelApcDisable)) \
103 { \
104 /* Check if we need to request an APC Delivery */ \
105 if (!(IsListEmpty(&_Thread->ApcState.ApcListHead[KernelMode])) && \
106 !(_Thread->SpecialApcDisable)) \
107 { \
108 /* Check for the right environment */ \
109 KiCheckForKernelApcDelivery(); \
110 } \
111 } \
112 }
113
114 #define KeLeaveCriticalRegion() \
115 { \
116 PKTHREAD _Thread = KeGetCurrentThread(); \
117 KeLeaveCriticalRegionThread(_Thread); \
118 }
119
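//
// Unlike the guarded-region pair above, a critical region only touches
// KernelApcDisable, so special kernel APCs can still be delivered inside it.
// A rough usage sketch:
//
//     KeEnterCriticalRegion();  /* blocks normal kernel APCs only */
//     /* ... code that must not be re-entered by a normal kernel APC ... */
//     KeLeaveCriticalRegion();  /* may deliver pending kernel APCs on exit */
//
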
120 #ifndef CONFIG_SMP
121
122 //
123 // This routine protects against multiple CPU acquires, it's meaningless on UP.
124 //
125 FORCEINLINE
126 VOID
127 KiAcquireDispatcherObject(IN DISPATCHER_HEADER* Object)
128 {
129 UNREFERENCED_PARAMETER(Object);
130 }
131
132 //
133 // This routine protects against multiple CPU acquires, it's meaningless on UP.
134 //
135 FORCEINLINE
136 VOID
137 KiReleaseDispatcherObject(IN DISPATCHER_HEADER* Object)
138 {
139 UNREFERENCED_PARAMETER(Object);
140 }
141
142 FORCEINLINE
143 KIRQL
144 KiAcquireDispatcherLock(VOID)
145 {
146 /* Raise to synch level */
147 return KfRaiseIrql(SYNCH_LEVEL);
148 }
149
150 FORCEINLINE
151 VOID
152 KiReleaseDispatcherLock(IN KIRQL OldIrql)
153 {
154 /* Just exit the dispatcher */
155 KiExitDispatcher(OldIrql);
156 }
157
158 FORCEINLINE
159 VOID
160 KiAcquireDispatcherLockAtDpcLevel(VOID)
161 {
162 /* This is a no-op at DPC Level for UP systems */
163 return;
164 }
165
166 FORCEINLINE
167 VOID
168 KiReleaseDispatcherLockFromDpcLevel(VOID)
169 {
170 /* This is a no-op at DPC Level for UP systems */
171 return;
172 }
173
174 //
175 // This routine makes the thread deferred ready on the boot CPU.
176 //
177 FORCEINLINE
178 VOID
179 KiInsertDeferredReadyList(IN PKTHREAD Thread)
180 {
181 /* Set the thread to deferred state and boot CPU */
182 Thread->State = DeferredReady;
183 Thread->DeferredProcessor = 0;
184
185 /* Make the thread ready immediately */
186 KiDeferredReadyThread(Thread);
187 }
188
189 FORCEINLINE
190 VOID
191 KiRescheduleThread(IN BOOLEAN NewThread,
192 IN ULONG Cpu)
193 {
194 /* This is meaningless on UP systems */
195 UNREFERENCED_PARAMETER(NewThread);
196 UNREFERENCED_PARAMETER(Cpu);
197 }
198
199 //
200 // This routine protects against multiple CPU acquires, it's meaningless on UP.
201 //
202 FORCEINLINE
203 VOID
204 KiSetThreadSwapBusy(IN PKTHREAD Thread)
205 {
206 UNREFERENCED_PARAMETER(Thread);
207 }
208
209 //
210 // This routine protects against multiple CPU acquires, it's meaningless on UP.
211 //
212 FORCEINLINE
213 VOID
214 KiAcquirePrcbLock(IN PKPRCB Prcb)
215 {
216 UNREFERENCED_PARAMETER(Prcb);
217 }
218
219 //
220 // This routine protects against multiple CPU acquires, it's meaningless on UP.
221 //
222 FORCEINLINE
223 VOID
224 KiReleasePrcbLock(IN PKPRCB Prcb)
225 {
226 UNREFERENCED_PARAMETER(Prcb);
227 }
228
229 //
230 // This routine protects against multiple CPU acquires, it's meaningless on UP.
231 //
232 FORCEINLINE
233 VOID
234 KiAcquireThreadLock(IN PKTHREAD Thread)
235 {
236 UNREFERENCED_PARAMETER(Thread);
237 }
238
239 //
240 // This routine protects against multiple CPU acquires, it's meaningless on UP.
241 //
242 FORCEINLINE
243 VOID
244 KiReleaseThreadLock(IN PKTHREAD Thread)
245 {
246 UNREFERENCED_PARAMETER(Thread);
247 }
248
249 //
250 // This routine protects against multiple CPU acquires, it's meaningless on UP.
251 //
252 FORCEINLINE
253 BOOLEAN
254 KiTryThreadLock(IN PKTHREAD Thread)
255 {
256 UNREFERENCED_PARAMETER(Thread);
257 return FALSE;
258 }
259
260 FORCEINLINE
261 VOID
262 KiCheckDeferredReadyList(IN PKPRCB Prcb)
263 {
264 /* There are no deferred ready lists on UP systems */
265 UNREFERENCED_PARAMETER(Prcb);
266 }
267
268 FORCEINLINE
269 VOID
270 KiRequestApcInterrupt(IN BOOLEAN NeedApc,
271 IN UCHAR Processor)
272 {
273 /* We deliver instantly on UP */
274 UNREFERENCED_PARAMETER(NeedApc);
275 UNREFERENCED_PARAMETER(Processor);
276 }
277
278 FORCEINLINE
279 PKSPIN_LOCK_QUEUE
280 KiAcquireTimerLock(IN ULONG Hand)
281 {
282 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
283
284 /* Nothing to do on UP */
285 UNREFERENCED_PARAMETER(Hand);
286 return NULL;
287 }
288
289 FORCEINLINE
290 VOID
291 KiReleaseTimerLock(IN PKSPIN_LOCK_QUEUE LockQueue)
292 {
293 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
294
295 /* Nothing to do on UP */
296 UNREFERENCED_PARAMETER(LockQueue);
297 }
298
299 #else
300
301 FORCEINLINE
302 VOID
303 KiAcquireDispatcherObject(IN DISPATCHER_HEADER* Object)
304 {
305 LONG OldValue;
306
307 /* Make sure we're at a safe level to touch the lock */
308 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
309
310 /* Start acquire loop */
311 do
312 {
313 /* Loop until the other CPU releases it */
314 while (TRUE)
315 {
316 /* Check if it got released */
317 OldValue = Object->Lock;
318 if ((OldValue & KOBJECT_LOCK_BIT) == 0) break;
319
320 /* Let the CPU know that this is a loop */
321 YieldProcessor();
322 }
323
324 /* Try acquiring the lock now */
325 } while (InterlockedCompareExchange(&Object->Lock,
326 OldValue | KOBJECT_LOCK_BIT,
327 OldValue) != OldValue);
328 }
329
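//
// Note on the acquire loop above: it is a test-and-test-and-set spin. The
// lock word is read with plain loads until KOBJECT_LOCK_BIT clears, and only
// then is the interlocked compare-exchange attempted, which avoids hammering
// the cache line with locked operations while spinning; YieldProcessor() is
// the usual pause hint for the benefit of SMT siblings.
//
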
330 FORCEINLINE
331 VOID
332 KiReleaseDispatcherObject(IN DISPATCHER_HEADER* Object)
333 {
334 /* Make sure we're at a safe level to touch the lock */
335 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
336
337 /* Release it */
338 InterlockedAnd(&Object->Lock, ~KOBJECT_LOCK_BIT);
339 }
340
341 FORCEINLINE
342 KIRQL
343 KiAcquireDispatcherLock(VOID)
344 {
345 /* Raise to synchronization level and acquire the dispatcher lock */
346 return KeAcquireQueuedSpinLockRaiseToSynch(LockQueueDispatcherLock);
347 }
348
349 FORCEINLINE
350 VOID
351 KiReleaseDispatcherLock(IN KIRQL OldIrql)
352 {
353 /* First release the lock */
354 KeReleaseQueuedSpinLockFromDpcLevel(&KeGetCurrentPrcb()->
355 LockQueue[LockQueueDispatcherLock]);
356
357 /* Then exit the dispatcher */
358 KiExitDispatcher(OldIrql);
359 }
360
361 FORCEINLINE
362 VOID
363 KiAcquireDispatcherLockAtDpcLevel(VOID)
364 {
365 /* Acquire the dispatcher lock */
366 KeAcquireQueuedSpinLockAtDpcLevel(&KeGetCurrentPrcb()->
367 LockQueue[LockQueueDispatcherLock]);
368 }
369
370 FORCEINLINE
371 VOID
372 KiReleaseDispatcherLockFromDpcLevel(VOID)
373 {
374 /* Release the dispatcher lock */
375 KeReleaseQueuedSpinLockFromDpcLevel(&KeGetCurrentPrcb()->
376 LockQueue[LockQueueDispatcherLock]);
377 }
378
379 //
380 // This routine inserts a thread into the deferred ready list of the current CPU
381 //
382 FORCEINLINE
383 VOID
384 KiInsertDeferredReadyList(IN PKTHREAD Thread)
385 {
386 PKPRCB Prcb = KeGetCurrentPrcb();
387
388 /* Set the thread to deferred state and CPU */
389 Thread->State = DeferredReady;
390 Thread->DeferredProcessor = Prcb->Number;
391
392 /* Add it on the list */
393 PushEntryList(&Prcb->DeferredReadyListHead, &Thread->SwapListEntry);
394 }
395
396 FORCEINLINE
397 VOID
398 KiRescheduleThread(IN BOOLEAN NewThread,
399 IN ULONG Cpu)
400 {
401 /* Check if a new thread needs to be scheduled on a different CPU */
402 if ((NewThread) && !(KeGetCurrentPrcb()->Number == Cpu))
403 {
404 /* Send an IPI to request delivery */
405 KiIpiSend(AFFINITY_MASK(Cpu), IPI_DPC);
406 }
407 }
408
409 //
410 // This routine sets the current thread in a swap busy state, which ensures that
411 // nobody else tries to swap it concurrently.
412 //
413 FORCEINLINE
414 VOID
415 KiSetThreadSwapBusy(IN PKTHREAD Thread)
416 {
417 /* Make sure nobody already set it */
418 ASSERT(Thread->SwapBusy == FALSE);
419
420 /* Set it ourselves */
421 Thread->SwapBusy = TRUE;
422 }
423
424 //
425 // This routine acquires the PRCB lock so that only one caller can touch
426 // volatile PRCB data.
427 //
428 // Since this is a simple optimized spin-lock, it must only be acquired
429 // at dispatcher level or higher!
430 //
431 FORCEINLINE
432 VOID
433 KiAcquirePrcbLock(IN PKPRCB Prcb)
434 {
435 /* Make sure we're at a safe level to touch the PRCB lock */
436 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
437
438 /* Start acquire loop */
439 for (;;)
440 {
441 /* Acquire the lock and break out if we acquired it first */
442 if (!InterlockedExchange((PLONG)&Prcb->PrcbLock, 1)) break;
443
444 /* Loop until the other CPU releases it */
445 do
446 {
447 /* Let the CPU know that this is a loop */
448 YieldProcessor();
449 } while (Prcb->PrcbLock);
450 }
451 }
452
453 //
454 // This routine releases the PRCB lock so that other callers can touch
455 // volatile PRCB data.
456 //
457 // Since this is a simple optimized spin-lock, it must only be acquired
458 // at dispatcher level or higher!
459 //
460 FORCEINLINE
461 VOID
462 KiReleasePrcbLock(IN PKPRCB Prcb)
463 {
464 /* Make sure we are above dispatch and the lock is acquired! */
465 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
466 ASSERT(Prcb->PrcbLock != 0);
467
468 /* Release it */
469 InterlockedAnd((PLONG)&Prcb->PrcbLock, 0);
470 }
471
472 //
473 // This routine acquires the thread lock so that only one caller can touch
474 // volatile thread data.
475 //
476 // Since this is a simple optimized spin-lock, it must only be acquired
477 // at dispatcher level or higher!
478 //
479 FORCEINLINE
480 VOID
481 KiAcquireThreadLock(IN PKTHREAD Thread)
482 {
483 /* Make sure we're at a safe level to touch the thread lock */
484 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
485
486 /* Start acquire loop */
487 for (;;)
488 {
489 /* Acquire the lock and break out if we acquired it first */
490 if (!InterlockedExchange((PLONG)&Thread->ThreadLock, 1)) break;
491
492 /* Loop until the other CPU releases it */
493 do
494 {
495 /* Let the CPU know that this is a loop */
496 YieldProcessor();
497 } while (Thread->ThreadLock);
498 }
499 }
500
501 //
502 // This routine releases the thread lock so that other callers can touch
503 // volatile thread data.
504 //
505 // Since this is a simple optimized spin-lock, it must only be acquired
506 // at dispatcher level or higher!
507 //
508 FORCEINLINE
509 VOID
510 KiReleaseThreadLock(IN PKTHREAD Thread)
511 {
512 /* Make sure we are still above dispatch */
513 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
514
515 /* Release it */
516 InterlockedAnd((PLONG)&Thread->ThreadLock, 0);
517 }
518
519 FORCEINLINE
520 BOOLEAN
521 KiTryThreadLock(IN PKTHREAD Thread)
522 {
523 LONG Value;
524
525 /* If the lock isn't acquired, return false */
526 if (!Thread->ThreadLock) return FALSE;
527
528 /* Otherwise, try to acquire it and check the result */
529 Value = 1;
530 Value = InterlockedExchange((PLONG)&Thread->ThreadLock, Value);
531
532 /* Return the lock state */
533 return (Value == 1);
534 }
535
536 FORCEINLINE
537 VOID
538 KiCheckDeferredReadyList(IN PKPRCB Prcb)
539 {
540 /* Scan the deferred ready lists if required */
541 if (Prcb->DeferredReadyListHead.Next) KiProcessDeferredReadyList(Prcb);
542 }
543
544 FORCEINLINE
545 VOID
546 KiRequestApcInterrupt(IN BOOLEAN NeedApc,
547 IN UCHAR Processor)
548 {
549 /* Check if we need to request APC delivery */
550 if (NeedApc)
551 {
552 /* Check if it's on another CPU */
553 if (KeGetCurrentPrcb()->Number != Processor)
554 {
555 /* Send an IPI to request delivery */
556 KiIpiSend(AFFINITY_MASK(Processor), IPI_APC);
557 }
558 else
559 {
560 /* Request a software interrupt */
561 HalRequestSoftwareInterrupt(APC_LEVEL);
562 }
563 }
564 }
565
566 FORCEINLINE
567 PKSPIN_LOCK_QUEUE
568 KiAcquireTimerLock(IN ULONG Hand)
569 {
570 PKSPIN_LOCK_QUEUE LockQueue;
571 ULONG LockIndex;
572 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
573
574 /* Get the lock index */
575 LockIndex = Hand >> LOCK_QUEUE_TIMER_LOCK_SHIFT;
576 LockIndex &= (LOCK_QUEUE_TIMER_TABLE_LOCKS - 1);
577
578 /* Now get the lock */
579 LockQueue = &KeGetCurrentPrcb()->LockQueue[LockQueueTimerTableLock + LockIndex];
580
581 /* Acquire it and return */
582 KeAcquireQueuedSpinLockAtDpcLevel(LockQueue);
583 return LockQueue;
584 }
585
586 FORCEINLINE
587 VOID
588 KiReleaseTimerLock(IN PKSPIN_LOCK_QUEUE LockQueue)
589 {
590 ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
591
592 /* Release the lock */
593 KeReleaseQueuedSpinLockFromDpcLevel(LockQueue);
594 }
595
596 #endif
597
598 FORCEINLINE
599 VOID
600 KiAcquireApcLock(IN PKTHREAD Thread,
601 IN PKLOCK_QUEUE_HANDLE Handle)
602 {
603 /* Acquire the lock and raise to synchronization level */
604 KeAcquireInStackQueuedSpinLockRaiseToSynch(&Thread->ApcQueueLock, Handle);
605 }
606
607 FORCEINLINE
608 VOID
609 KiAcquireApcLockAtDpcLevel(IN PKTHREAD Thread,
610 IN PKLOCK_QUEUE_HANDLE Handle)
611 {
612 /* Acquire the lock */
613 KeAcquireInStackQueuedSpinLockAtDpcLevel(&Thread->ApcQueueLock, Handle);
614 }
615
616 FORCEINLINE
617 VOID
618 KiAcquireApcLockAtApcLevel(IN PKTHREAD Thread,
619 IN PKLOCK_QUEUE_HANDLE Handle)
620 {
621 /* Acquire the lock */
622 KeAcquireInStackQueuedSpinLock(&Thread->ApcQueueLock, Handle);
623 }
624
625 FORCEINLINE
626 VOID
627 KiReleaseApcLock(IN PKLOCK_QUEUE_HANDLE Handle)
628 {
629 /* Release the lock */
630 KeReleaseInStackQueuedSpinLock(Handle);
631 }
632
633 FORCEINLINE
634 VOID
635 KiReleaseApcLockFromDpcLevel(IN PKLOCK_QUEUE_HANDLE Handle)
636 {
637 /* Release the lock */
638 KeReleaseInStackQueuedSpinLockFromDpcLevel(Handle);
639 }
640
641 FORCEINLINE
642 VOID
643 KiAcquireProcessLock(IN PKPROCESS Process,
644 IN PKLOCK_QUEUE_HANDLE Handle)
645 {
646 /* Acquire the lock and raise to synchronization level */
647 KeAcquireInStackQueuedSpinLockRaiseToSynch(&Process->ProcessLock, Handle);
648 }
649
650 FORCEINLINE
651 VOID
652 KiReleaseProcessLock(IN PKLOCK_QUEUE_HANDLE Handle)
653 {
654 /* Release the lock */
655 KeReleaseInStackQueuedSpinLock(Handle);
656 }
657
658 FORCEINLINE
659 VOID
660 KiReleaseProcessLockFromDpcLevel(IN PKLOCK_QUEUE_HANDLE Handle)
661 {
662 /* Release the lock */
663 KeReleaseInStackQueuedSpinLockFromDpcLevel(Handle);
664 }
665
666 FORCEINLINE
667 VOID
668 KiAcquireDeviceQueueLock(IN PKDEVICE_QUEUE DeviceQueue,
669 IN PKLOCK_QUEUE_HANDLE DeviceLock)
670 {
671 /* Check if we were called from a threaded DPC */
672 if (KeGetCurrentPrcb()->DpcThreadActive)
673 {
674 /* Lock the Queue, we're not at DPC level */
675 KeAcquireInStackQueuedSpinLock(&DeviceQueue->Lock, DeviceLock);
676 }
677 else
678 {
679 /* We must be at DPC level, acquire the lock safely */
680 ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
681 KeAcquireInStackQueuedSpinLockAtDpcLevel(&DeviceQueue->Lock,
682 DeviceLock);
683 }
684 }
685
686 FORCEINLINE
687 VOID
688 KiReleaseDeviceQueueLock(IN PKLOCK_QUEUE_HANDLE DeviceLock)
689 {
690 /* Check if we were called from a threaded DPC */
691 if (KeGetCurrentPrcb()->DpcThreadActive)
692 {
693 /* Unlock the Queue, we're not at DPC level */
694 KeReleaseInStackQueuedSpinLock(DeviceLock);
695 }
696 else
697 {
698 /* We must be at DPC level, release the lock safely */
699 ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
700 KeReleaseInStackQueuedSpinLockFromDpcLevel(DeviceLock);
701 }
702 }
703
704 //
705 // Satisfies the wait of a mutant dispatcher object
706 //
707 #define KiSatisfyMutantWait(Object, Thread) \
708 { \
709 /* Decrease the Signal State */ \
710 (Object)->Header.SignalState--; \
711 \
712 /* Check if it's now non-signaled */ \
713 if (!(Object)->Header.SignalState) \
714 { \
715 /* Set the Owner Thread */ \
716 (Object)->OwnerThread = Thread; \
717 \
718 /* Disable APCs if needed */ \
719 Thread->KernelApcDisable = Thread->KernelApcDisable - \
720 (Object)->ApcDisable; \
721 \
722 /* Check if it's abandoned */ \
723 if ((Object)->Abandoned) \
724 { \
725 /* Unabandon it */ \
726 (Object)->Abandoned = FALSE; \
727 \
728 /* Return Status */ \
729 Thread->WaitStatus = STATUS_ABANDONED; \
730 } \
731 \
732 /* Insert it into the Mutant List */ \
733 InsertHeadList(Thread->MutantListHead.Blink, \
734 &(Object)->MutantListEntry); \
735 } \
736 }
737
738 //
739 // Satisfies the wait of any nonmutant dispatcher object
740 //
741 #define KiSatisfyNonMutantWait(Object) \
742 { \
743 if (((Object)->Header.Type & TIMER_OR_EVENT_TYPE) == \
744 EventSynchronizationObject) \
745 { \
746 /* Synchronization Timers and Events just get un-signaled */ \
747 (Object)->Header.SignalState = 0; \
748 } \
749 else if ((Object)->Header.Type == SemaphoreObject) \
750 { \
751 /* These ones can have multiple states, so we only decrease it */ \
752 (Object)->Header.SignalState--; \
753 } \
754 }
755
756 //
757 // Satisfies the wait of any dispatcher object
758 //
759 #define KiSatisfyObjectWait(Object, Thread) \
760 { \
761 /* Special case for Mutants */ \
762 if ((Object)->Header.Type == MutantObject) \
763 { \
764 KiSatisfyMutantWait((Object), (Thread)); \
765 } \
766 else \
767 { \
768 KiSatisfyNonMutantWait(Object); \
769 } \
770 }
771
772 //
773 // Recalculates the due time
774 //
775 FORCEINLINE
776 PLARGE_INTEGER
777 KiRecalculateDueTime(IN PLARGE_INTEGER OriginalDueTime,
778 IN PLARGE_INTEGER DueTime,
779 IN OUT PLARGE_INTEGER NewDueTime)
780 {
781 /* Don't do anything for absolute waits */
782 if (OriginalDueTime->QuadPart >= 0) return OriginalDueTime;
783
784 /* Otherwise, query the interrupt time and recalculate */
785 NewDueTime->QuadPart = KeQueryInterruptTime();
786 NewDueTime->QuadPart -= DueTime->QuadPart;
787 return NewDueTime;
788 }
789
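//
// Illustrative reading of the math above: absolute waits (a non-negative
// OriginalDueTime) are reused as-is, while for relative waits DueTime is
// assumed to hold the interrupt-time deadline computed when the wait was
// first set up, so InterruptTime - DueTime yields the (negative) interval
// still remaining and a restarted wait keeps its original deadline.
//
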
790 //
791 // Determines whether a thread should be added to the wait list
792 //
793 FORCEINLINE
794 BOOLEAN
795 KiCheckThreadStackSwap(IN PKTHREAD Thread,
796 IN KPROCESSOR_MODE WaitMode)
797 {
798 /* Check the required conditions */
799 if ((WaitMode != KernelMode) &&
800 (Thread->EnableStackSwap) &&
801 (Thread->Priority >= (LOW_REALTIME_PRIORITY + 9)))
802 {
803 /* We are go for swap */
804 return TRUE;
805 }
806 else
807 {
808 /* Don't swap the thread */
809 return FALSE;
810 }
811 }
812
813 //
814 // Adds a thread to the wait list
815 //
816 #define KiAddThreadToWaitList(Thread, Swappable) \
817 { \
818 /* Make sure it's swappable */ \
819 if (Swappable) \
820 { \
821 /* Insert it into the PRCB's List */ \
822 InsertTailList(&KeGetCurrentPrcb()->WaitListHead, \
823 &Thread->WaitListEntry); \
824 } \
825 }
826
827 //
828 // Checks if a wait in progress should be interrupted by APCs or an alertable
829 // state.
830 //
831 FORCEINLINE
832 NTSTATUS
833 KiCheckAlertability(IN PKTHREAD Thread,
834 IN BOOLEAN Alertable,
835 IN KPROCESSOR_MODE WaitMode)
836 {
837 /* Check if the wait is alertable */
838 if (Alertable)
839 {
840 /* It is, first check if the thread is alerted in this mode */
841 if (Thread->Alerted[WaitMode])
842 {
843 /* It is, so bail out of the wait */
844 Thread->Alerted[WaitMode] = FALSE;
845 return STATUS_ALERTED;
846 }
847 else if ((WaitMode != KernelMode) &&
848 (!IsListEmpty(&Thread->ApcState.ApcListHead[UserMode])))
849 {
850 /* It isn't, but this is a user wait with queued user APCs */
851 Thread->ApcState.UserApcPending = TRUE;
852 return STATUS_USER_APC;
853 }
854 else if (Thread->Alerted[KernelMode])
855 {
856 /* It isn't that either, but we're alerted in kernel mode */
857 Thread->Alerted[KernelMode] = FALSE;
858 return STATUS_ALERTED;
859 }
860 }
861 else if ((WaitMode != KernelMode) && (Thread->ApcState.UserApcPending))
862 {
863 /* Not alertable, but this is a user wait with pending user APCs */
864 return STATUS_USER_APC;
865 }
866
867 /* Otherwise, we're fine */
868 return STATUS_WAIT_0;
869 }
870
871 FORCEINLINE
872 ULONG
873 KiComputeTimerTableIndex(IN ULONGLONG DueTime)
874 {
875 return (DueTime / KeMaximumIncrement) & (TIMER_TABLE_SIZE - 1);
876 }
877
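//
// A worked example of the hash above, assuming a typical 15.6 ms clock tick
// (KeMaximumIncrement of roughly 156250 100-ns units): a due time one second
// past boot, 10000000, gives 10000000 / 156250 = 64, and the AND with
// (TIMER_TABLE_SIZE - 1) simply wraps that tick count into the table, since
// TIMER_TABLE_SIZE is a power of two.
//
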
878 //
879 // Called from KiCompleteTimer, KiInsertTreeTimer, KeSetSystemTime
880 // to remove timer entries
881 // See Windows HPI blog for more information.
882 FORCEINLINE
883 VOID
884 KiRemoveEntryTimer(IN PKTIMER Timer)
885 {
886 ULONG Hand;
887 PKTIMER_TABLE_ENTRY TableEntry;
888
889 /* Remove the timer from the timer list and check if it's empty */
890 Hand = Timer->Header.Hand;
891 if (RemoveEntryList(&Timer->TimerListEntry))
892 {
893 /* Get the respective timer table entry */
894 TableEntry = &KiTimerTableListHead[Hand];
895 if (&TableEntry->Entry == TableEntry->Entry.Flink)
896 {
897 /* Set the entry to an infinite absolute time */
898 TableEntry->Time.HighPart = 0xFFFFFFFF;
899 }
900 }
901
902 /* Clear the list entries on dbg builds so we can tell the timer is gone */
903 #if DBG
904 Timer->TimerListEntry.Flink = NULL;
905 Timer->TimerListEntry.Blink = NULL;
906 #endif
907 }
908
909 //
910 // Called by Wait and Queue code to insert a timer for dispatching.
911 // Also called by KeSetTimerEx to insert a timer from the caller.
912 //
913 FORCEINLINE
914 VOID
915 KxInsertTimer(IN PKTIMER Timer,
916 IN ULONG Hand)
917 {
918 PKSPIN_LOCK_QUEUE LockQueue;
919
920 /* Acquire the lock and release the dispatcher lock */
921 LockQueue = KiAcquireTimerLock(Hand);
922 KiReleaseDispatcherLockFromDpcLevel();
923
924 /* Try to insert the timer */
925 if (KiInsertTimerTable(Timer, Hand))
926 {
927 /* Complete it */
928 KiCompleteTimer(Timer, LockQueue);
929 }
930 else
931 {
932 /* Do nothing, just release the lock */
933 KiReleaseTimerLock(LockQueue);
934 }
935 }
936
937 //
938 // Called by KeSetTimerEx and KiInsertTreeTimer to calculate Due Time
939 // See the Windows HPI Blog for more information
940 //
941 FORCEINLINE
942 BOOLEAN
943 KiComputeDueTime(IN PKTIMER Timer,
944 IN LARGE_INTEGER DueTime,
945 OUT PULONG Hand)
946 {
947 LARGE_INTEGER InterruptTime, SystemTime, DifferenceTime;
948
949 /* Convert to relative time if needed */
950 Timer->Header.Absolute = FALSE;
951 if (DueTime.HighPart >= 0)
952 {
953 /* Get System Time */
954 KeQuerySystemTime(&SystemTime);
955
956 /* Do the conversion */
957 DifferenceTime.QuadPart = SystemTime.QuadPart - DueTime.QuadPart;
958
959 /* Make sure it hasn't already expired */
960 Timer->Header.Absolute = TRUE;
961 if (DifferenceTime.HighPart >= 0)
962 {
963 /* Cancel everything */
964 Timer->Header.SignalState = TRUE;
965 Timer->Header.Hand = 0;
966 Timer->DueTime.QuadPart = 0;
967 *Hand = 0;
968 return FALSE;
969 }
970
971 /* Use the relative difference as the due time */
972 DueTime = DifferenceTime;
973 }
974
975 /* Get the Interrupt Time */
976 InterruptTime.QuadPart = KeQueryInterruptTime();
977
978 /* Recalculate due time */
979 Timer->DueTime.QuadPart = InterruptTime.QuadPart - DueTime.QuadPart;
980
981 /* Get the handle */
982 *Hand = KiComputeTimerTableIndex(Timer->DueTime.QuadPart);
983 Timer->Header.Hand = (UCHAR)*Hand;
984 Timer->Header.Inserted = TRUE;
985 return TRUE;
986 }
987
988 //
989 // Called from Unlink and Queue Insert Code.
990 // Also called by timer code when canceling an inserted timer.
991 // Removes a timer from its tree.
992 //
993 FORCEINLINE
994 VOID
995 KxRemoveTreeTimer(IN PKTIMER Timer)
996 {
997 ULONG Hand = Timer->Header.Hand;
998 PKSPIN_LOCK_QUEUE LockQueue;
999 PKTIMER_TABLE_ENTRY TimerEntry;
1000
1001 /* Acquire timer lock */
1002 LockQueue = KiAcquireTimerLock(Hand);
1003
1004 /* Set the timer as non-inserted */
1005 Timer->Header.Inserted = FALSE;
1006
1007 /* Remove it from the timer list */
1008 if (RemoveEntryList(&Timer->TimerListEntry))
1009 {
1010 /* Get the entry and check if it's empty */
1011 TimerEntry = &KiTimerTableListHead[Hand];
1012 if (IsListEmpty(&TimerEntry->Entry))
1013 {
1014 /* Clear the time then */
1015 TimerEntry->Time.HighPart = 0xFFFFFFFF;
1016 }
1017 }
1018
1019 /* Release the timer lock */
1020 KiReleaseTimerLock(LockQueue);
1021 }
1022
1023 FORCEINLINE
1024 VOID
1025 KxSetTimerForThreadWait(IN PKTIMER Timer,
1026 IN LARGE_INTEGER Interval,
1027 OUT PULONG Hand)
1028 {
1029 ULONGLONG DueTime;
1030 LARGE_INTEGER InterruptTime, SystemTime, TimeDifference;
1031
1032 /* Check the timer's interval to see if it's absolute */
1033 Timer->Header.Absolute = FALSE;
1034 if (Interval.HighPart >= 0)
1035 {
1036 /* Get the system time and calculate the relative time */
1037 KeQuerySystemTime(&SystemTime);
1038 TimeDifference.QuadPart = SystemTime.QuadPart - Interval.QuadPart;
1039 Timer->Header.Absolute = TRUE;
1040
1041 /* Check if we've already expired */
1042 if (TimeDifference.HighPart >= 0)
1043 {
1044 /* Reset everything */
1045 Timer->DueTime.QuadPart = 0;
1046 *Hand = 0;
1047 Timer->Header.Hand = 0;
1048 return;
1049 }
1050 else
1051 {
1052 /* Update the interval */
1053 Interval = TimeDifference;
1054 }
1055 }
1056
1057 /* Calculate the due time */
1058 InterruptTime.QuadPart = KeQueryInterruptTime();
1059 DueTime = InterruptTime.QuadPart - Interval.QuadPart;
1060 Timer->DueTime.QuadPart = DueTime;
1061
1062 /* Calculate the timer handle */
1063 *Hand = KiComputeTimerTableIndex(DueTime);
1064 Timer->Header.Hand = (UCHAR)*Hand;
1065 }
1066
1067 #define KxDelayThreadWait() \
1068 \
1069 /* Setup the Wait Block */ \
1070 Thread->WaitBlockList = TimerBlock; \
1071 \
1072 /* Setup the timer */ \
1073 KxSetTimerForThreadWait(Timer, *Interval, &Hand); \
1074 \
1075 /* Save the due time for the caller */ \
1076 DueTime.QuadPart = Timer->DueTime.QuadPart; \
1077 \
1078 /* Link the timer to this Wait Block */ \
1079 TimerBlock->NextWaitBlock = TimerBlock; \
1080 Timer->Header.WaitListHead.Flink = &TimerBlock->WaitListEntry; \
1081 Timer->Header.WaitListHead.Blink = &TimerBlock->WaitListEntry; \
1082 \
1083 /* Clear wait status */ \
1084 Thread->WaitStatus = STATUS_SUCCESS; \
1085 \
1086 /* Setup wait fields */ \
1087 Thread->Alertable = Alertable; \
1088 Thread->WaitReason = DelayExecution; \
1089 Thread->WaitMode = WaitMode; \
1090 \
1091 /* Check if we can swap the thread's stack */ \
1092 Thread->WaitListEntry.Flink = NULL; \
1093 Swappable = KiCheckThreadStackSwap(Thread, WaitMode); \
1094 \
1095 /* Set the wait time */ \
1096 Thread->WaitTime = KeTickCount.LowPart;
1097
1098 #define KxMultiThreadWait() \
1099 /* Link wait block array to the thread */ \
1100 Thread->WaitBlockList = WaitBlockArray; \
1101 \
1102 /* Reset the index */ \
1103 Index = 0; \
1104 \
1105 /* Loop wait blocks */ \
1106 do \
1107 { \
1108 /* Fill out the wait block */ \
1109 WaitBlock = &WaitBlockArray[Index]; \
1110 WaitBlock->Object = Object[Index]; \
1111 WaitBlock->WaitKey = (USHORT)Index; \
1112 WaitBlock->WaitType = WaitType; \
1113 WaitBlock->Thread = Thread; \
1114 \
1115 /* Link to next block */ \
1116 WaitBlock->NextWaitBlock = &WaitBlockArray[Index + 1]; \
1117 Index++; \
1118 } while (Index < Count); \
1119 \
1120 /* Link the last block */ \
1121 WaitBlock->NextWaitBlock = WaitBlockArray; \
1122 \
1123 /* Set default wait status */ \
1124 Thread->WaitStatus = STATUS_WAIT_0; \
1125 \
1126 /* Check if we have a timer */ \
1127 if (Timeout) \
1128 { \
1129 /* Link to the block */ \
1130 TimerBlock->NextWaitBlock = WaitBlockArray; \
1131 \
1132 /* Setup the timer */ \
1133 KxSetTimerForThreadWait(Timer, *Timeout, &Hand); \
1134 \
1135 /* Save the due time for the caller */ \
1136 DueTime.QuadPart = Timer->DueTime.QuadPart; \
1137 \
1138 /* Initialize the list */ \
1139 InitializeListHead(&Timer->Header.WaitListHead); \
1140 } \
1141 \
1142 /* Set wait settings */ \
1143 Thread->Alertable = Alertable; \
1144 Thread->WaitMode = WaitMode; \
1145 Thread->WaitReason = WaitReason; \
1146 \
1147 /* Check if we can swap the thread's stack */ \
1148 Thread->WaitListEntry.Flink = NULL; \
1149 Swappable = KiCheckThreadStackSwap(Thread, WaitMode); \
1150 \
1151 /* Set the wait time */ \
1152 Thread->WaitTime = KeTickCount.LowPart;
1153
1154 #define KxSingleThreadWait() \
1155 /* Setup the Wait Block */ \
1156 Thread->WaitBlockList = WaitBlock; \
1157 WaitBlock->WaitKey = STATUS_SUCCESS; \
1158 WaitBlock->Object = Object; \
1159 WaitBlock->WaitType = WaitAny; \
1160 \
1161 /* Clear wait status */ \
1162 Thread->WaitStatus = STATUS_SUCCESS; \
1163 \
1164 /* Check if we have a timer */ \
1165 if (Timeout) \
1166 { \
1167 /* Setup the timer */ \
1168 KxSetTimerForThreadWait(Timer, *Timeout, &Hand); \
1169 \
1170 /* Save the due time for the caller */ \
1171 DueTime.QuadPart = Timer->DueTime.QuadPart; \
1172 \
1173 /* Pointer to timer block */ \
1174 WaitBlock->NextWaitBlock = TimerBlock; \
1175 TimerBlock->NextWaitBlock = WaitBlock; \
1176 \
1177 /* Link the timer to this Wait Block */ \
1178 Timer->Header.WaitListHead.Flink = &TimerBlock->WaitListEntry; \
1179 Timer->Header.WaitListHead.Blink = &TimerBlock->WaitListEntry; \
1180 } \
1181 else \
1182 { \
1183 /* No timer block, just ourselves */ \
1184 WaitBlock->NextWaitBlock = WaitBlock; \
1185 } \
1186 \
1187 /* Set wait settings */ \
1188 Thread->Alertable = Alertable; \
1189 Thread->WaitMode = WaitMode; \
1190 Thread->WaitReason = WaitReason; \
1191 \
1192 /* Check if we can swap the thread's stack */ \
1193 Thread->WaitListEntry.Flink = NULL; \
1194 Swappable = KiCheckThreadStackSwap(Thread, WaitMode); \
1195 \
1196 /* Set the wait time */ \
1197 Thread->WaitTime = KeTickCount.LowPart;
1198
1199 #define KxQueueThreadWait() \
1200 /* Setup the Wait Block */ \
1201 Thread->WaitBlockList = WaitBlock; \
1202 WaitBlock->WaitKey = STATUS_SUCCESS; \
1203 WaitBlock->Object = Queue; \
1204 WaitBlock->WaitType = WaitAny; \
1205 WaitBlock->Thread = Thread; \
1206 \
1207 /* Clear wait status */ \
1208 Thread->WaitStatus = STATUS_SUCCESS; \
1209 \
1210 /* Check if we have a timer */ \
1211 if (Timeout) \
1212 { \
1213 /* Setup the timer */ \
1214 KxSetTimerForThreadWait(Timer, *Timeout, &Hand); \
1215 \
1216 /* Save the due time for the caller */ \
1217 DueTime.QuadPart = Timer->DueTime.QuadPart; \
1218 \
1219 /* Pointer to timer block */ \
1220 WaitBlock->NextWaitBlock = TimerBlock; \
1221 TimerBlock->NextWaitBlock = WaitBlock; \
1222 \
1223 /* Link the timer to this Wait Block */ \
1224 Timer->Header.WaitListHead.Flink = &TimerBlock->WaitListEntry; \
1225 Timer->Header.WaitListHead.Blink = &TimerBlock->WaitListEntry; \
1226 } \
1227 else \
1228 { \
1229 /* No timer block, just ourselves */ \
1230 WaitBlock->NextWaitBlock = WaitBlock; \
1231 } \
1232 \
1233 /* Set wait settings */ \
1234 Thread->Alertable = FALSE; \
1235 Thread->WaitMode = WaitMode; \
1236 Thread->WaitReason = WrQueue; \
1237 \
1238 /* Check if we can swap the thread's stack */ \
1239 Thread->WaitListEntry.Flink = NULL; \
1240 Swappable = KiCheckThreadStackSwap(Thread, WaitMode); \
1241 \
1242 /* Set the wait time */ \
1243 Thread->WaitTime = KeTickCount.LowPart;
1244
1245 //
1246 // Unwaits a Thread
1247 //
1248 FORCEINLINE
1249 VOID
1250 KxUnwaitThread(IN DISPATCHER_HEADER *Object,
1251 IN KPRIORITY Increment)
1252 {
1253 PLIST_ENTRY WaitEntry, WaitList;
1254 PKWAIT_BLOCK WaitBlock;
1255 PKTHREAD WaitThread;
1256 ULONG WaitKey;
1257
1258 /* Loop the Wait Entries */
1259 WaitList = &Object->WaitListHead;
1260 ASSERT(IsListEmpty(&Object->WaitListHead) == FALSE);
1261 WaitEntry = WaitList->Flink;
1262 do
1263 {
1264 /* Get the current wait block */
1265 WaitBlock = CONTAINING_RECORD(WaitEntry, KWAIT_BLOCK, WaitListEntry);
1266
1267 /* Get the waiting thread */
1268 WaitThread = WaitBlock->Thread;
1269
1270 /* Check the current Wait Mode */
1271 if (WaitBlock->WaitType == WaitAny)
1272 {
1273 /* Use the actual wait key */
1274 WaitKey = WaitBlock->WaitKey;
1275 }
1276 else
1277 {
1278 /* Otherwise, use STATUS_KERNEL_APC */
1279 WaitKey = STATUS_KERNEL_APC;
1280 }
1281
1282 /* Unwait the thread */
1283 KiUnwaitThread(WaitThread, WaitKey, Increment);
1284
1285 /* Next entry */
1286 WaitEntry = WaitList->Flink;
1287 } while (WaitEntry != WaitList);
1288 }
1289
1290 //
1291 // Unwaits a Thread waiting on an event
1292 //
1293 FORCEINLINE
1294 VOID
1295 KxUnwaitThreadForEvent(IN PKEVENT Event,
1296 IN KPRIORITY Increment)
1297 {
1298 PLIST_ENTRY WaitEntry, WaitList;
1299 PKWAIT_BLOCK WaitBlock;
1300 PKTHREAD WaitThread;
1301
1302 /* Loop the Wait Entries */
1303 WaitList = &Event->Header.WaitListHead;
1304 ASSERT(IsListEmpty(&Event->Header.WaitListHead) == FALSE);
1305 WaitEntry = WaitList->Flink;
1306 do
1307 {
1308 /* Get the current wait block */
1309 WaitBlock = CONTAINING_RECORD(WaitEntry, KWAIT_BLOCK, WaitListEntry);
1310
1311 /* Get the waiting thread */
1312 WaitThread = WaitBlock->Thread;
1313
1314 /* Check the current Wait Mode */
1315 if (WaitBlock->WaitType == WaitAny)
1316 {
1317 /* Un-signal it */
1318 Event->Header.SignalState = 0;
1319
1320 /* Un-signal the event and unwait the thread */
1321 KiUnwaitThread(WaitThread, WaitBlock->WaitKey, Increment);
1322 break;
1323 }
1324
1325 /* Unwait the thread with STATUS_KERNEL_APC */
1326 KiUnwaitThread(WaitThread, STATUS_KERNEL_APC, Increment);
1327
1328 /* Next entry */
1329 WaitEntry = WaitList->Flink;
1330 } while (WaitEntry != WaitList);
1331 }
1332
1333 //
1334 // This routine queues a thread that is ready on the PRCB's ready lists.
1335 // If this thread cannot currently run on this CPU, then the thread is
1336 // added to the deferred ready list instead.
1337 //
1338 // This routine must be entered with the PRCB lock held and it will exit
1339 // with the PRCB lock released!
1340 //
1341 FORCEINLINE
1342 VOID
1343 KxQueueReadyThread(IN PKTHREAD Thread,
1344 IN PKPRCB Prcb)
1345 {
1346 BOOLEAN Preempted;
1347 KPRIORITY Priority;
1348
1349 /* Sanity checks */
1350 ASSERT(Prcb == KeGetCurrentPrcb());
1351 ASSERT(Thread->State == Running);
1352 ASSERT(Thread->NextProcessor == Prcb->Number);
1353
1354 /* Check if this thread is allowed to run on this CPU */
1355 #ifdef CONFIG_SMP
1356 if ((Thread->Affinity) & (Prcb->SetMember))
1357 #else
1358 if (TRUE)
1359 #endif
1360 {
1361 /* Set thread ready for execution */
1362 Thread->State = Ready;
1363
1364 /* Save current priority and if someone had pre-empted it */
1365 Priority = Thread->Priority;
1366 Preempted = Thread->Preempted;
1367
1368 /* We're not pre-empting now, and set the wait time */
1369 Thread->Preempted = FALSE;
1370 Thread->WaitTime = KeTickCount.LowPart;
1371
1372 /* Sanity check */
1373 ASSERT((Priority >= 0) && (Priority <= HIGH_PRIORITY));
1374
1375 /* Insert this thread in the appropriate order */
1376 Preempted ? InsertHeadList(&Prcb->DispatcherReadyListHead[Priority],
1377 &Thread->WaitListEntry) :
1378 InsertTailList(&Prcb->DispatcherReadyListHead[Priority],
1379 &Thread->WaitListEntry);
1380
1381 /* Update the ready summary */
1382 Prcb->ReadySummary |= PRIORITY_MASK(Priority);
1383
1384 /* Sanity check */
1385 ASSERT(Priority == Thread->Priority);
1386
1387 /* Release the PRCB lock */
1388 KiReleasePrcbLock(Prcb);
1389 }
1390 else
1391 {
1392 /* Otherwise, prepare this thread to be deferred */
1393 Thread->State = DeferredReady;
1394 Thread->DeferredProcessor = Prcb->Number;
1395
1396 /* Release the lock and defer scheduling */
1397 KiReleasePrcbLock(Prcb);
1398 KiDeferredReadyThread(Thread);
1399 }
1400 }
1401
1402 //
1403 // This routine scans for an appropriate ready thread to select at the
1404 // given priority and for the given CPU.
1405 //
1406 FORCEINLINE
1407 PKTHREAD
1408 KiSelectReadyThread(IN KPRIORITY Priority,
1409 IN PKPRCB Prcb)
1410 {
1411 ULONG PrioritySet;
1412 LONG HighPriority;
1413 PLIST_ENTRY ListEntry;
1414 PKTHREAD Thread = NULL;
1415
1416 /* Save the current mask and get the priority set for the CPU */
1417 PrioritySet = Prcb->ReadySummary >> Priority;
1418 if (!PrioritySet) goto Quickie;
1419
1420 /* Get the highest priority possible */
1421 BitScanReverse((PULONG)&HighPriority, PrioritySet);
1422 ASSERT((PrioritySet & PRIORITY_MASK(HighPriority)) != 0);
1423 HighPriority += Priority;
1424
1425 /* Make sure the list isn't empty at the highest priority */
1426 ASSERT(IsListEmpty(&Prcb->DispatcherReadyListHead[HighPriority]) == FALSE);
1427
1428 /* Get the first thread on the list */
1429 ListEntry = Prcb->DispatcherReadyListHead[HighPriority].Flink;
1430 Thread = CONTAINING_RECORD(ListEntry, KTHREAD, WaitListEntry);
1431
1432 /* Make sure this thread is here for a reason */
1433 ASSERT(HighPriority == Thread->Priority);
1434 ASSERT(Thread->Affinity & AFFINITY_MASK(Prcb->Number));
1435 ASSERT(Thread->NextProcessor == Prcb->Number);
1436
1437 /* Remove it from the list */
1438 if (RemoveEntryList(&Thread->WaitListEntry))
1439 {
1440 /* The list is empty now, reset the ready summary */
1441 Prcb->ReadySummary ^= PRIORITY_MASK(HighPriority);
1442 }
1443
1444 /* Sanity check and return the thread */
1445 Quickie:
1446 ASSERT((Thread == NULL) ||
1447 (Thread->BasePriority == 0) ||
1448 (Thread->Priority != 0));
1449 return Thread;
1450 }
1451
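//
// A worked example of the scan above: with Prcb->ReadySummary = 0x00000900
// (ready threads queued at priorities 8 and 11) and Priority = 8, the shift
// gives PrioritySet = 0x9, BitScanReverse returns 3, and HighPriority becomes
// 3 + 8 = 11, so the head of the priority-11 ready list is selected.
//
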
1452 //
1453 // This routine computes the new priority for a thread. It is only valid for
1454 // threads with priorities in the dynamic priority range.
1455 //
1456 FORCEINLINE
1457 SCHAR
1458 KiComputeNewPriority(IN PKTHREAD Thread,
1459 IN SCHAR Adjustment)
1460 {
1461 SCHAR Priority;
1462
1463 /* Priority sanity checks */
1464 ASSERT((Thread->PriorityDecrement >= 0) &&
1465 (Thread->PriorityDecrement <= Thread->Priority));
1466 ASSERT((Thread->Priority < LOW_REALTIME_PRIORITY) ?
1467 TRUE : (Thread->PriorityDecrement == 0));
1468
1469 /* Get the current priority */
1470 Priority = Thread->Priority;
1471 if (Priority < LOW_REALTIME_PRIORITY)
1472 {
1473 /* Decrease priority by the priority decrement */
1474 Priority -= (Thread->PriorityDecrement + Adjustment);
1475
1476 /* Don't go out of bounds */
1477 if (Priority < Thread->BasePriority) Priority = Thread->BasePriority;
1478
1479 /* Reset the priority decrement */
1480 Thread->PriorityDecrement = 0;
1481 }
1482
1483 /* Sanity check */
1484 ASSERT((Thread->BasePriority == 0) || (Priority != 0));
1485
1486 /* Return the new priority */
1487 return Priority;
1488 }
1489
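//
// A small worked example: a thread at dynamic priority 12 with BasePriority 8,
// PriorityDecrement 2 and an Adjustment of 1 comes out at 12 - (2 + 1) = 9,
// which is still above the base and is therefore kept, and the decrement is
// reset to zero; had the result dropped below 8, it would have been clamped
// to BasePriority instead.
//
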
1490 //
1491 // Guarded Mutex Routines
1492 //
1493 FORCEINLINE
1494 VOID
1495 _KeInitializeGuardedMutex(OUT PKGUARDED_MUTEX GuardedMutex)
1496 {
1497 /* Setup the Initial Data */
1498 GuardedMutex->Count = GM_LOCK_BIT;
1499 GuardedMutex->Owner = NULL;
1500 GuardedMutex->Contention = 0;
1501
1502 /* Initialize the Wait Gate */
1503 KeInitializeGate(&GuardedMutex->Gate);
1504 }
1505
1506 FORCEINLINE
1507 VOID
1508 _KeAcquireGuardedMutexUnsafe(IN OUT PKGUARDED_MUTEX GuardedMutex)
1509 {
1510 PKTHREAD Thread = KeGetCurrentThread();
1511
1512 /* Sanity checks */
1513 ASSERT((KeGetCurrentIrql() == APC_LEVEL) ||
1514 (Thread->SpecialApcDisable < 0) ||
1515 (Thread->Teb == NULL) ||
1516 (Thread->Teb >= (PTEB)MM_SYSTEM_RANGE_START));
1517 ASSERT(GuardedMutex->Owner != Thread);
1518
1519 /* Remove the lock */
1520 if (!InterlockedBitTestAndReset(&GuardedMutex->Count, GM_LOCK_BIT_V))
1521 {
1522 /* The Guarded Mutex was already locked, enter contended case */
1523 KiAcquireGuardedMutex(GuardedMutex);
1524 }
1525
1526 /* Set the Owner */
1527 GuardedMutex->Owner = Thread;
1528 }
1529
1530 FORCEINLINE
1531 VOID
1532 _KeReleaseGuardedMutexUnsafe(IN OUT PKGUARDED_MUTEX GuardedMutex)
1533 {
1534 LONG OldValue, NewValue;
1535
1536 /* Sanity checks */
1537 ASSERT((KeGetCurrentIrql() == APC_LEVEL) ||
1538 (KeGetCurrentThread()->SpecialApcDisable < 0) ||
1539 (KeGetCurrentThread()->Teb == NULL) ||
1540 (KeGetCurrentThread()->Teb >= (PTEB)MM_SYSTEM_RANGE_START));
1541 ASSERT(GuardedMutex->Owner == KeGetCurrentThread());
1542
1543 /* Destroy the Owner */
1544 GuardedMutex->Owner = NULL;
1545
1546 /* Add the Lock Bit */
1547 OldValue = InterlockedExchangeAdd(&GuardedMutex->Count, GM_LOCK_BIT);
1548 ASSERT((OldValue & GM_LOCK_BIT) == 0);
1549
1550 /* Check if it was already locked, but not woken */
1551 if ((OldValue) && !(OldValue & GM_LOCK_WAITER_WOKEN))
1552 {
1553 /* Update OldValue to what it should be now */
1554 OldValue += GM_LOCK_BIT;
1555
1556 /* The mutex will be woken, minus one waiter */
1557 NewValue = OldValue + GM_LOCK_WAITER_WOKEN -
1558 GM_LOCK_WAITER_INC;
1559
1560 /* Remove the Woken bit */
1561 if (InterlockedCompareExchange(&GuardedMutex->Count,
1562 NewValue,
1563 OldValue) == OldValue)
1564 {
1565 /* Signal the Gate */
1566 KeSignalGateBoostPriority(&GuardedMutex->Gate);
1567 }
1568 }
1569 }
1570
1571 FORCEINLINE
1572 VOID
1573 _KeAcquireGuardedMutex(IN PKGUARDED_MUTEX GuardedMutex)
1574 {
1575 PKTHREAD Thread = KeGetCurrentThread();
1576
1577 /* Sanity checks */
1578 ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
1579 ASSERT(GuardedMutex->Owner != Thread);
1580
1581 /* Disable Special APCs */
1582 KeEnterGuardedRegionThread(Thread);
1583
1584 /* Remove the lock */
1585 if (!InterlockedBitTestAndReset(&GuardedMutex->Count, GM_LOCK_BIT_V))
1586 {
1587 /* The Guarded Mutex was already locked, enter contended case */
1588 KiAcquireGuardedMutex(GuardedMutex);
1589 }
1590
1591 /* Set the Owner and Special APC Disable state */
1592 GuardedMutex->Owner = Thread;
1593 GuardedMutex->SpecialApcDisable = Thread->SpecialApcDisable;
1594 }
1595
1596 FORCEINLINE
1597 VOID
1598 _KeReleaseGuardedMutex(IN OUT PKGUARDED_MUTEX GuardedMutex)
1599 {
1600 PKTHREAD Thread = KeGetCurrentThread();
1601 LONG OldValue, NewValue;
1602
1603 /* Sanity checks */
1604 ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
1605 ASSERT(GuardedMutex->Owner == Thread);
1606 ASSERT(Thread->SpecialApcDisable == GuardedMutex->SpecialApcDisable);
1607
1608 /* Destroy the Owner */
1609 GuardedMutex->Owner = NULL;
1610
1611 /* Add the Lock Bit */
1612 OldValue = InterlockedExchangeAdd(&GuardedMutex->Count, GM_LOCK_BIT);
1613 ASSERT((OldValue & GM_LOCK_BIT) == 0);
1614
1615 /* Check if it was already locked, but not woken */
1616 if ((OldValue) && !(OldValue & GM_LOCK_WAITER_WOKEN))
1617 {
1618 /* Update OldValue to what it should be now */
1619 OldValue += GM_LOCK_BIT;
1620
1621 /* The mutex will be woken, minus one waiter */
1622 NewValue = OldValue + GM_LOCK_WAITER_WOKEN -
1623 GM_LOCK_WAITER_INC;
1624
1625 /* Remove the Woken bit */
1626 if (InterlockedCompareExchange(&GuardedMutex->Count,
1627 NewValue,
1628 OldValue) == OldValue)
1629 {
1630 /* Signal the Gate */
1631 KeSignalGateBoostPriority(&GuardedMutex->Gate);
1632 }
1633 }
1634
1635 /* Re-enable APCs */
1636 KeLeaveGuardedRegionThread(Thread);
1637 }
1638
1639 FORCEINLINE
1640 BOOLEAN
1641 _KeTryToAcquireGuardedMutex(IN OUT PKGUARDED_MUTEX GuardedMutex)
1642 {
1643 PKTHREAD Thread = KeGetCurrentThread();
1644
1645 /* Block APCs */
1646 KeEnterGuardedRegionThread(Thread);
1647
1648 /* Remove the lock */
1649 if (!InterlockedBitTestAndReset(&GuardedMutex->Count, GM_LOCK_BIT_V))
1650 {
1651 /* Re-enable APCs */
1652 KeLeaveGuardedRegionThread(Thread);
1653 YieldProcessor();
1654
1655 /* Return failure */
1656 return FALSE;
1657 }
1658
1659 /* Set the Owner and APC State */
1660 GuardedMutex->Owner = Thread;
1661 GuardedMutex->SpecialApcDisable = Thread->SpecialApcDisable;
1662 return TRUE;
1663 }
1664
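//
// In the guarded-mutex paths above, the uncontended fast path is a single
// interlocked bit-test-and-reset of GM_LOCK_BIT in Count; only when that
// fails does the caller fall into KiAcquireGuardedMutex and block on the
// embedded gate, and the release paths use the waiter bits packed into the
// same Count word to decide whether KeSignalGateBoostPriority must wake a
// waiter.
//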
1665
1666 FORCEINLINE
1667 VOID
1668 KiAcquireNmiListLock(OUT PKIRQL OldIrql)
1669 {
1670 KeAcquireSpinLock(&KiNmiCallbackListLock, OldIrql);
1671 }
1672
1673 FORCEINLINE
1674 VOID
1675 KiReleaseNmiListLock(IN KIRQL OldIrql)
1676 {
1677 KeReleaseSpinLock(&KiNmiCallbackListLock, OldIrql);
1678 }
1679
1680 #if defined(_M_IX86) || defined(_M_AMD64)
1681 FORCEINLINE
1682 VOID
1683 KiCpuId(
1684 PCPU_INFO CpuInfo,
1685 ULONG Function)
1686 {
1687 __cpuid((INT*)CpuInfo->AsUINT32, Function);
1688 }
1689
1690 FORCEINLINE
1691 VOID
1692 KiCpuIdEx(
1693 PCPU_INFO CpuInfo,
1694 ULONG Function,
1695 ULONG SubFunction)
1696 {
1697 __cpuidex((INT*)CpuInfo->AsUINT32, Function, SubFunction);
1698 }
1699 #endif /* _M_IX86 || _M_AMD64 */