/* Imported from reactos.git (AHCI branch) — ntoskrnl/ke/dpc.c */
1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: GPL - See COPYING in the top level directory
4 * FILE: ntoskrnl/ke/dpc.c
5 * PURPOSE: Deferred Procedure Call (DPC) Support
6 * PROGRAMMERS: Alex Ionescu (alex.ionescu@reactos.org)
7 * Philip Susi (phreak@iag.net)
8 * Eric Kohl
9 */
10
11 /* INCLUDES ******************************************************************/
12
13 #include <ntoskrnl.h>
14 #define NDEBUG
15 #include <debug.h>
16
17 /* GLOBALS *******************************************************************/
18
19 ULONG KiMaximumDpcQueueDepth = 4;
20 ULONG KiMinimumDpcRate = 3;
21 ULONG KiAdjustDpcThreshold = 20;
22 ULONG KiIdealDpcRate = 20;
23 BOOLEAN KeThreadDpcEnable;
24 FAST_MUTEX KiGenericCallDpcMutex;
25 KDPC KiTimerExpireDpc;
26 ULONG KiTimeLimitIsrMicroseconds;
27 ULONG KiDPCTimeout = 110;
28
29 /* PRIVATE FUNCTIONS *********************************************************/
30
/*
 * Debug-only consistency check of the global timer table.
 *
 * CurrentTime - Interrupt time to compare timer due times against.
 *
 * Scans every bucket of KiTimerTableListHead at HIGH_LEVEL (so the table
 * cannot change underneath the scan) and breakpoints if a timer that is
 * already due is still linked in while no expiration work appears pending
 * (no per-PRCB TimerRequest and the timer-expire DPC is not queued).
 * Compiles to a no-op on non-DBG builds.
 */
VOID
NTAPI
KiCheckTimerTable(IN ULARGE_INTEGER CurrentTime)
{
#if DBG
    ULONG i = 0;
    PLIST_ENTRY ListHead, NextEntry;
    KIRQL OldIrql;
    PKTIMER Timer;

    /* Raise IRQL to high and loop timers */
    KeRaiseIrql(HIGH_LEVEL, &OldIrql);
    do
    {
        /* Loop the current list */
        ListHead = &KiTimerTableListHead[i].Entry;
        NextEntry = ListHead->Flink;
        while (NextEntry != ListHead)
        {
            /* Get the timer and move to the next one */
            Timer = CONTAINING_RECORD(NextEntry, KTIMER, TimerListEntry);
            NextEntry = NextEntry->Flink;

            /* Check if it expired */
            if (Timer->DueTime.QuadPart <= CurrentTime.QuadPart)
            {
                /* Check if the DPC was queued, but didn't run.
                   Dpc.DpcData is read through a volatile pointer because
                   another processor may queue the DPC concurrently. */
                if (!(KeGetCurrentPrcb()->TimerRequest) &&
                    !(*((volatile PULONG*)(&KiTimerExpireDpc.DpcData))))
                {
                    /* A due timer with no expiration in flight is bad: break! */
                    DPRINT1("Invalid timer state!\n");
                    DbgBreakPoint();
                }
            }
        }

        /* Move to the next timer table bucket */
        i++;
    } while(i < TIMER_TABLE_SIZE);

    /* Lower IRQL and return */
    KeLowerIrql(OldIrql);
#endif
}
76
/*
 * Timer expiration DPC routine.
 *
 * Dpc             - The expiration DPC (unused; NULL when called directly
 *                   from KiRetireDpcList).
 * DeferredContext - Unused.
 * SystemArgument1 - Timer hand (table index) at which scanning starts.
 * SystemArgument2 - Unused.
 *
 * Walks the timer table buckets from the given hand up to the bucket for
 * the current tick, removes and signals every timer whose due time has
 * passed, wakes waiters, re-inserts periodic timers, and batches timer
 * DPCs into a local array that is delivered at DISPATCH_LEVEL with the
 * dispatcher lock dropped. Entered at DISPATCH_LEVEL; the dispatcher
 * lock is released (and IRQL restored) before returning.
 */
VOID
NTAPI
KiTimerExpiration(IN PKDPC Dpc,
                  IN PVOID DeferredContext,
                  IN PVOID SystemArgument1,
                  IN PVOID SystemArgument2)
{
    ULARGE_INTEGER SystemTime, InterruptTime;
    LARGE_INTEGER Interval;
    LONG Limit, Index, i;
    ULONG Timers, ActiveTimers, DpcCalls;
    PLIST_ENTRY ListHead, NextEntry;
    KIRQL OldIrql;
    PKTIMER Timer;
    PKDPC TimerDpc;
    ULONG Period;
    DPC_QUEUE_ENTRY DpcEntry[MAX_TIMER_DPCS];
    PKSPIN_LOCK_QUEUE LockQueue;
#ifdef CONFIG_SMP
    PKPRCB Prcb = KeGetCurrentPrcb();
#endif

    /* Disable interrupts so the three time values form a consistent snapshot */
    _disable();

    /* Query system and interrupt time */
    KeQuerySystemTime((PLARGE_INTEGER)&SystemTime);
    InterruptTime.QuadPart = KeQueryInterruptTime();
    Limit = KeTickCount.LowPart;

    /* Bring interrupts back */
    _enable();

    /* Get the index of the timer and normalize it */
    Index = PtrToLong(SystemArgument1);
    if ((Limit - Index) >= TIMER_TABLE_SIZE)
    {
        /* More than a full table of ticks elapsed: scan the whole table */
        Limit = Index + TIMER_TABLE_SIZE - 1;
    }

    /* Setup index and actual limit (both wrapped to the table size) */
    Index--;
    Limit &= (TIMER_TABLE_SIZE - 1);

    /* Setup accounting data: flush the pending DPC batch after scanning
       24 timers or expiring 4, so the dispatcher lock is not held too long
       (NOTE(review): empirically chosen batching constants) */
    DpcCalls = 0;
    Timers = 24;
    ActiveTimers = 4;

    /* Lock the Database and Raise IRQL */
    OldIrql = KiAcquireDispatcherLock();

    /* Start expiration loop */
    do
    {
        /* Get the current index */
        Index = (Index + 1) & (TIMER_TABLE_SIZE - 1);

        /* Get list pointers and loop the list */
        ListHead = &KiTimerTableListHead[Index].Entry;
        while (ListHead != ListHead->Flink)
        {
            /* Lock the timer and go to the next entry */
            LockQueue = KiAcquireTimerLock(Index);
            NextEntry = ListHead->Flink;

            /* Get the current timer and check its due time */
            Timers--;
            Timer = CONTAINING_RECORD(NextEntry, KTIMER, TimerListEntry);
            if ((NextEntry != ListHead) &&
                (Timer->DueTime.QuadPart <= InterruptTime.QuadPart))
            {
                /* It's expired, remove it */
                ActiveTimers--;
                KiRemoveEntryTimer(Timer);

                /* Make it non-inserted, unlock it, and signal it */
                Timer->Header.Inserted = FALSE;
                KiReleaseTimerLock(LockQueue);
                Timer->Header.SignalState = 1;

                /* Get the DPC and period */
                TimerDpc = Timer->Dpc;
                Period = Timer->Period;

                /* Check if there's any waiters */
                if (!IsListEmpty(&Timer->Header.WaitListHead))
                {
                    /* Check the type of event */
                    if (Timer->Header.Type == TimerNotificationObject)
                    {
                        /* Notification timer: wake the waiting threads */
                        KxUnwaitThread(&Timer->Header, IO_NO_INCREMENT);
                    }
                    else
                    {
                        /* Synchronization timer: wake one waiter, auto-reset */
                        KxUnwaitThreadForEvent((PKEVENT)Timer, IO_NO_INCREMENT);
                    }
                }

                /* Check if we have a period */
                if (Period)
                {
                    /* Relative interval in 100ns units; retry until the
                       insert succeeds (it fails if the due time already
                       passed and the timer had to be signaled again) */
                    Interval.QuadPart = Int32x32To64(Period, -10000);
                    while (!KiInsertTreeTimer(Timer, Interval));
                }

                /* Check if we have a DPC */
                if (TimerDpc)
                {
#ifdef CONFIG_SMP
                    /*
                     * If the DPC is targeted to another processor,
                     * then insert it into that processor's DPC queue
                     * instead of delivering it now.
                     * If the DPC is a threaded DPC, and the current CPU
                     * has threaded DPCs enabled (KiExecuteDpc is actively parsing DPCs),
                     * then also insert it into the DPC queue for threaded delivery,
                     * instead of doing it here.
                     */
                    if (((TimerDpc->Number >= MAXIMUM_PROCESSORS) &&
                        ((TimerDpc->Number - MAXIMUM_PROCESSORS) != Prcb->Number)) ||
                        ((TimerDpc->Type == ThreadedDpcObject) && (Prcb->ThreadDpcEnable)))
                    {
                        /* Queue it */
                        KeInsertQueueDpc(TimerDpc,
                                         UlongToPtr(SystemTime.LowPart),
                                         UlongToPtr(SystemTime.HighPart));
                    }
                    else
#endif
                    {
                        /* Batch it locally for delivery once the lock drops */
                        DpcEntry[DpcCalls].Dpc = TimerDpc;
                        DpcEntry[DpcCalls].Routine = TimerDpc->DeferredRoutine;
                        DpcEntry[DpcCalls].Context = TimerDpc->DeferredContext;
                        DpcCalls++;
                        ASSERT(DpcCalls < MAX_TIMER_DPCS);
                    }
                }

                /* Check if we hit either batching limit */
                if (!(ActiveTimers) || !(Timers))
                {
                    /* Release the dispatcher while doing DPCs */
                    KiReleaseDispatcherLock(DISPATCH_LEVEL);

                    /* Start looping all DPC Entries */
                    for (i = 0; DpcCalls; DpcCalls--, i++)
                    {
                        /* Call the DPC */
                        DpcEntry[i].Routine(DpcEntry[i].Dpc,
                                            DpcEntry[i].Context,
                                            UlongToPtr(SystemTime.LowPart),
                                            UlongToPtr(SystemTime.HighPart));
                    }

                    /* Reset accounting */
                    Timers = 24;
                    ActiveTimers = 4;

                    /* Lock the dispatcher database */
                    KiAcquireDispatcherLock();
                }
            }
            else
            {
                /* The head timer is not due: bucket scan is done. If the
                   list is non-empty, refresh the bucket's cached earliest
                   due time. */
                if (NextEntry != ListHead)
                {
                    /* Sanity check */
                    ASSERT(KiTimerTableListHead[Index].Time.QuadPart <=
                           Timer->DueTime.QuadPart);

                    /* Update the time (interrupts off: 64-bit store must not
                       be torn by the timer interrupt on 32-bit targets) */
                    _disable();
                    KiTimerTableListHead[Index].Time.QuadPart =
                        Timer->DueTime.QuadPart;
                    _enable();
                }

                /* Release the lock */
                KiReleaseTimerLock(LockQueue);

                /* Check if we've scanned all the timers we could */
                if (!Timers)
                {
                    /* Release the dispatcher while doing DPCs */
                    KiReleaseDispatcherLock(DISPATCH_LEVEL);

                    /* Start looping all DPC Entries */
                    for (i = 0; DpcCalls; DpcCalls--, i++)
                    {
                        /* Call the DPC */
                        DpcEntry[i].Routine(DpcEntry[i].Dpc,
                                            DpcEntry[i].Context,
                                            UlongToPtr(SystemTime.LowPart),
                                            UlongToPtr(SystemTime.HighPart));
                    }

                    /* Reset accounting */
                    Timers = 24;
                    ActiveTimers = 4;

                    /* Lock the dispatcher database */
                    KiAcquireDispatcherLock();
                }

                /* Done looping */
                break;
            }
        }
    } while (Index != Limit);

    /* Verify the timer table, on debug builds */
    if (KeNumberProcessors == 1) KiCheckTimerTable(InterruptTime);

    /* Check if we still have DPC entries */
    if (DpcCalls)
    {
        /* Release the dispatcher while doing DPCs */
        KiReleaseDispatcherLock(DISPATCH_LEVEL);

        /* Start looping all DPC Entries */
        for (i = 0; DpcCalls; DpcCalls--, i++)
        {
            /* Call the DPC */
            DpcEntry[i].Routine(DpcEntry[i].Dpc,
                                DpcEntry[i].Context,
                                UlongToPtr(SystemTime.LowPart),
                                UlongToPtr(SystemTime.HighPart));
        }

        /* Lower IRQL if we need to */
        if (OldIrql != DISPATCH_LEVEL) KeLowerIrql(OldIrql);
    }
    else
    {
        /* Unlock the dispatcher */
        KiReleaseDispatcherLock(OldIrql);
    }
}
322
/*
 * Expires a list of timers that have already been pulled off the timer
 * table (ExpiredListHead).
 *
 * ExpiredListHead - Head of a list of KTIMERs linked by TimerListEntry.
 * OldIrql         - IRQL to restore when releasing the dispatcher lock.
 *
 * For each timer: unlinks it, signals it, wakes any waiters, re-inserts
 * periodic timers, and batches its DPC for delivery at DISPATCH_LEVEL
 * once the dispatcher lock is dropped. Called with the dispatcher lock
 * held; the lock is released before returning.
 */
VOID
FASTCALL
KiTimerListExpire(IN PLIST_ENTRY ExpiredListHead,
                  IN KIRQL OldIrql)
{
    ULARGE_INTEGER SystemTime;
    LARGE_INTEGER Interval;
    LONG i;
    ULONG DpcCalls = 0;
    PKTIMER Timer;
    PKDPC TimerDpc;
    ULONG Period;
    DPC_QUEUE_ENTRY DpcEntry[MAX_TIMER_DPCS];
#ifdef CONFIG_SMP
    PKPRCB Prcb = KeGetCurrentPrcb();
#endif

    /* Query system time once; passed to every DPC as its arguments */
    KeQuerySystemTime((PLARGE_INTEGER)&SystemTime);

    /* Loop expired list */
    while (ExpiredListHead->Flink != ExpiredListHead)
    {
        /* Get the current timer */
        Timer = CONTAINING_RECORD(ExpiredListHead->Flink, KTIMER, TimerListEntry);

        /* Remove it */
        RemoveEntryList(&Timer->TimerListEntry);

        /* Not inserted */
        Timer->Header.Inserted = FALSE;

        /* Signal it */
        Timer->Header.SignalState = 1;

        /* Get the DPC and period */
        TimerDpc = Timer->Dpc;
        Period = Timer->Period;

        /* Check if there's any waiters */
        if (!IsListEmpty(&Timer->Header.WaitListHead))
        {
            /* Check the type of event */
            if (Timer->Header.Type == TimerNotificationObject)
            {
                /* Notification timer: wake the waiting threads */
                KxUnwaitThread(&Timer->Header, IO_NO_INCREMENT);
            }
            else
            {
                /* Synchronization timer: wake one waiter, auto-reset */
                KxUnwaitThreadForEvent((PKEVENT)Timer, IO_NO_INCREMENT);
            }
        }

        /* Check if we have a period */
        if (Period)
        {
            /* Relative interval in 100ns units; retry until the insert
               succeeds (it fails if the due time has already passed) */
            Interval.QuadPart = Int32x32To64(Period, -10000);
            while (!KiInsertTreeTimer(Timer, Interval));
        }

        /* Check if we have a DPC */
        if (TimerDpc)
        {
#ifdef CONFIG_SMP
            /*
             * If the DPC is targeted to another processor,
             * then insert it into that processor's DPC queue
             * instead of delivering it now.
             * If the DPC is a threaded DPC, and the current CPU
             * has threaded DPCs enabled (KiExecuteDpc is actively parsing DPCs),
             * then also insert it into the DPC queue for threaded delivery,
             * instead of doing it here.
             */
            if (((TimerDpc->Number >= MAXIMUM_PROCESSORS) &&
                ((TimerDpc->Number - MAXIMUM_PROCESSORS) != Prcb->Number)) ||
                ((TimerDpc->Type == ThreadedDpcObject) && (Prcb->ThreadDpcEnable)))
            {
                /* Queue it */
                KeInsertQueueDpc(TimerDpc,
                                 UlongToPtr(SystemTime.LowPart),
                                 UlongToPtr(SystemTime.HighPart));
            }
            else
#endif
            {
                /* Batch it locally for delivery once the lock drops */
                DpcEntry[DpcCalls].Dpc = TimerDpc;
                DpcEntry[DpcCalls].Routine = TimerDpc->DeferredRoutine;
                DpcEntry[DpcCalls].Context = TimerDpc->DeferredContext;
                DpcCalls++;
                ASSERT(DpcCalls < MAX_TIMER_DPCS);
            }
        }
    }

    /* Check if we still have DPC entries */
    if (DpcCalls)
    {
        /* Release the dispatcher while doing DPCs */
        KiReleaseDispatcherLock(DISPATCH_LEVEL);

        /* Start looping all DPC Entries */
        for (i = 0; DpcCalls; DpcCalls--, i++)
        {
            /* Call the DPC */
            DpcEntry[i].Routine(DpcEntry[i].Dpc,
                                DpcEntry[i].Context,
                                UlongToPtr(SystemTime.LowPart),
                                UlongToPtr(SystemTime.HighPart));
        }

        /* Lower IRQL */
        KeLowerIrql(OldIrql);
    }
    else
    {
        /* Unlock the dispatcher */
        KiReleaseDispatcherLock(OldIrql);
    }
}
446
/*
 * Handles end-of-quantum processing for the current thread.
 *
 * Signals the per-PRCB DPC event if one was requested, then, if the
 * current thread's quantum is exhausted, recharges it (or pins it to
 * MAX_QUANTUM for real-time threads of a DisableQuantum process),
 * recomputes the thread's priority, and picks a new ready thread if
 * none is on standby. If a next thread is scheduled, context-switches
 * to it. Entered at DISPATCH_LEVEL; always returns at DISPATCH_LEVEL.
 */
VOID
NTAPI
KiQuantumEnd(VOID)
{
    PKPRCB Prcb = KeGetCurrentPrcb();
    PKTHREAD NextThread, Thread = Prcb->CurrentThread;

    /* Check if a DPC Event was requested to be signaled
       (atomically consume the request flag) */
    if (InterlockedExchange(&Prcb->DpcSetEventRequest, 0))
    {
        /* Signal it */
        KeSetEvent(&Prcb->DpcEvent, 0, 0);
    }

    /* Raise to synchronization level and lock the PRCB and thread */
    KeRaiseIrqlToSynchLevel();
    KiAcquireThreadLock(Thread);
    KiAcquirePrcbLock(Prcb);

    /* Check if Quantum expired */
    if (Thread->Quantum <= 0)
    {
        /* Check if we're real-time and with quantums disabled */
        if ((Thread->Priority >= LOW_REALTIME_PRIORITY) &&
            (Thread->ApcState.Process->DisableQuantum))
        {
            /* Quantum-disabled real-time thread: give it the maximum
               quantum so it keeps running */
            Thread->Quantum = MAX_QUANTUM;
        }
        else
        {
            /* Reset the new Quantum */
            Thread->Quantum = Thread->QuantumReset;

            /* Calculate new priority (quantum-end decay) */
            Thread->Priority = KiComputeNewPriority(Thread, 1);

            /* Check if a new thread is scheduled */
            if (!Prcb->NextThread)
            {
                /* Get a new ready thread */
                NextThread = KiSelectReadyThread(Thread->Priority, Prcb);
                if (NextThread)
                {
                    /* Found one, set it on standby */
                    NextThread->State = Standby;
                    Prcb->NextThread = NextThread;
                }
            }
            else
            {
                /* Otherwise, make sure that this thread doesn't get preempted */
                Thread->Preempted = FALSE;
            }
        }
    }

    /* Release the thread lock */
    KiReleaseThreadLock(Thread);

    /* Check if there's no thread scheduled */
    if (!Prcb->NextThread)
    {
        /* Just leave now */
        KiReleasePrcbLock(Prcb);
        KeLowerIrql(DISPATCH_LEVEL);
        return;
    }

    /* Get the next thread now */
    NextThread = Prcb->NextThread;

    /* Set current thread's swap busy to true */
    KiSetThreadSwapBusy(Thread);

    /* Switch threads in PRCB */
    Prcb->NextThread = NULL;
    Prcb->CurrentThread = NextThread;

    /* Set thread to running and the switch reason to Quantum End */
    NextThread->State = Running;
    Thread->WaitReason = WrQuantumEnd;

    /* Queue it on the ready lists */
    KxQueueReadyThread(Thread, Prcb);

    /* Set wait IRQL to APC_LEVEL */
    Thread->WaitIrql = APC_LEVEL;

    /* Swap threads (returns here when this thread is scheduled again) */
    KiSwapContext(APC_LEVEL, Thread);

    /* Lower IRQL back to DISPATCH_LEVEL */
    KeLowerIrql(DISPATCH_LEVEL);
}
542
/*
 * Drains the given processor's normal DPC queue.
 *
 * Prcb - PRCB of the processor whose queue is drained (the current one).
 *
 * Handles a pending timer-expiration request first, then dequeues and
 * runs each normal DPC with interrupts enabled, re-disabling them while
 * manipulating the queue. On SMP, also processes the deferred ready
 * list. Loops until the queue is observed empty.
 * NOTE(review): callers appear to invoke this with interrupts disabled
 * at DISPATCH_LEVEL — confirm against the dispatch-interrupt handler.
 */
VOID
FASTCALL
KiRetireDpcList(IN PKPRCB Prcb)
{
    PKDPC_DATA DpcData;
    PLIST_ENTRY ListHead, DpcEntry;
    PKDPC Dpc;
    PKDEFERRED_ROUTINE DeferredRoutine;
    PVOID DeferredContext, SystemArgument1, SystemArgument2;
    ULONG_PTR TimerHand;
#ifdef CONFIG_SMP
    KIRQL OldIrql;
#endif

    /* Get data and list variables before starting anything else */
    DpcData = &Prcb->DpcData[DPC_NORMAL];
    ListHead = &DpcData->DpcListHead;

    /* Main outer loop */
    do
    {
        /* Set us as active */
        Prcb->DpcRoutineActive = TRUE;

        /* Check if this is a timer expiration request */
        if (Prcb->TimerRequest)
        {
            /* It is, get the timer hand and disable timer request */
            TimerHand = Prcb->TimerHand;
            Prcb->TimerRequest = 0;

            /* Expire timers with interrupts enabled */
            _enable();
            KiTimerExpiration(NULL, NULL, (PVOID)TimerHand, NULL);
            _disable();
        }

        /* Loop while we have entries in the queue */
        while (DpcData->DpcQueueDepth != 0)
        {
            /* Lock the DPC data and get the DPC entry */
            KeAcquireSpinLockAtDpcLevel(&DpcData->DpcLock);
            DpcEntry = ListHead->Flink;

            /* Make sure we have an entry (the depth was read unlocked,
               so the queue may have been drained in the meantime) */
            if (DpcEntry != ListHead)
            {
                /* Remove the DPC from the list */
                RemoveEntryList(DpcEntry);
                Dpc = CONTAINING_RECORD(DpcEntry, KDPC, DpcListEntry);

                /* Clear its DPC data and save its parameters;
                   NULL DpcData marks the DPC as re-queueable */
                Dpc->DpcData = NULL;
                DeferredRoutine = Dpc->DeferredRoutine;
                DeferredContext = Dpc->DeferredContext;
                SystemArgument1 = Dpc->SystemArgument1;
                SystemArgument2 = Dpc->SystemArgument2;

                /* Decrease the queue depth */
                DpcData->DpcQueueDepth--;

#if DBG
                /* Clear DPC Time (watchdog counter for stuck DPCs) */
                Prcb->DebugDpcTime = 0;
#endif

                /* Release the lock */
                KeReleaseSpinLockFromDpcLevel(&DpcData->DpcLock);

                /* Re-enable interrupts */
                _enable();

                /* Call the DPC */
                DeferredRoutine(Dpc,
                                DeferredContext,
                                SystemArgument1,
                                SystemArgument2);
                ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);

                /* Disable interrupts and keep looping */
                _disable();
            }
            else
            {
                /* The queue should be flushed now */
                ASSERT(DpcData->DpcQueueDepth == 0);

                /* Release DPC Lock */
                KeReleaseSpinLockFromDpcLevel(&DpcData->DpcLock);
            }
        }

        /* Clear DPC Flags */
        Prcb->DpcRoutineActive = FALSE;
        Prcb->DpcInterruptRequested = FALSE;

#ifdef CONFIG_SMP
        /* Check if we have deferred threads */
        if (Prcb->DeferredReadyListHead.Next)
        {

            /* Re-enable interrupts and raise to synch */
            _enable();
            OldIrql = KeRaiseIrqlToSynchLevel();

            /* Process deferred threads */
            KiProcessDeferredReadyList(Prcb);

            /* Lower IRQL back and disable interrupts */
            KeLowerIrql(OldIrql);
            _disable();
        }
#endif
    } while (DpcData->DpcQueueDepth != 0);
}
658
659 VOID
660 NTAPI
661 KiInitializeDpc(IN PKDPC Dpc,
662 IN PKDEFERRED_ROUTINE DeferredRoutine,
663 IN PVOID DeferredContext,
664 IN KOBJECTS Type)
665 {
666 /* Setup the DPC Object */
667 Dpc->Type = Type;
668 Dpc->Number = 0;
669 Dpc->Importance= MediumImportance;
670 Dpc->DeferredRoutine = DeferredRoutine;
671 Dpc->DeferredContext = DeferredContext;
672 Dpc->DpcData = NULL;
673 }
674
675 /* PUBLIC FUNCTIONS **********************************************************/
676
677 /*
678 * @implemented
679 */
680 VOID
681 NTAPI
682 KeInitializeThreadedDpc(IN PKDPC Dpc,
683 IN PKDEFERRED_ROUTINE DeferredRoutine,
684 IN PVOID DeferredContext)
685 {
686 /* Call the internal routine */
687 KiInitializeDpc(Dpc, DeferredRoutine, DeferredContext, ThreadedDpcObject);
688 }
689
690 /*
691 * @implemented
692 */
693 VOID
694 NTAPI
695 KeInitializeDpc(IN PKDPC Dpc,
696 IN PKDEFERRED_ROUTINE DeferredRoutine,
697 IN PVOID DeferredContext)
698 {
699 /* Call the internal routine */
700 KiInitializeDpc(Dpc, DeferredRoutine, DeferredContext, DpcObject);
701 }
702
/*
 * @implemented
 *
 * Queues a DPC on its target processor's normal or threaded DPC queue.
 * Returns TRUE if the DPC was queued by this call, FALSE if it was
 * already queued (its DpcData was non-NULL). May request a
 * DISPATCH_LEVEL software interrupt (same CPU) or an IPI (other CPU)
 * so the queue gets drained.
 */
BOOLEAN
NTAPI
KeInsertQueueDpc(IN PKDPC Dpc,
                 IN PVOID SystemArgument1,
                 IN PVOID SystemArgument2)
{
    KIRQL OldIrql;
    PKPRCB Prcb, CurrentPrcb;
    ULONG Cpu;
    PKDPC_DATA DpcData;
    BOOLEAN DpcConfigured = FALSE, DpcInserted = FALSE;
    ASSERT_DPC(Dpc);

    /* Check IRQL and Raise it to HIGH_LEVEL */
    KeRaiseIrql(HIGH_LEVEL, &OldIrql);
    CurrentPrcb = KeGetCurrentPrcb();

    /* A Number >= MAXIMUM_PROCESSORS means the DPC has an explicit
       target CPU (see KeSetTargetProcessorDpc) */
    if (Dpc->Number >= MAXIMUM_PROCESSORS)
    {
        /* Subtract the bias and get that processor's PRCB */
        Cpu = Dpc->Number - MAXIMUM_PROCESSORS;
        Prcb = KiProcessorBlock[Cpu];
    }
    else
    {
        /* Untargeted: use the current processor */
        Prcb = CurrentPrcb;
        Cpu = Prcb->Number;
    }

    /* ROS Sanity Check */
    ASSERT(Prcb == CurrentPrcb);

    /* Check if this is a threaded DPC and threaded DPCs are enabled */
    if ((Dpc->Type == ThreadedDpcObject) && (Prcb->ThreadDpcEnable))
    {
        /* Then use the threaded data */
        DpcData = &Prcb->DpcData[DPC_THREADED];
    }
    else
    {
        /* Otherwise, use the regular data */
        DpcData = &Prcb->DpcData[DPC_NORMAL];
    }

    /* Acquire the DPC lock */
    KiAcquireSpinLock(&DpcData->DpcLock);

    /* Atomically claim the DPC: only succeeds if it is not queued yet
       (DpcData was NULL) */
    if (!InterlockedCompareExchangePointer(&Dpc->DpcData, DpcData, NULL))
    {
        /* Now we can play with the DPC safely */
        Dpc->SystemArgument1 = SystemArgument1;
        Dpc->SystemArgument2 = SystemArgument2;
        DpcData->DpcQueueDepth++;
        DpcData->DpcCount++;
        DpcConfigured = TRUE;

        /* Check if this is a high importance DPC */
        if (Dpc->Importance == HighImportance)
        {
            /* Pre-empt other DPCs: insert at the head of the queue */
            InsertHeadList(&DpcData->DpcListHead, &Dpc->DpcListEntry);
        }
        else
        {
            /* Add it at the end */
            InsertTailList(&DpcData->DpcListHead, &Dpc->DpcListEntry);
        }

        /* Check if this is the DPC on the threaded list */
        if (&Prcb->DpcData[DPC_THREADED] == DpcData)
        {
            /* Make sure a threaded DPC isn't already active */
            if (!(Prcb->DpcThreadActive) && !(Prcb->DpcThreadRequested))
            {
                /* FIXME: Setup Threaded DPC */
                UNIMPLEMENTED_FATAL("Threaded DPC not supported\n");
            }
        }
        else
        {
            /* Make sure a DPC isn't executing already */
            if (!(Prcb->DpcRoutineActive) && !(Prcb->DpcInterruptRequested))
            {
                /* Check if this is the same CPU */
                if (Prcb != CurrentPrcb)
                {
                    /*
                     * Check if the DPC is of high importance or above the
                     * maximum depth. If it is, then make sure that the CPU
                     * isn't idle, or that it's sleeping.
                     */
                    if (((Dpc->Importance == HighImportance) ||
                        (DpcData->DpcQueueDepth >=
                         Prcb->MaximumDpcQueueDepth)) &&
                        (!(AFFINITY_MASK(Cpu) & KiIdleSummary) ||
                         (Prcb->Sleeping)))
                    {
                        /* Set interrupt requested */
                        Prcb->DpcInterruptRequested = TRUE;

                        /* Set DPC inserted */
                        DpcInserted = TRUE;
                    }
                }
                else
                {
                    /* Check if the DPC is of anything but low importance */
                    if ((Dpc->Importance != LowImportance) ||
                        (DpcData->DpcQueueDepth >=
                         Prcb->MaximumDpcQueueDepth) ||
                        (Prcb->DpcRequestRate < Prcb->MinimumDpcRate))
                    {
                        /* Set interrupt requested */
                        Prcb->DpcInterruptRequested = TRUE;

                        /* Set DPC inserted */
                        DpcInserted = TRUE;
                    }
                }
            }
        }
    }

    /* Release the lock */
    KiReleaseSpinLock(&DpcData->DpcLock);

    /* Check if the DPC was inserted */
    if (DpcInserted)
    {
        /* Check if this was SMP */
        if (Prcb != CurrentPrcb)
        {
            /* It was, request an IPI */
            KiIpiSend(AFFINITY_MASK(Cpu), IPI_DPC);
        }
        else
        {
            /* It wasn't, request an interrupt from HAL */
            HalRequestSoftwareInterrupt(DISPATCH_LEVEL);
        }
    }

    /* Lower IRQL */
    KeLowerIrql(OldIrql);
    return DpcConfigured;
}
855
/*
 * @implemented
 *
 * Removes a DPC from the queue it is inserted in, if any.
 * Returns TRUE if the DPC was found queued (DpcData non-NULL when
 * sampled), FALSE otherwise. Runs with interrupts disabled so the
 * check-and-remove is not interrupted on this processor.
 */
BOOLEAN
NTAPI
KeRemoveQueueDpc(IN PKDPC Dpc)
{
    PKDPC_DATA DpcData;
    BOOLEAN Enable;
    ASSERT_DPC(Dpc);

    /* Disable interrupts */
    Enable = KeDisableInterrupts();

    /* Sample the DPC's queue pointer; non-NULL means it is queued */
    DpcData = Dpc->DpcData;
    if (DpcData)
    {
        /* Acquire the DPC lock */
        KiAcquireSpinLock(&DpcData->DpcLock);

        /* Make sure that the data didn't change: another processor may
           have dequeued (and possibly re-queued) the DPC meanwhile */
        if (DpcData == Dpc->DpcData)
        {
            /* Remove the DPC */
            DpcData->DpcQueueDepth--;
            RemoveEntryList(&Dpc->DpcListEntry);
            Dpc->DpcData = NULL;
        }

        /* Release the lock */
        KiReleaseSpinLock(&DpcData->DpcLock);
    }

    /* Re-enable interrupts */
    if (Enable) _enable();

    /* Return if the DPC was in the queue or not (based on the initial
       sample, even if it was concurrently dequeued) */
    return DpcData ? TRUE : FALSE;
}
896
897 /*
898 * @implemented
899 */
900 VOID
901 NTAPI
902 KeFlushQueuedDpcs(VOID)
903 {
904 PKPRCB CurrentPrcb = KeGetCurrentPrcb();
905 PAGED_CODE();
906
907 /* Check if this is an UP machine */
908 if (KeActiveProcessors == 1)
909 {
910 /* Check if there are DPCs on either queues */
911 if ((CurrentPrcb->DpcData[DPC_NORMAL].DpcQueueDepth > 0) ||
912 (CurrentPrcb->DpcData[DPC_THREADED].DpcQueueDepth > 0))
913 {
914 /* Request an interrupt */
915 HalRequestSoftwareInterrupt(DISPATCH_LEVEL);
916 }
917 }
918 else
919 {
920 /* FIXME: SMP support required */
921 ASSERT(FALSE);
922 }
923 }
924
925 /*
926 * @implemented
927 */
928 BOOLEAN
929 NTAPI
930 KeIsExecutingDpc(VOID)
931 {
932 /* Return if the Dpc Routine is active */
933 return KeGetCurrentPrcb()->DpcRoutineActive;
934 }
935
936 /*
937 * @implemented
938 */
939 VOID
940 NTAPI
941 KeSetImportanceDpc (IN PKDPC Dpc,
942 IN KDPC_IMPORTANCE Importance)
943 {
944 /* Set the DPC Importance */
945 ASSERT_DPC(Dpc);
946 Dpc->Importance = Importance;
947 }
948
949 /*
950 * @implemented
951 */
952 VOID
953 NTAPI
954 KeSetTargetProcessorDpc(IN PKDPC Dpc,
955 IN CCHAR Number)
956 {
957 /* Set a target CPU */
958 ASSERT_DPC(Dpc);
959 Dpc->Number = Number + MAXIMUM_PROCESSORS;
960 }
961
962 /*
963 * @implemented
964 */
965 VOID
966 NTAPI
967 KeGenericCallDpc(IN PKDEFERRED_ROUTINE Routine,
968 IN PVOID Context)
969 {
970 ULONG Barrier = KeNumberProcessors;
971 KIRQL OldIrql;
972 DEFERRED_REVERSE_BARRIER ReverseBarrier;
973 ASSERT(KeGetCurrentIrql () < DISPATCH_LEVEL);
974
975 //
976 // The barrier is the number of processors, each processor will decrement it
977 // by one, so when all processors have run the DPC, the barrier reaches zero
978 //
979 ReverseBarrier.Barrier = Barrier;
980 ReverseBarrier.TotalProcessors = Barrier;
981
982 //
983 // But we don't need the barrier on UP, since we can simply call the routine
984 // directly while at DISPATCH_LEVEL and not worry about anything else
985 //
986 KeRaiseIrql(DISPATCH_LEVEL, &OldIrql);
987 Routine(&KeGetCurrentPrcb()->CallDpc, Context, &Barrier, &ReverseBarrier);
988 KeLowerIrql(OldIrql);
989 }
990
991 /*
992 * @implemented
993 */
994 VOID
995 NTAPI
996 KeSignalCallDpcDone(IN PVOID SystemArgument1)
997 {
998 //
999 // Decrement the barrier, which is actually the processor count
1000 //
1001 InterlockedDecrement((PLONG)SystemArgument1);
1002 }
1003
1004 /*
1005 * @implemented
1006 */
1007 BOOLEAN
1008 NTAPI
1009 KeSignalCallDpcSynchronize(IN PVOID SystemArgument2)
1010 {
1011 //
1012 // There is nothing to do on UP systems -- the processor calling this wins
1013 //
1014 UNREFERENCED_PARAMETER(SystemArgument2);
1015 return TRUE;
1016 }
1017
1018 /* EOF */