/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/ke/i386/ctxswitch.S
 * PURPOSE:         Thread Context Switching
 *
 * PROGRAMMERS:     Alex Ionescu (alex@relsoft.net)
 *                  Gregor Anich (FPU Code)
 */

/* INCLUDES ******************************************************************/

#include <ndk/asm.h>
.intel_syntax noprefix

#define Ready 1
#define Running 2
#define WrDispatchInt 0x1F

/* FUNCTIONS ****************************************************************/

/*++
 * KiSwapContextInternal
 *
 * The KiSwapContextInternal routine switches context to another thread.
 *
 * Params:
 *     ESI - Pointer to the KTHREAD to which the caller wishes to switch.
 *     EDI - Pointer to the KTHREAD from which the caller wishes to switch.
 *
 * Returns:
 *     None.
 *
 * Remarks:
 *     Absolutely all registers except ESP can be trampled here for maximum
 *     code flexibility.
 *
 *--*/
.globl @KiSwapContextInternal@0
.func @KiSwapContextInternal@0, @KiSwapContextInternal@0
@KiSwapContextInternal@0:

    /* Save the IRQL */
    push ecx

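    /* On SMP the target thread may still be executing on another processor;
       its swap-busy flag is cleared only once that processor has finished
       saving its context, so spin here until it is released. */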
#ifdef CONFIG_SMP
GetSwapLock:
    /* Acquire the swap lock */
    cmp byte ptr [esi+KTHREAD_SWAP_BUSY], 0
    jz NotBusy
    pause
    jmp GetSwapLock
NotBusy:
#endif
    /* Increase context switches (use ES for lazy load) */
    inc dword ptr es:[ebx+KPCR_CONTEXT_SWITCHES]

    /* Save the Exception list */
    push [ebx+KPCR_EXCEPTION_LIST]

    /* Check for WMI */
    cmp dword ptr [ebx+KPCR_PERF_GLOBAL_GROUP_MASK], 0
    jnz WmiTrace

AfterTrace:
#ifdef CONFIG_SMP
#if DBG
    /* Assert that we're on the right CPU */
    mov cl, [esi+KTHREAD_NEXT_PROCESSOR]
    cmp cl, [ebx+KPCR_PROCESSOR_NUMBER]
    jnz WrongCpu
#endif
#endif

    /* Get CR0 and save it */
    mov ebp, cr0
    mov edx, ebp

#ifdef CONFIG_SMP
    /* Check NPX State */
    cmp byte ptr [edi+KTHREAD_NPX_STATE], NPX_STATE_LOADED
    jz NpxLoaded
SetStack:
#endif

    /* Set new stack */
    mov [edi+KTHREAD_KERNEL_STACK], esp

    /* We will be checking the NPX state, so disable interrupts now */
    mov eax, [esi+KTHREAD_INITIAL_STACK]
    cli

    /* Get the NPX State */
    movzx ecx, byte ptr [esi+KTHREAD_NPX_STATE]

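    /* The NPX save area sits just below the new thread's initial stack (eax);
       the block below merges the thread's saved Cr0NpxState and NPX state
       bits into the current CR0 (with MP/EM/TS cleared) and only reloads CR0
       if the result differs. */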
    /* Clear the other bits, merge in CR0, merge in FPU CR0 bits and compare */
    and edx, ~(CR0_MP + CR0_EM + CR0_TS)
    or ecx, edx
    or ecx, [eax - (NPX_FRAME_LENGTH - FN_CR0_NPX_STATE)]
    cmp ebp, ecx
    jnz NewCr0

StackOk:
    /* Enable interrupts and set the current stack */
    sti
    mov esp, [esi+KTHREAD_KERNEL_STACK]

    /* Check if address space switch is needed */
    mov ebp, [esi+KTHREAD_APCSTATE_PROCESS]
    mov eax, [edi+KTHREAD_APCSTATE_PROCESS]
    cmp ebp, eax
    jz SameProcess

#ifdef CONFIG_SMP
    /* Get the active processors and XOR with the process' */
    mov ecx, [ebx+KPCR_SET_MEMBER_COPY]
    lock xor [ebp+KPROCESS_ACTIVE_PROCESSORS], ecx
    lock xor [eax+KPROCESS_ACTIVE_PROCESSORS], ecx

    /* Assert change went ok */
#if DBG
    test [ebp+KPROCESS_ACTIVE_PROCESSORS], ecx
    jz WrongActiveCpu
    test [eax+KPROCESS_ACTIVE_PROCESSORS], ecx
    jnz WrongActiveCpu
#endif
#endif

    /* Check if we need an LDT */
    mov ecx, [ebp+KPROCESS_LDT_DESCRIPTOR0]
    or ecx, [eax+KPROCESS_LDT_DESCRIPTOR0]
    jnz LdtReload

UpdateCr3:
    /* Switch address space */
    mov eax, [ebp+KPROCESS_DIRECTORY_TABLE_BASE]
    mov cr3, eax

SameProcess:

#ifdef CONFIG_SMP
    /* Release swap lock */
    and byte ptr [edi+KTHREAD_SWAP_BUSY], 0
#endif

    /* Clear gs */
    xor eax, eax
    mov gs, ax

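    /* 0x3A, 0x3C and 0x3F below are the base 15:0, 23:16 and 31:24 fields of
       the 8-byte user TEB descriptor (KGDT_R3_TEB) at GDT offset 0x38. */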
    /* Set the TEB */
    mov eax, [esi+KTHREAD_TEB]
    mov [ebx+KPCR_TEB], eax
    mov ecx, [ebx+KPCR_GDT]
    mov [ecx+0x3A], ax
    shr eax, 16
    mov [ecx+0x3C], al
    mov [ecx+0x3F], ah

    /* Get stack pointer */
    mov eax, [esi+KTHREAD_INITIAL_STACK]

    /* Make space for the NPX Frame */
    sub eax, NPX_FRAME_LENGTH

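    /* If the new thread is not a V86 thread, the V86 segment fields at the
       top of the trap frame are never pushed by the CPU, so Esp0 can be
       biased down past them and hardware-pushed frames still line up with
       the KTRAP_FRAME layout. */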
    /* Check if this isn't V86 Mode, so we can bias the Esp0 */
    test dword ptr [eax - KTRAP_FRAME_SIZE + KTRAP_FRAME_EFLAGS], EFLAGS_V86_MASK
    jnz NoAdjust

    /* Bias esp */
    sub eax, KTRAP_FRAME_V86_GS - KTRAP_FRAME_SS

NoAdjust:

    /* Set new ESP0 */
    mov ecx, [ebx+KPCR_TSS]
    mov [ecx+KTSS_ESP0], eax

    /* Set current IOPM offset in the TSS */
    mov ax, [ebp+KPROCESS_IOPM_OFFSET]
    mov [ecx+KTSS_IOMAPBASE], ax

    /* Increase context switches */
    inc dword ptr [esi+KTHREAD_CONTEXT_SWITCHES]

    /* Restore exception list */
    pop [ebx+KPCR_EXCEPTION_LIST]

    /* Restore IRQL */
    pop ecx

    /* DPC shouldn't be active */
    cmp byte ptr [ebx+KPCR_PRCB_DPC_ROUTINE_ACTIVE], 0
    jnz BugCheckDpc

    /* Check if kernel APCs are pending */
    cmp byte ptr [esi+KTHREAD_PENDING_KERNEL_APC], 0
    jnz CheckApc

    /* No APCs, return */
    xor eax, eax
    ret

CheckApc:

    /* Check if they're disabled */
    cmp word ptr [esi+KTHREAD_SPECIAL_APC_DISABLE], 0
    jnz ApcReturn
    test cl, cl
    jz ApcReturn

    /* Request APC Delivery */
    mov cl, APC_LEVEL
    call @HalRequestSoftwareInterrupt@4
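    /* Force a non-zero result so the setz below returns FALSE; the pending
       APC will be delivered by the software interrupt just requested. */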
    or eax, esp

ApcReturn:

    /* Return with APC pending */
    setz al
    ret

LdtReload:
    /* Check if it's empty */
    mov eax, [ebp+KPROCESS_LDT_DESCRIPTOR0]
    test eax, eax
    jz LoadLdt

    /* Write the LDT Selector */
    mov ecx, [ebx+KPCR_GDT]
    mov [ecx+KGDT_LDT], eax
    mov eax, [ebp+KPROCESS_LDT_DESCRIPTOR1]
    mov [ecx+KGDT_LDT+4], eax

    /* Write the INT21 handler */
    mov ecx, [ebx+KPCR_IDT]
    mov eax, [ebp+KPROCESS_INT21_DESCRIPTOR0]
    mov [ecx+0x108], eax
    mov eax, [ebp+KPROCESS_INT21_DESCRIPTOR1]
    mov [ecx+0x10C], eax

    /* Save LDT Selector */
    mov eax, KGDT_LDT

LoadLdt:
    lldt ax
    jmp UpdateCr3

NewCr0:

#if DBG
    /* Assert NPX State */
    test byte ptr [esi+KTHREAD_NPX_STATE], ~(NPX_STATE_NOT_LOADED)
    jnz InvalidNpx
    test dword ptr [eax - (NPX_FRAME_LENGTH - FN_CR0_NPX_STATE)], ~(CR0_PE + CR0_MP + CR0_EM + CR0_TS)
    jnz InvalidNpx
#endif

    /* Update CR0 */
    mov cr0, ecx
    jmp StackOk

#ifdef CONFIG_SMP
NpxLoaded:

    /* Mask out FPU flags */
    and edx, ~(CR0_MP + CR0_EM + CR0_TS)

    /* Get the NPX Frame */
    mov ecx, [edi+KTHREAD_INITIAL_STACK]
    sub ecx, NPX_FRAME_LENGTH

    /* Check if we have a new CR0 */
    cmp ebp, edx
    jz Cr0Equal

    /* We do, update it */
    mov cr0, edx
    mov ebp, edx

Cr0Equal:

    /* Save the NPX State */
    fxsave [ecx]
    mov byte ptr [edi+KTHREAD_NPX_STATE], NPX_STATE_NOT_LOADED

    /* Clear the NPX Thread */
    mov dword ptr [ebx+KPCR_NPX_THREAD], 0

    /* Jump back */
    jmp SetStack
#endif

WmiTrace:

    /* No WMI support yet */
    int 3

    /* Jump back */
    jmp AfterTrace

BugCheckDpc:

    /* Bugcheck the machine, printing out the threads being switched */
    mov eax, [edi+KTHREAD_INITIAL_STACK]
    push 0
    push eax
    push esi
    push edi
    push ATTEMPTED_SWITCH_FROM_DPC
    call _KeBugCheckEx@20

#if DBG
InvalidNpx:
    int 3
WrongActiveCpu:
    int 3
WrongCpu:
    int 3
#endif
.endfunc

/*++
 * KiSwapContext
 *
 * The KiSwapContext routine switches context to another thread.
 *
 * Params:
 *     CurrentThread - Pointer to the currently running KTHREAD (in ECX).
 *     TargetThread - Pointer to the KTHREAD to which the caller wishes to
 *                    switch (in EDX).
 *
 * Returns:
 *     The WaitStatus of the Target Thread.
 *
 * Remarks:
 *     This is a wrapper around KiSwapContextInternal which will save all the
 *     non-volatile registers so that the Internal function can use all of
 *     them. It will also save the old current thread and set the new one.
 *
 *     The calling thread does not return after KiSwapContextInternal until
 *     another thread switches to IT.
 *
 *--*/
.globl @KiSwapContext@8
.func @KiSwapContext@8, @KiSwapContext@8
@KiSwapContext@8:

    /* Save 4 registers */
    sub esp, 4 * 4

    /* Save all the non-volatile ones */
    mov [esp+12], ebx
    mov [esp+8], esi
    mov [esp+4], edi
    mov [esp+0], ebp

    /* Get the current KPCR */
    mov ebx, fs:[KPCR_SELF]

    /* Get the Current Thread */
    mov edi, ecx

    /* Get the New Thread */
    mov esi, edx

    /* Get the wait IRQL */
    movzx ecx, byte ptr [edi+KTHREAD_WAIT_IRQL]

    /* Do the swap with the registers correctly setup */
    call @KiSwapContextInternal@0

    /* Return the registers */
    mov ebp, [esp+0]
    mov edi, [esp+4]
    mov esi, [esp+8]
    mov ebx, [esp+12]

    /* Clean stack */
    add esp, 4 * 4
    ret
.endfunc

/* DPC INTERRUPT HANDLER ******************************************************/

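/*
 * KiDispatchInterrupt is the DISPATCH_LEVEL software interrupt handler: it
 * drains the DPC queue on the per-processor DPC stack, handles quantum end,
 * and switches to the next ready thread if the scheduler has queued one.
 */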
.globl _KiDispatchInterrupt@0
.func KiDispatchInterrupt@0
_KiDispatchInterrupt@0:

    /* Preserve EBX */
    push ebx

    /* Get the PCR and disable interrupts */
    mov ebx, PCR[KPCR_SELF]
    cli

    /* Check if we have to deliver DPCs, timers, or deferred threads */
    mov eax, [ebx+KPCR_PRCB_DPC_QUEUE_DEPTH]
    or eax, [ebx+KPCR_PRCB_TIMER_REQUEST]
    or eax, [ebx+KPCR_PRCB_DEFERRED_READY_LIST_HEAD]
    jz CheckQuantum

    /* Save EBP and the exception list, then clear the list */
    push ebp
    push dword ptr [ebx+KPCR_EXCEPTION_LIST]
    mov dword ptr [ebx+KPCR_EXCEPTION_LIST], -1

    /* Save the stack pointer and switch to the DPC Stack */
    mov edx, esp
    mov esp, [ebx+KPCR_PRCB_DPC_STACK]
    push edx

    /* Deliver DPCs */
    mov ecx, [ebx+KPCR_PRCB]
    call @KiRetireDpcList@4

    /* Restore the stack pointer, exception list and EBP */
    pop esp
    pop dword ptr [ebx+KPCR_EXCEPTION_LIST]
    pop ebp

CheckQuantum:

    /* Re-enable interrupts */
    sti

    /* Check if we have quantum end */
    cmp byte ptr [ebx+KPCR_PRCB_QUANTUM_END], 0
    jnz QuantumEnd

    /* Check if we have a thread to swap to */
    cmp byte ptr [ebx+KPCR_PRCB_NEXT_THREAD], 0
    je Return

    /* Make space on the stack to save registers */
    sub esp, 3 * 4
    mov [esp+8], esi
    mov [esp+4], edi
    mov [esp+0], ebp

    /* Get the current thread */
    mov edi, [ebx+KPCR_CURRENT_THREAD]

#ifdef CONFIG_SMP
    /* Raise to synch level */
    call _KeRaiseIrqlToSynchLevel@0

    /* Set context swap busy */
    mov byte ptr [edi+KTHREAD_SWAP_BUSY], 1

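    /* lock bts sets CF to the previous value of bit 0: if it was clear (jnb)
       the lock was acquired uncontended, otherwise fall back to the spinning
       acquire below. */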
    /* Acquire the PRCB Lock */
    lock bts dword ptr [ebx+KPCR_PRCB_PRCB_LOCK], 0
    jnb GetNext
    lea ecx, [ebx+KPCR_PRCB_PRCB_LOCK]
    call @KefAcquireSpinLockAtDpcLevel@4
#endif

GetNext:
    /* Get the next thread and clear it */
    mov esi, [ebx+KPCR_PRCB_NEXT_THREAD]
    and dword ptr [ebx+KPCR_PRCB_NEXT_THREAD], 0

    /* Set us as the current running thread */
    mov [ebx+KPCR_CURRENT_THREAD], esi
    mov byte ptr [esi+KTHREAD_STATE_], Running
    mov byte ptr [edi+KTHREAD_WAIT_REASON], WrDispatchInt

    /* Put thread in ECX and get the PRCB in EDX */
    mov ecx, edi
    lea edx, [ebx+KPCR_PRCB_DATA]
    call @KiQueueReadyThread@8

    /* Set APC_LEVEL and do the swap */
    mov cl, APC_LEVEL
    call @KiSwapContextInternal@0

#ifdef CONFIG_SMP
    /* Lower IRQL back to dispatch */
    mov cl, DISPATCH_LEVEL
    call @KfLowerIrql@4
#endif

    /* Restore registers */
    mov ebp, [esp+0]
    mov edi, [esp+4]
    mov esi, [esp+8]
    add esp, 3 * 4

Return:
    /* All done */
    pop ebx
    ret

QuantumEnd:
    /* Disable quantum end and process it */
    mov byte ptr [ebx+KPCR_PRCB_QUANTUM_END], 0
    call _KiQuantumEnd@0
    pop ebx
    ret
.endfunc

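/*
 * KiIdleLoop is the per-processor idle loop: with interrupts briefly enabled
 * it polls for pending DPCs, timers and deferred-ready threads, drains them,
 * and context-switches to the next thread once the scheduler selects one;
 * otherwise it invokes the PRCB's registered power idle function.
 */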
.globl @KiIdleLoop@0
.func @KiIdleLoop@0, @KiIdleLoop@0
@KiIdleLoop@0:

    /* Set EBX */
    mov ebx, fs:[KPCR_SELF]

    /* Jump into mainline code */
    jmp MainLoop

CpuIdle:
    /* Call the CPU's idle function */
    lea ecx, [ebx+KPCR_PRCB_POWER_STATE_IDLE_FUNCTION]
    call [ecx]

MainLoop:
    /* Briefly enable interrupts so pending interrupts can be delivered
       (STI takes effect only after the following instruction) */
    sti
    nop
    nop
    cli

    /* Check if we have to deliver DPCs, timers, or deferred threads */
    mov eax, [ebx+KPCR_PRCB_DPC_QUEUE_DEPTH]
    or eax, [ebx+KPCR_PRCB_TIMER_REQUEST]
#ifdef CONFIG_SMP
    or eax, [ebx+KPCR_PRCB_DEFERRED_READY_LIST_HEAD]
#endif
    jz CheckSchedule

    mov cl, DISPATCH_LEVEL
    call @HalClearSoftwareInterrupt@4

    /* Handle the above */
    lea ecx, [ebx+KPCR_PRCB_DATA]
    call @KiRetireDpcList@4

CheckSchedule:
    /* Check if a next thread is queued */
    cmp dword ptr [ebx+KPCR_PRCB_NEXT_THREAD], 0
#ifdef CONFIG_SMP
    jz NoNextThread
#else
    jz CpuIdle
#endif

#ifdef CONFIG_SMP
    /* There is, raise IRQL to synch level */
    call _KeRaiseIrqlToSynchLevel@0
#endif
    sti

    /* Set the current thread to ready */
    mov edi, [ebx+KPCR_CURRENT_THREAD]
#ifdef CONFIG_SMP
    mov byte ptr [edi+KTHREAD_SWAP_BUSY], 1

    /* Acquire the PRCB Lock */
    lock bts dword ptr [ebx+KPCR_PRCB_PRCB_LOCK], 0
    jnb CheckNext
    lea ecx, [ebx+KPCR_PRCB_PRCB_LOCK]
    call @KefAcquireSpinLockAtDpcLevel@4
#endif

CheckNext:
    /* Check if the next thread is the current */
    mov esi, [ebx+KPCR_PRCB_NEXT_THREAD]
#ifdef CONFIG_SMP
    cmp esi, edi
    jz SameThread
#endif

    /* Clear the next thread and set this one instead */
    and dword ptr [ebx+KPCR_PRCB_NEXT_THREAD], 0
    mov [ebx+KPCR_CURRENT_THREAD], esi

    /* Set the thread as running */
    mov byte ptr [esi+KTHREAD_STATE_], Running

#ifdef CONFIG_SMP
    /* Disable the idle scheduler and release the PRCB lock */
    and byte ptr [ebx+KPCR_PRCB_IDLE_SCHEDULE], 0
    and dword ptr [ebx+KPCR_PRCB_PRCB_LOCK], 0
#endif

SwapContext:
    /* ReactOS Mm Hack */
    mov ecx, esi
    call @MiSyncForContextSwitch@4

    /* Swap context at APC_LEVEL */
    mov ecx, APC_LEVEL
    call @KiSwapContextInternal@0

#ifdef CONFIG_SMP
    /* Lower to DPC level */
    mov ecx, DISPATCH_LEVEL
    call @KfLowerIrql@4
#endif
    jmp MainLoop

#ifdef CONFIG_SMP
SameThread:
    /* Clear the next thread, and put the thread as ready after lock release */
    and dword ptr [ebx+KPCR_PRCB_NEXT_THREAD], 0
    and dword ptr [ebx+KPCR_PRCB_PRCB_LOCK], 0
    and byte ptr [edi+KTHREAD_STATE_], Ready
    jmp MainLoop

NoNextThread:
    /* Check if the idle scheduler is enabled */
    cmp byte ptr [ebx+KPCR_PRCB_IDLE_SCHEDULE], 0
    jz CpuIdle

    /* It is, so call the scheduler */
    lea ecx, [ebx+KPCR_PRCB_DATA]
    call @KiIdleSchedule@4
    test eax, eax

    /* Get new thread pointers and either swap or idle loop again */
    mov esi, eax
    mov edi, [ebx+KPCR_PRCB_IDLE_THREAD]
    jnz SwapContext
    jmp MainLoop
#endif
.endfunc

/* FIXFIX: Move to C code ****/
.globl _Ki386SetupAndExitToV86Mode@4
.func Ki386SetupAndExitToV86Mode@4
_Ki386SetupAndExitToV86Mode@4:

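    /* KiEnterV86Mode is not expected to return here; once the V86 call
       completes, execution resumes at Ki386BiosCallReturnAddress below,
       so the jmp $ only acts as a safety net. */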
    /* Enter V8086 mode */
    pushad
    sub esp, (12 + KTRAP_FRAME_LENGTH + NPX_FRAME_LENGTH)
    mov ecx, esp
    call @KiEnterV86Mode@4
    jmp $
.endfunc

.globl @Ki386BiosCallReturnAddress@4
@Ki386BiosCallReturnAddress@4:

    /* Exit V8086 mode */
    call @KiExitV86Mode@4
    mov esp, eax
    add esp, (12 + KTRAP_FRAME_LENGTH + NPX_FRAME_LENGTH)
    popad
    ret 4