/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/ke/i386/ctxswitch.S
 * PURPOSE:         Thread Context Switching
 *
 * PROGRAMMERS:     Alex Ionescu (alex@relsoft.net)
 *                  Gregor Anich (FPU Code)
 */

/* INCLUDES ******************************************************************/

#include <ndk/asm.h>
.intel_syntax noprefix

#define Ready 1
#define Running 2
#define WrDispatchInt 0x1F

/* Operands for the Pentium FDIV errata check; stored as 64-bit doubles to
   match the qword loads below */
Dividend:   .double 4195835.0
Divisor:    .double 3145727.0
Result1:    .double 0.0
Result2:    .double 0.0

/* FUNCTIONS ****************************************************************/

.globl _KiIsNpxErrataPresent@0
.func KiIsNpxErrataPresent@0
_KiIsNpxErrataPresent@0:

    /* Disable interrupts */
    cli

    /* Get CR0 and mask out FPU flags */
    mov eax, cr0
    mov ecx, eax
    and eax, ~(CR0_MP + CR0_TS + CR0_EM)
    mov cr0, eax

    /* Initialize the FPU */
    fninit

    /* Do the division and inverse multiplication */
    fld qword ptr Dividend
    fstp qword ptr Result1
    fld qword ptr Divisor
    fstp qword ptr Result2
    fld qword ptr Result1
    fdiv qword ptr Result2
    fmul qword ptr Result2

    /* Do the compare and check flags */
    fcomp qword ptr Result1
    fstsw ax
    sahf

    /* Restore CR0 and interrupts */
    mov cr0, ecx
    sti

    /* Return errata status (mov is used so the flags from the compare survive) */
    mov eax, 0
    jz NoErrata
    inc eax

NoErrata:
    ret
.endfunc
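
/*
 * For illustration only (not part of the build): a rough C equivalent of the
 * probe above, using the same known-bad operand pair. A compiler may fold the
 * expression or use SSE instead of the x87 unit, so the assembly version
 * remains the authoritative test.
 *
 *   static volatile double ErrataDividend = 4195835.0;
 *   static volatile double ErrataDivisor  = 3145727.0;
 *
 *   static int NpxErrataSketch(void)
 *   {
 *       // On an affected Pentium, (x / y) * y fails to reproduce x.
 *       volatile double Result = (ErrataDividend / ErrataDivisor) * ErrataDivisor;
 *       return Result != ErrataDividend;
 *   }
 */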
69
.globl _KiIsNpxPresent@0
.func KiIsNpxPresent@0
_KiIsNpxPresent@0:

    /* Save stack */
    push ebp

    /* Get CR0 and mask out FPU flags */
    mov eax, cr0
    and eax, ~(CR0_MP + CR0_TS + CR0_EM + CR0_ET)

    /* Initialize the FPU and assume FALSE for return */
    xor edx, edx
    fninit

    /* Save magic value on stack */
    mov ecx, 0x42424242
    push ecx

    /* Setup stack for FPU store */
    mov ebp, esp
    fnstsw [ebp]

    /* Now check if our magic got cleared */
    cmp byte ptr [ebp], 0
    jnz NoFpu

    /* Enable FPU, set return to TRUE */
    or eax, CR0_ET
    mov edx, 1

    /* If this is a 486 or higher, enable INT 16 as well */
    cmp dword ptr fs:KPCR_PRCB_CPU_TYPE, 3
    jbe NoFpu
    or eax, CR0_NE

NoFpu:
    /* Set emulation enabled during the first boot phase and set the CR0 */
    or eax, (CR0_EM + CR0_TS)
    mov cr0, eax

    /* Restore stack */
    pop eax
    pop ebp

    /* Return true or false */
    mov eax, edx
    ret
.endfunc
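
/*
 * Illustrative only: the same presence probe expressed with GCC inline
 * assembly (hypothetical helper, not part of the build). The CR0 masking and
 * the 486 INT16/NE handling done above are omitted here.
 *
 *   static int NpxPresentSketch(void)
 *   {
 *       unsigned short Magic = 0x4242;      // pre-load a magic value
 *
 *       // FNINIT/FNSTSW only overwrite the magic with a zeroed status byte
 *       // if an x87 unit actually responded.
 *       __asm__ __volatile__("fninit; fnstsw %0" : "+m"(Magic));
 *       return (Magic & 0xFF) == 0;
 *   }
 */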
119
.globl _KiFlushNPXState@4
.func KiFlushNPXState@4
_KiFlushNPXState@4:

    /* Save volatiles and disable interrupts */
    push esi
    push edi
    push ebx
    pushfd
    cli

    /* Save the PCR and get the current thread */
    mov edi, fs:[KPCR_SELF]
    mov esi, [edi+KPCR_CURRENT_THREAD]

    /* Check if we're already loaded */
    cmp byte ptr [esi+KTHREAD_NPX_STATE], NPX_STATE_LOADED
    je IsValid

    /* Check if we're supposed to get it */
    cmp dword ptr [esp+20], 0
    je Return

#ifdef DBG
    /* Assert Fxsr support */
    test byte ptr _KeI386FxsrPresent, 1
    jnz AssertOk
    int 3
AssertOk:
#endif

    /* Get CR0 and test if it's valid */
    mov ebx, cr0
    test bl, CR0_MP + CR0_TS + CR0_EM
    jz Cr0OK

    /* Enable fnsave to work */
    and ebx, ~(CR0_MP + CR0_TS + CR0_EM)
    mov cr0, ebx

Cr0OK:
    /* Check if we are the NPX Thread */
    mov eax, [edi+KPCR_NPX_THREAD]
    or eax, eax
    jz DontSave

    /* Check if it's not loaded */
    cmp byte ptr [eax+KTHREAD_NPX_STATE], NPX_STATE_NOT_LOADED
    jnz DontSave

#ifdef DBG
    /* We are the NPX Thread with an unloaded NPX State... this isn't normal! */
    int 3
#endif

    /* Save the NPX State */
    mov ecx, [eax+KTHREAD_INITIAL_STACK]
    sub ecx, NPX_FRAME_LENGTH
    fxsave [ecx]
    mov byte ptr [eax+KTHREAD_NPX_STATE], NPX_STATE_NOT_LOADED

DontSave:
    /* Load the NPX State */
    mov ecx, [esi+KTHREAD_INITIAL_STACK]
    sub ecx, NPX_FRAME_LENGTH
    fxrstor [ecx]

    /* Get the CR0 state and destination */
    mov edx, [ecx+FN_CR0_NPX_STATE]
    mov ecx, [esp+20]
    jmp DoneLoad

IsValid:
    /* We already have a valid state, flush it */
    mov ebx, cr0
    test bl, CR0_MP + CR0_TS + CR0_EM
    jz Cr0OK2

    /* Enable fnsave to work */
    and ebx, ~(CR0_MP + CR0_TS + CR0_EM)
    mov cr0, ebx

Cr0OK2:
    /* Get the kernel stack */
    mov ecx, [esi+KTHREAD_INITIAL_STACK]
    test byte ptr _KeI386FxsrPresent, 1
    lea ecx, [ecx-NPX_FRAME_LENGTH]

    /* Set the NPX State */
    mov byte ptr [esi+KTHREAD_NPX_STATE], NPX_STATE_NOT_LOADED

    /* Get Cr0 */
    mov edx, [ecx+FN_CR0_NPX_STATE]
    jz DoneLoad

    /* Save the FX State */
    fxsave [ecx]

    /* Check if we also have to save it in the parameter */
    mov ecx, [esp+20]
    jecxz NoSave

DoneLoad:
    /* Save the Fn state in the parameter we got */
    fnsave [ecx]
    fwait

NoSave:
    /* Clear eax */
    xor eax, eax

    /* Add NPX State */
    or ebx, NPX_STATE_NOT_LOADED

    /* Clear the NPX thread */
    mov [edi+KPCR_NPX_THREAD], eax

    /* Add saved CR0 into NPX State, and set it */
    or ebx, edx
    mov cr0, ebx

    /* Re-enable interrupts and return */
Return:
    popf
    pop ebx
    pop edi
    pop esi
    ret 4

.endfunc
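
/*
 * A simplified C outline of the flow above, for illustration only. The real
 * routine takes a single save-area argument and finds the thread and PCR
 * itself; the names NpxFrame, fxsave, fxrstor and fnsave are stand-ins, and
 * the CR0.TS/MP/EM juggling plus the FXSR-vs-FNSAVE distinction are left out.
 *
 *   VOID KiFlushNPXStateSketch(PKTHREAD Current, PKPCR Pcr, PVOID SaveArea)
 *   {
 *       if (Current->NpxState != NPX_STATE_LOADED)
 *       {
 *           if (!SaveArea) return;          // nothing requested, nothing to do
 *
 *           // Spill the current FPU owner if needed, then bring our own
 *           // frame back so it can be copied out below.
 *           PKTHREAD Owner = Pcr->NpxThread;
 *           if (Owner && Owner->NpxState == NPX_STATE_NOT_LOADED)
 *           {
 *               fxsave(NpxFrame(Owner));
 *               Owner->NpxState = NPX_STATE_NOT_LOADED;
 *           }
 *           fxrstor(NpxFrame(Current));
 *           fnsave(SaveArea);               // legacy-format copy for the caller
 *       }
 *       else
 *       {
 *           // Live state: spill it to the thread's NPX frame and, if asked,
 *           // also hand a legacy-format copy to the caller.
 *           fxsave(NpxFrame(Current));
 *           Current->NpxState = NPX_STATE_NOT_LOADED;
 *           if (SaveArea) fnsave(SaveArea);
 *       }
 *
 *       Pcr->NpxThread = NULL;              // nobody owns the FPU anymore
 *   }
 */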
250
/*++
 * KiThreadStartup
 *
 * The KiThreadStartup routine is the beginning of any thread.
 *
 * Params:
 *     SystemRoutine - Pointer to the System Startup Routine. Either
 *                     PspUserThreadStartup or PspSystemThreadStartup
 *
 *     StartRoutine - For Kernel Threads only, specifies the starting execution
 *                    point of the new thread.
 *
 *     StartContext - For Kernel Threads only, specifies a pointer to variable
 *                    context data to be sent to the StartRoutine above.
 *
 *     UserThread - Indicates whether or not this is a user thread. This tells
 *                  us if the thread has a context or not.
 *
 *     TrapFrame - For user threads, pointer to the trap frame that holds the
 *                 initial user-mode context.
 *
 * Returns:
 *     Should never return for a system thread. Returns through the System Call
 *     Exit Dispatcher for a user thread.
 *
 * Remarks:
 *     If a return from a system thread is detected, a bug check will occur.
 *
 *--*/
.func KiThreadStartup@156
.globl _KiThreadStartup@156
_KiThreadStartup@156:

    /*
     * Clear all the non-volatile registers, so the thread won't be tempted to
     * expect any static data (like some badly coded usermode/win9x apps do)
     */
    xor ebx, ebx
    xor esi, esi
    xor edi, edi
    xor ebp, ebp

    /* It's now safe to go to APC */
    mov ecx, APC_LEVEL
    call @KfLowerIrql@4

    /*
     * Call the System Routine which is right on our stack now.
     * After we pop the pointer, the Start Routine/Context will be on the
     * stack, as parameters to the System Routine
     */
    pop eax
    call eax

    /* The thread returned... was it a user-thread? */
    pop ecx
    or ecx, ecx
    jz BadThread

    /* Yes it was, set our trapframe for the System Call Exit Dispatcher */
    mov ebp, esp

    /* Exit back to user-mode */
    jmp _KiServiceExit2

BadThread:

    /* A system thread returned... this is very bad! */
    int 3
.endfunc
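
/*
 * A C-level sketch of the sequence above, for illustration only. The real
 * entry point has no C signature: its "arguments" are simply the values that
 * the context-setup code pushed onto the new thread's stack.
 *
 *   VOID KiThreadStartupSketch(PKSYSTEM_ROUTINE SystemRoutine,
 *                              PKSTART_ROUTINE StartRoutine,
 *                              PVOID StartContext,
 *                              BOOLEAN UserThread)
 *   {
 *       KfLowerIrql(APC_LEVEL);                     // safe to drop below DISPATCH now
 *       SystemRoutine(StartRoutine, StartContext);  // Psp{User,System}ThreadStartup
 *
 *       // Only user threads come back here; they leave through the system
 *       // call exit path with the freshly built trap frame. A returning
 *       // system thread is fatal.
 *       if (!UserThread)
 *       {
 *           __debugbreak();                         // matches the "int 3" above
 *       }
 *   }
 */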
321
/*++
 * KiSwapContextInternal
 *
 * \brief
 *     The KiSwapContextInternal routine switches context to another thread.
 *
 * BOOLEAN USERCALL KiSwapContextInternal();
 *
 * Params:
 *     ESI - Pointer to the KTHREAD to which the caller wishes to switch.
 *
 *     EDI - Pointer to the KTHREAD from which the caller is switching.
 *
 * \returns
 *     APC state.
 *
 * \remarks
 *     Absolutely all registers except ESP can be trampled here for maximum
 *     code flexibility.
 *
 *--*/
.globl @KiSwapContextInternal@0
.func @KiSwapContextInternal@0, @KiSwapContextInternal@0
@KiSwapContextInternal@0:

    /* Save the IRQL */
    push ecx

#ifdef CONFIG_SMP
GetSwapLock:
    /* Acquire the swap lock */
    cmp byte ptr [esi+KTHREAD_SWAP_BUSY], 0
    jz NotBusy
    pause
    jmp GetSwapLock
#endif
NotBusy:
    /* Increase context switches (use ES for lazy load) */
    inc dword ptr es:[ebx+KPCR_CONTEXT_SWITCHES]

    /* Save the Exception list */
    push [ebx+KPCR_EXCEPTION_LIST]

    /* Check for WMI */
    cmp dword ptr [ebx+KPCR_PERF_GLOBAL_GROUP_MASK], 0
    jnz WmiTrace

AfterTrace:
#ifdef CONFIG_SMP
#ifdef DBG
    /* Assert that we're on the right CPU */
    mov cl, [esi+KTHREAD_NEXT_PROCESSOR]
    cmp cl, [ebx+KPCR_PROCESSOR_NUMBER]
    jnz WrongCpu
#endif
#endif

    /* Get CR0 and save it */
    mov ebp, cr0
    mov edx, ebp

#ifdef CONFIG_SMP
    /* Check NPX State */
    cmp byte ptr [edi+KTHREAD_NPX_STATE], NPX_STATE_LOADED
    jz NpxLoaded
#endif

SetStack:
    /* Set new stack */
    mov [edi+KTHREAD_KERNEL_STACK], esp

    /* Checking NPX, disable interrupts now */
    mov eax, [esi+KTHREAD_INITIAL_STACK]
    cli

    /* Get the NPX State */
    movzx ecx, byte ptr [esi+KTHREAD_NPX_STATE]

    /* Clear the other bits, merge in CR0, merge in FPU CR0 bits and compare */
    and edx, ~(CR0_MP + CR0_EM + CR0_TS)
    or ecx, edx
    or ecx, [eax - (NPX_FRAME_LENGTH - FN_CR0_NPX_STATE)]
    cmp ebp, ecx
    jnz NewCr0

StackOk:
    /* Enable interrupts and set the current stack */
    sti
    mov esp, [esi+KTHREAD_KERNEL_STACK]

    /* Check if address space switch is needed */
    mov ebp, [esi+KTHREAD_APCSTATE_PROCESS]
    mov eax, [edi+KTHREAD_APCSTATE_PROCESS]
    cmp ebp, eax
    jz SameProcess

#ifdef CONFIG_SMP
    /* Get the active processors and XOR with the process' */
    mov ecx, [ebx+KPCR_SET_MEMBER_COPY]
    lock xor [ebp+KPROCESS_ACTIVE_PROCESSORS], ecx
    lock xor [eax+KPROCESS_ACTIVE_PROCESSORS], ecx

    /* Assert the change went OK: the new process must now have our set
       member and the old one must not */
#ifdef DBG
    test [ebp+KPROCESS_ACTIVE_PROCESSORS], ecx
    jz WrongActiveCpu
    test [eax+KPROCESS_ACTIVE_PROCESSORS], ecx
    jnz WrongActiveCpu
#endif
#endif

    /* Check if we need an LDT */
    mov ecx, [ebp+KPROCESS_LDT_DESCRIPTOR0]
    or ecx, [eax+KPROCESS_LDT_DESCRIPTOR0]
    jnz LdtReload

UpdateCr3:
    /* Switch address space */
    mov eax, [ebp+KPROCESS_DIRECTORY_TABLE_BASE]
    mov cr3, eax

SameProcess:

#ifdef CONFIG_SMP
    /* Release swap lock */
    and byte ptr [edi+KTHREAD_SWAP_BUSY], 0
#endif

    /* Clear gs */
    xor eax, eax
    mov gs, ax

    /* Set the TEB */
    mov eax, [esi+KTHREAD_TEB]
    mov [ebx+KPCR_TEB], eax
    mov ecx, [ebx+KPCR_GDT]
    mov [ecx+0x3A], ax
    shr eax, 16
    mov [ecx+0x3C], al
    mov [ecx+0x3F], ah

    /* Get stack pointer */
    mov eax, [esi+KTHREAD_INITIAL_STACK]

    /* Make space for the NPX Frame */
    sub eax, NPX_FRAME_LENGTH

    /* Check if this isn't V86 Mode, so we can bias the Esp0 */
    test dword ptr [eax - KTRAP_FRAME_SIZE + KTRAP_FRAME_EFLAGS], EFLAGS_V86_MASK
    jnz NoAdjust

    /* Bias esp */
    sub eax, KTRAP_FRAME_V86_GS - KTRAP_FRAME_SS

NoAdjust:

    /* Set new ESP0 */
    mov ecx, [ebx+KPCR_TSS]
    mov [ecx+KTSS_ESP0], eax

    /* Set current IOPM offset in the TSS */
    mov ax, [ebp+KPROCESS_IOPM_OFFSET]
    mov [ecx+KTSS_IOMAPBASE], ax

    /* Increase context switches */
    inc dword ptr [esi+KTHREAD_CONTEXT_SWITCHES]

    /* Restore exception list */
    pop [ebx+KPCR_EXCEPTION_LIST]

    /* Restore IRQL */
    pop ecx

    /* DPC shouldn't be active */
    cmp byte ptr [ebx+KPCR_PRCB_DPC_ROUTINE_ACTIVE], 0
    jnz BugCheckDpc

    /* Check if kernel APCs are pending */
    cmp byte ptr [esi+KTHREAD_PENDING_KERNEL_APC], 0
    jnz CheckApc

    /* No APCs, return */
    xor eax, eax
    ret
506
CheckApc:

    /* Check if they're disabled */
    cmp word ptr [esi+KTHREAD_SPECIAL_APC_DISABLE], 0
    jnz ApcReturn
    test cl, cl
    jz ApcReturn

    /* Request APC Delivery */
    mov cl, APC_LEVEL
    call @HalRequestSoftwareInterrupt@4

    /* Clear ZF so we return FALSE below (the APC will arrive via the interrupt) */
    or eax, esp

ApcReturn:

    /* Return with APC pending */
    setz al
    ret
525
LdtReload:
    /* Check if it's empty */
    mov eax, [ebp+KPROCESS_LDT_DESCRIPTOR0]
    test eax, eax
    jz LoadLdt

    /* Write the LDT Selector */
    mov ecx, [ebx+KPCR_GDT]
    mov [ecx+KGDT_LDT], eax
    mov eax, [ebp+KPROCESS_LDT_DESCRIPTOR1]
    mov [ecx+KGDT_LDT+4], eax

    /* Write the INT21 handler */
    mov ecx, [ebx+KPCR_IDT]
    mov eax, [ebp+KPROCESS_INT21_DESCRIPTOR0]
    mov [ecx+0x108], eax
    mov eax, [ebp+KPROCESS_INT21_DESCRIPTOR1]
    mov [ecx+0x10C], eax

    /* Save LDT Selector */
    mov eax, KGDT_LDT

LoadLdt:
    lldt ax
    jmp UpdateCr3

NewCr0:

#ifdef DBG
    /* Assert NPX State */
    test byte ptr [esi+KTHREAD_NPX_STATE], ~(NPX_STATE_NOT_LOADED)
    jnz InvalidNpx
    test dword ptr [eax - (NPX_FRAME_LENGTH - FN_CR0_NPX_STATE)], ~(CR0_PE + CR0_MP + CR0_EM + CR0_TS)
    jnz InvalidNpx
#endif

    /* Update CR0 */
    mov cr0, ecx
    jmp StackOk

#ifdef CONFIG_SMP
NpxLoaded:

    /* FIXME: TODO */
    int 3

    /* Jump back */
    jmp SetStack
#endif

WmiTrace:

    /* No WMI support yet */
    int 3

    /* Jump back */
    jmp AfterTrace

BugCheckDpc:

    /* Bugcheck the machine, printing out the threads being switched */
    mov eax, [edi+KTHREAD_INITIAL_STACK]
    push 0
    push eax
    push esi
    push edi
    push ATTEMPTED_SWITCH_FROM_DPC
    call _KeBugCheckEx@20

#ifdef DBG
InvalidNpx:
    int 3
WrongActiveCpu:
    int 3
WrongCpu:
    int 3
#endif
.endfunc
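
/*
 * The routine above is dense, so here is a much-simplified, uniprocessor-only
 * C outline of the order of operations (illustrative; GetEsp, SetEsp, WriteCr3,
 * SetGdtTebBase and ComputeEsp0 are hypothetical helpers, and details such as
 * the CR0/NPX merge, WMI tracing and LDT reload are omitted).
 *
 *   BOOLEAN KiSwapContextInternalSketch(PKPCR Pcr, PKTHREAD Old, PKTHREAD New,
 *                                       KIRQL WaitIrql)
 *   {
 *       PKPROCESS NewProcess = New->ApcState.Process;
 *
 *       Pcr->ContextSwitches++;
 *       Old->KernelStack = GetEsp();            // park the outgoing stack...
 *       SetEsp(New->KernelStack);               // ...and adopt the new one
 *
 *       if (NewProcess != Old->ApcState.Process)
 *           WriteCr3(NewProcess->DirectoryTableBase);   // new address space
 *
 *       SetGdtTebBase(Pcr, New->Teb);           // retarget the FS-visible TEB
 *       ComputeEsp0(Pcr, New);                  // ring-0 stack + IOPM in the TSS
 *       New->ContextSwitches++;
 *
 *       // Tell the caller whether it has to deliver a pending kernel APC.
 *       if (New->ApcState.KernelApcPending && !New->SpecialApcDisable)
 *       {
 *           if (WaitIrql != PASSIVE_LEVEL)
 *           {
 *               HalRequestSoftwareInterrupt(APC_LEVEL);  // deliver via interrupt
 *               return FALSE;
 *           }
 *           return TRUE;                        // caller delivers the APC itself
 *       }
 *       return FALSE;
 *   }
 */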
604
/**
 * KiSwapContext
 *
 * \brief
 *     The KiSwapContext routine switches context to another thread.
 *
 * BOOLEAN FASTCALL
 * KiSwapContext(PKTHREAD CurrentThread, PKTHREAD TargetThread);
 *
 * \param CurrentThread
 *     Pointer to the KTHREAD of the current thread.
 *
 * \param TargetThread
 *     Pointer to the KTHREAD to which the caller wishes to switch.
 *
 * \returns
 *     The WaitStatus of the Target Thread.
 *
 * \remarks
 *     This is a wrapper around KiSwapContextInternal which will save all the
 *     non-volatile registers so that the Internal function can use all of
 *     them. It will also save the old current thread and set the new one.
 *
 *     The calling thread does not return after KiSwapContextInternal until
 *     another thread switches back to it.
 *
 *--*/
.globl @KiSwapContext@8
.func @KiSwapContext@8, @KiSwapContext@8
@KiSwapContext@8:

    /* Save 4 registers */
    sub esp, 4 * 4

    /* Save all the non-volatile ones */
    mov [esp+12], ebx
    mov [esp+8], esi
    mov [esp+4], edi
    mov [esp+0], ebp

    /* Get the current KPCR */
    mov ebx, fs:[KPCR_SELF]

    /* Get the Current Thread */
    mov edi, ecx

    /* Get the New Thread */
    mov esi, edx

    /* Get the wait IRQL */
    movzx ecx, byte ptr [edi+KTHREAD_WAIT_IRQL]

    /* Do the swap with the registers correctly setup */
    call @KiSwapContextInternal@0

    /* Return the registers */
    mov ebp, [esp+0]
    mov edi, [esp+4]
    mov esi, [esp+8]
    mov ebx, [esp+12]

    /* Clean stack */
    add esp, 4 * 4
    ret
.endfunc
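
/*
 * Conceptual shape only; the register contract (EBX = PCR, EDI = old thread,
 * ESI = new thread, CL = wait IRQL) cannot be expressed in portable C, so
 * this sketch merely shows the wrapper relationship.
 *
 *   BOOLEAN FASTCALL KiSwapContextSketch(IN PKTHREAD CurrentThread,
 *                                        IN PKTHREAD TargetThread)
 *   {
 *       // The non-volatile registers are saved on the stack here so that
 *       // the internal routine may clobber everything except ESP, and are
 *       // restored once some other thread eventually switches back to us.
 *       return KiSwapContextInternal();
 *   }
 */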
670
.globl @KiIdleLoop@0
.func @KiIdleLoop@0, @KiIdleLoop@0
@KiIdleLoop@0:

    /* Set EBX */
    mov ebx, fs:[KPCR_SELF]

    /* Jump into mainline code */
    jmp MainLoop

CpuIdle:
    /* Call the CPU's idle function */
    lea ecx, [ebx+KPCR_PRCB_POWER_STATE_IDLE_FUNCTION]
    call [ecx]

MainLoop:
    /* Briefly open an interrupt window so pending interrupts can be delivered */
    sti
    nop
    nop
    cli
692
    /* Check if we have to deliver DPCs, timers, or deferred threads */
    mov eax, [ebx+KPCR_PRCB_DPC_QUEUE_DEPTH]
    or eax, [ebx+KPCR_PRCB_TIMER_REQUEST]
#ifdef CONFIG_SMP
    or eax, [ebx+KPCR_PRCB_DEFERRED_READY_LIST_HEAD]
#endif
    jz CheckSchedule

    mov cl, DISPATCH_LEVEL
    call @HalClearSoftwareInterrupt@4

    /* Handle the above */
    lea ecx, [ebx+KPCR_PRCB_DATA]
    call @KiRetireDpcList@4

CheckSchedule:
    /* Check if a next thread is queued */
    cmp dword ptr [ebx+KPCR_PRCB_NEXT_THREAD], 0
#ifdef CONFIG_SMP
    jz NoNextThread
#else
    jz CpuIdle
#endif

#ifdef CONFIG_SMP
    /* There is, raise IRQL to synch level */
    call _KeRaiseIrqlToSynchLevel@0
#endif
    sti

    /* Set the current thread to ready */
    mov edi, [ebx+KPCR_CURRENT_THREAD]
#ifdef CONFIG_SMP
    mov byte ptr [edi+KTHREAD_SWAP_BUSY], 1

    /* Acquire the PRCB Lock */
    lock bts dword ptr [ebx+KPCR_PRCB_PRCB_LOCK], 0
    jnb CheckNext
    lea ecx, [ebx+KPCR_PRCB_PRCB_LOCK]
    call @KefAcquireSpinLockAtDpcLevel@4
#endif

CheckNext:
    /* Check if the next thread is the current */
    mov esi, [ebx+KPCR_PRCB_NEXT_THREAD]
#ifdef CONFIG_SMP
    cmp esi, edi
    jz SameThread
#endif

    /* Clear the next thread and set this one instead */
    and dword ptr [ebx+KPCR_PRCB_NEXT_THREAD], 0
    mov [ebx+KPCR_CURRENT_THREAD], esi

    /* Set the thread as running */
    mov byte ptr [esi+KTHREAD_STATE_], Running

#ifdef CONFIG_SMP
    /* Disable the idle scheduler and release the PRCB lock */
    and byte ptr [ebx+KPCR_PRCB_IDLE_SCHEDULE], 0
    and dword ptr [ebx+KPCR_PRCB_PRCB_LOCK], 0
#endif

SwapContext:
    /* ReactOS Mm Hack */
    mov ecx, esi
    call @MiSyncForContextSwitch@4

    /* Swap context at APC_LEVEL */
    mov ecx, APC_LEVEL
    call @KiSwapContextInternal@0

#ifdef CONFIG_SMP
    /* Lower to DPC level */
    mov ecx, DISPATCH_LEVEL
    call @KfLowerIrql@4
#endif
    jmp MainLoop

#ifdef CONFIG_SMP
SameThread:
    /* Clear the next thread, and mark the thread as ready after releasing the lock */
    and dword ptr [ebx+KPCR_PRCB_NEXT_THREAD], 0
    and dword ptr [ebx+KPCR_PRCB_PRCB_LOCK], 0
    and byte ptr [edi+KTHREAD_STATE_], Ready
    jmp MainLoop
779
NoNextThread:
    /* Check if the idle scheduler is enabled */
    cmp byte ptr [ebx+KPCR_PRCB_IDLE_SCHEDULE], 0
    jz CpuIdle

    /* It is, so call the scheduler */
    lea ecx, [ebx+KPCR_PRCB_DATA]
    call @KiIdleSchedule@4
    test eax, eax

    /* Get new thread pointers and either swap or idle loop again */
    mov esi, eax
    mov edi, [ebx+KPCR_PRCB_IDLE_THREAD]
    jnz SwapContext
    jmp MainLoop
#endif
.endfunc
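
/*
 * A simplified, uniprocessor-flavored C rendering of the loop above
 * (illustrative only; field names are approximate and the SMP locking,
 * deferred-ready list and idle scheduler are omitted).
 *
 *   VOID KiIdleLoopSketch(PKPRCB Prcb)
 *   {
 *       for (;;)
 *       {
 *           _enable(); _disable();              // open a brief interrupt window
 *
 *           if (Prcb->DpcData[0].DpcQueueDepth || Prcb->TimerRequest)
 *           {
 *               HalClearSoftwareInterrupt(DISPATCH_LEVEL);
 *               KiRetireDpcList(Prcb);          // drain the DPC queue
 *           }
 *
 *           if (Prcb->NextThread)
 *           {
 *               // Promote the queued thread and switch to it at APC_LEVEL.
 *               PKTHREAD NewThread = Prcb->NextThread;
 *               Prcb->NextThread = NULL;
 *               Prcb->CurrentThread = NewThread;
 *               NewThread->State = Running;
 *               KiSwapContextInternal();
 *           }
 *           else
 *           {
 *               // Nothing to do: let the power manager idle the CPU (HLT).
 *               Prcb->PowerState.IdleFunction(&Prcb->PowerState);
 *           }
 *       }
 *   }
 */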
797
.globl _Ki386AdjustEsp0@4
.func Ki386AdjustEsp0@4
_Ki386AdjustEsp0@4:

    /* Get the current thread */
    mov eax, [fs:KPCR_CURRENT_THREAD]

    /* Get trap frame and stack */
    mov edx, [esp+4]
    mov eax, [eax+KTHREAD_INITIAL_STACK]

    /* Check if V86 */
    test dword ptr [edx+KTRAP_FRAME_EFLAGS], EFLAGS_V86_MASK
    jnz 1f

    /* Bias the stack */
    sub eax, KTRAP_FRAME_V86_GS - KTRAP_FRAME_SS

1:
    /* Skip FX Save Area */
    sub eax, SIZEOF_FX_SAVE_AREA

    /* Disable interrupts */
    pushf
    cli

    /* Adjust ESP0 */
    mov edx, [fs:KPCR_TSS]
    mov ss:[edx+KTSS_ESP0], eax

    /* Enable interrupts and return */
    popf
    ret 4
.endfunc
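
/*
 * Illustrative C rendering of the ESP0 computation above (not part of the
 * build; it assumes the usual NDK trap frame and FX_SAVE_AREA definitions).
 *
 *   VOID Ki386AdjustEsp0Sketch(IN PKTRAP_FRAME TrapFrame)
 *   {
 *       ULONG_PTR Esp0 = (ULONG_PTR)KeGetCurrentThread()->InitialStack;
 *
 *       // Non-V86 frames do not contain the four V86 segment slots, so the
 *       // usable top of the frame sits that much lower.
 *       if (!(TrapFrame->EFlags & EFLAGS_V86_MASK))
 *           Esp0 -= FIELD_OFFSET(KTRAP_FRAME, V86Gs) -
 *                   FIELD_OFFSET(KTRAP_FRAME, HardwareSegSs);
 *
 *       Esp0 -= sizeof(FX_SAVE_AREA);           // leave room for the NPX frame
 *
 *       // Published with interrupts disabled, exactly as the code above does.
 *       KeGetPcr()->TSS->Esp0 = Esp0;
 *   }
 */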
832
.globl _KiSwapProcess@8
.func KiSwapProcess@8
_KiSwapProcess@8:

    /* Get process pointers */
    mov edx, [esp+4]
    mov eax, [esp+8]

#ifdef CONFIG_SMP
    /* Update active processors */
    mov ecx, fs:[KPCR_SET_MEMBER]
    lock xor [edx+KPROCESS_ACTIVE_PROCESSORS], ecx
    lock xor [eax+KPROCESS_ACTIVE_PROCESSORS], ecx

    /* Sanity check */
#ifdef DBG
    test dword ptr [edx+KPROCESS_ACTIVE_PROCESSORS], ecx
    jz WrongCpu1
    test dword ptr [eax+KPROCESS_ACTIVE_PROCESSORS], ecx
    jnz WrongCpu2
#endif
#endif

    /* Check if their LDTs changed */
    mov ecx, [edx+KPROCESS_LDT_DESCRIPTOR0]
    or ecx, [eax+KPROCESS_LDT_DESCRIPTOR0]
    jnz NewLdt

    /* Update CR3 */
    mov eax, [edx+KPROCESS_DIRECTORY_TABLE_BASE]
    mov cr3, eax

    /* Get the KTSS */
    mov ecx, fs:[KPCR_TSS]

    /* Clear GS on process swap */
    xor eax, eax
    mov gs, ax

    /* Update IOPM offset */
    mov ax, [edx+KPROCESS_IOPM_OFFSET]
    mov [ecx+KTSS_IOMAPBASE], ax

    /* Return */
    ret 8

NewLdt:
    /* FIXME: TODO */
    int 3

#ifdef DBG
WrongCpu1:
    int 3
WrongCpu2:
    int 3
#endif
.endfunc
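
/*
 * Simplified uniprocessor rendering of the process swap above (illustrative
 * only; the SMP active-processor bookkeeping and the LDT path are omitted,
 * and the field names are approximate).
 *
 *   VOID KiSwapProcessSketch(IN PKPROCESS NewProcess, IN PKPROCESS OldProcess)
 *   {
 *       UNREFERENCED_PARAMETER(OldProcess);     // only used for SMP bookkeeping
 *
 *       // Reloading CR3 switches the address space and flushes the
 *       // non-global TLB entries.
 *       __writecr3(NewProcess->DirectoryTableBase[0]);
 *
 *       // Each process carries its own I/O permission map offset.
 *       KeGetPcr()->TSS->IoMapBase = NewProcess->IopmOffset;
 *   }
 */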