/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/include/trap_x.h
 * PURPOSE:         Internal Inlined Functions for the Trap Handling Code
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */
//
// Unreachable code hint for GCC 4.5.x, older GCC versions, and MSVC
//
// GCC >= 4.5 has a true unreachable hint; older GCC gets a trap instruction
// (which at least faults loudly); MSVC uses __assume(0).
// NOTE(review): the #elif/#else/#endif lines were lost in extraction and are
// restored here following the comment's three-compiler description.
//
#if __GNUC__ * 100 + __GNUC_MINOR__ >= 405
#define UNREACHABLE __builtin_unreachable()
#elif defined(__GNUC__)
#define UNREACHABLE __builtin_trap()
#else
#define UNREACHABLE __assume(0)
#endif
31 KiDumpTrapFrame(IN PKTRAP_FRAME TrapFrame
)
33 /* Dump the whole thing */
34 DbgPrint("DbgEbp: %x\n", TrapFrame
->DbgEbp
);
35 DbgPrint("DbgEip: %x\n", TrapFrame
->DbgEip
);
36 DbgPrint("DbgArgMark: %x\n", TrapFrame
->DbgArgMark
);
37 DbgPrint("DbgArgPointer: %x\n", TrapFrame
->DbgArgPointer
);
38 DbgPrint("TempSegCs: %x\n", TrapFrame
->TempSegCs
);
39 DbgPrint("TempEsp: %x\n", TrapFrame
->TempEsp
);
40 DbgPrint("Dr0: %x\n", TrapFrame
->Dr0
);
41 DbgPrint("Dr1: %x\n", TrapFrame
->Dr1
);
42 DbgPrint("Dr2: %x\n", TrapFrame
->Dr2
);
43 DbgPrint("Dr3: %x\n", TrapFrame
->Dr3
);
44 DbgPrint("Dr6: %x\n", TrapFrame
->Dr6
);
45 DbgPrint("Dr7: %x\n", TrapFrame
->Dr7
);
46 DbgPrint("SegGs: %x\n", TrapFrame
->SegGs
);
47 DbgPrint("SegEs: %x\n", TrapFrame
->SegEs
);
48 DbgPrint("SegDs: %x\n", TrapFrame
->SegDs
);
49 DbgPrint("Edx: %x\n", TrapFrame
->Edx
);
50 DbgPrint("Ecx: %x\n", TrapFrame
->Ecx
);
51 DbgPrint("Eax: %x\n", TrapFrame
->Eax
);
52 DbgPrint("PreviousPreviousMode: %x\n", TrapFrame
->PreviousPreviousMode
);
53 DbgPrint("ExceptionList: %x\n", TrapFrame
->ExceptionList
);
54 DbgPrint("SegFs: %x\n", TrapFrame
->SegFs
);
55 DbgPrint("Edi: %x\n", TrapFrame
->Edi
);
56 DbgPrint("Esi: %x\n", TrapFrame
->Esi
);
57 DbgPrint("Ebx: %x\n", TrapFrame
->Ebx
);
58 DbgPrint("Ebp: %x\n", TrapFrame
->Ebp
);
59 DbgPrint("ErrCode: %x\n", TrapFrame
->ErrCode
);
60 DbgPrint("Eip: %x\n", TrapFrame
->Eip
);
61 DbgPrint("SegCs: %x\n", TrapFrame
->SegCs
);
62 DbgPrint("EFlags: %x\n", TrapFrame
->EFlags
);
63 DbgPrint("HardwareEsp: %x\n", TrapFrame
->HardwareEsp
);
64 DbgPrint("HardwareSegSs: %x\n", TrapFrame
->HardwareSegSs
);
65 DbgPrint("V86Es: %x\n", TrapFrame
->V86Es
);
66 DbgPrint("V86Ds: %x\n", TrapFrame
->V86Ds
);
67 DbgPrint("V86Fs: %x\n", TrapFrame
->V86Fs
);
68 DbgPrint("V86Gs: %x\n", TrapFrame
->V86Gs
);
74 KiFillTrapFrameDebug(IN PKTRAP_FRAME TrapFrame
)
76 /* Set the debug information */
77 TrapFrame
->DbgArgPointer
= TrapFrame
->Edx
;
78 TrapFrame
->DbgArgMark
= 0xBADB0D00;
79 TrapFrame
->DbgEip
= TrapFrame
->Eip
;
80 TrapFrame
->DbgEbp
= TrapFrame
->Ebp
;
85 KiExitTrapDebugChecks(IN PKTRAP_FRAME TrapFrame
,
86 IN KTRAP_STATE_BITS SkipBits
)
88 /* Make sure interrupts are disabled */
89 if (__readeflags() & EFLAGS_INTERRUPT_MASK
)
91 DbgPrint("Exiting with interrupts enabled: %lx\n", __readeflags());
95 /* Make sure this is a real trap frame */
96 if (TrapFrame
->DbgArgMark
!= 0xBADB0D00)
98 DbgPrint("Exiting with an invalid trap frame? (No MAGIC in trap frame)\n");
99 KiDumpTrapFrame(TrapFrame
);
103 /* Make sure we're not in user-mode or something */
104 if (Ke386GetFs() != KGDT_R0_PCR
)
106 DbgPrint("Exiting with an invalid FS: %lx\n", Ke386GetFs());
110 /* Make sure we have a valid SEH chain */
111 if (KeGetPcr()->Tib
.ExceptionList
== 0)
113 DbgPrint("Exiting with NULL exception chain: %p\n", KeGetPcr()->Tib
.ExceptionList
);
117 /* Make sure we're restoring a valid SEH chain */
118 if (TrapFrame
->ExceptionList
== 0)
120 DbgPrint("Entered a trap with a NULL exception chain: %p\n", TrapFrame
->ExceptionList
);
124 /* If we're ignoring previous mode, make sure caller doesn't actually want it */
125 if ((SkipBits
.SkipPreviousMode
) && (TrapFrame
->PreviousPreviousMode
!= -1))
127 DbgPrint("Exiting a trap witout restoring previous mode, yet previous mode seems valid: %lx", TrapFrame
->PreviousPreviousMode
);
134 KiExitSystemCallDebugChecks(IN ULONG SystemCall
,
135 IN PKTRAP_FRAME TrapFrame
)
139 /* Check if this was a user call */
140 if (KiUserMode(TrapFrame
))
142 /* Make sure we are not returning with elevated IRQL */
143 OldIrql
= KeGetCurrentIrql();
144 if (OldIrql
!= PASSIVE_LEVEL
)
146 /* Forcibly put us in a sane state */
147 KeGetPcr()->CurrentIrql
= PASSIVE_LEVEL
;
151 KeBugCheckEx(IRQL_GT_ZERO_AT_SYSTEM_SERVICE
,
158 /* Make sure we're not attached and that APCs are not disabled */
159 if ((KeGetCurrentThread()->ApcStateIndex
!= CurrentApcEnvironment
) ||
160 (KeGetCurrentThread()->CombinedApcDisable
!= 0))
163 KeBugCheckEx(APC_INDEX_MISMATCH
,
165 KeGetCurrentThread()->ApcStateIndex
,
166 KeGetCurrentThread()->CombinedApcDisable
,
/*
 * Release builds: the debug-only trap checks above compile away to nothing.
 * NOTE(review): these appear to be the non-DBG (#else) definitions; the
 * surrounding #if DBG / #else / #endif lines were lost in extraction.
 */
#define KiExitTrapDebugChecks(x, y)
#define KiFillTrapFrameDebug(x)
#define KiExitSystemCallDebugChecks(x, y)
182 KiUserTrap(IN PKTRAP_FRAME TrapFrame
)
184 /* Anything else but Ring 0 is Ring 3 */
185 return (TrapFrame
->SegCs
& MODE_MASK
);
189 // "BOP" code used by VDM and V8086 Mode
195 /* Invalid instruction that an invalid opcode handler must trap and handle */
196 asm volatile(".byte 0xC4\n.byte 0xC4\n");
200 // Returns whether or not this is a V86 trap by checking the EFLAGS field.
202 // FIXME: GCC 4.5 Can Improve this with "goto labels"
206 KiIsV8086TrapSafe(IN PKTRAP_FRAME TrapFrame
)
211 * The check MUST be done this way, as we guarantee that no DS/ES/FS segment
212 * is used (since it might be garbage).
214 * Instead, we use the SS segment which is guaranteed to be correct. Because
215 * operate in 32-bit flat mode, this works just fine.
219 "testl $%c[f], %%ss:%1\n"
222 : "m"(TrapFrame
->EFlags
),
223 [f
] "i"(EFLAGS_V86_MASK
)
226 /* If V86 flag was set */
231 // Returns whether or not this is a user-mode trap by checking the SegCs field.
233 // FIXME: GCC 4.5 Can Improve this with "goto labels"
237 KiIsUserTrapSafe(IN PKTRAP_FRAME TrapFrame
)
242 * The check MUST be done this way, as we guarantee that no DS/ES/FS segment
243 * is used (since it might be garbage).
245 * Instead, we use the SS segment which is guaranteed to be correct. Because
246 * operate in 32-bit flat mode, this works just fine.
250 "cmp $%c[f], %%ss:%1\n"
253 : "m"(TrapFrame
->SegCs
),
254 [f
] "i"(KGDT_R0_CODE
)
257 /* If V86 flag was set */
263 KiUserSystemCall(IN PKTRAP_FRAME TrapFrame
)
266 * Kernel call or user call?
268 * This decision is made in inlined assembly because we need to patch
269 * the relative offset of the user-mode jump to point to the SYSEXIT
270 * routine if the CPU supports it. The only way to guarantee that a
271 * relative jnz/jz instruction is generated is to force it with the
276 "test $1, %0\n" /* MODE_MASK */
277 ".globl _KiSystemCallExitBranch\n_KiSystemCallExitBranch:\n"
278 "jnz _KiSystemCallExit\n"
280 : "r"(TrapFrame
->SegCs
)
286 KiSetSaneSegments(IN PKTRAP_FRAME TrapFrame
)
291 * We really have to get a good DS/ES first before touching any data.
293 * These two reads will either go in a register (with optimizations ON) or
294 * a stack variable (which is on SS:ESP, guaranteed to be good/valid).
296 * Because the assembly is marked volatile, the order of instructions is
297 * as-is, otherwise the optimizer could simply get rid of our DS/ES.
302 Ke386SetDs(KGDT_R3_DATA
| RPL_MASK
);
303 Ke386SetEs(KGDT_R3_DATA
| RPL_MASK
);
304 TrapFrame
->SegDs
= Ds
;
305 TrapFrame
->SegEs
= Es
;
//
// Generates an Exit Epilog Stub for the given name
//
// Flag bits telling KiTrapExit what kind of frame it is tearing down.
#define KI_FUNCTION_CALL 0x1
#define KI_EDITED_FRAME 0x2
#define KI_DIRECT_EXIT 0x4
#define KI_FAST_SYSTEM_CALL_EXIT 0x8
#define KI_SYSTEM_CALL_EXIT 0x10
#define KI_SYSTEM_CALL_JUMP 0x20
// KiTrapExitStub: named, force-inlined, no-return wrapper around KiTrapExit.
// KiTrapExitStub2: same but without DECLSPEC_NORETURN/UNREACHABLE, for exits
// that can fall through back to the caller.
#define KiTrapExitStub(x, y) VOID FORCEINLINE DECLSPEC_NORETURN x(IN PKTRAP_FRAME TrapFrame) { KiTrapExit(TrapFrame, y); UNREACHABLE; }
#define KiTrapExitStub2(x, y) VOID FORCEINLINE x(IN PKTRAP_FRAME TrapFrame) { KiTrapExit(TrapFrame, y); }

//
// How volatiles will be restored
//
#define KI_EAX_NO_VOLATILES 0x0
#define KI_EAX_ONLY 0x1
#define KI_ALL_VOLATILES 0x2

//
// Exit mechanism to use
//
#define KI_EXIT_IRET 0x0
#define KI_EXIT_SYSEXIT 0x1
#define KI_EXIT_JMP 0x2
#define KI_EXIT_RET 0x3
//
// Master Trap Epilog
//
// Tears the trap frame off the stack register by register and leaves the
// kernel through the mechanism selected by the KI_* flags: IRET (default),
// RET (function calls), JMP *EDX (ring-0 system calls) or SYSEXIT (fast
// system calls). Everything below runs with ESP pointing INTO the trap
// frame, hence the %esp-relative loads with FIELD_OFFSET displacements.
//
// NOTE(review): extraction lost several lines of this function -- the
// second parameter declaration (presumably "IN ULONG Flags"), the opening/
// closing braces, the asm statement that loads ESP with the trap frame
// pointer ("making the trap frame equal to the stack"), the parentheses
// and some operand/clobber lists of the asm statements. Recover them from
// upstream history before treating this text as compilable; only comments
// were changed here.
//
KiTrapExit(IN PKTRAP_FRAME TrapFrame,

    /* Defaults: tear down up to Eip, restore everything, exit with IRET */
    ULONG FrameSize = FIELD_OFFSET(KTRAP_FRAME, Eip);
    ULONG ExitMechanism = KI_EXIT_IRET, Volatiles = KI_ALL_VOLATILES, NonVolatiles = TRUE;
    ULONG EcxField = FIELD_OFFSET(KTRAP_FRAME, Ecx), EdxField = FIELD_OFFSET(KTRAP_FRAME, Edx);

    /* System call exit needs a special label */
    if (Flags & KI_SYSTEM_CALL_EXIT) __asm__ __volatile__
        ".globl _KiSystemCallExit\n_KiSystemCallExit:\n"

    /* Start by making the trap frame equal to the stack */
    /* NOTE(review): the asm that loads ESP from TrapFrame was lost here */

    /* Check what kind of trap frame this trap requires */
    if (Flags & KI_FUNCTION_CALL)
        /* These calls have an EIP on the stack they need */
        ExitMechanism = KI_EXIT_RET;
    else if (Flags & KI_EDITED_FRAME)
        /* Edited frames store a new ESP in the error code field */
        FrameSize = FIELD_OFFSET(KTRAP_FRAME, ErrCode);
    else if (Flags & KI_DIRECT_EXIT)
        /* Exits directly without restoring anything, interrupt frame on stack */
        NonVolatiles = Volatiles = FALSE;
    else if (Flags & KI_FAST_SYSTEM_CALL_EXIT)
        /* We have a fake interrupt stack with a ring transition */
        FrameSize = FIELD_OFFSET(KTRAP_FRAME, V86Es);
        ExitMechanism = KI_EXIT_SYSEXIT;

        /* SYSEXIT wants EIP in EDX and ESP in ECX */
        EcxField = FIELD_OFFSET(KTRAP_FRAME, HardwareEsp);
        EdxField = FIELD_OFFSET(KTRAP_FRAME, Eip);
    else if (Flags & KI_SYSTEM_CALL_EXIT)
        /* Only restore EAX */
        /* NOTE(review): the comment suggests "Volatiles = KI_EAX_ONLY"; as
           written this assigns to NonVolatiles -- verify against upstream */
        NonVolatiles = KI_EAX_ONLY;
    else if (Flags & KI_SYSTEM_CALL_JUMP)
        /* We have a fake interrupt stack with no ring transition */
        FrameSize = FIELD_OFFSET(KTRAP_FRAME, HardwareEsp);
        NonVolatiles = KI_EAX_ONLY;
        ExitMechanism = KI_EXIT_JMP;

    /* Restore the non volatiles */
    if (NonVolatiles) __asm__ __volatile__
        "movl %c[b](%%esp), %%ebx\n"
        "movl %c[s](%%esp), %%esi\n"
        "movl %c[i](%%esp), %%edi\n"
        "movl %c[p](%%esp), %%ebp\n"
        : [b] "i"(FIELD_OFFSET(KTRAP_FRAME, Ebx)),
          [s] "i"(FIELD_OFFSET(KTRAP_FRAME, Esi)),
          [i] "i"(FIELD_OFFSET(KTRAP_FRAME, Edi)),
          [p] "i"(FIELD_OFFSET(KTRAP_FRAME, Ebp))

    /* Restore EAX if volatiles must be restored */
    if (Volatiles) __asm__ __volatile__
        "movl %c[a](%%esp), %%eax\n":: [a] "i"(FIELD_OFFSET(KTRAP_FRAME, Eax)) : "%esp"

    /* Restore the other volatiles if needed; the Ecx/Edx field offsets are
       variables because SYSEXIT repurposes ECX/EDX (see above) */
    if (Volatiles == KI_ALL_VOLATILES) __asm__ __volatile__
        "movl %c[c](%%esp), %%ecx\n"
        "movl %c[d](%%esp), %%edx\n"

    /* Ring 0 system calls jump back to EDX */
    if (Flags & KI_SYSTEM_CALL_JUMP) __asm__ __volatile__
        "movl %c[d](%%esp), %%edx\n":: [d] "i"(FIELD_OFFSET(KTRAP_FRAME, Eip)) : "%esp"

    /* Now destroy the trap frame on the stack */
    __asm__ __volatile__ ("addl $%c[e],%%esp\n":: [e] "i"(FrameSize) : "%esp");

    /* Edited traps need to change to a new ESP */
    if (Flags & KI_EDITED_FRAME) __asm__ __volatile__ ("movl (%%esp), %%esp\n":::"%esp");

    /* Check the exit mechanism and apply it. _KiSystemCallExit2 marks the
       instruction after the ring-0 jump for the patching code */
    if (ExitMechanism == KI_EXIT_RET) __asm__ __volatile__("ret\n"::: "%esp");
    else if (ExitMechanism == KI_EXIT_IRET) __asm__ __volatile__("iret\n"::: "%esp");
    else if (ExitMechanism == KI_EXIT_JMP) __asm__ __volatile__("jmp *%%edx\n.globl _KiSystemCallExit2\n_KiSystemCallExit2:\n"::: "%esp");
    else if (ExitMechanism == KI_EXIT_SYSEXIT) __asm__ __volatile__("sti\nsysexit\n"::: "%esp");
//
// All the specific trap epilog stubs
//
// Each line expands (via the KiTrapExitStub/KiTrapExitStub2 macros defined
// earlier in this file) into a FORCEINLINE function that calls KiTrapExit
// with the matching exit flags.
//
KiTrapExitStub (KiTrapReturn, 0);
KiTrapExitStub (KiDirectTrapReturn, KI_DIRECT_EXIT);
KiTrapExitStub (KiCallReturn, KI_FUNCTION_CALL);
KiTrapExitStub (KiEditedTrapReturn, KI_EDITED_FRAME);
/* Stub2: the ring-0 jump exit can fall through, so it is not NORETURN */
KiTrapExitStub2(KiSystemCallReturn, KI_SYSTEM_CALL_JUMP);
KiTrapExitStub (KiSystemCallSysExitReturn, KI_FAST_SYSTEM_CALL_EXIT);
KiTrapExitStub (KiSystemCallTrapReturn, KI_SYSTEM_CALL_EXIT);
//
// Generic Exit Routine
//
// Common tail for every trap handler: runs the debug checks, restores the
// SEH chain, previous mode and (for user traps) the data segments, then
// dispatches to the appropriate epilog stub above depending on how we
// entered (V86, edited frame, system call, plain interrupt).
//
// NOTE(review): extraction lost the second parameter (presumably the
// "Skip" bit mask used below), the "PULONG ReturnStack;" declaration, and
// all braces, so the nesting shown by the indentation below is
// reconstructed from the comments -- recover the exact text from upstream.
// Only comments were changed here.
//
KiExitTrap(IN PKTRAP_FRAME TrapFrame,
    /* Unpack the caller's skip flags into addressable bits */
    KTRAP_EXIT_SKIP_BITS SkipBits = { .Bits = Skip };

    /* Debugging checks */
    KiExitTrapDebugChecks(TrapFrame, SkipBits);

    /* Restore the SEH handler chain */
    KeGetPcr()->Tib.ExceptionList = TrapFrame->ExceptionList;

    /* Check if the previous mode must be restored */
    if (__builtin_expect(!SkipBits.SkipPreviousMode, 0)) /* More INTS than SYSCALLs */
        KeGetCurrentThread()->PreviousMode = TrapFrame->PreviousPreviousMode;

    /* Check if there are active debug registers */
    if (__builtin_expect(TrapFrame->Dr7 & ~DR7_RESERVED_MASK, 0))
        /* Not handled yet */
        DbgPrint("Need Hardware Breakpoint Support!\n");

    /* Check if this was a V8086 trap */
    if (__builtin_expect(TrapFrame->EFlags & EFLAGS_V86_MASK, 0)) KiTrapReturn(TrapFrame);

    /* Check if the trap frame was edited */
    if (__builtin_expect(!(TrapFrame->SegCs & FRAME_EDITED), 0))
        /*
         * An edited trap frame happens when we need to modify CS and/or ESP but
         * don't actually have a ring transition. This happens when a kernelmode
         * caller wants to perform an NtContinue to another kernel address, such
         * as in the case of SEH (basically, a longjmp), or to a user address.
         *
         * Therefore, the CPU never saved CS/ESP on the stack because we did not
         * get a trap frame due to a ring transition (there was no interrupt).
         * Even if we didn't want to restore CS to a new value, a problem occurs
         * due to the fact a normal RET would not work if we restored ESP since
         * RET would then try to read the result off the stack.
         *
         * The NT kernel solves this by adding 12 bytes of stack to the exiting
         * trap frame, in which EFLAGS, CS, and EIP are stored, and then saving
         * the ESP that's being requested into the ErrorCode field. It will then
         * exit with an IRET. This fixes both issues, because it gives the stack
         * some space where to hold the return address and then end up with the
         * wanted stack, and it uses IRET which allows a new CS to be inputted.
         */

        /* Set CS that is requested */
        TrapFrame->SegCs = TrapFrame->TempSegCs;

        /* First make space on requested stack */
        ReturnStack = (PULONG)(TrapFrame->TempEsp - 12);
        TrapFrame->ErrCode = (ULONG_PTR)ReturnStack;

        /* Now copy IRET frame */
        ReturnStack[0] = TrapFrame->Eip;
        ReturnStack[1] = TrapFrame->SegCs;
        ReturnStack[2] = TrapFrame->EFlags;

        /* Do special edited return */
        KiEditedTrapReturn(TrapFrame);

    /* Check if this is a user trap */
    if (__builtin_expect(KiUserTrap(TrapFrame), 1)) /* Ring 3 is where we spend time */
        /* Check if segments should be restored */
        if (!SkipBits.SkipSegments)
            /* Restore segments */
            Ke386SetGs(TrapFrame->SegGs);
            Ke386SetEs(TrapFrame->SegEs);
            Ke386SetDs(TrapFrame->SegDs);
            Ke386SetFs(TrapFrame->SegFs);

        /* Always restore FS since it goes from KPCR to TEB */
        Ke386SetFs(TrapFrame->SegFs);

    /* Check for system call -- a system call skips volatiles! */
    if (__builtin_expect(SkipBits.SkipVolatiles, 0)) /* More INTs than SYSCALLs */
        /* User or kernel call? (branches away to _KiSystemCallExit if user) */
        KiUserSystemCall(TrapFrame);

        /* Restore EFLAGS for the kernel-caller path */
        __writeeflags(TrapFrame->EFlags);

        /* Call is kernel, so do a jump back since this wasn't a real INT */
        KiSystemCallReturn(TrapFrame);

        /* If we got here, this is SYSEXIT: are we stepping code? */
        if (!(TrapFrame->EFlags & EFLAGS_TF))
            /* Restore user FS */
            Ke386SetFs(KGDT_R3_TEB | RPL_MASK);

            /* Remove interrupt flag (SYSEXIT path re-enables via STI) */
            TrapFrame->EFlags &= ~EFLAGS_INTERRUPT_MASK;
            __writeeflags(TrapFrame->EFlags);

            /* Exit through SYSEXIT */
            KiSystemCallSysExitReturn(TrapFrame);

        /* Exit through IRETD, either due to debugging or due to lack of SYSEXIT */
        KiSystemCallTrapReturn(TrapFrame);

    /* Return from interrupt */
    KiTrapReturn(TrapFrame);
//
// Virtual 8086 Mode Optimized Trap Exit
//
// Delivers any pending user APCs (raising to APC_LEVEL around each
// delivery) and then returns from the V86 interrupt via KiTrapReturn,
// after warning about unsupported hardware breakpoints.
//
// NOTE(review): extraction lost the Thread/OldIrql declarations, the
// enclosing loop header ("while (TRUE)" per the "break" below), the
// braces, and presumably the interrupt enable/disable calls around APC
// delivery -- recover from upstream. Only comments were changed here.
//
KiExitV86Trap(IN PKTRAP_FRAME TrapFrame)
    /* Get the current thread */
    Thread = KeGetCurrentThread();

        /* Turn off the alerted state for kernel mode */
        Thread->Alerted[KernelMode] = FALSE;

        /* Are there pending user APCs? */
        if (__builtin_expect(!Thread->ApcState.UserApcPending, 1)) break;

        /* Raise to APC level and enable interrupts */
        OldIrql = KfRaiseIrql(APC_LEVEL);

        /* Deliver the pending user-mode APC */
        KiDeliverApc(UserMode, NULL, TrapFrame);

        /* Restore IRQL and disable interrupts once again */
        KfLowerIrql(OldIrql);

        /* Return if this isn't V86 mode anymore */
        /* NOTE(review): the comment and the condition disagree -- as written
           this returns while EFLAGS still has the V86 bit set; verify */
        if (__builtin_expect(TrapFrame->EFlags & EFLAGS_V86_MASK, 0)) return;

    /* If we got here, we're still in a valid V8086 context, so quit it */
    if (__builtin_expect(TrapFrame->Dr7 & ~DR7_RESERVED_MASK, 0))
        /* Not handled yet */
        DbgPrint("Need Hardware Breakpoint Support!\n");

    /* Return from interrupt */
    KiTrapReturn(TrapFrame);
//
// Virtual 8086 Mode Optimized Trap Entry
//
// Loads kernel-sane FS/DS/ES (no segment state from V86 mode can be
// trusted), saves the SEH chain into the frame, clears the direction flag
// for compiler-generated string ops, and snapshots DR7 to detect active
// hardware breakpoints.
//
// NOTE(review): return type, braces, and the body of the Dr7 check were
// lost in extraction -- only comments were changed here.
//
KiEnterV86Trap(IN PKTRAP_FRAME TrapFrame)
    /* Load correct registers */
    Ke386SetFs(KGDT_R0_PCR);
    Ke386SetDs(KGDT_R3_DATA | RPL_MASK);
    Ke386SetEs(KGDT_R3_DATA | RPL_MASK);

    /* Save exception list */
    TrapFrame->ExceptionList = KeGetPcr()->Tib.ExceptionList;

    /* Clear direction flag */
    Ke386ClearDirectionFlag();

    /* Save DR7 and check for debugging */
    TrapFrame->Dr7 = __readdr(7);
    if (__builtin_expect(TrapFrame->Dr7 & ~DR7_RESERVED_MASK, 0))
        /* Not handled yet */
        DbgPrint("Need Hardware Breakpoint Support!\n");
//
// Interrupt Trap Entry
//
// Common prolog for hardware interrupts: normalizes the segment registers
// depending on whether we interrupted V86 code, ring-3 code, or the kernel
// itself; then saves and terminates the SEH chain, clears the direction
// flag, and fills in the debug fields of the trap frame.
//
// NOTE(review): return type, braces, the kernel-trap else-branch header,
// and (per the comment) the "flush DR7" store were lost in extraction --
// only comments were changed here.
//
KiEnterInterruptTrap(IN PKTRAP_FRAME TrapFrame)
    /* Check for V86 mode, otherwise check for ring 3 code */
    if (__builtin_expect(KiIsV8086TrapSafe(TrapFrame), 0))
        /* Set correct segments */
        Ke386SetDs(KGDT_R3_DATA | RPL_MASK);
        Ke386SetEs(KGDT_R3_DATA | RPL_MASK);
        Ke386SetFs(KGDT_R0_PCR);

        /* Restore V8086 segments into Protected Mode segments */
        TrapFrame->SegFs = TrapFrame->V86Fs;
        TrapFrame->SegGs = TrapFrame->V86Gs;
        TrapFrame->SegDs = TrapFrame->V86Ds;
        TrapFrame->SegEs = TrapFrame->V86Es;
    else if (__builtin_expect(KiIsUserTrapSafe(TrapFrame), 1)) /* Ring 3 is more common */
        /* Switch to sane segments */
        KiSetSaneSegments(TrapFrame);

        /* Save the remaining segments and load the kernel FS (PCR) */
        TrapFrame->SegFs = Ke386GetFs();
        TrapFrame->SegGs = Ke386GetGs();
        Ke386SetFs(KGDT_R0_PCR);

    /* Save exception list and terminate it */
    TrapFrame->ExceptionList = KeGetPcr()->Tib.ExceptionList;
    KeGetPcr()->Tib.ExceptionList = EXCEPTION_CHAIN_END;

    /* Clear direction flag */
    Ke386ClearDirectionFlag();

    /* Flush DR7 and check for debugging */
    if (__builtin_expect(KeGetCurrentThread()->DispatcherHeader.DebugActive & 0xFF, 0))
        /* Not handled yet */
        DbgPrint("Need Hardware Breakpoint Support!\n");

    /* Set debug header */
    KiFillTrapFrameDebug(TrapFrame);
//
// Generic Trap Entry
//
// Common prolog for exceptions/software traps: loads sane DS/ES, saves the
// remaining segments, switches FS to the kernel PCR selector, saves the SEH
// chain, handles the V86 case, clears the direction flag and stamps the
// debug fields of the trap frame.
//
// NOTE(review): return type, braces, and (per its comment) the "flush DR7"
// store were lost in extraction -- only comments were changed here.
//
KiEnterTrap(IN PKTRAP_FRAME TrapFrame)
    /* Switch to sane segments */
    KiSetSaneSegments(TrapFrame);

    /* Now we can save the other segments and then switch to the correct FS */
    TrapFrame->SegFs = Ke386GetFs();
    TrapFrame->SegGs = Ke386GetGs();
    Ke386SetFs(KGDT_R0_PCR);

    /* Save exception list */
    TrapFrame->ExceptionList = KeGetPcr()->Tib.ExceptionList;

    /* Check for V86 mode */
    if (__builtin_expect(TrapFrame->EFlags & EFLAGS_V86_MASK, 0))
        /* Restore V8086 segments into Protected Mode segments */
        TrapFrame->SegFs = TrapFrame->V86Fs;
        TrapFrame->SegGs = TrapFrame->V86Gs;
        TrapFrame->SegDs = TrapFrame->V86Ds;
        TrapFrame->SegEs = TrapFrame->V86Es;

    /* Clear direction flag */
    Ke386ClearDirectionFlag();

    /* Flush DR7 and check for debugging */
    if (__builtin_expect(KeGetCurrentThread()->DispatcherHeader.DebugActive & 0xFF, 0))
        /* Not handled yet */
        DbgPrint("Need Hardware Breakpoint Support!\n");

    /* Set debug header */
    KiFillTrapFrameDebug(TrapFrame);
//
// Generates a Trap Prolog Stub for the given name
//
// Flag bits telling KiTrapStub what kind of entry frame to build.
#define KI_PUSH_FAKE_ERROR_CODE 0x1
#define KI_UNUSED 0x2
#define KI_NONVOLATILES_ONLY 0x4
#define KI_FAST_SYSTEM_CALL 0x8
#define KI_SOFTWARE_TRAP 0x10
#define KI_HARDWARE_INT 0x20
// KiTrap: no-return trap entry thunk that builds the frame and jumps to the
// matching x##Handler C routine.
// KiTrampoline: same expansion but not NORETURN and never inlined.
#define KiTrap(x, y) VOID DECLSPEC_NORETURN x(VOID) { KiTrapStub(y, x##Handler); UNREACHABLE; }
#define KiTrampoline(x, y) VOID DECLSPEC_NOINLINE x(VOID) { KiTrapStub(y, x##Handler); }
//
// Master Trap Prolog
//
// Builds a KTRAP_FRAME on the stack for the given trap type: picks the
// frame size, spills the registers into their frame slots (ESP still points
// at the CPU-pushed portion, hence the negative -FrameSize displacements),
// moves ESP/ECX down over the frame, and jumps to the C handler.
//
// NOTE(review): extraction lost the second parameter (the Handler used at
// the end), the "FrameSize" declaration, all braces, the opening of several
// asm statements (e.g. the nonvolatile-save block and the [t]/TSS operand
// of the SYSENTER stack load), the "else" before the error-code branch, and
// the EDX-load template instruction for hardware interrupts. Recover from
// upstream; only comments were changed here.
//
KiTrapStub(IN ULONG Flags,
    /* Is this a fast system call? They don't have a stack! */
    /* Load ESP0 from the TSS: SYSENTER arrives on the user stack */
    if (Flags & KI_FAST_SYSTEM_CALL) __asm__ __volatile__
        "movl %%ss:%c[t], %%esp\n"
        "movl %c[e](%%esp), %%esp\n"
        : [e] "i"(FIELD_OFFSET(KTSS, Esp0)),

    /* Check what kind of trap frame this trap requires */
    if (Flags & KI_SOFTWARE_TRAP)
        /* Software traps need a complete non-ring transition trap frame */
        FrameSize = FIELD_OFFSET(KTRAP_FRAME, HardwareEsp);
    else if (Flags & KI_FAST_SYSTEM_CALL)
        /* SYSENTER requires us to build a complete ring transition trap frame */
        FrameSize = FIELD_OFFSET(KTRAP_FRAME, V86Es);

        /* And it only preserves nonvolatile registers */
        Flags |= KI_NONVOLATILES_ONLY;
    else if (Flags & KI_PUSH_FAKE_ERROR_CODE)
        /* If the trap doesn't have an error code, we'll make space for it */
        FrameSize = FIELD_OFFSET(KTRAP_FRAME, Eip);
        /* The trap already has an error code, so just make space for the rest */
        /* NOTE(review): this final assignment belongs to a lost "else" branch */
        FrameSize = FIELD_OFFSET(KTRAP_FRAME, ErrCode);

    /* Software traps need to get their EIP from the caller's frame */
    if (Flags & KI_SOFTWARE_TRAP) __asm__ __volatile__ ("popl %%eax\n":::"%esp");

    /* Save nonvolatile registers */
        /* EBX, ESI, EDI and EBP are saved */
        "movl %%ebp, %c[p](%%esp)\n"
        "movl %%ebx, %c[b](%%esp)\n"
        "movl %%esi, %c[s](%%esp)\n"
        "movl %%edi, %c[i](%%esp)\n"
        : [b] "i"(- FrameSize + FIELD_OFFSET(KTRAP_FRAME, Ebx)),
          [s] "i"(- FrameSize + FIELD_OFFSET(KTRAP_FRAME, Esi)),
          [i] "i"(- FrameSize + FIELD_OFFSET(KTRAP_FRAME, Edi)),
          [p] "i"(- FrameSize + FIELD_OFFSET(KTRAP_FRAME, Ebp))

    /* Does the caller want nonvolatiles only? */
    if (!(Flags & KI_NONVOLATILES_ONLY)) __asm__ __volatile__
        /* Otherwise, save the volatiles as well */
        "movl %%eax, %c[a](%%esp)\n"
        "movl %%ecx, %c[c](%%esp)\n"
        "movl %%edx, %c[d](%%esp)\n"
        : [a] "i"(- FrameSize + FIELD_OFFSET(KTRAP_FRAME, Eax)),
          [c] "i"(- FrameSize + FIELD_OFFSET(KTRAP_FRAME, Ecx)),
          [d] "i"(- FrameSize + FIELD_OFFSET(KTRAP_FRAME, Edx))

    /* Now set parameter 1 (ECX) to point to the frame */
    /* NOTE(review): clobber list says %esp but the write is to %ecx -- verify */
    __asm__ __volatile__ ("movl %%esp, %%ecx\n":::"%esp");

    /* Now go ahead and make space for this frame */
    __asm__ __volatile__ ("subl $%c[e],%%esp\n":: [e] "i"(FrameSize) : "%esp");
    __asm__ __volatile__ ("subl $%c[e],%%ecx\n":: [e] "i"(FrameSize) : "%ecx");

    /*
     * For hardware interrupts, set parameter 2 (EDX) to hold KINTERRUPT.
     * This code will be dynamically patched when an interrupt is registered!
     */
    if (Flags & KI_HARDWARE_INT) __asm__ __volatile__
        ".globl _KiInterruptTemplate2ndDispatch\n_KiInterruptTemplate2ndDispatch:\n"
        ".globl _KiInterruptTemplateObject\n_KiInterruptTemplateObject:\n"

    /* Now jump to the C handler */
    if (Flags & KI_HARDWARE_INT)__asm__ __volatile__
        /*
         * For hardware interrupts, use an absolute JMP instead of a relative JMP
         * since the position of this code is arbitrary in memory, and therefore
         * the compiler-generated offset will not be correct.
         */
        ".globl _KiInterruptTemplateDispatch\n_KiInterruptTemplateDispatch:\n"
    else __asm__ __volatile__ ("jmp %c[x]\n":: [x] "i"(Handler));