* PURPOSE: Internal Inlined Functions for the Trap Handling Code
* PROGRAMMERS: ReactOS Portable Systems Group
*/
-#ifndef _TRAP_X_
-#define _TRAP_X_
+
+#pragma once
//
-// Unreachable code hint for GCC 4.5.x, 4.4.x, and MSVC
+// Unreachable code hint for GCC 4.5.x, older GCC versions, and MSVC
//
+#ifdef __GNUC__
#if __GNUC__ * 100 + __GNUC_MINOR__ >= 405
#define UNREACHABLE __builtin_unreachable()
-#elif __GNUC__ * 100 + __GNUC_MINOR__ >= 404
+#else
#define UNREACHABLE __builtin_trap()
+#endif
#elif _MSC_VER
#define UNREACHABLE __assume(0)
#else
}
/* Make sure we have a valid SEH chain */
- if (KeGetPcr()->Tib.ExceptionList == 0)
+ if (KeGetPcr()->NtTib.ExceptionList == 0)
{
- DbgPrint("Exiting with NULL exception chain: %p\n", KeGetPcr()->Tib.ExceptionList);
+ DbgPrint("Exiting with NULL exception chain: %p\n", KeGetPcr()->NtTib.ExceptionList);
while (TRUE);
}
}
//
-// Assembly exit stubs
+// "BOP" code used by VDM and V8086 Mode
//
VOID
FORCEINLINE
-//DECLSPEC_NORETURN
-KiSystemCallReturn(IN PKTRAP_FRAME TrapFrame)
+KiIssueBop(VOID)
{
- /* Restore nonvolatiles, EAX, and do a "jump" back to the kernel caller */
- __asm__ __volatile__
- (
- "movl %0, %%esp\n"
- "movl %c[b](%%esp), %%ebx\n"
- "movl %c[s](%%esp), %%esi\n"
- "movl %c[i](%%esp), %%edi\n"
- "movl %c[p](%%esp), %%ebp\n"
- "movl %c[a](%%esp), %%eax\n"
- "movl %c[e](%%esp), %%edx\n"
- "addl $%c[v],%%esp\n" /* A WHOLE *KERNEL* frame since we're not IRET'ing */
- "jmp *%%edx\n"
- :
- : "r"(TrapFrame),
- [b] "i"(KTRAP_FRAME_EBX),
- [s] "i"(KTRAP_FRAME_ESI),
- [i] "i"(KTRAP_FRAME_EDI),
- [p] "i"(KTRAP_FRAME_EBP),
- [a] "i"(KTRAP_FRAME_EAX),
- [e] "i"(KTRAP_FRAME_EIP),
- [v] "i"(KTRAP_FRAME_ESP)
- : "%esp"
- );
- UNREACHABLE;
+ /* Invalid instruction that an invalid opcode handler must trap and handle */
+ asm volatile(".byte 0xC4\n.byte 0xC4\n");
}
VOID
FORCEINLINE
-//DECLSPEC_NORETURN
-KiSystemCallTrapReturn(IN PKTRAP_FRAME TrapFrame)
+KiUserSystemCall(IN PKTRAP_FRAME TrapFrame)
{
- /* Regular interrupt exit, but we only restore EAX as a volatile */
- __asm__ __volatile__
+ /*
+ * Kernel call or user call?
+ *
+ * This decision is made in inlined assembly because we need to patch
+ * the relative offset of the user-mode jump to point to the SYSEXIT
+ * routine if the CPU supports it. The only way to guarantee that a
+ * relative jnz/jz instruction is generated is to force it with the
+ * inline assembler.
+ */
+ asm volatile
(
- "movl %0, %%esp\n"
- "movl %c[b](%%esp), %%ebx\n"
- "movl %c[s](%%esp), %%esi\n"
- "movl %c[i](%%esp), %%edi\n"
- "movl %c[p](%%esp), %%ebp\n"
- "movl %c[a](%%esp), %%eax\n"
- "addl $%c[e],%%esp\n"
- "iret\n"
+ "test $1, %0\n" /* MODE_MASK */
+ ".globl _KiSystemCallExitBranch\n_KiSystemCallExitBranch:\n"
+ "jnz _KiSystemCallExit\n"
:
- : "r"(TrapFrame),
- [b] "i"(KTRAP_FRAME_EBX),
- [s] "i"(KTRAP_FRAME_ESI),
- [i] "i"(KTRAP_FRAME_EDI),
- [p] "i"(KTRAP_FRAME_EBP),
- [a] "i"(KTRAP_FRAME_EAX),
- [e] "i"(KTRAP_FRAME_EIP)
- : "%esp"
+ : "r"(TrapFrame->SegCs)
);
- UNREACHABLE;
}
+//
+// Generates an Exit Epilog Stub for the given name
+//
+#define KI_FUNCTION_CALL 0x1
+#define KI_EDITED_FRAME 0x2
+#define KI_DIRECT_EXIT 0x4
+#define KI_FAST_SYSTEM_CALL_EXIT 0x8
+#define KI_SYSTEM_CALL_EXIT 0x10
+#define KI_SYSTEM_CALL_JUMP 0x20
+#define KiTrapExitStub(x, y) VOID FORCEINLINE DECLSPEC_NORETURN x(IN PKTRAP_FRAME TrapFrame) { KiTrapExit(TrapFrame, y); UNREACHABLE; }
+#define KiTrapExitStub2(x, y) VOID FORCEINLINE x(IN PKTRAP_FRAME TrapFrame) { KiTrapExit(TrapFrame, y); }
+
+//
+// How volatiles will be restored
+//
+#define KI_EAX_NO_VOLATILES 0x0
+#define KI_EAX_ONLY 0x1
+#define KI_ALL_VOLATILES 0x2
+
+//
+// Exit mechanism to use
+//
+#define KI_EXIT_IRET 0x0
+#define KI_EXIT_SYSEXIT 0x1
+#define KI_EXIT_JMP 0x2
+#define KI_EXIT_RET 0x3
+
+//
+// Master Trap Epilog
+//
VOID
FORCEINLINE
-//DECLSPEC_NORETURN
-KiSystemCallSysExitReturn(IN PKTRAP_FRAME TrapFrame)
+KiTrapExit(IN PKTRAP_FRAME TrapFrame,
+ IN ULONG Flags)
{
- /* Restore nonvolatiles, EAX, and do a SYSEXIT back to the user caller */
+ ULONG FrameSize = FIELD_OFFSET(KTRAP_FRAME, Eip);
+ ULONG ExitMechanism = KI_EXIT_IRET, Volatiles = KI_ALL_VOLATILES, NonVolatiles = TRUE;
+ ULONG EcxField = FIELD_OFFSET(KTRAP_FRAME, Ecx), EdxField = FIELD_OFFSET(KTRAP_FRAME, Edx);
+
+ /* System call exit needs a special label */
+ if (Flags & KI_SYSTEM_CALL_EXIT) __asm__ __volatile__
+ (
+ ".globl _KiSystemCallExit\n_KiSystemCallExit:\n"
+ );
+
+ /* Start by making the trap frame equal to the stack */
__asm__ __volatile__
(
"movl %0, %%esp\n"
- "movl %c[s](%%esp), %%esi\n"
- "movl %c[b](%%esp), %%ebx\n"
- "movl %c[i](%%esp), %%edi\n"
- "movl %c[p](%%esp), %%ebp\n"
- "movl %c[a](%%esp), %%eax\n"
- "movl %c[e](%%esp), %%edx\n" /* SYSEXIT says EIP in EDX */
- "movl %c[x](%%esp), %%ecx\n" /* SYSEXIT says ESP in ECX */
- "addl $%c[v],%%esp\n" /* A WHOLE *USER* frame since we're not IRET'ing */
- "sti\nsysexit\n"
:
- : "r"(TrapFrame),
- [b] "i"(KTRAP_FRAME_EBX),
- [s] "i"(KTRAP_FRAME_ESI),
- [i] "i"(KTRAP_FRAME_EDI),
- [p] "i"(KTRAP_FRAME_EBP),
- [a] "i"(KTRAP_FRAME_EAX),
- [e] "i"(KTRAP_FRAME_EIP),
- [x] "i"(KTRAP_FRAME_ESP),
- [v] "i"(KTRAP_FRAME_V86_ES)
+ : "r"(TrapFrame)
: "%esp"
);
- UNREACHABLE;
-}
-
-VOID
-FORCEINLINE
-//DECLSPEC_NORETURN
-KiTrapReturn(IN PKTRAP_FRAME TrapFrame)
-{
- /* Regular interrupt exit */
- __asm__ __volatile__
+
+ /* Check what kind of trap frame this trap requires */
+ if (Flags & KI_FUNCTION_CALL)
+ {
+ /* These calls have an EIP on the stack they need */
+ ExitMechanism = KI_EXIT_RET;
+ Volatiles = FALSE;
+ }
+ else if (Flags & KI_EDITED_FRAME)
+ {
+ /* Edited frames store a new ESP in the error code field */
+ FrameSize = FIELD_OFFSET(KTRAP_FRAME, ErrCode);
+ }
+ else if (Flags & KI_DIRECT_EXIT)
+ {
+ /* Exits directly without restoring anything, interrupt frame on stack */
+ NonVolatiles = Volatiles = FALSE;
+ }
+ else if (Flags & KI_FAST_SYSTEM_CALL_EXIT)
+ {
+ /* We have a fake interrupt stack with a ring transition */
+ FrameSize = FIELD_OFFSET(KTRAP_FRAME, V86Es);
+ ExitMechanism = KI_EXIT_SYSEXIT;
+
+ /* SYSEXIT wants EIP in EDX and ESP in ECX */
+ EcxField = FIELD_OFFSET(KTRAP_FRAME, HardwareEsp);
+ EdxField = FIELD_OFFSET(KTRAP_FRAME, Eip);
+ }
+    else if (Flags & KI_SYSTEM_CALL_EXIT)
+    {
+        /* Only restore EAX as a volatile (ECX/EDX are caller-trashed on syscalls);
+           bug fix: the selector is Volatiles, not the NonVolatiles boolean --
+           assigning NonVolatiles here left Volatiles == KI_ALL_VOLATILES, which
+           would also reload ECX/EDX, unlike the legacy KiSystemCallTrapReturn */
+        Volatiles = KI_EAX_ONLY;
+    }
+    else if (Flags & KI_SYSTEM_CALL_JUMP)
+    {
+        /* We have a fake interrupt stack with no ring transition */
+        FrameSize = FIELD_OFFSET(KTRAP_FRAME, HardwareEsp);
+        /* Bug fix: restore EAX only (Volatiles selector), not NonVolatiles --
+           EDX is loaded with the return EIP just before the jump, matching the
+           legacy KiSystemCallReturn stub, so a full volatile reload is wrong */
+        Volatiles = KI_EAX_ONLY;
+        ExitMechanism = KI_EXIT_JMP;
+    }
+
+ /* Restore the non volatiles */
+ if (NonVolatiles) __asm__ __volatile__
(
- "movl %0, %%esp\n"
- "movl %c[a](%%esp), %%eax\n"
"movl %c[b](%%esp), %%ebx\n"
- "movl %c[c](%%esp), %%ecx\n"
- "movl %c[d](%%esp), %%edx\n"
"movl %c[s](%%esp), %%esi\n"
"movl %c[i](%%esp), %%edi\n"
"movl %c[p](%%esp), %%ebp\n"
- "addl $%c[e],%%esp\n"
- "iret\n"
:
- : "r"(TrapFrame),
- [a] "i"(KTRAP_FRAME_EAX),
- [b] "i"(KTRAP_FRAME_EBX),
- [c] "i"(KTRAP_FRAME_ECX),
- [d] "i"(KTRAP_FRAME_EDX),
- [s] "i"(KTRAP_FRAME_ESI),
- [i] "i"(KTRAP_FRAME_EDI),
- [p] "i"(KTRAP_FRAME_EBP),
- [e] "i"(KTRAP_FRAME_EIP)
+ : [b] "i"(FIELD_OFFSET(KTRAP_FRAME, Ebx)),
+ [s] "i"(FIELD_OFFSET(KTRAP_FRAME, Esi)),
+ [i] "i"(FIELD_OFFSET(KTRAP_FRAME, Edi)),
+ [p] "i"(FIELD_OFFSET(KTRAP_FRAME, Ebp))
: "%esp"
);
- UNREACHABLE;
-}
-
-VOID
-FORCEINLINE
-//DECLSPEC_NORETURN
-KiEditedTrapReturn(IN PKTRAP_FRAME TrapFrame)
-{
- /* Regular interrupt exit */
- __asm__ __volatile__
+
+ /* Restore EAX if volatiles must be restored */
+ if (Volatiles) __asm__ __volatile__
+ (
+ "movl %c[a](%%esp), %%eax\n":: [a] "i"(FIELD_OFFSET(KTRAP_FRAME, Eax)) : "%esp"
+ );
+
+ /* Restore the other volatiles if needed */
+ if (Volatiles == KI_ALL_VOLATILES) __asm__ __volatile__
(
- "movl %0, %%esp\n"
- "movl %c[a](%%esp), %%eax\n"
- "movl %c[b](%%esp), %%ebx\n"
"movl %c[c](%%esp), %%ecx\n"
"movl %c[d](%%esp), %%edx\n"
- "movl %c[s](%%esp), %%esi\n"
- "movl %c[i](%%esp), %%edi\n"
- "movl %c[p](%%esp), %%ebp\n"
- "addl $%c[e],%%esp\n"
- "movl (%%esp), %%esp\n"
- "iret\n"
:
- : "r"(TrapFrame),
- [a] "i"(KTRAP_FRAME_EAX),
- [b] "i"(KTRAP_FRAME_EBX),
- [c] "i"(KTRAP_FRAME_ECX),
- [d] "i"(KTRAP_FRAME_EDX),
- [s] "i"(KTRAP_FRAME_ESI),
- [i] "i"(KTRAP_FRAME_EDI),
- [p] "i"(KTRAP_FRAME_EBP),
- [e] "i"(KTRAP_FRAME_ERROR_CODE) /* We *WANT* the error code since ESP is there! */
+ : [c] "i"(EcxField),
+ [d] "i"(EdxField)
: "%esp"
);
- UNREACHABLE;
+
+ /* Ring 0 system calls jump back to EDX */
+ if (Flags & KI_SYSTEM_CALL_JUMP) __asm__ __volatile__
+ (
+ "movl %c[d](%%esp), %%edx\n":: [d] "i"(FIELD_OFFSET(KTRAP_FRAME, Eip)) : "%esp"
+ );
+
+ /* Now destroy the trap frame on the stack */
+ __asm__ __volatile__ ("addl $%c[e],%%esp\n":: [e] "i"(FrameSize) : "%esp");
+
+ /* Edited traps need to change to a new ESP */
+ if (Flags & KI_EDITED_FRAME) __asm__ __volatile__ ("movl (%%esp), %%esp\n":::"%esp");
+
+ /* Check the exit mechanism and apply it */
+ if (ExitMechanism == KI_EXIT_RET) __asm__ __volatile__("ret\n"::: "%esp");
+ else if (ExitMechanism == KI_EXIT_IRET) __asm__ __volatile__("iret\n"::: "%esp");
+ else if (ExitMechanism == KI_EXIT_JMP) __asm__ __volatile__("jmp *%%edx\n.globl _KiSystemCallExit2\n_KiSystemCallExit2:\n"::: "%esp");
+ else if (ExitMechanism == KI_EXIT_SYSEXIT) __asm__ __volatile__("sti\nsysexit\n"::: "%esp");
}
+//
+// All the specific trap epilog stubs
+//
+KiTrapExitStub (KiTrapReturn, 0);
+KiTrapExitStub (KiDirectTrapReturn, KI_DIRECT_EXIT);
+KiTrapExitStub (KiCallReturn, KI_FUNCTION_CALL);
+KiTrapExitStub (KiEditedTrapReturn, KI_EDITED_FRAME);
+KiTrapExitStub2(KiSystemCallReturn, KI_SYSTEM_CALL_JUMP);
+KiTrapExitStub (KiSystemCallSysExitReturn, KI_FAST_SYSTEM_CALL_EXIT);
+KiTrapExitStub (KiSystemCallTrapReturn, KI_SYSTEM_CALL_EXIT);
+
//
// Generic Exit Routine
//
VOID
FORCEINLINE
-//DECLSPEC_NORETURN
+DECLSPEC_NORETURN
KiExitTrap(IN PKTRAP_FRAME TrapFrame,
IN UCHAR Skip)
{
KiExitTrapDebugChecks(TrapFrame, SkipBits);
/* Restore the SEH handler chain */
- KeGetPcr()->Tib.ExceptionList = TrapFrame->ExceptionList;
+ KeGetPcr()->NtTib.ExceptionList = TrapFrame->ExceptionList;
/* Check if the previous mode must be restored */
if (__builtin_expect(!SkipBits.SkipPreviousMode, 0)) /* More INTS than SYSCALLs */
/* Check for system call -- a system call skips volatiles! */
if (__builtin_expect(SkipBits.SkipVolatiles, 0)) /* More INTs than SYSCALLs */
{
- /* Kernel call or user call? */
- if (__builtin_expect(KiUserTrap(TrapFrame), 1)) /* More Ring 3 than 0 */
- {
- /* Is SYSENTER supported and/or enabled, or are we stepping code? */
- if (__builtin_expect((KiFastSystemCallDisable) ||
- (TrapFrame->EFlags & EFLAGS_TF), 0))
- {
- /* Exit normally */
- KiSystemCallTrapReturn(TrapFrame);
- }
- else
- {
- /* Restore user FS */
- Ke386SetFs(KGDT_R3_TEB | RPL_MASK);
-
- /* Remove interrupt flag */
- TrapFrame->EFlags &= ~EFLAGS_INTERRUPT_MASK;
- __writeeflags(TrapFrame->EFlags);
-
- /* Exit through SYSEXIT */
- KiSystemCallSysExitReturn(TrapFrame);
- }
- }
- else
+ /* User or kernel call? */
+ KiUserSystemCall(TrapFrame);
+
+ /* Restore EFLags */
+ __writeeflags(TrapFrame->EFlags);
+
+ /* Call is kernel, so do a jump back since this wasn't a real INT */
+ KiSystemCallReturn(TrapFrame);
+
+ /* If we got here, this is SYSEXIT: are we stepping code? */
+ if (!(TrapFrame->EFlags & EFLAGS_TF))
{
- /* Restore EFLags */
+ /* Restore user FS */
+ Ke386SetFs(KGDT_R3_TEB | RPL_MASK);
+
+ /* Remove interrupt flag */
+ TrapFrame->EFlags &= ~EFLAGS_INTERRUPT_MASK;
__writeeflags(TrapFrame->EFlags);
-
- /* Call is kernel, so do a jump back since this wasn't a real INT */
- KiSystemCallReturn(TrapFrame);
- }
- }
- else
- {
- /* Return from interrupt */
- KiTrapReturn(TrapFrame);
+
+ /* Exit through SYSEXIT */
+ KiSystemCallSysExitReturn(TrapFrame);
+ }
+
+ /* Exit through IRETD, either due to debugging or due to lack of SYSEXIT */
+ KiSystemCallTrapReturn(TrapFrame);
}
+
+ /* Return from interrupt */
+ KiTrapReturn(TrapFrame);
}
//
FORCEINLINE
KiEnterV86Trap(IN PKTRAP_FRAME TrapFrame)
{
- /* Load correct registers */
- Ke386SetFs(KGDT_R0_PCR);
- Ke386SetDs(KGDT_R3_DATA | RPL_MASK);
- Ke386SetEs(KGDT_R3_DATA | RPL_MASK);
+ /* Save exception list */
+ TrapFrame->ExceptionList = KeGetPcr()->NtTib.ExceptionList;
- /* Save exception list and bogus previous mode */
- TrapFrame->PreviousPreviousMode = -1;
- TrapFrame->ExceptionList = KeGetPcr()->Tib.ExceptionList;
-
- /* Clear direction flag */
- Ke386ClearDirectionFlag();
-
/* Save DR7 and check for debugging */
TrapFrame->Dr7 = __readdr(7);
if (__builtin_expect(TrapFrame->Dr7 & ~DR7_RESERVED_MASK, 0))
FORCEINLINE
KiEnterInterruptTrap(IN PKTRAP_FRAME TrapFrame)
{
- /* Set bogus previous mode */
- TrapFrame->PreviousPreviousMode = -1;
-
- /* Check for V86 mode */
- if (__builtin_expect(TrapFrame->EFlags & EFLAGS_V86_MASK, 0))
- {
- DbgPrint("Need V8086 Interrupt Support!\n");
- while (TRUE);
- }
-
- /* Check if this wasn't kernel code */
- if (__builtin_expect(TrapFrame->SegCs != KGDT_R0_CODE, 1)) /* Ring 3 is more common */
- {
- /* Save segments and then switch to correct ones */
- TrapFrame->SegFs = Ke386GetFs();
- TrapFrame->SegGs = Ke386GetGs();
- TrapFrame->SegDs = Ke386GetDs();
- TrapFrame->SegEs = Ke386GetEs();
- Ke386SetFs(KGDT_R0_PCR);
- Ke386SetDs(KGDT_R3_DATA | RPL_MASK);
- Ke386SetEs(KGDT_R3_DATA | RPL_MASK);
- }
-
/* Save exception list and terminate it */
- TrapFrame->ExceptionList = KeGetPcr()->Tib.ExceptionList;
- KeGetPcr()->Tib.ExceptionList = EXCEPTION_CHAIN_END;
-
- /* No error code */
- TrapFrame->ErrCode = 0;
-
- /* Clear direction flag */
- Ke386ClearDirectionFlag();
-
+ TrapFrame->ExceptionList = KeGetPcr()->NtTib.ExceptionList;
+ KeGetPcr()->NtTib.ExceptionList = EXCEPTION_CHAIN_END;
+
/* Flush DR7 and check for debugging */
TrapFrame->Dr7 = 0;
if (__builtin_expect(KeGetCurrentThread()->DispatcherHeader.DebugActive & 0xFF, 0))
FORCEINLINE
KiEnterTrap(IN PKTRAP_FRAME TrapFrame)
{
- ULONG Ds, Es;
-
- /*
- * We really have to get a good DS/ES first before touching any data.
- *
- * These two reads will either go in a register (with optimizations ON) or
- * a stack variable (which is on SS:ESP, guaranteed to be good/valid).
- *
- * Because the assembly is marked volatile, the order of instructions is
- * as-is, otherwise the optimizer could simply get rid of our DS/ES.
- *
- */
- Ds = Ke386GetDs();
- Es = Ke386GetEs();
- Ke386SetDs(KGDT_R3_DATA | RPL_MASK);
- Ke386SetEs(KGDT_R3_DATA | RPL_MASK);
- TrapFrame->SegDs = Ds;
- TrapFrame->SegEs = Es;
-
- /* Now we can save the other segments and then switch to the correct FS */
- TrapFrame->SegFs = Ke386GetFs();
- TrapFrame->SegGs = Ke386GetGs();
- Ke386SetFs(KGDT_R0_PCR);
-
- /* Save exception list and bogus previous mode */
- TrapFrame->PreviousPreviousMode = -1;
- TrapFrame->ExceptionList = KeGetPcr()->Tib.ExceptionList;
-
- /* Check for V86 mode */
- if (__builtin_expect(TrapFrame->EFlags & EFLAGS_V86_MASK, 0))
- {
- /* Restore V8086 segments into Protected Mode segments */
- TrapFrame->SegFs = TrapFrame->V86Fs;
- TrapFrame->SegGs = TrapFrame->V86Gs;
- TrapFrame->SegDs = TrapFrame->V86Ds;
- TrapFrame->SegEs = TrapFrame->V86Es;
- }
-
- /* Clear direction flag */
- Ke386ClearDirectionFlag();
+ /* Save exception list */
+ TrapFrame->ExceptionList = KeGetPcr()->NtTib.ExceptionList;
/* Flush DR7 and check for debugging */
TrapFrame->Dr7 = 0;
/* Set debug header */
KiFillTrapFrameDebug(TrapFrame);
}
-#endif