* PURPOSE: Internal Inlined Functions for the Trap Handling Code
* PROGRAMMERS: ReactOS Portable Systems Group
*/
-#ifndef _TRAP_X_
-#define _TRAP_X_
+
+#pragma once
//
// Unreachable code hint for GCC 4.5.x, older GCC versions, and MSVC
}
/* Make sure we have a valid SEH chain */
- if (KeGetPcr()->Tib.ExceptionList == 0)
+ if (KeGetPcr()->NtTib.ExceptionList == 0)
{
- DbgPrint("Exiting with NULL exception chain: %p\n", KeGetPcr()->Tib.ExceptionList);
+ DbgPrint("Exiting with NULL exception chain: %p\n", KeGetPcr()->NtTib.ExceptionList);
while (TRUE);
}
}
//
-// Assembly exit stubs
+// "BOP" code used by VDM and V8086 Mode
//
VOID
FORCEINLINE
-/* Do not mark this as DECLSPEC_NORETURN because possibly executing code follows it! */
-KiSystemCallReturn(IN PKTRAP_FRAME TrapFrame)
+KiIssueBop(VOID)
{
- /* Restore nonvolatiles, EAX, and do a "jump" back to the kernel caller */
- __asm__ __volatile__
- (
- "movl %0, %%esp\n"
- "movl %c[b](%%esp), %%ebx\n"
- "movl %c[s](%%esp), %%esi\n"
- "movl %c[i](%%esp), %%edi\n"
- "movl %c[p](%%esp), %%ebp\n"
- "movl %c[a](%%esp), %%eax\n"
- "movl %c[e](%%esp), %%edx\n"
- "addl $%c[v],%%esp\n" /* A WHOLE *KERNEL* frame since we're not IRET'ing */
- "jmp *%%edx\n"
- ".globl _KiSystemCallExit2\n_KiSystemCallExit2:\n"
- :
- : "r"(TrapFrame),
- [b] "i"(KTRAP_FRAME_EBX),
- [s] "i"(KTRAP_FRAME_ESI),
- [i] "i"(KTRAP_FRAME_EDI),
- [p] "i"(KTRAP_FRAME_EBP),
- [a] "i"(KTRAP_FRAME_EAX),
- [e] "i"(KTRAP_FRAME_EIP),
- [v] "i"(KTRAP_FRAME_ESP)
- : "%esp"
- );
+ /* Invalid instruction that an invalid opcode handler must trap and handle */
+ asm volatile(".byte 0xC4\n.byte 0xC4\n");
}
VOID
FORCEINLINE
-DECLSPEC_NORETURN
-KiSystemCallTrapReturn(IN PKTRAP_FRAME TrapFrame)
+KiUserSystemCall(IN PKTRAP_FRAME TrapFrame)
{
- /* Regular interrupt exit, but we only restore EAX as a volatile */
- __asm__ __volatile__
+ /*
+ * Kernel call or user call?
+ *
+ * This decision is made in inlined assembly because we need to patch
+ * the relative offset of the user-mode jump to point to the SYSEXIT
+ * routine if the CPU supports it. The only way to guarantee that a
+ * relative jnz/jz instruction is generated is to force it with the
+ * inline assembler.
+ */
+ asm volatile
(
- ".globl _KiSystemCallExit\n_KiSystemCallExit:\n"
- "movl %0, %%esp\n"
- "movl %c[b](%%esp), %%ebx\n"
- "movl %c[s](%%esp), %%esi\n"
- "movl %c[i](%%esp), %%edi\n"
- "movl %c[p](%%esp), %%ebp\n"
- "movl %c[a](%%esp), %%eax\n"
- "addl $%c[e],%%esp\n"
- "iret\n"
+ "test $1, %0\n" /* MODE_MASK */
+ ".globl _KiSystemCallExitBranch\n_KiSystemCallExitBranch:\n"
+ "jnz _KiSystemCallExit\n"
:
- : "r"(TrapFrame),
- [b] "i"(KTRAP_FRAME_EBX),
- [s] "i"(KTRAP_FRAME_ESI),
- [i] "i"(KTRAP_FRAME_EDI),
- [p] "i"(KTRAP_FRAME_EBP),
- [a] "i"(KTRAP_FRAME_EAX),
- [e] "i"(KTRAP_FRAME_EIP)
- : "%esp"
+ : "r"(TrapFrame->SegCs)
);
- UNREACHABLE;
}
-VOID
-FORCEINLINE
-DECLSPEC_NORETURN
-KiSystemCallSysExitReturn(IN PKTRAP_FRAME TrapFrame)
-{
- /* Restore nonvolatiles, EAX, and do a SYSEXIT back to the user caller */
- __asm__ __volatile__
- (
- "movl %0, %%esp\n"
- "movl %c[s](%%esp), %%esi\n"
- "movl %c[b](%%esp), %%ebx\n"
- "movl %c[i](%%esp), %%edi\n"
- "movl %c[p](%%esp), %%ebp\n"
- "movl %c[a](%%esp), %%eax\n"
- "movl %c[e](%%esp), %%edx\n" /* SYSEXIT says EIP in EDX */
- "movl %c[x](%%esp), %%ecx\n" /* SYSEXIT says ESP in ECX */
- "addl $%c[v],%%esp\n" /* A WHOLE *USER* frame since we're not IRET'ing */
- "sti\nsysexit\n"
- :
- : "r"(TrapFrame),
- [b] "i"(KTRAP_FRAME_EBX),
- [s] "i"(KTRAP_FRAME_ESI),
- [i] "i"(KTRAP_FRAME_EDI),
- [p] "i"(KTRAP_FRAME_EBP),
- [a] "i"(KTRAP_FRAME_EAX),
- [e] "i"(KTRAP_FRAME_EIP),
- [x] "i"(KTRAP_FRAME_ESP),
- [v] "i"(KTRAP_FRAME_V86_ES)
- : "%esp"
- );
- UNREACHABLE;
-}
+//
+// Generates an Exit Epilog Stub for the given name
+//
+#define KI_FUNCTION_CALL 0x1
+#define KI_EDITED_FRAME 0x2
+#define KI_DIRECT_EXIT 0x4
+#define KI_FAST_SYSTEM_CALL_EXIT 0x8
+#define KI_SYSTEM_CALL_EXIT 0x10
+#define KI_SYSTEM_CALL_JUMP 0x20
+#define KiTrapExitStub(x, y) VOID FORCEINLINE DECLSPEC_NORETURN x(IN PKTRAP_FRAME TrapFrame) { KiTrapExit(TrapFrame, y); UNREACHABLE; }
+#define KiTrapExitStub2(x, y) VOID FORCEINLINE x(IN PKTRAP_FRAME TrapFrame) { KiTrapExit(TrapFrame, y); }
+//
+// How volatiles will be restored
+//
+#define KI_EAX_NO_VOLATILES 0x0
+#define KI_EAX_ONLY 0x1
+#define KI_ALL_VOLATILES 0x2
+
+//
+// Exit mechanism to use
+//
+#define KI_EXIT_IRET 0x0
+#define KI_EXIT_SYSEXIT 0x1
+#define KI_EXIT_JMP 0x2
+#define KI_EXIT_RET 0x3
+
+//
+// Master Trap Epilog
+//
VOID
FORCEINLINE
-DECLSPEC_NORETURN
-KiTrapReturn(IN PKTRAP_FRAME TrapFrame)
+KiTrapExit(IN PKTRAP_FRAME TrapFrame,
+ IN ULONG Flags)
{
- /* Regular interrupt exit */
- __asm__ __volatile__
+ ULONG FrameSize = FIELD_OFFSET(KTRAP_FRAME, Eip);
+ ULONG ExitMechanism = KI_EXIT_IRET, Volatiles = KI_ALL_VOLATILES, NonVolatiles = TRUE;
+ ULONG EcxField = FIELD_OFFSET(KTRAP_FRAME, Ecx), EdxField = FIELD_OFFSET(KTRAP_FRAME, Edx);
+
+ /* System call exit needs a special label */
+ if (Flags & KI_SYSTEM_CALL_EXIT) __asm__ __volatile__
(
- "movl %0, %%esp\n"
- "movl %c[a](%%esp), %%eax\n"
- "movl %c[b](%%esp), %%ebx\n"
- "movl %c[c](%%esp), %%ecx\n"
- "movl %c[d](%%esp), %%edx\n"
- "movl %c[s](%%esp), %%esi\n"
- "movl %c[i](%%esp), %%edi\n"
- "movl %c[p](%%esp), %%ebp\n"
- "addl $%c[e],%%esp\n"
- "iret\n"
- :
- : "r"(TrapFrame),
- [a] "i"(KTRAP_FRAME_EAX),
- [b] "i"(KTRAP_FRAME_EBX),
- [c] "i"(KTRAP_FRAME_ECX),
- [d] "i"(KTRAP_FRAME_EDX),
- [s] "i"(KTRAP_FRAME_ESI),
- [i] "i"(KTRAP_FRAME_EDI),
- [p] "i"(KTRAP_FRAME_EBP),
- [e] "i"(KTRAP_FRAME_EIP)
- : "%esp"
+ ".globl _KiSystemCallExit\n_KiSystemCallExit:\n"
);
- UNREACHABLE;
-}
-
-VOID
-FORCEINLINE
-DECLSPEC_NORETURN
-KiDirectTrapReturn(IN PKTRAP_FRAME TrapFrame)
-{
- /* Regular interrupt exit but we're not restoring any registers */
+
+ /* Start by making the trap frame equal to the stack */
__asm__ __volatile__
(
"movl %0, %%esp\n"
- "addl $%c[e],%%esp\n"
- "iret\n"
:
- : "r"(TrapFrame),
- [e] "i"(KTRAP_FRAME_EIP)
+ : "r"(TrapFrame)
: "%esp"
);
- UNREACHABLE;
-}
-
-VOID
-FORCEINLINE
-DECLSPEC_NORETURN
-KiCallReturn(IN PKTRAP_FRAME TrapFrame)
-{
- /* Pops a trap frame out of the stack but returns with RET instead of IRET */
- __asm__ __volatile__
+
+    /* Check what kind of trap frame this trap requires */
+    if (Flags & KI_FUNCTION_CALL)
+    {
+        /* These calls have an EIP on the stack they need */
+        ExitMechanism = KI_EXIT_RET;
+        Volatiles = FALSE;
+    }
+    else if (Flags & KI_EDITED_FRAME)
+    {
+        /* Edited frames store a new ESP in the error code field */
+        FrameSize = FIELD_OFFSET(KTRAP_FRAME, ErrCode);
+    }
+    else if (Flags & KI_DIRECT_EXIT)
+    {
+        /* Exits directly without restoring anything, interrupt frame on stack */
+        NonVolatiles = Volatiles = FALSE;
+    }
+    else if (Flags & KI_FAST_SYSTEM_CALL_EXIT)
+    {
+        /* We have a fake interrupt stack with a ring transition */
+        FrameSize = FIELD_OFFSET(KTRAP_FRAME, V86Es);
+        ExitMechanism = KI_EXIT_SYSEXIT;
+
+        /* SYSEXIT wants EIP in EDX and ESP in ECX */
+        EcxField = FIELD_OFFSET(KTRAP_FRAME, HardwareEsp);
+        EdxField = FIELD_OFFSET(KTRAP_FRAME, Eip);
+    }
+    else if (Flags & KI_SYSTEM_CALL_EXIT)
+    {
+        /* Only restore EAX as a volatile; nonvolatiles are restored by default */
+        Volatiles = KI_EAX_ONLY;
+    }
+    else if (Flags & KI_SYSTEM_CALL_JUMP)
+    {
+        /* We have a fake interrupt stack with no ring transition */
+        FrameSize = FIELD_OFFSET(KTRAP_FRAME, HardwareEsp);
+        Volatiles = KI_EAX_ONLY;
+        ExitMechanism = KI_EXIT_JMP;
+    }
+
+ /* Restore the non volatiles */
+ if (NonVolatiles) __asm__ __volatile__
(
- "movl %0, %%esp\n"
"movl %c[b](%%esp), %%ebx\n"
"movl %c[s](%%esp), %%esi\n"
"movl %c[i](%%esp), %%edi\n"
"movl %c[p](%%esp), %%ebp\n"
- "addl $%c[e],%%esp\n"
- "ret\n"
:
- : "r"(TrapFrame),
- [b] "i"(KTRAP_FRAME_EBX),
- [s] "i"(KTRAP_FRAME_ESI),
- [i] "i"(KTRAP_FRAME_EDI),
- [p] "i"(KTRAP_FRAME_EBP),
- [e] "i"(KTRAP_FRAME_EIP)
+ : [b] "i"(FIELD_OFFSET(KTRAP_FRAME, Ebx)),
+ [s] "i"(FIELD_OFFSET(KTRAP_FRAME, Esi)),
+ [i] "i"(FIELD_OFFSET(KTRAP_FRAME, Edi)),
+ [p] "i"(FIELD_OFFSET(KTRAP_FRAME, Ebp))
: "%esp"
);
- UNREACHABLE;
-}
-
-VOID
-FORCEINLINE
-DECLSPEC_NORETURN
-KiEditedTrapReturn(IN PKTRAP_FRAME TrapFrame)
-{
- /* Regular interrupt exit */
- __asm__ __volatile__
+
+ /* Restore EAX if volatiles must be restored */
+ if (Volatiles) __asm__ __volatile__
+ (
+ "movl %c[a](%%esp), %%eax\n":: [a] "i"(FIELD_OFFSET(KTRAP_FRAME, Eax)) : "%esp"
+ );
+
+ /* Restore the other volatiles if needed */
+ if (Volatiles == KI_ALL_VOLATILES) __asm__ __volatile__
(
- "movl %0, %%esp\n"
- "movl %c[a](%%esp), %%eax\n"
- "movl %c[b](%%esp), %%ebx\n"
"movl %c[c](%%esp), %%ecx\n"
"movl %c[d](%%esp), %%edx\n"
- "movl %c[s](%%esp), %%esi\n"
- "movl %c[i](%%esp), %%edi\n"
- "movl %c[p](%%esp), %%ebp\n"
- "addl $%c[e],%%esp\n"
- "movl (%%esp), %%esp\n"
- "iret\n"
:
- : "r"(TrapFrame),
- [a] "i"(KTRAP_FRAME_EAX),
- [b] "i"(KTRAP_FRAME_EBX),
- [c] "i"(KTRAP_FRAME_ECX),
- [d] "i"(KTRAP_FRAME_EDX),
- [s] "i"(KTRAP_FRAME_ESI),
- [i] "i"(KTRAP_FRAME_EDI),
- [p] "i"(KTRAP_FRAME_EBP),
- [e] "i"(KTRAP_FRAME_ERROR_CODE) /* We *WANT* the error code since ESP is there! */
+ : [c] "i"(EcxField),
+ [d] "i"(EdxField)
: "%esp"
);
- UNREACHABLE;
+
+ /* Ring 0 system calls jump back to EDX */
+ if (Flags & KI_SYSTEM_CALL_JUMP) __asm__ __volatile__
+ (
+ "movl %c[d](%%esp), %%edx\n":: [d] "i"(FIELD_OFFSET(KTRAP_FRAME, Eip)) : "%esp"
+ );
+
+ /* Now destroy the trap frame on the stack */
+ __asm__ __volatile__ ("addl $%c[e],%%esp\n":: [e] "i"(FrameSize) : "%esp");
+
+ /* Edited traps need to change to a new ESP */
+ if (Flags & KI_EDITED_FRAME) __asm__ __volatile__ ("movl (%%esp), %%esp\n":::"%esp");
+
+ /* Check the exit mechanism and apply it */
+ if (ExitMechanism == KI_EXIT_RET) __asm__ __volatile__("ret\n"::: "%esp");
+ else if (ExitMechanism == KI_EXIT_IRET) __asm__ __volatile__("iret\n"::: "%esp");
+ else if (ExitMechanism == KI_EXIT_JMP) __asm__ __volatile__("jmp *%%edx\n.globl _KiSystemCallExit2\n_KiSystemCallExit2:\n"::: "%esp");
+ else if (ExitMechanism == KI_EXIT_SYSEXIT) __asm__ __volatile__("sti\nsysexit\n"::: "%esp");
}
//
-// "BOP" code used by VDM and V8086 Mode
+// All the specific trap epilog stubs
//
-VOID
-FORCEINLINE
-KiIssueBop(VOID)
-{
- /* Invalid instruction that an invalid opcode handler must trap and handle */
- asm volatile(".byte 0xC4\n.byte 0xC4\n");
-}
+KiTrapExitStub (KiTrapReturn, 0);
+KiTrapExitStub (KiDirectTrapReturn, KI_DIRECT_EXIT);
+KiTrapExitStub (KiCallReturn, KI_FUNCTION_CALL);
+KiTrapExitStub (KiEditedTrapReturn, KI_EDITED_FRAME);
+KiTrapExitStub2(KiSystemCallReturn, KI_SYSTEM_CALL_JUMP);
+KiTrapExitStub (KiSystemCallSysExitReturn, KI_FAST_SYSTEM_CALL_EXIT);
+KiTrapExitStub (KiSystemCallTrapReturn, KI_SYSTEM_CALL_EXIT);
-VOID
-FORCEINLINE
-KiUserSystemCall(IN PKTRAP_FRAME TrapFrame)
-{
- /*
- * Kernel call or user call?
- *
- * This decision is made in inlined assembly because we need to patch
- * the relative offset of the user-mode jump to point to the SYSEXIT
- * routine if the CPU supports it. The only way to guarantee that a
- * relative jnz/jz instruction is generated is to force it with the
- * inline assembler.
- */
- asm volatile
- (
- "test $1, %0\n" /* MODE_MASK */
- ".globl _KiSystemCallExitBranch\n_KiSystemCallExitBranch:\n"
- "jnz _KiSystemCallExit\n"
- :
- : "r"(TrapFrame->SegCs)
- );
-}
-
//
// Generic Exit Routine
//
KiExitTrapDebugChecks(TrapFrame, SkipBits);
/* Restore the SEH handler chain */
- KeGetPcr()->Tib.ExceptionList = TrapFrame->ExceptionList;
+ KeGetPcr()->NtTib.ExceptionList = TrapFrame->ExceptionList;
/* Check if the previous mode must be restored */
if (__builtin_expect(!SkipBits.SkipPreviousMode, 0)) /* More INTS than SYSCALLs */
FORCEINLINE
KiEnterV86Trap(IN PKTRAP_FRAME TrapFrame)
{
- /* Load correct registers */
- Ke386SetFs(KGDT_R0_PCR);
- Ke386SetDs(KGDT_R3_DATA | RPL_MASK);
- Ke386SetEs(KGDT_R3_DATA | RPL_MASK);
-
/* Save exception list */
- TrapFrame->ExceptionList = KeGetPcr()->Tib.ExceptionList;
+ TrapFrame->ExceptionList = KeGetPcr()->NtTib.ExceptionList;
- /* Clear direction flag */
- Ke386ClearDirectionFlag();
-
/* Save DR7 and check for debugging */
TrapFrame->Dr7 = __readdr(7);
if (__builtin_expect(TrapFrame->Dr7 & ~DR7_RESERVED_MASK, 0))
FORCEINLINE
KiEnterInterruptTrap(IN PKTRAP_FRAME TrapFrame)
{
- /* Check for V86 mode, otherwise check for ring 3 code */
- if (__builtin_expect(TrapFrame->EFlags & EFLAGS_V86_MASK, 0))
- {
- /* Restore V8086 segments into Protected Mode segments */
- TrapFrame->SegFs = TrapFrame->V86Fs;
- TrapFrame->SegGs = TrapFrame->V86Gs;
- TrapFrame->SegDs = TrapFrame->V86Ds;
- TrapFrame->SegEs = TrapFrame->V86Es;
- }
- else if (__builtin_expect(TrapFrame->SegCs != KGDT_R0_CODE, 1)) /* Ring 3 is more common */
- {
- /* Save segments and then switch to correct ones */
- TrapFrame->SegFs = Ke386GetFs();
- TrapFrame->SegGs = Ke386GetGs();
- TrapFrame->SegDs = Ke386GetDs();
- TrapFrame->SegEs = Ke386GetEs();
- }
-
- /* Set correct segments */
- Ke386SetFs(KGDT_R0_PCR);
- Ke386SetDs(KGDT_R3_DATA | RPL_MASK);
- Ke386SetEs(KGDT_R3_DATA | RPL_MASK);
-
/* Save exception list and terminate it */
- TrapFrame->ExceptionList = KeGetPcr()->Tib.ExceptionList;
- KeGetPcr()->Tib.ExceptionList = EXCEPTION_CHAIN_END;
+ TrapFrame->ExceptionList = KeGetPcr()->NtTib.ExceptionList;
+ KeGetPcr()->NtTib.ExceptionList = EXCEPTION_CHAIN_END;
- /* Clear direction flag */
- Ke386ClearDirectionFlag();
-
/* Flush DR7 and check for debugging */
TrapFrame->Dr7 = 0;
if (__builtin_expect(KeGetCurrentThread()->DispatcherHeader.DebugActive & 0xFF, 0))
FORCEINLINE
KiEnterTrap(IN PKTRAP_FRAME TrapFrame)
{
- ULONG Ds, Es;
-
- /*
- * We really have to get a good DS/ES first before touching any data.
- *
- * These two reads will either go in a register (with optimizations ON) or
- * a stack variable (which is on SS:ESP, guaranteed to be good/valid).
- *
- * Because the assembly is marked volatile, the order of instructions is
- * as-is, otherwise the optimizer could simply get rid of our DS/ES.
- *
- */
- Ds = Ke386GetDs();
- Es = Ke386GetEs();
- Ke386SetDs(KGDT_R3_DATA | RPL_MASK);
- Ke386SetEs(KGDT_R3_DATA | RPL_MASK);
- TrapFrame->SegDs = Ds;
- TrapFrame->SegEs = Es;
-
- /* Now we can save the other segments and then switch to the correct FS */
- TrapFrame->SegFs = Ke386GetFs();
- TrapFrame->SegGs = Ke386GetGs();
- Ke386SetFs(KGDT_R0_PCR);
-
/* Save exception list */
- TrapFrame->ExceptionList = KeGetPcr()->Tib.ExceptionList;
-
- /* Check for V86 mode */
- if (__builtin_expect(TrapFrame->EFlags & EFLAGS_V86_MASK, 0))
- {
- /* Restore V8086 segments into Protected Mode segments */
- TrapFrame->SegFs = TrapFrame->V86Fs;
- TrapFrame->SegGs = TrapFrame->V86Gs;
- TrapFrame->SegDs = TrapFrame->V86Ds;
- TrapFrame->SegEs = TrapFrame->V86Es;
- }
-
- /* Clear direction flag */
- Ke386ClearDirectionFlag();
+ TrapFrame->ExceptionList = KeGetPcr()->NtTib.ExceptionList;
/* Flush DR7 and check for debugging */
TrapFrame->Dr7 = 0;
/* Set debug header */
KiFillTrapFrameDebug(TrapFrame);
}
-
-//
-// Generates a Trap Prolog Stub for the given name
-//
-#define KI_PUSH_FAKE_ERROR_CODE 0x1
-#define KI_FAST_V86_TRAP 0x2
-#define KI_NONVOLATILES_ONLY 0x4
-#define KI_FAST_SYSTEM_CALL 0x8
-#define KI_SOFTWARE_TRAP 0x10
-#define KI_HARDWARE_INT 0x20
-#define KiTrap(x, y) VOID DECLSPEC_NORETURN x(VOID) { KiTrapStub(y, x##Handler); UNREACHABLE; }
-#define KiTrampoline(x, y) VOID DECLSPEC_NOINLINE x(VOID) { KiTrapStub(y, x##Handler); }
-
-//
-// Trap Prolog Stub
-//
-VOID
-FORCEINLINE
-KiTrapStub(IN ULONG Flags,
- IN PVOID Handler)
-{
- ULONG FrameSize;
-
- /* Is this a fast system call? They don't have a stack! */
- if (Flags & KI_FAST_SYSTEM_CALL) __asm__ __volatile__
- (
- "movl %%ss:%c[t], %%esp\n"
- "movl %c[e](%%esp), %%esp\n"
- :
- : [e] "i"(FIELD_OFFSET(KTSS, Esp0)),
- [t] "i"(&PCR->TSS)
- : "%esp"
- );
-
- /* Check what kind of trap frame this trap requires */
- if (Flags & KI_SOFTWARE_TRAP)
- {
- /* Software traps need a complete non-ring transition trap frame */
- FrameSize = FIELD_OFFSET(KTRAP_FRAME, HardwareEsp);
- }
- else if (Flags & KI_FAST_SYSTEM_CALL)
- {
- /* SYSENTER requires us to build a complete ring transition trap frame */
- FrameSize = FIELD_OFFSET(KTRAP_FRAME, V86Es);
-
- /* And it only preserves nonvolatile registers */
- Flags |= KI_NONVOLATILES_ONLY;
- }
- else if (Flags & KI_PUSH_FAKE_ERROR_CODE)
- {
- /* If the trap doesn't have an error code, we'll make space for it */
- FrameSize = FIELD_OFFSET(KTRAP_FRAME, Eip);
- }
- else
- {
- /* The trap already has an error code, so just make space for the rest */
- FrameSize = FIELD_OFFSET(KTRAP_FRAME, ErrCode);
- }
-
- /* Software traps need to get their EIP from the caller's frame */
- if (Flags & KI_SOFTWARE_TRAP) __asm__ __volatile__ ("popl %%eax\n":::"%esp");
-
- /* Save nonvolatile registers */
- __asm__ __volatile__
- (
- /* EBX, ESI, EDI and EBP are saved */
- "movl %%ebp, %c[p](%%esp)\n"
- "movl %%ebx, %c[b](%%esp)\n"
- "movl %%esi, %c[s](%%esp)\n"
- "movl %%edi, %c[i](%%esp)\n"
- :
- : [b] "i"(- FrameSize + FIELD_OFFSET(KTRAP_FRAME, Ebx)),
- [s] "i"(- FrameSize + FIELD_OFFSET(KTRAP_FRAME, Esi)),
- [i] "i"(- FrameSize + FIELD_OFFSET(KTRAP_FRAME, Edi)),
- [p] "i"(- FrameSize + FIELD_OFFSET(KTRAP_FRAME, Ebp))
- : "%esp"
- );
-
- /* Does the caller want nonvolatiles only? */
- if (!(Flags & KI_NONVOLATILES_ONLY)) __asm__ __volatile__
- (
- /* Otherwise, save the volatiles as well */
- "movl %%eax, %c[a](%%esp)\n"
- "movl %%ecx, %c[c](%%esp)\n"
- "movl %%edx, %c[d](%%esp)\n"
- :
- : [a] "i"(- FrameSize + FIELD_OFFSET(KTRAP_FRAME, Eax)),
- [c] "i"(- FrameSize + FIELD_OFFSET(KTRAP_FRAME, Ecx)),
- [d] "i"(- FrameSize + FIELD_OFFSET(KTRAP_FRAME, Edx))
- : "%esp"
- );
-
- /* Now set parameter 1 (ECX) to point to the frame */
- __asm__ __volatile__ ("movl %%esp, %%ecx\n":::"%esp");
-
- /* Now go ahead and make space for this frame */
- __asm__ __volatile__ ("subl $%c[e],%%esp\n":: [e] "i"(FrameSize) : "%esp");
- __asm__ __volatile__ ("subl $%c[e],%%ecx\n":: [e] "i"(FrameSize) : "%ecx");
-
- /* For Fast-V86 traps, set parameter 2 (EDX) to hold EFlags */
- if (Flags & KI_FAST_V86_TRAP) __asm__ __volatile__
- (
- "movl %c[f](%%esp), %%edx\n"
- :
- : [f] "i"(FIELD_OFFSET(KTRAP_FRAME, EFlags))
- );
- else if (Flags & KI_HARDWARE_INT) __asm__ __volatile__
- (
- /*
- * For hardware interrupts, set parameter 2 (EDX) to hold KINTERRUPT.
- * This code will be dynamically patched when an interrupt is registered!
- */
- ".globl _KiInterruptTemplate2ndDispatch\n_KiInterruptTemplate2ndDispatch:\n"
- "movl $0, %%edx\n"
- ".globl _KiInterruptTemplateObject\n_KiInterruptTemplateObject:\n"
- ::: "%edx"
- );
-
- /* Now jump to the C handler */
- if (Flags & KI_HARDWARE_INT)__asm__ __volatile__
- (
- /*
- * For hardware interrupts, use an absolute JMP instead of a relative JMP
- * since the position of this code is arbitrary in memory, and therefore
- * the compiler-generated offset will not be correct.
- */
- "jmp *%0\n"
- ".globl _KiInterruptTemplateDispatch\n_KiInterruptTemplateDispatch:\n"
- :
- : "a"(Handler)
- );
- else __asm__ __volatile__ ("jmp %c[x]\n":: [x] "i"(Handler));
-}
-
-#endif