[NTOS]: Combine all exit trap stubs into a single function with flags (same thing...
[reactos.git] / reactos / ntoskrnl / include / internal / trap_x.h
1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: BSD - See COPYING.ARM in the top level directory
4 * FILE: ntoskrnl/include/trap_x.h
5 * PURPOSE: Internal Inlined Functions for the Trap Handling Code
6 * PROGRAMMERS: ReactOS Portable Systems Group
7 */
8 #ifndef _TRAP_X_
9 #define _TRAP_X_
10
//
// Unreachable code hint for GCC 4.5.x, older GCC versions, and MSVC.
// Used after the noreturn trap epilogs below so the compiler does not
// generate dead fall-through code. GCC 4.5+ has a true hint; older GCC
// falls back to a trap instruction; anything unknown gets a no-op.
//
#ifdef __GNUC__
#if __GNUC__ * 100 + __GNUC_MINOR__ >= 405
#define UNREACHABLE __builtin_unreachable()
#else
#define UNREACHABLE __builtin_trap()
#endif
#elif _MSC_VER
#define UNREACHABLE __assume(0)
#else
#define UNREACHABLE
#endif
25
26 //
27 // Debug Macros
28 //
29 VOID
30 FORCEINLINE
31 KiDumpTrapFrame(IN PKTRAP_FRAME TrapFrame)
32 {
33 /* Dump the whole thing */
34 DbgPrint("DbgEbp: %x\n", TrapFrame->DbgEbp);
35 DbgPrint("DbgEip: %x\n", TrapFrame->DbgEip);
36 DbgPrint("DbgArgMark: %x\n", TrapFrame->DbgArgMark);
37 DbgPrint("DbgArgPointer: %x\n", TrapFrame->DbgArgPointer);
38 DbgPrint("TempSegCs: %x\n", TrapFrame->TempSegCs);
39 DbgPrint("TempEsp: %x\n", TrapFrame->TempEsp);
40 DbgPrint("Dr0: %x\n", TrapFrame->Dr0);
41 DbgPrint("Dr1: %x\n", TrapFrame->Dr1);
42 DbgPrint("Dr2: %x\n", TrapFrame->Dr2);
43 DbgPrint("Dr3: %x\n", TrapFrame->Dr3);
44 DbgPrint("Dr6: %x\n", TrapFrame->Dr6);
45 DbgPrint("Dr7: %x\n", TrapFrame->Dr7);
46 DbgPrint("SegGs: %x\n", TrapFrame->SegGs);
47 DbgPrint("SegEs: %x\n", TrapFrame->SegEs);
48 DbgPrint("SegDs: %x\n", TrapFrame->SegDs);
49 DbgPrint("Edx: %x\n", TrapFrame->Edx);
50 DbgPrint("Ecx: %x\n", TrapFrame->Ecx);
51 DbgPrint("Eax: %x\n", TrapFrame->Eax);
52 DbgPrint("PreviousPreviousMode: %x\n", TrapFrame->PreviousPreviousMode);
53 DbgPrint("ExceptionList: %x\n", TrapFrame->ExceptionList);
54 DbgPrint("SegFs: %x\n", TrapFrame->SegFs);
55 DbgPrint("Edi: %x\n", TrapFrame->Edi);
56 DbgPrint("Esi: %x\n", TrapFrame->Esi);
57 DbgPrint("Ebx: %x\n", TrapFrame->Ebx);
58 DbgPrint("Ebp: %x\n", TrapFrame->Ebp);
59 DbgPrint("ErrCode: %x\n", TrapFrame->ErrCode);
60 DbgPrint("Eip: %x\n", TrapFrame->Eip);
61 DbgPrint("SegCs: %x\n", TrapFrame->SegCs);
62 DbgPrint("EFlags: %x\n", TrapFrame->EFlags);
63 DbgPrint("HardwareEsp: %x\n", TrapFrame->HardwareEsp);
64 DbgPrint("HardwareSegSs: %x\n", TrapFrame->HardwareSegSs);
65 DbgPrint("V86Es: %x\n", TrapFrame->V86Es);
66 DbgPrint("V86Ds: %x\n", TrapFrame->V86Ds);
67 DbgPrint("V86Fs: %x\n", TrapFrame->V86Fs);
68 DbgPrint("V86Gs: %x\n", TrapFrame->V86Gs);
69 }
70
71 #ifdef TRAP_DEBUG
72 VOID
73 FORCEINLINE
74 KiFillTrapFrameDebug(IN PKTRAP_FRAME TrapFrame)
75 {
76 /* Set the debug information */
77 TrapFrame->DbgArgPointer = TrapFrame->Edx;
78 TrapFrame->DbgArgMark = 0xBADB0D00;
79 TrapFrame->DbgEip = TrapFrame->Eip;
80 TrapFrame->DbgEbp = TrapFrame->Ebp;
81 }
82
83 VOID
84 FORCEINLINE
85 KiExitTrapDebugChecks(IN PKTRAP_FRAME TrapFrame,
86 IN KTRAP_STATE_BITS SkipBits)
87 {
88 /* Make sure interrupts are disabled */
89 if (__readeflags() & EFLAGS_INTERRUPT_MASK)
90 {
91 DbgPrint("Exiting with interrupts enabled: %lx\n", __readeflags());
92 while (TRUE);
93 }
94
95 /* Make sure this is a real trap frame */
96 if (TrapFrame->DbgArgMark != 0xBADB0D00)
97 {
98 DbgPrint("Exiting with an invalid trap frame? (No MAGIC in trap frame)\n");
99 KiDumpTrapFrame(TrapFrame);
100 while (TRUE);
101 }
102
103 /* Make sure we're not in user-mode or something */
104 if (Ke386GetFs() != KGDT_R0_PCR)
105 {
106 DbgPrint("Exiting with an invalid FS: %lx\n", Ke386GetFs());
107 while (TRUE);
108 }
109
110 /* Make sure we have a valid SEH chain */
111 if (KeGetPcr()->Tib.ExceptionList == 0)
112 {
113 DbgPrint("Exiting with NULL exception chain: %p\n", KeGetPcr()->Tib.ExceptionList);
114 while (TRUE);
115 }
116
117 /* Make sure we're restoring a valid SEH chain */
118 if (TrapFrame->ExceptionList == 0)
119 {
120 DbgPrint("Entered a trap with a NULL exception chain: %p\n", TrapFrame->ExceptionList);
121 while (TRUE);
122 }
123
124 /* If we're ignoring previous mode, make sure caller doesn't actually want it */
125 if ((SkipBits.SkipPreviousMode) && (TrapFrame->PreviousPreviousMode != -1))
126 {
127 DbgPrint("Exiting a trap witout restoring previous mode, yet previous mode seems valid: %lx", TrapFrame->PreviousPreviousMode);
128 while (TRUE);
129 }
130 }
131
132 VOID
133 FORCEINLINE
134 KiExitSystemCallDebugChecks(IN ULONG SystemCall,
135 IN PKTRAP_FRAME TrapFrame)
136 {
137 KIRQL OldIrql;
138
139 /* Check if this was a user call */
140 if (KiUserMode(TrapFrame))
141 {
142 /* Make sure we are not returning with elevated IRQL */
143 OldIrql = KeGetCurrentIrql();
144 if (OldIrql != PASSIVE_LEVEL)
145 {
146 /* Forcibly put us in a sane state */
147 KeGetPcr()->CurrentIrql = PASSIVE_LEVEL;
148 _disable();
149
150 /* Fail */
151 KeBugCheckEx(IRQL_GT_ZERO_AT_SYSTEM_SERVICE,
152 SystemCall,
153 OldIrql,
154 0,
155 0);
156 }
157
158 /* Make sure we're not attached and that APCs are not disabled */
159 if ((KeGetCurrentThread()->ApcStateIndex != CurrentApcEnvironment) ||
160 (KeGetCurrentThread()->CombinedApcDisable != 0))
161 {
162 /* Fail */
163 KeBugCheckEx(APC_INDEX_MISMATCH,
164 SystemCall,
165 KeGetCurrentThread()->ApcStateIndex,
166 KeGetCurrentThread()->CombinedApcDisable,
167 0);
168 }
169 }
170 }
171 #else
172 #define KiExitTrapDebugChecks(x, y)
173 #define KiFillTrapFrameDebug(x)
174 #define KiExitSystemCallDebugChecks(x, y)
175 #endif
176
//
// Helper Code
//
// Returns nonzero when the trapped code was not running in ring 0, judged
// by the mode bit(s) of the saved CS selector. Note the raw masked value
// is returned (not normalized to 1), so use it only as a truth value.
//
BOOLEAN
FORCEINLINE
KiUserTrap(IN PKTRAP_FRAME TrapFrame)
{
    /* Anything else but Ring 0 is Ring 3 */
    return (TrapFrame->SegCs & MODE_MASK);
}
187
//
// "BOP" code used by VDM and V8086 Mode
//
// Emits the two-byte sequence C4 C4, a deliberately invalid encoding that
// the invalid-opcode handler recognizes as a kernel/VDM communication point.
//
VOID
FORCEINLINE
KiIssueBop(VOID)
{
    /* Invalid instruction that an invalid opcode handler must trap and handle */
    asm volatile(".byte 0xC4\n.byte 0xC4\n");
}
198
//
// Returns whether or not this is a V86 trap by checking the EFLAGS field.
//
// "Safe" because it may be called before sane data segments are loaded:
// the frame is read through an explicit SS override only.
//
// FIXME: GCC 4.5 Can Improve this with "goto labels"
//
BOOLEAN
FORCEINLINE
KiIsV8086TrapSafe(IN PKTRAP_FRAME TrapFrame)
{
    BOOLEAN Result;

    /*
     * The check MUST be done this way, as we guarantee that no DS/ES/FS segment
     * is used (since it might be garbage).
     *
     * Instead, we use the SS segment which is guaranteed to be correct. Because
     * operate in 32-bit flat mode, this works just fine.
     */
    asm volatile
    (
        /* Test the VM bit in the saved EFlags via SS, then capture ZF's
           complement in AL (the "=a" output constraint pins Result to EAX) */
        "testl $%c[f], %%ss:%1\n"
        "setnz %0\n"
        : "=a"(Result)
        : "m"(TrapFrame->EFlags),
          [f] "i"(EFLAGS_V86_MASK)
    );

    /* If V86 flag was set */
    return Result;
}
229
//
// Returns whether or not this is a user-mode trap by checking the SegCs field.
//
// "Safe" because it may be called before sane data segments are loaded:
// the frame is read through an explicit SS override only.
//
// FIXME: GCC 4.5 Can Improve this with "goto labels"
//
BOOLEAN
FORCEINLINE
KiIsUserTrapSafe(IN PKTRAP_FRAME TrapFrame)
{
    BOOLEAN Result;

    /*
     * The check MUST be done this way, as we guarantee that no DS/ES/FS segment
     * is used (since it might be garbage).
     *
     * Instead, we use the SS segment which is guaranteed to be correct. Because
     * operate in 32-bit flat mode, this works just fine.
     */
    asm volatile
    (
        /* Compare the saved CS against the flat ring-0 code selector via SS;
           AL becomes nonzero when they differ */
        "cmp $%c[f], %%ss:%1\n"
        "setnz %0\n"
        : "=a"(Result)
        : "m"(TrapFrame->SegCs),
          [f] "i"(KGDT_R0_CODE)
    );

    /* TRUE if CS is anything other than the ring-0 flat code selector
       (was a stale copy-paste comment about the V86 flag) */
    return Result;
}
260
//
// Branches to the user-mode system call exit when the trap frame belongs to
// user mode; for a kernel-mode caller execution simply falls through. The
// _KiSystemCallExitBranch label marks the jump so its offset can be patched.
//
VOID
FORCEINLINE
KiUserSystemCall(IN PKTRAP_FRAME TrapFrame)
{
    /*
     * Kernel call or user call?
     *
     * This decision is made in inlined assembly because we need to patch
     * the relative offset of the user-mode jump to point to the SYSEXIT
     * routine if the CPU supports it. The only way to guarantee that a
     * relative jnz/jz instruction is generated is to force it with the
     * inline assembler.
     */
    asm volatile
    (
        /* Taken (user mode) when the mode bit of the saved CS is set */
        "test $1, %0\n" /* MODE_MASK */
        ".globl _KiSystemCallExitBranch\n_KiSystemCallExitBranch:\n"
        "jnz _KiSystemCallExit\n"
        :
        : "r"(TrapFrame->SegCs)
    );
}
283
//
// Loads known-good DS/ES selectors and stashes the trapped values into the
// trap frame. Must run before any other data access in a trap prolog.
//
VOID
FORCEINLINE
KiSetSaneSegments(IN PKTRAP_FRAME TrapFrame)
{
    ULONG Ds, Es;

    /*
     * We really have to get a good DS/ES first before touching any data.
     *
     * These two reads will either go in a register (with optimizations ON) or
     * a stack variable (which is on SS:ESP, guaranteed to be good/valid).
     *
     * Because the assembly is marked volatile, the order of instructions is
     * as-is, otherwise the optimizer could simply get rid of our DS/ES.
     *
     */
    Ds = Ke386GetDs();
    Es = Ke386GetEs();
    /* Only now are data segments trustworthy; store the old values last */
    Ke386SetDs(KGDT_R3_DATA | RPL_MASK);
    Ke386SetEs(KGDT_R3_DATA | RPL_MASK);
    TrapFrame->SegDs = Ds;
    TrapFrame->SegEs = Es;
}
307
//
// Generates an Exit Epilog Stub for the given name
//
// Flags passed to KiTrapExit selecting the epilog variant:
#define KI_FUNCTION_CALL 0x1            /* Exit via RET; return EIP is on the stack */
#define KI_EDITED_FRAME 0x2             /* ErrCode holds a replacement ESP to switch to */
#define KI_DIRECT_EXIT 0x4              /* Restore no registers at all */
#define KI_FAST_SYSTEM_CALL_EXIT 0x8    /* Exit via STI+SYSEXIT */
#define KI_SYSTEM_CALL_EXIT 0x10        /* Emits _KiSystemCallExit label, exits via IRET */
#define KI_SYSTEM_CALL_JUMP 0x20        /* Ring-0 syscall: exit via JMP *EDX */
/* Stub generators: the first marks the stub noreturn; the second does not,
   because a KI_SYSTEM_CALL_JUMP epilog emits the _KiSystemCallExit2 label
   after its jump, so control can conceptually continue past the call site */
#define KiTrapExitStub(x, y) VOID FORCEINLINE DECLSPEC_NORETURN x(IN PKTRAP_FRAME TrapFrame) { KiTrapExit(TrapFrame, y); UNREACHABLE; }
#define KiTrapExitStub2(x, y) VOID FORCEINLINE x(IN PKTRAP_FRAME TrapFrame) { KiTrapExit(TrapFrame, y); }

//
// How volatiles will be restored
//
#define KI_EAX_NO_VOLATILES 0x0         /* Restore no volatile registers */
#define KI_EAX_ONLY 0x1                 /* Restore only EAX */
#define KI_ALL_VOLATILES 0x2            /* Restore EAX, ECX and EDX */

//
// Exit mechanism to use
//
#define KI_EXIT_IRET 0x0
#define KI_EXIT_SYSEXIT 0x1
#define KI_EXIT_JMP 0x2
#define KI_EXIT_RET 0x3
334
//
// Master Trap Epilog
//
// Tears the trap frame off the stack and resumes the trapped context. The
// Flags argument (KI_* above) selects how much of the frame exists, which
// registers to reload, and which exit instruction to use. Because this is
// FORCEINLINE and every caller passes a compile-time constant, each stub
// below collapses to a small fixed instruction sequence. After the first
// asm statement ESP points into the trap frame, so no further C-level
// stack use is permissible.
//
VOID
FORCEINLINE
KiTrapExit(IN PKTRAP_FRAME TrapFrame,
           IN ULONG Flags)
{
    /* Default: pop everything below the hardware IRET image at Eip */
    ULONG FrameSize = FIELD_OFFSET(KTRAP_FRAME, Eip);
    ULONG ExitMechanism = KI_EXIT_IRET, Volatiles = KI_ALL_VOLATILES, NonVolatiles = TRUE;
    ULONG EcxField = FIELD_OFFSET(KTRAP_FRAME, Ecx), EdxField = FIELD_OFFSET(KTRAP_FRAME, Edx);

    /* System call exit needs a special label (target of KiUserSystemCall's jump) */
    if (Flags & KI_SYSTEM_CALL_EXIT) __asm__ __volatile__
    (
        ".globl _KiSystemCallExit\n_KiSystemCallExit:\n"
    );

    /* Start by making the trap frame equal to the stack */
    __asm__ __volatile__
    (
        "movl %0, %%esp\n"
        :
        : "r"(TrapFrame)
        : "%esp"
    );

    /* Check what kind of trap frame this trap requires */
    if (Flags & KI_FUNCTION_CALL)
    {
        /* These calls have an EIP on the stack they need */
        ExitMechanism = KI_EXIT_RET;
        Volatiles = FALSE;
    }
    else if (Flags & KI_EDITED_FRAME)
    {
        /* Edited frames store a new ESP in the error code field */
        FrameSize = FIELD_OFFSET(KTRAP_FRAME, ErrCode);
    }
    else if (Flags & KI_DIRECT_EXIT)
    {
        /* Exits directly without restoring anything, interrupt frame on stack */
        NonVolatiles = Volatiles = FALSE;
    }
    else if (Flags & KI_FAST_SYSTEM_CALL_EXIT)
    {
        /* We have a fake interrupt stack with a ring transition */
        FrameSize = FIELD_OFFSET(KTRAP_FRAME, V86Es);
        ExitMechanism = KI_EXIT_SYSEXIT;

        /* SYSEXIT wants EIP in EDX and ESP in ECX */
        EcxField = FIELD_OFFSET(KTRAP_FRAME, HardwareEsp);
        EdxField = FIELD_OFFSET(KTRAP_FRAME, Eip);
    }
    else if (Flags & KI_SYSTEM_CALL_EXIT)
    {
        /* Only restore EAX */
        /* NOTE(review): this assigns NonVolatiles (already TRUE), so ECX/EDX
           are in fact still reloaded below, contradicting the comment --
           looks like "Volatiles = KI_EAX_ONLY" was intended; confirm */
        NonVolatiles = KI_EAX_ONLY;
    }
    else if (Flags & KI_SYSTEM_CALL_JUMP)
    {
        /* We have a fake interrupt stack with no ring transition */
        FrameSize = FIELD_OFFSET(KTRAP_FRAME, HardwareEsp);
        /* NOTE(review): same suspected Volatiles/NonVolatiles mixup as above */
        NonVolatiles = KI_EAX_ONLY;
        ExitMechanism = KI_EXIT_JMP;
    }

    /* Restore the non volatiles */
    if (NonVolatiles) __asm__ __volatile__
    (
        "movl %c[b](%%esp), %%ebx\n"
        "movl %c[s](%%esp), %%esi\n"
        "movl %c[i](%%esp), %%edi\n"
        "movl %c[p](%%esp), %%ebp\n"
        :
        : [b] "i"(FIELD_OFFSET(KTRAP_FRAME, Ebx)),
          [s] "i"(FIELD_OFFSET(KTRAP_FRAME, Esi)),
          [i] "i"(FIELD_OFFSET(KTRAP_FRAME, Edi)),
          [p] "i"(FIELD_OFFSET(KTRAP_FRAME, Ebp))
        : "%esp"
    );

    /* Restore EAX if volatiles must be restored */
    if (Volatiles) __asm__ __volatile__
    (
        "movl %c[a](%%esp), %%eax\n":: [a] "i"(FIELD_OFFSET(KTRAP_FRAME, Eax)) : "%esp"
    );

    /* Restore the other volatiles if needed; the ECX/EDX source offsets were
       retargeted above for the SYSEXIT case */
    if (Volatiles == KI_ALL_VOLATILES) __asm__ __volatile__
    (
        "movl %c[c](%%esp), %%ecx\n"
        "movl %c[d](%%esp), %%edx\n"
        :
        : [c] "i"(EcxField),
          [d] "i"(EdxField)
        : "%esp"
    );

    /* Ring 0 system calls jump back to EDX (loaded with the saved EIP) */
    if (Flags & KI_SYSTEM_CALL_JUMP) __asm__ __volatile__
    (
        "movl %c[d](%%esp), %%edx\n":: [d] "i"(FIELD_OFFSET(KTRAP_FRAME, Eip)) : "%esp"
    );

    /* Now destroy the trap frame on the stack */
    __asm__ __volatile__ ("addl $%c[e],%%esp\n":: [e] "i"(FrameSize) : "%esp");

    /* Edited traps need to change to a new ESP: ESP now points at ErrCode,
       which KiExitTrap filled with the replacement stack pointer */
    if (Flags & KI_EDITED_FRAME) __asm__ __volatile__ ("movl (%%esp), %%esp\n":::"%esp");

    /* Check the exit mechanism and apply it */
    if (ExitMechanism == KI_EXIT_RET) __asm__ __volatile__("ret\n"::: "%esp");
    else if (ExitMechanism == KI_EXIT_IRET) __asm__ __volatile__("iret\n"::: "%esp");
    else if (ExitMechanism == KI_EXIT_JMP) __asm__ __volatile__("jmp *%%edx\n.globl _KiSystemCallExit2\n_KiSystemCallExit2:\n"::: "%esp");
    else if (ExitMechanism == KI_EXIT_SYSEXIT) __asm__ __volatile__("sti\nsysexit\n"::: "%esp");
}
452
//
// All the specific trap epilog stubs
//
KiTrapExitStub (KiTrapReturn, 0);                                     /* Full restore, IRET */
KiTrapExitStub (KiDirectTrapReturn, KI_DIRECT_EXIT);                  /* IRET, restores no registers */
KiTrapExitStub (KiCallReturn, KI_FUNCTION_CALL);                      /* RET exit, nonvolatiles only */
KiTrapExitStub (KiEditedTrapReturn, KI_EDITED_FRAME);                 /* IRET to the ESP stored in ErrCode */
KiTrapExitStub2(KiSystemCallReturn, KI_SYSTEM_CALL_JUMP);             /* JMP *EDX; not noreturn (see stub macro) */
KiTrapExitStub (KiSystemCallSysExitReturn, KI_FAST_SYSTEM_CALL_EXIT); /* STI + SYSEXIT */
KiTrapExitStub (KiSystemCallTrapReturn, KI_SYSTEM_CALL_EXIT);         /* IRET, carries _KiSystemCallExit label */
463
//
// Generic Exit Routine
//
// High-level trap exit: restores the SEH chain, previous mode, and segments
// as directed by the Skip bit mask, fixes up edited frames, then dispatches
// to the appropriate epilog stub. Never returns.
//
VOID
FORCEINLINE
DECLSPEC_NORETURN
KiExitTrap(IN PKTRAP_FRAME TrapFrame,
           IN UCHAR Skip)
{
    KTRAP_EXIT_SKIP_BITS SkipBits = { .Bits = Skip };
    PULONG ReturnStack;

    /* Debugging checks */
    KiExitTrapDebugChecks(TrapFrame, SkipBits);

    /* Restore the SEH handler chain */
    KeGetPcr()->Tib.ExceptionList = TrapFrame->ExceptionList;

    /* Check if the previous mode must be restored */
    if (__builtin_expect(!SkipBits.SkipPreviousMode, 0)) /* More INTS than SYSCALLs */
    {
        /* Restore it */
        KeGetCurrentThread()->PreviousMode = TrapFrame->PreviousPreviousMode;
    }

    /* Check if there are active debug registers */
    if (__builtin_expect(TrapFrame->Dr7 & ~DR7_RESERVED_MASK, 0))
    {
        /* Not handled yet */
        DbgPrint("Need Hardware Breakpoint Support!\n");
        DbgBreakPoint();
        while (TRUE);
    }

    /* Check if this was a V8086 trap */
    if (__builtin_expect(TrapFrame->EFlags & EFLAGS_V86_MASK, 0)) KiTrapReturn(TrapFrame);

    /* Check if the trap frame was edited */
    if (__builtin_expect(!(TrapFrame->SegCs & FRAME_EDITED), 0))
    {
        /*
         * An edited trap frame happens when we need to modify CS and/or ESP but
         * don't actually have a ring transition. This happens when a kernelmode
         * caller wants to perform an NtContinue to another kernel address, such
         * as in the case of SEH (basically, a longjmp), or to a user address.
         *
         * Therefore, the CPU never saved CS/ESP on the stack because we did not
         * get a trap frame due to a ring transition (there was no interrupt).
         * Even if we didn't want to restore CS to a new value, a problem occurs
         * due to the fact a normal RET would not work if we restored ESP since
         * RET would then try to read the result off the stack.
         *
         * The NT kernel solves this by adding 12 bytes of stack to the exiting
         * trap frame, in which EFLAGS, CS, and EIP are stored, and then saving
         * the ESP that's being requested into the ErrorCode field. It will then
         * exit with an IRET. This fixes both issues, because it gives the stack
         * some space where to hold the return address and then end up with the
         * wanted stack, and it uses IRET which allows a new CS to be inputted.
         *
         */

        /* Set CS that is requested */
        TrapFrame->SegCs = TrapFrame->TempSegCs;

        /* First make space on requested stack */
        ReturnStack = (PULONG)(TrapFrame->TempEsp - 12);
        TrapFrame->ErrCode = (ULONG_PTR)ReturnStack;

        /* Now copy IRET frame (EIP, CS, EFLAGS) onto the requested stack */
        ReturnStack[0] = TrapFrame->Eip;
        ReturnStack[1] = TrapFrame->SegCs;
        ReturnStack[2] = TrapFrame->EFlags;

        /* Do special edited return */
        KiEditedTrapReturn(TrapFrame);
    }

    /* Check if this is a user trap */
    if (__builtin_expect(KiUserTrap(TrapFrame), 1)) /* Ring 3 is where we spend time */
    {
        /* Check if segments should be restored */
        if (!SkipBits.SkipSegments)
        {
            /* Restore segments */
            Ke386SetGs(TrapFrame->SegGs);
            Ke386SetEs(TrapFrame->SegEs);
            Ke386SetDs(TrapFrame->SegDs);
            Ke386SetFs(TrapFrame->SegFs);
        }

        /* Always restore FS since it goes from KPCR to TEB */
        /* NOTE(review): redundant when the branch above already restored FS;
           harmless, but confirm whether the first FS load can be dropped */
        Ke386SetFs(TrapFrame->SegFs);
    }

    /* Check for system call -- a system call skips volatiles! */
    if (__builtin_expect(SkipBits.SkipVolatiles, 0)) /* More INTs than SYSCALLs */
    {
        /* User or kernel call? User mode jumps to _KiSystemCallExit below */
        KiUserSystemCall(TrapFrame);

        /* Restore EFLags */
        __writeeflags(TrapFrame->EFlags);

        /* Call is kernel, so do a jump back since this wasn't a real INT */
        KiSystemCallReturn(TrapFrame);

        /* If we got here, this is SYSEXIT: are we stepping code? */
        if (!(TrapFrame->EFlags & EFLAGS_TF))
        {
            /* Restore user FS */
            Ke386SetFs(KGDT_R3_TEB | RPL_MASK);

            /* Remove interrupt flag (SYSEXIT path re-enables via STI) */
            TrapFrame->EFlags &= ~EFLAGS_INTERRUPT_MASK;
            __writeeflags(TrapFrame->EFlags);

            /* Exit through SYSEXIT */
            KiSystemCallSysExitReturn(TrapFrame);
        }

        /* Exit through IRETD, either due to debugging or due to lack of SYSEXIT */
        KiSystemCallTrapReturn(TrapFrame);
    }

    /* Return from interrupt */
    KiTrapReturn(TrapFrame);
}
591
//
// Virtual 8086 Mode Optimized Trap Exit
//
// Delivers any pending user APCs (raising to APC_LEVEL with interrupts on
// for each delivery), then returns to the V8086 context via KiTrapReturn.
//
VOID
FORCEINLINE
KiExitV86Trap(IN PKTRAP_FRAME TrapFrame)
{
    PKTHREAD Thread;
    KIRQL OldIrql;

    /* Get the thread */
    Thread = KeGetCurrentThread();
    while (TRUE)
    {
        /* Turn off the alerted state for kernel mode */
        Thread->Alerted[KernelMode] = FALSE;

        /* Are there pending user APCs? */
        if (__builtin_expect(!Thread->ApcState.UserApcPending, 1)) break;

        /* Raise to APC level and enable interrupts */
        OldIrql = KfRaiseIrql(APC_LEVEL);
        _enable();

        /* Deliver APCs */
        KiDeliverApc(UserMode, NULL, TrapFrame);

        /* Restore IRQL and disable interrupts once again */
        KfLowerIrql(OldIrql);
        _disable();

        /* Return if this isn't V86 mode anymore */
        /* NOTE(review): the condition returns when the V86 flag IS still set,
           which contradicts this comment -- confirm whether a '!' is missing */
        if (__builtin_expect(TrapFrame->EFlags & EFLAGS_V86_MASK, 0)) return;
    }

    /* If we got here, we're still in a valid V8086 context, so quit it */
    if (__builtin_expect(TrapFrame->Dr7 & ~DR7_RESERVED_MASK, 0))
    {
        /* Not handled yet */
        DbgPrint("Need Hardware Breakpoint Support!\n");
        while (TRUE);
    }

    /* Return from interrupt */
    KiTrapReturn(TrapFrame);
}
638
//
// Virtual 8086 Mode Optimized Trap Entry
//
// Prolog for traps known to originate in V8086 mode: loads kernel-sane
// segments, records the SEH chain, and captures DR7.
//
VOID
FORCEINLINE
KiEnterV86Trap(IN PKTRAP_FRAME TrapFrame)
{
    /* Load correct registers (FS -> PCR, flat data segments) */
    Ke386SetFs(KGDT_R0_PCR);
    Ke386SetDs(KGDT_R3_DATA | RPL_MASK);
    Ke386SetEs(KGDT_R3_DATA | RPL_MASK);

    /* Save exception list */
    TrapFrame->ExceptionList = KeGetPcr()->Tib.ExceptionList;

    /* Clear direction flag (C code expects forward string ops) */
    Ke386ClearDirectionFlag();

    /* Save DR7 and check for debugging */
    TrapFrame->Dr7 = __readdr(7);
    if (__builtin_expect(TrapFrame->Dr7 & ~DR7_RESERVED_MASK, 0))
    {
        DbgPrint("Need Hardware Breakpoint Support!\n");
        while (TRUE);
    }
}
665
//
// Interrupt Trap Entry
//
// Prolog for hardware interrupts. Uses the SS-relative "safe" checks first
// because DS/ES/FS may be garbage until sane selectors are loaded.
//
VOID
FORCEINLINE
KiEnterInterruptTrap(IN PKTRAP_FRAME TrapFrame)
{
    /* Check for V86 mode, otherwise check for ring 3 code */
    if (__builtin_expect(KiIsV8086TrapSafe(TrapFrame), 0))
    {
        /* Set correct segments */
        Ke386SetDs(KGDT_R3_DATA | RPL_MASK);
        Ke386SetEs(KGDT_R3_DATA | RPL_MASK);
        Ke386SetFs(KGDT_R0_PCR);

        /* Restore V8086 segments into Protected Mode segments */
        TrapFrame->SegFs = TrapFrame->V86Fs;
        TrapFrame->SegGs = TrapFrame->V86Gs;
        TrapFrame->SegDs = TrapFrame->V86Ds;
        TrapFrame->SegEs = TrapFrame->V86Es;
    }
    else if (__builtin_expect(KiIsUserTrapSafe(TrapFrame), 1)) /* Ring 3 is more common */
    {
        /* Switch to sane segments (also stores old DS/ES into the frame) */
        KiSetSaneSegments(TrapFrame);

        /* Save FS/GS */
        TrapFrame->SegFs = Ke386GetFs();
        TrapFrame->SegGs = Ke386GetGs();

        /* Set correct FS (PCR selector) */
        Ke386SetFs(KGDT_R0_PCR);
    }

    /* Save exception list and terminate it */
    TrapFrame->ExceptionList = KeGetPcr()->Tib.ExceptionList;
    KeGetPcr()->Tib.ExceptionList = EXCEPTION_CHAIN_END;

    /* Clear direction flag */
    Ke386ClearDirectionFlag();

    /* Flush DR7 and check for debugging */
    TrapFrame->Dr7 = 0;
    if (__builtin_expect(KeGetCurrentThread()->DispatcherHeader.DebugActive & 0xFF, 0))
    {
        DbgPrint("Need Hardware Breakpoint Support!\n");
        while (TRUE);
    }

    /* Set debug header (TRAP_DEBUG builds only) */
    KiFillTrapFrameDebug(TrapFrame);
}
718
//
// Generic Trap Entry
//
// Prolog for general exceptions/traps: sanitizes segments, saves the SEH
// chain, handles a V8086 origin, and clears DR7.
//
VOID
FORCEINLINE
KiEnterTrap(IN PKTRAP_FRAME TrapFrame)
{
    /* Switch to sane segments (also stores old DS/ES into the frame) */
    KiSetSaneSegments(TrapFrame);

    /* Now we can save the other segments and then switch to the correct FS */
    TrapFrame->SegFs = Ke386GetFs();
    TrapFrame->SegGs = Ke386GetGs();
    Ke386SetFs(KGDT_R0_PCR);

    /* Save exception list */
    TrapFrame->ExceptionList = KeGetPcr()->Tib.ExceptionList;

    /* Check for V86 mode */
    if (__builtin_expect(TrapFrame->EFlags & EFLAGS_V86_MASK, 0))
    {
        /* Restore V8086 segments into Protected Mode segments
           (overwrites the SegFs/SegGs just captured above) */
        TrapFrame->SegFs = TrapFrame->V86Fs;
        TrapFrame->SegGs = TrapFrame->V86Gs;
        TrapFrame->SegDs = TrapFrame->V86Ds;
        TrapFrame->SegEs = TrapFrame->V86Es;
    }

    /* Clear direction flag */
    Ke386ClearDirectionFlag();

    /* Flush DR7 and check for debugging */
    TrapFrame->Dr7 = 0;
    if (__builtin_expect(KeGetCurrentThread()->DispatcherHeader.DebugActive & 0xFF, 0))
    {
        DbgPrint("Need Hardware Breakpoint Support!\n");
        while (TRUE);
    }

    /* Set debug header (TRAP_DEBUG builds only) */
    KiFillTrapFrameDebug(TrapFrame);
}
761
//
// Generates a Trap Prolog Stub for the given name
//
// Flags consumed by KiTrapStub:
#define KI_PUSH_FAKE_ERROR_CODE 0x1 /* Trap has no CPU error code; leave space for one */
#define KI_UNUSED 0x2
#define KI_NONVOLATILES_ONLY 0x4    /* Do not save EAX/ECX/EDX */
#define KI_FAST_SYSTEM_CALL 0x8     /* SYSENTER entry: fetch the stack from the TSS */
#define KI_SOFTWARE_TRAP 0x10       /* Called like a function; EIP on the caller's stack */
#define KI_HARDWARE_INT 0x20        /* Interrupt template; EDX patched with the KINTERRUPT */
/* KiTrap builds a noreturn entry stub, KiTrampoline a returnable one; both
   jump into the C handler named <x>Handler */
#define KiTrap(x, y) VOID DECLSPEC_NORETURN x(VOID) { KiTrapStub(y, x##Handler); UNREACHABLE; }
#define KiTrampoline(x, y) VOID DECLSPEC_NOINLINE x(VOID) { KiTrapStub(y, x##Handler); }
773
//
// Trap Prolog Stub
//
// Builds a KTRAP_FRAME on the kernel stack and jumps to the C handler with
// ECX pointing at the frame. FORCEINLINE with constant Flags, so each KiTrap
// instantiation becomes a fixed entry sequence. The %c[...] constraints
// require compile-time-constant offsets, which FIELD_OFFSET provides.
//
VOID
FORCEINLINE
KiTrapStub(IN ULONG Flags,
           IN PVOID Handler)
{
    ULONG FrameSize;

    /* Is this a fast system call? They don't have a stack!
       Load the TSS pointer from the PCR (SS-relative, since only SS is
       trusted here) and then the ring-0 stack from its Esp0 field */
    if (Flags & KI_FAST_SYSTEM_CALL) __asm__ __volatile__
    (
        "movl %%ss:%c[t], %%esp\n"
        "movl %c[e](%%esp), %%esp\n"
        :
        : [e] "i"(FIELD_OFFSET(KTSS, Esp0)),
          [t] "i"(&PCR->TSS)
        : "%esp"
    );

    /* Check what kind of trap frame this trap requires */
    if (Flags & KI_SOFTWARE_TRAP)
    {
        /* Software traps need a complete non-ring transition trap frame */
        FrameSize = FIELD_OFFSET(KTRAP_FRAME, HardwareEsp);
    }
    else if (Flags & KI_FAST_SYSTEM_CALL)
    {
        /* SYSENTER requires us to build a complete ring transition trap frame */
        FrameSize = FIELD_OFFSET(KTRAP_FRAME, V86Es);

        /* And it only preserves nonvolatile registers */
        Flags |= KI_NONVOLATILES_ONLY;
    }
    else if (Flags & KI_PUSH_FAKE_ERROR_CODE)
    {
        /* If the trap doesn't have an error code, we'll make space for it */
        FrameSize = FIELD_OFFSET(KTRAP_FRAME, Eip);
    }
    else
    {
        /* The trap already has an error code, so just make space for the rest */
        FrameSize = FIELD_OFFSET(KTRAP_FRAME, ErrCode);
    }

    /* Software traps need to get their EIP from the caller's frame */
    /* NOTE(review): this popl clobbers EAX but only %esp is in the clobber
       list -- confirm nothing live is kept in EAX at this point */
    if (Flags & KI_SOFTWARE_TRAP) __asm__ __volatile__ ("popl %%eax\n":::"%esp");

    /* Save nonvolatile registers. The stores land BELOW the current ESP
       (negative offsets); the frame is only claimed by the subl further
       down, so nothing may push onto this stack in between */
    __asm__ __volatile__
    (
        /* EBX, ESI, EDI and EBP are saved */
        "movl %%ebp, %c[p](%%esp)\n"
        "movl %%ebx, %c[b](%%esp)\n"
        "movl %%esi, %c[s](%%esp)\n"
        "movl %%edi, %c[i](%%esp)\n"
        :
        : [b] "i"(- FrameSize + FIELD_OFFSET(KTRAP_FRAME, Ebx)),
          [s] "i"(- FrameSize + FIELD_OFFSET(KTRAP_FRAME, Esi)),
          [i] "i"(- FrameSize + FIELD_OFFSET(KTRAP_FRAME, Edi)),
          [p] "i"(- FrameSize + FIELD_OFFSET(KTRAP_FRAME, Ebp))
        : "%esp"
    );

    /* Does the caller want nonvolatiles only? */
    if (!(Flags & KI_NONVOLATILES_ONLY)) __asm__ __volatile__
    (
        /* Otherwise, save the volatiles as well */
        "movl %%eax, %c[a](%%esp)\n"
        "movl %%ecx, %c[c](%%esp)\n"
        "movl %%edx, %c[d](%%esp)\n"
        :
        : [a] "i"(- FrameSize + FIELD_OFFSET(KTRAP_FRAME, Eax)),
          [c] "i"(- FrameSize + FIELD_OFFSET(KTRAP_FRAME, Ecx)),
          [d] "i"(- FrameSize + FIELD_OFFSET(KTRAP_FRAME, Edx))
        : "%esp"
    );

    /* Now set parameter 1 (ECX) to point to the frame */
    __asm__ __volatile__ ("movl %%esp, %%ecx\n":::"%esp");

    /* Now go ahead and make space for this frame (ESP and the ECX copy
       both drop by FrameSize to the base of the KTRAP_FRAME) */
    __asm__ __volatile__ ("subl $%c[e],%%esp\n":: [e] "i"(FrameSize) : "%esp");
    __asm__ __volatile__ ("subl $%c[e],%%ecx\n":: [e] "i"(FrameSize) : "%ecx");

    /*
     * For hardware interrupts, set parameter 2 (EDX) to hold KINTERRUPT.
     * This code will be dynamically patched when an interrupt is registered!
     */
    if (Flags & KI_HARDWARE_INT) __asm__ __volatile__
    (
        ".globl _KiInterruptTemplate2ndDispatch\n_KiInterruptTemplate2ndDispatch:\n"
        "movl $0, %%edx\n"
        ".globl _KiInterruptTemplateObject\n_KiInterruptTemplateObject:\n"
        ::: "%edx"
    );

    /* Now jump to the C handler */
    if (Flags & KI_HARDWARE_INT)__asm__ __volatile__
    (
        /*
         * For hardware interrupts, use an absolute JMP instead of a relative JMP
         * since the position of this code is arbitrary in memory, and therefore
         * the compiler-generated offset will not be correct.
         */
        "jmp *%0\n"
        ".globl _KiInterruptTemplateDispatch\n_KiInterruptTemplateDispatch:\n"
        :
        : "a"(Handler)
    );
    else __asm__ __volatile__ ("jmp %c[x]\n":: [x] "i"(Handler));
}
887
888 #endif