[PERF]: Omit setting certain useless trap frame fields.
[reactos.git] / reactos / ntoskrnl / include / internal / trap_x.h
1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/include/internal/trap_x.h
5 * PURPOSE: Internal Inlined Functions for the Trap Handling Code
6 * PROGRAMMERS: ReactOS Portable Systems Group
7 */
8 #ifndef _TRAP_X_
9 #define _TRAP_X_
10
//
// Unreachable code hint for GCC 4.5.x, older GCC versions, and MSVC
//
#ifdef __GNUC__
#if __GNUC__ * 100 + __GNUC_MINOR__ >= 405
/* GCC 4.5+: tell the optimizer control flow cannot reach this point */
#define UNREACHABLE __builtin_unreachable()
#else
/* Older GCC has no unreachable hint; emit a trapping instruction instead */
#define UNREACHABLE __builtin_trap()
#endif
#elif defined(_MSC_VER) /* use defined() so -Wundef-style checks stay quiet */
#define UNREACHABLE __assume(0)
#else
/* Unknown compiler: no hint available, expands to nothing */
#define UNREACHABLE
#endif
25
26 //
27 // Debug Macros
28 //
//
// Dumps every field of a KTRAP_FRAME to the debugger output.
// Called by the exit-path sanity checks when a corrupt frame is detected.
//
VOID
FORCEINLINE
KiDumpTrapFrame(IN PKTRAP_FRAME TrapFrame)
{
    /* Dump the whole thing */
    DbgPrint("DbgEbp: %x\n", TrapFrame->DbgEbp);
    DbgPrint("DbgEip: %x\n", TrapFrame->DbgEip);
    DbgPrint("DbgArgMark: %x\n", TrapFrame->DbgArgMark);
    DbgPrint("DbgArgPointer: %x\n", TrapFrame->DbgArgPointer);
    DbgPrint("TempSegCs: %x\n", TrapFrame->TempSegCs);
    DbgPrint("TempEsp: %x\n", TrapFrame->TempEsp);
    DbgPrint("Dr0: %x\n", TrapFrame->Dr0);
    DbgPrint("Dr1: %x\n", TrapFrame->Dr1);
    DbgPrint("Dr2: %x\n", TrapFrame->Dr2);
    DbgPrint("Dr3: %x\n", TrapFrame->Dr3);
    DbgPrint("Dr6: %x\n", TrapFrame->Dr6);
    DbgPrint("Dr7: %x\n", TrapFrame->Dr7);
    DbgPrint("SegGs: %x\n", TrapFrame->SegGs);
    DbgPrint("SegEs: %x\n", TrapFrame->SegEs);
    DbgPrint("SegDs: %x\n", TrapFrame->SegDs);
    DbgPrint("Edx: %x\n", TrapFrame->Edx);
    DbgPrint("Ecx: %x\n", TrapFrame->Ecx);
    DbgPrint("Eax: %x\n", TrapFrame->Eax);
    DbgPrint("PreviousPreviousMode: %x\n", TrapFrame->PreviousPreviousMode);
    DbgPrint("ExceptionList: %x\n", TrapFrame->ExceptionList);
    DbgPrint("SegFs: %x\n", TrapFrame->SegFs);
    DbgPrint("Edi: %x\n", TrapFrame->Edi);
    DbgPrint("Esi: %x\n", TrapFrame->Esi);
    DbgPrint("Ebx: %x\n", TrapFrame->Ebx);
    DbgPrint("Ebp: %x\n", TrapFrame->Ebp);
    DbgPrint("ErrCode: %x\n", TrapFrame->ErrCode);
    DbgPrint("Eip: %x\n", TrapFrame->Eip);
    DbgPrint("SegCs: %x\n", TrapFrame->SegCs);
    DbgPrint("EFlags: %x\n", TrapFrame->EFlags);
    /* The fields below only hold valid data for ring transitions (HardwareEsp/
       HardwareSegSs) and V8086-mode traps (V86*), respectively */
    DbgPrint("HardwareEsp: %x\n", TrapFrame->HardwareEsp);
    DbgPrint("HardwareSegSs: %x\n", TrapFrame->HardwareSegSs);
    DbgPrint("V86Es: %x\n", TrapFrame->V86Es);
    DbgPrint("V86Ds: %x\n", TrapFrame->V86Ds);
    DbgPrint("V86Fs: %x\n", TrapFrame->V86Fs);
    DbgPrint("V86Gs: %x\n", TrapFrame->V86Gs);
}
70
71 #ifdef TRAP_DEBUG
//
// Stamps the debug fields of a freshly built trap frame so the exit-path
// checks can later verify that a genuine, untrampled frame is being restored.
//
VOID
FORCEINLINE
KiFillTrapFrameDebug(IN PKTRAP_FRAME TrapFrame)
{
    /* Set the debug information */
    TrapFrame->DbgArgPointer = TrapFrame->Edx;
    TrapFrame->DbgArgMark = 0xBADB0D00; /* magic verified in KiExitTrapDebugChecks */
    TrapFrame->DbgEip = TrapFrame->Eip;
    TrapFrame->DbgEbp = TrapFrame->Ebp;
}
82
83 VOID
84 FORCEINLINE
85 KiExitTrapDebugChecks(IN PKTRAP_FRAME TrapFrame,
86 IN KTRAP_STATE_BITS SkipBits)
87 {
88 /* Make sure interrupts are disabled */
89 if (__readeflags() & EFLAGS_INTERRUPT_MASK)
90 {
91 DbgPrint("Exiting with interrupts enabled: %lx\n", __readeflags());
92 while (TRUE);
93 }
94
95 /* Make sure this is a real trap frame */
96 if (TrapFrame->DbgArgMark != 0xBADB0D00)
97 {
98 DbgPrint("Exiting with an invalid trap frame? (No MAGIC in trap frame)\n");
99 KiDumpTrapFrame(TrapFrame);
100 while (TRUE);
101 }
102
103 /* Make sure we're not in user-mode or something */
104 if (Ke386GetFs() != KGDT_R0_PCR)
105 {
106 DbgPrint("Exiting with an invalid FS: %lx\n", Ke386GetFs());
107 while (TRUE);
108 }
109
110 /* Make sure we have a valid SEH chain */
111 if (KeGetPcr()->Tib.ExceptionList == 0)
112 {
113 DbgPrint("Exiting with NULL exception chain: %p\n", KeGetPcr()->Tib.ExceptionList);
114 while (TRUE);
115 }
116
117 /* Make sure we're restoring a valid SEH chain */
118 if (TrapFrame->ExceptionList == 0)
119 {
120 DbgPrint("Entered a trap with a NULL exception chain: %p\n", TrapFrame->ExceptionList);
121 while (TRUE);
122 }
123
124 /* If we're ignoring previous mode, make sure caller doesn't actually want it */
125 if ((SkipBits.SkipPreviousMode) && (TrapFrame->PreviousPreviousMode != -1))
126 {
127 DbgPrint("Exiting a trap witout restoring previous mode, yet previous mode seems valid: %lx", TrapFrame->PreviousPreviousMode);
128 while (TRUE);
129 }
130 }
131
//
// Verifies IRQL and APC state on the system-call exit path. Only calls
// returning to user mode are checked; kernel-mode callers are exempt.
// Bugchecks on violation. Only built when TRAP_DEBUG is defined.
//
VOID
FORCEINLINE
KiExitSystemCallDebugChecks(IN ULONG SystemCall,
                            IN PKTRAP_FRAME TrapFrame)
{
    KIRQL OldIrql;

    /* Check if this was a user call */
    if (KiUserMode(TrapFrame))
    {
        /* Make sure we are not returning with elevated IRQL */
        OldIrql = KeGetCurrentIrql();
        if (OldIrql != PASSIVE_LEVEL)
        {
            /* Forcibly put us in a sane state */
            KeGetPcr()->CurrentIrql = PASSIVE_LEVEL;
            _disable();

            /* Fail */
            KeBugCheckEx(IRQL_GT_ZERO_AT_SYSTEM_SERVICE,
                         SystemCall,
                         OldIrql,
                         0,
                         0);
        }

        /* Make sure we're not attached and that APCs are not disabled */
        if ((KeGetCurrentThread()->ApcStateIndex != CurrentApcEnvironment) ||
            (KeGetCurrentThread()->CombinedApcDisable != 0))
        {
            /* Fail */
            KeBugCheckEx(APC_INDEX_MISMATCH,
                         SystemCall,
                         KeGetCurrentThread()->ApcStateIndex,
                         KeGetCurrentThread()->CombinedApcDisable,
                         0);
        }
    }
}
171 #else
172 #define KiExitTrapDebugChecks(x, y)
173 #define KiFillTrapFrameDebug(x)
174 #define KiExitSystemCallDebugChecks(x, y)
175 #endif
176
177 //
178 // Helper Code
179 //
180 BOOLEAN
181 FORCEINLINE
182 KiUserTrap(IN PKTRAP_FRAME TrapFrame)
183 {
184 /* Anything else but Ring 0 is Ring 3 */
185 return (TrapFrame->SegCs & MODE_MASK);
186 }
187
188 //
189 // Assembly exit stubs
190 //
//
// Pops the trap frame and returns to an in-kernel system-call caller with a
// plain indirect JMP: EDX is loaded with the saved EIP and used as the jump
// target, and ESP is advanced past the whole kernel frame since no ring
// transition (and hence no IRET) is needed.
//
VOID
FORCEINLINE
/* Do not mark this as DECLSPEC_NORETURN because possibly executing code follows it! */
KiSystemCallReturn(IN PKTRAP_FRAME TrapFrame)
{
    /* Restore nonvolatiles, EAX, and do a "jump" back to the kernel caller */
    /* _KiSystemCallExit2 labels the instruction AFTER the jmp so other code
       can locate the end of this stub */
    __asm__ __volatile__
    (
        "movl %0, %%esp\n"
        "movl %c[b](%%esp), %%ebx\n"
        "movl %c[s](%%esp), %%esi\n"
        "movl %c[i](%%esp), %%edi\n"
        "movl %c[p](%%esp), %%ebp\n"
        "movl %c[a](%%esp), %%eax\n"
        "movl %c[e](%%esp), %%edx\n"
        "addl $%c[v],%%esp\n" /* A WHOLE *KERNEL* frame since we're not IRET'ing */
        "jmp *%%edx\n"
        ".globl _KiSystemCallExit2\n_KiSystemCallExit2:\n"
        :
        : "r"(TrapFrame),
          [b] "i"(KTRAP_FRAME_EBX),
          [s] "i"(KTRAP_FRAME_ESI),
          [i] "i"(KTRAP_FRAME_EDI),
          [p] "i"(KTRAP_FRAME_EBP),
          [a] "i"(KTRAP_FRAME_EAX),
          [e] "i"(KTRAP_FRAME_EIP),
          [v] "i"(KTRAP_FRAME_ESP)
        : "%esp"
    );
}
221
//
// Exits a system call through IRET. Only the nonvolatiles and EAX (the
// return value) are restored; ESP is advanced to the saved EIP slot so the
// IRET frame (EIP/CS/EFLAGS...) sits at the top of the stack.
// _KiSystemCallExit is the target of the patched branch in KiUserSystemCall.
//
VOID
FORCEINLINE
DECLSPEC_NORETURN
KiSystemCallTrapReturn(IN PKTRAP_FRAME TrapFrame)
{
    /* Regular interrupt exit, but we only restore EAX as a volatile */
    __asm__ __volatile__
    (
        ".globl _KiSystemCallExit\n_KiSystemCallExit:\n"
        "movl %0, %%esp\n"
        "movl %c[b](%%esp), %%ebx\n"
        "movl %c[s](%%esp), %%esi\n"
        "movl %c[i](%%esp), %%edi\n"
        "movl %c[p](%%esp), %%ebp\n"
        "movl %c[a](%%esp), %%eax\n"
        "addl $%c[e],%%esp\n"
        "iret\n"
        :
        : "r"(TrapFrame),
          [b] "i"(KTRAP_FRAME_EBX),
          [s] "i"(KTRAP_FRAME_ESI),
          [i] "i"(KTRAP_FRAME_EDI),
          [p] "i"(KTRAP_FRAME_EBP),
          [a] "i"(KTRAP_FRAME_EAX),
          [e] "i"(KTRAP_FRAME_EIP)
        : "%esp"
    );
    UNREACHABLE;
}
251
//
// Exits a fast system call through SYSEXIT. Per the SYSEXIT contract, the
// user-mode return EIP goes in EDX and the user-mode ESP in ECX. The whole
// user-transition frame (up to V86Es) is popped, and STI re-enables
// interrupts immediately before SYSEXIT since SYSENTER cleared them.
//
VOID
FORCEINLINE
DECLSPEC_NORETURN
KiSystemCallSysExitReturn(IN PKTRAP_FRAME TrapFrame)
{
    /* Restore nonvolatiles, EAX, and do a SYSEXIT back to the user caller */
    __asm__ __volatile__
    (
        "movl %0, %%esp\n"
        "movl %c[s](%%esp), %%esi\n"
        "movl %c[b](%%esp), %%ebx\n"
        "movl %c[i](%%esp), %%edi\n"
        "movl %c[p](%%esp), %%ebp\n"
        "movl %c[a](%%esp), %%eax\n"
        "movl %c[e](%%esp), %%edx\n" /* SYSEXIT says EIP in EDX */
        "movl %c[x](%%esp), %%ecx\n" /* SYSEXIT says ESP in ECX */
        "addl $%c[v],%%esp\n" /* A WHOLE *USER* frame since we're not IRET'ing */
        "sti\nsysexit\n"
        :
        : "r"(TrapFrame),
          [b] "i"(KTRAP_FRAME_EBX),
          [s] "i"(KTRAP_FRAME_ESI),
          [i] "i"(KTRAP_FRAME_EDI),
          [p] "i"(KTRAP_FRAME_EBP),
          [a] "i"(KTRAP_FRAME_EAX),
          [e] "i"(KTRAP_FRAME_EIP),
          [x] "i"(KTRAP_FRAME_ESP),
          [v] "i"(KTRAP_FRAME_V86_ES)
        : "%esp"
    );
    UNREACHABLE;
}
284
//
// Full interrupt exit: restores every general-purpose register (except ESP,
// which IRET itself reloads on a ring transition) and executes IRET from the
// saved EIP slot of the trap frame.
//
VOID
FORCEINLINE
DECLSPEC_NORETURN
KiTrapReturn(IN PKTRAP_FRAME TrapFrame)
{
    /* Regular interrupt exit */
    __asm__ __volatile__
    (
        "movl %0, %%esp\n"
        "movl %c[a](%%esp), %%eax\n"
        "movl %c[b](%%esp), %%ebx\n"
        "movl %c[c](%%esp), %%ecx\n"
        "movl %c[d](%%esp), %%edx\n"
        "movl %c[s](%%esp), %%esi\n"
        "movl %c[i](%%esp), %%edi\n"
        "movl %c[p](%%esp), %%ebp\n"
        "addl $%c[e],%%esp\n"
        "iret\n"
        :
        : "r"(TrapFrame),
          [a] "i"(KTRAP_FRAME_EAX),
          [b] "i"(KTRAP_FRAME_EBX),
          [c] "i"(KTRAP_FRAME_ECX),
          [d] "i"(KTRAP_FRAME_EDX),
          [s] "i"(KTRAP_FRAME_ESI),
          [i] "i"(KTRAP_FRAME_EDI),
          [p] "i"(KTRAP_FRAME_EBP),
          [e] "i"(KTRAP_FRAME_EIP)
        : "%esp"
    );
    UNREACHABLE;
}
317
//
// Minimal interrupt exit: restores NO registers at all — simply points ESP
// at the saved EIP slot of the trap frame and IRETs. The caller guarantees
// no register was clobbered since trap entry.
//
VOID
FORCEINLINE
DECLSPEC_NORETURN
KiDirectTrapReturn(IN PKTRAP_FRAME TrapFrame)
{
    /* Regular interrupt exit but we're not restoring any registers */
    __asm__ __volatile__
    (
        "movl %0, %%esp\n"
        "addl $%c[e],%%esp\n"
        "iret\n"
        :
        : "r"(TrapFrame),
          [e] "i"(KTRAP_FRAME_EIP)
        : "%esp"
    );
    UNREACHABLE;
}
336
//
// Exits via RET rather than IRET: restores the nonvolatiles, advances ESP to
// the saved EIP slot, and RETs — popping the saved EIP as the return address.
// Used for traps entered by near CALL rather than by an interrupt gate.
//
VOID
FORCEINLINE
DECLSPEC_NORETURN
KiCallReturn(IN PKTRAP_FRAME TrapFrame)
{
    /* Pops a trap frame out of the stack but returns with RET instead of IRET */
    __asm__ __volatile__
    (
        "movl %0, %%esp\n"
        "movl %c[b](%%esp), %%ebx\n"
        "movl %c[s](%%esp), %%esi\n"
        "movl %c[i](%%esp), %%edi\n"
        "movl %c[p](%%esp), %%ebp\n"
        "addl $%c[e],%%esp\n"
        "ret\n"
        :
        : "r"(TrapFrame),
          [b] "i"(KTRAP_FRAME_EBX),
          [s] "i"(KTRAP_FRAME_ESI),
          [i] "i"(KTRAP_FRAME_EDI),
          [p] "i"(KTRAP_FRAME_EBP),
          [e] "i"(KTRAP_FRAME_EIP)
        : "%esp"
    );
    UNREACHABLE;
}
363
//
// Exit for an "edited" trap frame (see the long comment in KiExitTrap):
// ESP is advanced only to the ErrCode slot, which KiExitTrap filled with the
// address of a hand-built 3-dword IRET frame (EIP/CS/EFLAGS) on the target
// stack; "movl (%esp), %esp" switches to that stack, then IRET completes
// the transfer without requiring a ring transition.
//
VOID
FORCEINLINE
DECLSPEC_NORETURN
KiEditedTrapReturn(IN PKTRAP_FRAME TrapFrame)
{
    /* Regular interrupt exit */
    __asm__ __volatile__
    (
        "movl %0, %%esp\n"
        "movl %c[a](%%esp), %%eax\n"
        "movl %c[b](%%esp), %%ebx\n"
        "movl %c[c](%%esp), %%ecx\n"
        "movl %c[d](%%esp), %%edx\n"
        "movl %c[s](%%esp), %%esi\n"
        "movl %c[i](%%esp), %%edi\n"
        "movl %c[p](%%esp), %%ebp\n"
        "addl $%c[e],%%esp\n"
        "movl (%%esp), %%esp\n"
        "iret\n"
        :
        : "r"(TrapFrame),
          [a] "i"(KTRAP_FRAME_EAX),
          [b] "i"(KTRAP_FRAME_EBX),
          [c] "i"(KTRAP_FRAME_ECX),
          [d] "i"(KTRAP_FRAME_EDX),
          [s] "i"(KTRAP_FRAME_ESI),
          [i] "i"(KTRAP_FRAME_EDI),
          [p] "i"(KTRAP_FRAME_EBP),
          [e] "i"(KTRAP_FRAME_ERROR_CODE) /* We *WANT* the error code since ESP is there! */
        : "%esp"
    );
    UNREACHABLE;
}
397
398 //
399 // "BOP" code used by VDM and V8086 Mode
400 //
401 VOID
402 FORCEINLINE
403 KiIssueBop(VOID)
404 {
405 /* Invalid instruction that an invalid opcode handler must trap and handle */
406 asm volatile(".byte 0xC4\n.byte 0xC4\n");
407 }
408
409 VOID
410 FORCEINLINE
411 KiUserSystemCall(IN PKTRAP_FRAME TrapFrame)
412 {
413 /*
414 * Kernel call or user call?
415 *
416 * This decision is made in inlined assembly because we need to patch
417 * the relative offset of the user-mode jump to point to the SYSEXIT
418 * routine if the CPU supports it. The only way to guarantee that a
419 * relative jnz/jz instruction is generated is to force it with the
420 * inline assembler.
421 */
422 asm volatile
423 (
424 "test $1, %0\n" /* MODE_MASK */
425 ".globl _KiSystemCallExitBranch\n_KiSystemCallExitBranch:\n"
426 "jnz _KiSystemCallExit\n"
427 :
428 : "r"(TrapFrame->SegCs)
429 );
430 }
431
432 //
433 // Generic Exit Routine
434 //
//
// Common trap-exit routine: validates state (debug builds), restores the SEH
// chain, previous mode and segments as requested by the Skip bits, then
// dispatches to the appropriate assembly exit stub above.
//
VOID
FORCEINLINE
DECLSPEC_NORETURN
KiExitTrap(IN PKTRAP_FRAME TrapFrame,
           IN UCHAR Skip)
{
    KTRAP_EXIT_SKIP_BITS SkipBits = { .Bits = Skip };
    PULONG ReturnStack;

    /* Debugging checks */
    KiExitTrapDebugChecks(TrapFrame, SkipBits);

    /* Restore the SEH handler chain */
    KeGetPcr()->Tib.ExceptionList = TrapFrame->ExceptionList;

    /* Check if the previous mode must be restored */
    if (__builtin_expect(!SkipBits.SkipPreviousMode, 0)) /* More INTS than SYSCALLs */
    {
        /* Restore it */
        KeGetCurrentThread()->PreviousMode = TrapFrame->PreviousPreviousMode;
    }

    /* Check if there are active debug registers */
    if (__builtin_expect(TrapFrame->Dr7 & ~DR7_RESERVED_MASK, 0))
    {
        /* Not handled yet */
        DbgPrint("Need Hardware Breakpoint Support!\n");
        DbgBreakPoint();
        while (TRUE);
    }

    /* Check if this was a V8086 trap */
    if (__builtin_expect(TrapFrame->EFlags & EFLAGS_V86_MASK, 0)) KiTrapReturn(TrapFrame);

    /* Check if the trap frame was edited */
    if (__builtin_expect(!(TrapFrame->SegCs & FRAME_EDITED), 0))
    {
        /*
         * An edited trap frame happens when we need to modify CS and/or ESP but
         * don't actually have a ring transition. This happens when a kernelmode
         * caller wants to perform an NtContinue to another kernel address, such
         * as in the case of SEH (basically, a longjmp), or to a user address.
         *
         * Therefore, the CPU never saved CS/ESP on the stack because we did not
         * get a trap frame due to a ring transition (there was no interrupt).
         * Even if we didn't want to restore CS to a new value, a problem occurs
         * due to the fact a normal RET would not work if we restored ESP since
         * RET would then try to read the result off the stack.
         *
         * The NT kernel solves this by adding 12 bytes of stack to the exiting
         * trap frame, in which EFLAGS, CS, and EIP are stored, and then saving
         * the ESP that's being requested into the ErrorCode field. It will then
         * exit with an IRET. This fixes both issues, because it gives the stack
         * some space where to hold the return address and then end up with the
         * wanted stack, and it uses IRET which allows a new CS to be inputted.
         *
         */

        /* Set CS that is requested */
        TrapFrame->SegCs = TrapFrame->TempSegCs;

        /* First make space on requested stack (3 dwords: EIP, CS, EFLAGS) */
        ReturnStack = (PULONG)(TrapFrame->TempEsp - 12);
        TrapFrame->ErrCode = (ULONG_PTR)ReturnStack;

        /* Now copy IRET frame */
        ReturnStack[0] = TrapFrame->Eip;
        ReturnStack[1] = TrapFrame->SegCs;
        ReturnStack[2] = TrapFrame->EFlags;

        /* Do special edited return */
        KiEditedTrapReturn(TrapFrame);
    }

    /* Check if this is a user trap */
    if (__builtin_expect(KiUserTrap(TrapFrame), 1)) /* Ring 3 is where we spend time */
    {
        /* Check if segments should be restored */
        if (!SkipBits.SkipSegments)
        {
            /* Restore segments */
            Ke386SetGs(TrapFrame->SegGs);
            Ke386SetEs(TrapFrame->SegEs);
            Ke386SetDs(TrapFrame->SegDs);
            Ke386SetFs(TrapFrame->SegFs);
        }

        /* Always restore FS since it goes from KPCR to TEB */
        Ke386SetFs(TrapFrame->SegFs);
    }

    /* Check for system call -- a system call skips volatiles! */
    if (__builtin_expect(SkipBits.SkipVolatiles, 0)) /* More INTs than SYSCALLs */
    {
        /* User or kernel call? (branches away to _KiSystemCallExit for user) */
        KiUserSystemCall(TrapFrame);

        /* Restore EFLags */
        __writeeflags(TrapFrame->EFlags);

        /* Call is kernel, so do a jump back since this wasn't a real INT */
        KiSystemCallReturn(TrapFrame);

        /* If we got here, this is SYSEXIT: are we stepping code? */
        if (!(TrapFrame->EFlags & EFLAGS_TF))
        {
            /* Restore user FS */
            Ke386SetFs(KGDT_R3_TEB | RPL_MASK);

            /* Remove interrupt flag (SYSEXIT path re-enables with STI) */
            TrapFrame->EFlags &= ~EFLAGS_INTERRUPT_MASK;
            __writeeflags(TrapFrame->EFlags);

            /* Exit through SYSEXIT */
            KiSystemCallSysExitReturn(TrapFrame);
        }

        /* Exit through IRETD, either due to debugging or due to lack of SYSEXIT */
        KiSystemCallTrapReturn(TrapFrame);
    }

    /* Return from interrupt */
    KiTrapReturn(TrapFrame);
}
559
560 //
561 // Virtual 8086 Mode Optimized Trap Exit
562 //
563 VOID
564 FORCEINLINE
565 KiExitV86Trap(IN PKTRAP_FRAME TrapFrame)
566 {
567 PKTHREAD Thread;
568 KIRQL OldIrql;
569
570 /* Get the thread */
571 Thread = KeGetCurrentThread();
572 while (TRUE)
573 {
574 /* Turn off the alerted state for kernel mode */
575 Thread->Alerted[KernelMode] = FALSE;
576
577 /* Are there pending user APCs? */
578 if (__builtin_expect(!Thread->ApcState.UserApcPending, 1)) break;
579
580 /* Raise to APC level and enable interrupts */
581 OldIrql = KfRaiseIrql(APC_LEVEL);
582 _enable();
583
584 /* Deliver APCs */
585 KiDeliverApc(UserMode, NULL, TrapFrame);
586
587 /* Restore IRQL and disable interrupts once again */
588 KfLowerIrql(OldIrql);
589 _disable();
590
591 /* Return if this isn't V86 mode anymore */
592 if (__builtin_expect(TrapFrame->EFlags & EFLAGS_V86_MASK, 0)) return;
593 }
594
595 /* If we got here, we're still in a valid V8086 context, so quit it */
596 if (__builtin_expect(TrapFrame->Dr7 & ~DR7_RESERVED_MASK, 0))
597 {
598 /* Not handled yet */
599 DbgPrint("Need Hardware Breakpoint Support!\n");
600 while (TRUE);
601 }
602
603 /* Return from interrupt */
604 KiTrapReturn(TrapFrame);
605 }
606
607 //
608 // Virtual 8086 Mode Optimized Trap Entry
609 //
//
// Optimized trap entry for Virtual-8086 mode: loads kernel segments, saves
// the SEH chain, clears the direction flag and captures DR7.
//
VOID
FORCEINLINE
KiEnterV86Trap(IN PKTRAP_FRAME TrapFrame)
{
    /* Load correct registers */
    Ke386SetFs(KGDT_R0_PCR);
    Ke386SetDs(KGDT_R3_DATA | RPL_MASK);
    Ke386SetEs(KGDT_R3_DATA | RPL_MASK);

    /* Save exception list */
    TrapFrame->ExceptionList = KeGetPcr()->Tib.ExceptionList;

    /* Clear direction flag (ABI requires DF=0 on function entry) */
    Ke386ClearDirectionFlag();

    /* Save DR7 and check for debugging */
    TrapFrame->Dr7 = __readdr(7);
    if (__builtin_expect(TrapFrame->Dr7 & ~DR7_RESERVED_MASK, 0))
    {
        DbgPrint("Need Hardware Breakpoint Support!\n");
        while (TRUE);
    }
}
633
634 //
635 // Interrupt Trap Entry
636 //
//
// Trap entry for hardware interrupts: fixes up segments for V86 or user-mode
// callers, saves and terminates the SEH chain, and initializes the debug
// state of the frame.
//
VOID
FORCEINLINE
KiEnterInterruptTrap(IN PKTRAP_FRAME TrapFrame)
{
    /* Check for V86 mode, otherwise check for ring 3 code */
    if (__builtin_expect(TrapFrame->EFlags & EFLAGS_V86_MASK, 0))
    {
        /* Restore V8086 segments into Protected Mode segments */
        TrapFrame->SegFs = TrapFrame->V86Fs;
        TrapFrame->SegGs = TrapFrame->V86Gs;
        TrapFrame->SegDs = TrapFrame->V86Ds;
        TrapFrame->SegEs = TrapFrame->V86Es;
    }
    else if (__builtin_expect(TrapFrame->SegCs != KGDT_R0_CODE, 1)) /* Ring 3 is more common */
    {
        /* Save segments and then switch to correct ones */
        TrapFrame->SegFs = Ke386GetFs();
        TrapFrame->SegGs = Ke386GetGs();
        TrapFrame->SegDs = Ke386GetDs();
        TrapFrame->SegEs = Ke386GetEs();
        Ke386SetFs(KGDT_R0_PCR);
        Ke386SetDs(KGDT_R3_DATA | RPL_MASK);
        Ke386SetEs(KGDT_R3_DATA | RPL_MASK);
    }

    /* Save exception list and terminate it */
    TrapFrame->ExceptionList = KeGetPcr()->Tib.ExceptionList;
    KeGetPcr()->Tib.ExceptionList = EXCEPTION_CHAIN_END;

    /* Clear direction flag (ABI requires DF=0 on function entry) */
    Ke386ClearDirectionFlag();

    /* Flush DR7 and check for debugging */
    TrapFrame->Dr7 = 0;
    if (__builtin_expect(KeGetCurrentThread()->DispatcherHeader.DebugActive & 0xFF, 0))
    {
        DbgPrint("Need Hardware Breakpoint Support!\n");
        while (TRUE);
    }

    /* Set debug header (no-op unless TRAP_DEBUG is defined) */
    KiFillTrapFrameDebug(TrapFrame);
}
680
681 //
682 // Generic Trap Entry
683 //
//
// Generic trap entry: establishes known-good DS/ES before touching any data,
// saves all caller segments, loads kernel FS, records the SEH chain and
// initializes the frame's debug state.
//
VOID
FORCEINLINE
KiEnterTrap(IN PKTRAP_FRAME TrapFrame)
{
    ULONG Ds, Es;

    /*
     * We really have to get a good DS/ES first before touching any data.
     *
     * These two reads will either go in a register (with optimizations ON) or
     * a stack variable (which is on SS:ESP, guaranteed to be good/valid).
     *
     * Because the assembly is marked volatile, the order of instructions is
     * as-is, otherwise the optimizer could simply get rid of our DS/ES.
     *
     */
    Ds = Ke386GetDs();
    Es = Ke386GetEs();
    Ke386SetDs(KGDT_R3_DATA | RPL_MASK);
    Ke386SetEs(KGDT_R3_DATA | RPL_MASK);
    /* Only now is it safe to write through DS into the trap frame */
    TrapFrame->SegDs = Ds;
    TrapFrame->SegEs = Es;

    /* Now we can save the other segments and then switch to the correct FS */
    TrapFrame->SegFs = Ke386GetFs();
    TrapFrame->SegGs = Ke386GetGs();
    Ke386SetFs(KGDT_R0_PCR);

    /* Save exception list */
    TrapFrame->ExceptionList = KeGetPcr()->Tib.ExceptionList;

    /* Check for V86 mode */
    if (__builtin_expect(TrapFrame->EFlags & EFLAGS_V86_MASK, 0))
    {
        /* Restore V8086 segments into Protected Mode segments */
        TrapFrame->SegFs = TrapFrame->V86Fs;
        TrapFrame->SegGs = TrapFrame->V86Gs;
        TrapFrame->SegDs = TrapFrame->V86Ds;
        TrapFrame->SegEs = TrapFrame->V86Es;
    }

    /* Clear direction flag (ABI requires DF=0 on function entry) */
    Ke386ClearDirectionFlag();

    /* Flush DR7 and check for debugging */
    TrapFrame->Dr7 = 0;
    if (__builtin_expect(KeGetCurrentThread()->DispatcherHeader.DebugActive & 0xFF, 0))
    {
        DbgPrint("Need Hardware Breakpoint Support!\n");
        while (TRUE);
    }

    /* Set debug header (no-op unless TRAP_DEBUG is defined) */
    KiFillTrapFrameDebug(TrapFrame);
}
739
740 //
741 // Generates a Trap Prolog Stub for the given name
742 //
#define KI_PUSH_FAKE_ERROR_CODE 0x1  /* CPU pushed no error code; reserve the slot */
#define KI_FAST_V86_TRAP        0x2  /* pass EFlags in EDX to the handler */
#define KI_NONVOLATILES_ONLY    0x4  /* save only EBX/ESI/EDI/EBP in the frame */
#define KI_FAST_SYSTEM_CALL     0x8  /* SYSENTER entry: load ESP from TSS.Esp0 */
#define KI_SOFTWARE_TRAP        0x10 /* entered via CALL; EIP is on caller's stack */
#define KI_HARDWARE_INT         0x20 /* interrupt template; EDX patched to KINTERRUPT */
/* Generate a noreturn trap entry point, or a returning trampoline, around KiTrapStub */
#define KiTrap(x, y) VOID DECLSPEC_NORETURN x(VOID) { KiTrapStub(y, x##Handler); UNREACHABLE; }
#define KiTrampoline(x, y) VOID DECLSPEC_NOINLINE x(VOID) { KiTrapStub(y, x##Handler); }
751
752 //
753 // Trap Prolog Stub
754 //
//
// Builds the trap prolog for an entry point generated by KiTrap/KiTrampoline:
// establishes the kernel stack (for SYSENTER), reserves the correct amount of
// KTRAP_FRAME space for the entry type, spills registers into the frame,
// loads ECX with the frame pointer (fastcall arg 1) and jumps to the C
// handler. NOTE: the KI_HARDWARE_INT variant is an in-memory template that
// gets copied and patched at interrupt-registration time, so its labels and
// instruction layout are load-bearing — do not reorder.
//
VOID
FORCEINLINE
KiTrapStub(IN ULONG Flags,
           IN PVOID Handler)
{
    ULONG FrameSize;

    /* Is this a fast system call? They don't have a stack! */
    /* Load ESP0 from the TSS through SS (the only known-good segment here) */
    if (Flags & KI_FAST_SYSTEM_CALL) __asm__ __volatile__
    (
        "movl %%ss:%c[t], %%esp\n"
        "movl %c[e](%%esp), %%esp\n"
        :
        : [e] "i"(FIELD_OFFSET(KTSS, Esp0)),
          [t] "i"(&PCR->TSS)
        : "%esp"
    );

    /* Check what kind of trap frame this trap requires */
    if (Flags & KI_SOFTWARE_TRAP)
    {
        /* Software traps need a complete non-ring transition trap frame */
        FrameSize = FIELD_OFFSET(KTRAP_FRAME, HardwareEsp);
    }
    else if (Flags & KI_FAST_SYSTEM_CALL)
    {
        /* SYSENTER requires us to build a complete ring transition trap frame */
        FrameSize = FIELD_OFFSET(KTRAP_FRAME, V86Es);

        /* And it only preserves nonvolatile registers */
        Flags |= KI_NONVOLATILES_ONLY;
    }
    else if (Flags & KI_PUSH_FAKE_ERROR_CODE)
    {
        /* If the trap doesn't have an error code, we'll make space for it */
        FrameSize = FIELD_OFFSET(KTRAP_FRAME, Eip);
    }
    else
    {
        /* The trap already has an error code, so just make space for the rest */
        FrameSize = FIELD_OFFSET(KTRAP_FRAME, ErrCode);
    }

    /* Software traps need to get their EIP from the caller's frame */
    if (Flags & KI_SOFTWARE_TRAP) __asm__ __volatile__ ("popl %%eax\n":::"%esp");

    /* Now go ahead and make space for this frame */
    __asm__ __volatile__ ("subl $%c[e],%%esp\n":: [e] "i"(FrameSize) : "%esp");

    /* Does the caller want volatiles only? */
    if (Flags & KI_NONVOLATILES_ONLY) __asm__ __volatile__
    (
        /* Then only EBX, ESI, EDI and EBP are saved */
        "movl %%ebx, %c[b](%%esp)\n"
        "movl %%esi, %c[s](%%esp)\n"
        "movl %%edi, %c[i](%%esp)\n"
        "movl %%ebp, %c[p](%%esp)\n"
        :
        : [b] "i"(FIELD_OFFSET(KTRAP_FRAME, Ebx)),
          [s] "i"(FIELD_OFFSET(KTRAP_FRAME, Esi)),
          [i] "i"(FIELD_OFFSET(KTRAP_FRAME, Edi)),
          [p] "i"(FIELD_OFFSET(KTRAP_FRAME, Ebp))
        : "%esp"
    );
    else __asm__ __volatile__
    (
        /* Otherwise, we save all the registers (except ESP) */
        "movl %%eax, %c[a](%%esp)\n"
        "movl %%ebx, %c[b](%%esp)\n"
        "movl %%ecx, %c[c](%%esp)\n"
        "movl %%edx, %c[d](%%esp)\n"
        "movl %%esi, %c[s](%%esp)\n"
        "movl %%edi, %c[i](%%esp)\n"
        "movl %%ebp, %c[p](%%esp)\n"
        :
        : [a] "i"(FIELD_OFFSET(KTRAP_FRAME, Eax)),
          [b] "i"(FIELD_OFFSET(KTRAP_FRAME, Ebx)),
          [c] "i"(FIELD_OFFSET(KTRAP_FRAME, Ecx)),
          [d] "i"(FIELD_OFFSET(KTRAP_FRAME, Edx)),
          [s] "i"(FIELD_OFFSET(KTRAP_FRAME, Esi)),
          [i] "i"(FIELD_OFFSET(KTRAP_FRAME, Edi)),
          [p] "i"(FIELD_OFFSET(KTRAP_FRAME, Ebp))
        : "%esp"
    );

    /* Now set parameter 1 (ECX) to point to the frame */
    __asm__ __volatile__ ("movl %%esp, %%ecx\n":::"%esp");

    /* For Fast-V86 traps, set parameter 2 (EDX) to hold EFlags */
    if (Flags & KI_FAST_V86_TRAP) __asm__ __volatile__
    (
        "movl %c[f](%%esp), %%edx\n"
        :
        : [f] "i"(FIELD_OFFSET(KTRAP_FRAME, EFlags))
    );
    else if (Flags & KI_HARDWARE_INT) __asm__ __volatile__
    (
        /*
         * For hardware interrupts, set parameter 2 (EDX) to hold KINTERRUPT.
         * This code will be dynamically patched when an interrupt is registered!
         */
        ".globl _KiInterruptTemplate2ndDispatch\n_KiInterruptTemplate2ndDispatch:\n"
        "movl $0, %%edx\n"
        ".globl _KiInterruptTemplateObject\n_KiInterruptTemplateObject:\n"
        ::: "%edx"
    );

    /* Now jump to the C handler */
    if (Flags & KI_HARDWARE_INT)__asm__ __volatile__
    (
        /*
         * For hardware interrupts, use an absolute JMP instead of a relative JMP
         * since the position of this code is arbitrary in memory, and therefore
         * the compiler-generated offset will not be correct.
         */
        "jmp *%0\n"
        ".globl _KiInterruptTemplateDispatch\n_KiInterruptTemplateDispatch:\n"
        :
        : "a"(Handler)
    );
    else __asm__ __volatile__ ("jmp %c[x]\n":: [x] "i"(Handler));
}
877
878 #endif