- Move from using include guards to pragma once.
/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/include/internal/trap_x.h
 * PURPOSE:         Internal Inlined Functions for the Trap Handling Code
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */

#pragma once

//
// Unreachable code hint for GCC 4.5+, older GCC versions, and MSVC
//
#ifdef __GNUC__
#if __GNUC__ * 100 + __GNUC_MINOR__ >= 405
#define UNREACHABLE __builtin_unreachable()
#else
#define UNREACHABLE __builtin_trap()
#endif
#elif defined(_MSC_VER)
#define UNREACHABLE __assume(0)
#else
#define UNREACHABLE
#endif
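
//
// Editor's illustrative sketch (not part of the original header): UNREACHABLE
// is placed after calls that never return, so the compiler neither warns about
// a missing return path nor emits dead code for it. Any DECLSPEC_NORETURN
// callee works; KeBugCheckEx is just an example here.
//
// DECLSPEC_NORETURN
// VOID
// ExampleFatalPath(VOID)
// {
//     /* This call never returns... */
//     KeBugCheckEx(UNEXPECTED_KERNEL_MODE_TRAP, 0, 0, 0, 0);
//
//     /* ...so tell the compiler this point cannot be reached */
//     UNREACHABLE;
// }
//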

//
// Debug Macros
//
VOID
FORCEINLINE
KiDumpTrapFrame(IN PKTRAP_FRAME TrapFrame)
{
    /* Dump the whole thing */
    DbgPrint("DbgEbp: %x\n", TrapFrame->DbgEbp);
    DbgPrint("DbgEip: %x\n", TrapFrame->DbgEip);
    DbgPrint("DbgArgMark: %x\n", TrapFrame->DbgArgMark);
    DbgPrint("DbgArgPointer: %x\n", TrapFrame->DbgArgPointer);
    DbgPrint("TempSegCs: %x\n", TrapFrame->TempSegCs);
    DbgPrint("TempEsp: %x\n", TrapFrame->TempEsp);
    DbgPrint("Dr0: %x\n", TrapFrame->Dr0);
    DbgPrint("Dr1: %x\n", TrapFrame->Dr1);
    DbgPrint("Dr2: %x\n", TrapFrame->Dr2);
    DbgPrint("Dr3: %x\n", TrapFrame->Dr3);
    DbgPrint("Dr6: %x\n", TrapFrame->Dr6);
    DbgPrint("Dr7: %x\n", TrapFrame->Dr7);
    DbgPrint("SegGs: %x\n", TrapFrame->SegGs);
    DbgPrint("SegEs: %x\n", TrapFrame->SegEs);
    DbgPrint("SegDs: %x\n", TrapFrame->SegDs);
    DbgPrint("Edx: %x\n", TrapFrame->Edx);
    DbgPrint("Ecx: %x\n", TrapFrame->Ecx);
    DbgPrint("Eax: %x\n", TrapFrame->Eax);
    DbgPrint("PreviousPreviousMode: %x\n", TrapFrame->PreviousPreviousMode);
    DbgPrint("ExceptionList: %x\n", TrapFrame->ExceptionList);
    DbgPrint("SegFs: %x\n", TrapFrame->SegFs);
    DbgPrint("Edi: %x\n", TrapFrame->Edi);
    DbgPrint("Esi: %x\n", TrapFrame->Esi);
    DbgPrint("Ebx: %x\n", TrapFrame->Ebx);
    DbgPrint("Ebp: %x\n", TrapFrame->Ebp);
    DbgPrint("ErrCode: %x\n", TrapFrame->ErrCode);
    DbgPrint("Eip: %x\n", TrapFrame->Eip);
    DbgPrint("SegCs: %x\n", TrapFrame->SegCs);
    DbgPrint("EFlags: %x\n", TrapFrame->EFlags);
    DbgPrint("HardwareEsp: %x\n", TrapFrame->HardwareEsp);
    DbgPrint("HardwareSegSs: %x\n", TrapFrame->HardwareSegSs);
    DbgPrint("V86Es: %x\n", TrapFrame->V86Es);
    DbgPrint("V86Ds: %x\n", TrapFrame->V86Ds);
    DbgPrint("V86Fs: %x\n", TrapFrame->V86Fs);
    DbgPrint("V86Gs: %x\n", TrapFrame->V86Gs);
}

#ifdef TRAP_DEBUG
VOID
FORCEINLINE
KiFillTrapFrameDebug(IN PKTRAP_FRAME TrapFrame)
{
    /* Set the debug information */
    TrapFrame->DbgArgPointer = TrapFrame->Edx;
    TrapFrame->DbgArgMark = 0xBADB0D00;
    TrapFrame->DbgEip = TrapFrame->Eip;
    TrapFrame->DbgEbp = TrapFrame->Ebp;
}

VOID
FORCEINLINE
KiExitTrapDebugChecks(IN PKTRAP_FRAME TrapFrame,
                      IN KTRAP_EXIT_SKIP_BITS SkipBits)
{
    /* Make sure interrupts are disabled */
    if (__readeflags() & EFLAGS_INTERRUPT_MASK)
    {
        DbgPrint("Exiting with interrupts enabled: %lx\n", __readeflags());
        while (TRUE);
    }

    /* Make sure this is a real trap frame */
    if (TrapFrame->DbgArgMark != 0xBADB0D00)
    {
        DbgPrint("Exiting with an invalid trap frame? (No MAGIC in trap frame)\n");
        KiDumpTrapFrame(TrapFrame);
        while (TRUE);
    }

    /* Make sure we're not in user-mode or something */
    if (Ke386GetFs() != KGDT_R0_PCR)
    {
        DbgPrint("Exiting with an invalid FS: %lx\n", Ke386GetFs());
        while (TRUE);
    }

    /* Make sure we have a valid SEH chain */
    if (KeGetPcr()->Tib.ExceptionList == 0)
    {
        DbgPrint("Exiting with NULL exception chain: %p\n", KeGetPcr()->Tib.ExceptionList);
        while (TRUE);
    }

    /* Make sure we're restoring a valid SEH chain */
    if (TrapFrame->ExceptionList == 0)
    {
        DbgPrint("Entered a trap with a NULL exception chain: %p\n", TrapFrame->ExceptionList);
        while (TRUE);
    }

    /* If we're ignoring previous mode, make sure caller doesn't actually want it */
    if ((SkipBits.SkipPreviousMode) && (TrapFrame->PreviousPreviousMode != -1))
    {
        DbgPrint("Exiting a trap without restoring previous mode, yet previous mode seems valid: %lx\n", TrapFrame->PreviousPreviousMode);
        while (TRUE);
    }
}

VOID
FORCEINLINE
KiExitSystemCallDebugChecks(IN ULONG SystemCall,
                            IN PKTRAP_FRAME TrapFrame)
{
    KIRQL OldIrql;

    /* Check if this was a user call */
    if (KiUserMode(TrapFrame))
    {
        /* Make sure we are not returning with elevated IRQL */
        OldIrql = KeGetCurrentIrql();
        if (OldIrql != PASSIVE_LEVEL)
        {
            /* Forcibly put us in a sane state */
            KeGetPcr()->CurrentIrql = PASSIVE_LEVEL;
            _disable();

            /* Fail */
            KeBugCheckEx(IRQL_GT_ZERO_AT_SYSTEM_SERVICE,
                         SystemCall,
                         OldIrql,
                         0,
                         0);
        }

        /* Make sure we're not attached and that APCs are not disabled */
        if ((KeGetCurrentThread()->ApcStateIndex != CurrentApcEnvironment) ||
            (KeGetCurrentThread()->CombinedApcDisable != 0))
        {
            /* Fail */
            KeBugCheckEx(APC_INDEX_MISMATCH,
                         SystemCall,
                         KeGetCurrentThread()->ApcStateIndex,
                         KeGetCurrentThread()->CombinedApcDisable,
                         0);
        }
    }
}
#else
#define KiExitTrapDebugChecks(x, y)
#define KiFillTrapFrameDebug(x)
#define KiExitSystemCallDebugChecks(x, y)
#endif

//
// Helper Code
//
BOOLEAN
FORCEINLINE
KiUserTrap(IN PKTRAP_FRAME TrapFrame)
{
    /* Anything else but Ring 0 is Ring 3 */
    return (TrapFrame->SegCs & MODE_MASK);
}
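
//
// Editor's note (illustrative, not part of the original header): the low bits
// of a code selector hold its privilege level, so masking CS with MODE_MASK is
// enough to distinguish ring 0 from ring 3. Assuming the usual NT GDT
// selectors:
//
//     ASSERT((KGDT_R0_CODE & MODE_MASK) == 0);              /* kernel trap */
//     ASSERT(((KGDT_R3_CODE | RPL_MASK) & MODE_MASK) != 0); /* user trap   */
//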

//
// "BOP" code used by VDM and V8086 Mode
//
VOID
FORCEINLINE
KiIssueBop(VOID)
{
    /* Invalid instruction that an invalid opcode handler must trap and handle */
    asm volatile(".byte 0xC4\n.byte 0xC4\n");
}

VOID
FORCEINLINE
KiUserSystemCall(IN PKTRAP_FRAME TrapFrame)
{
    /*
     * Kernel call or user call?
     *
     * This decision is made in inlined assembly because we need to patch
     * the relative offset of the user-mode jump to point to the SYSEXIT
     * routine if the CPU supports it. The only way to guarantee that a
     * relative jnz/jz instruction is generated is to force it with the
     * inline assembler.
     */
    asm volatile
    (
        "test $1, %0\n" /* MODE_MASK */
        ".globl _KiSystemCallExitBranch\n_KiSystemCallExitBranch:\n"
        "jnz _KiSystemCallExit\n"
        :
        : "r"(TrapFrame->SegCs)
    );
}
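
//
// Editor's illustrative sketch (not part of the original header): the jnz
// above sits at the exported _KiSystemCallExitBranch label so that its
// relative offset can be patched at runtime when SYSEXIT is available, as the
// comment above explains. The helper below is hypothetical, assumes an extern
// declaration for the label, and assumes the assembler emitted the short form
// of the jump (0x75, rel8); it only illustrates the patching idea.
//
// VOID
// ExamplePatchSyscallExitBranch(IN PVOID NewTarget)
// {
//     /* Hypothetical: the label marks the start of the 2-byte short jnz */
//     PUCHAR Instruction = (PUCHAR)&KiSystemCallExitBranch;
//
//     /* Recompute the rel8 displacement relative to the end of the jnz */
//     Instruction[1] = (UCHAR)((ULONG_PTR)NewTarget -
//                              ((ULONG_PTR)Instruction + 2));
// }
//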

//
// Generates an Exit Epilog Stub for the given name
//
#define KI_FUNCTION_CALL            0x1
#define KI_EDITED_FRAME             0x2
#define KI_DIRECT_EXIT              0x4
#define KI_FAST_SYSTEM_CALL_EXIT    0x8
#define KI_SYSTEM_CALL_EXIT         0x10
#define KI_SYSTEM_CALL_JUMP         0x20
#define KiTrapExitStub(x, y)  VOID FORCEINLINE DECLSPEC_NORETURN x(IN PKTRAP_FRAME TrapFrame) { KiTrapExit(TrapFrame, y); UNREACHABLE; }
#define KiTrapExitStub2(x, y) VOID FORCEINLINE x(IN PKTRAP_FRAME TrapFrame) { KiTrapExit(TrapFrame, y); }
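
//
// Editor's note (illustrative, not part of the original header): for example,
// KiTrapExitStub(KiTrapReturn, 0) further below expands to a forceinlined,
// noreturn wrapper around the master epilog:
//
//     VOID FORCEINLINE DECLSPEC_NORETURN
//     KiTrapReturn(IN PKTRAP_FRAME TrapFrame)
//     {
//         KiTrapExit(TrapFrame, 0);
//         UNREACHABLE;
//     }
//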

//
// How volatiles will be restored
//
#define KI_EAX_NO_VOLATILES         0x0
#define KI_EAX_ONLY                 0x1
#define KI_ALL_VOLATILES            0x2

//
// Exit mechanism to use
//
#define KI_EXIT_IRET                0x0
#define KI_EXIT_SYSEXIT             0x1
#define KI_EXIT_JMP                 0x2
#define KI_EXIT_RET                 0x3

//
// Master Trap Epilog
//
VOID
FORCEINLINE
KiTrapExit(IN PKTRAP_FRAME TrapFrame,
           IN ULONG Flags)
{
    ULONG FrameSize = FIELD_OFFSET(KTRAP_FRAME, Eip);
    ULONG ExitMechanism = KI_EXIT_IRET, Volatiles = KI_ALL_VOLATILES, NonVolatiles = TRUE;
    ULONG EcxField = FIELD_OFFSET(KTRAP_FRAME, Ecx), EdxField = FIELD_OFFSET(KTRAP_FRAME, Edx);

    /* System call exit needs a special label */
    if (Flags & KI_SYSTEM_CALL_EXIT) __asm__ __volatile__
    (
        ".globl _KiSystemCallExit\n_KiSystemCallExit:\n"
    );

    /* Start by making the trap frame equal to the stack */
    __asm__ __volatile__
    (
        "movl %0, %%esp\n"
        :
        : "r"(TrapFrame)
        : "%esp"
    );

    /* Check what kind of trap frame this trap requires */
    if (Flags & KI_FUNCTION_CALL)
    {
        /* These calls have an EIP on the stack they need */
        ExitMechanism = KI_EXIT_RET;
        Volatiles = FALSE;
    }
    else if (Flags & KI_EDITED_FRAME)
    {
        /* Edited frames store a new ESP in the error code field */
        FrameSize = FIELD_OFFSET(KTRAP_FRAME, ErrCode);
    }
    else if (Flags & KI_DIRECT_EXIT)
    {
        /* Exits directly without restoring anything, interrupt frame on stack */
        NonVolatiles = Volatiles = FALSE;
    }
    else if (Flags & KI_FAST_SYSTEM_CALL_EXIT)
    {
        /* We have a fake interrupt stack with a ring transition */
        FrameSize = FIELD_OFFSET(KTRAP_FRAME, V86Es);
        ExitMechanism = KI_EXIT_SYSEXIT;

        /* SYSEXIT wants EIP in EDX and ESP in ECX */
        EcxField = FIELD_OFFSET(KTRAP_FRAME, HardwareEsp);
        EdxField = FIELD_OFFSET(KTRAP_FRAME, Eip);
    }
    else if (Flags & KI_SYSTEM_CALL_EXIT)
    {
        /* Only restore EAX */
        Volatiles = KI_EAX_ONLY;
    }
    else if (Flags & KI_SYSTEM_CALL_JUMP)
    {
        /* We have a fake interrupt stack with no ring transition */
        FrameSize = FIELD_OFFSET(KTRAP_FRAME, HardwareEsp);
        Volatiles = KI_EAX_ONLY;
        ExitMechanism = KI_EXIT_JMP;
    }

    /* Restore the non volatiles */
    if (NonVolatiles) __asm__ __volatile__
    (
        "movl %c[b](%%esp), %%ebx\n"
        "movl %c[s](%%esp), %%esi\n"
        "movl %c[i](%%esp), %%edi\n"
        "movl %c[p](%%esp), %%ebp\n"
        :
        : [b] "i"(FIELD_OFFSET(KTRAP_FRAME, Ebx)),
          [s] "i"(FIELD_OFFSET(KTRAP_FRAME, Esi)),
          [i] "i"(FIELD_OFFSET(KTRAP_FRAME, Edi)),
          [p] "i"(FIELD_OFFSET(KTRAP_FRAME, Ebp))
        : "%esp"
    );

    /* Restore EAX if volatiles must be restored */
    if (Volatiles) __asm__ __volatile__
    (
        "movl %c[a](%%esp), %%eax\n":: [a] "i"(FIELD_OFFSET(KTRAP_FRAME, Eax)) : "%esp"
    );

    /* Restore the other volatiles if needed */
    if (Volatiles == KI_ALL_VOLATILES) __asm__ __volatile__
    (
        "movl %c[c](%%esp), %%ecx\n"
        "movl %c[d](%%esp), %%edx\n"
        :
        : [c] "i"(EcxField),
          [d] "i"(EdxField)
        : "%esp"
    );

    /* Ring 0 system calls jump back to EDX */
    if (Flags & KI_SYSTEM_CALL_JUMP) __asm__ __volatile__
    (
        "movl %c[d](%%esp), %%edx\n":: [d] "i"(FIELD_OFFSET(KTRAP_FRAME, Eip)) : "%esp"
    );

    /* Now destroy the trap frame on the stack */
    __asm__ __volatile__ ("addl $%c[e],%%esp\n":: [e] "i"(FrameSize) : "%esp");

    /* Edited traps need to change to a new ESP */
    if (Flags & KI_EDITED_FRAME) __asm__ __volatile__ ("movl (%%esp), %%esp\n":::"%esp");

    /* Check the exit mechanism and apply it */
    if (ExitMechanism == KI_EXIT_RET) __asm__ __volatile__("ret\n"::: "%esp");
    else if (ExitMechanism == KI_EXIT_IRET) __asm__ __volatile__("iret\n"::: "%esp");
    else if (ExitMechanism == KI_EXIT_JMP) __asm__ __volatile__("jmp *%%edx\n.globl _KiSystemCallExit2\n_KiSystemCallExit2:\n"::: "%esp");
    else if (ExitMechanism == KI_EXIT_SYSEXIT) __asm__ __volatile__("sti\nsysexit\n"::: "%esp");
}
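
//
// Editor's note (illustrative, not from the original header): the "%c[name]"
// operand modifier used above makes GCC print an "i" (immediate) operand as a
// bare constant, so a FIELD_OFFSET value can serve as a memory displacement.
// A standalone sketch of the same idiom:
//
//     ULONG Value;
//     __asm__ __volatile__
//     (
//         "movl %c[o](%%esp), %0\n"   /* load the DWORD at esp + offset */
//         : "=r"(Value)
//         : [o] "i"(FIELD_OFFSET(KTRAP_FRAME, Eax))
//     );
//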

//
// All the specific trap epilog stubs
//
KiTrapExitStub (KiTrapReturn,               0);
KiTrapExitStub (KiDirectTrapReturn,         KI_DIRECT_EXIT);
KiTrapExitStub (KiCallReturn,               KI_FUNCTION_CALL);
KiTrapExitStub (KiEditedTrapReturn,         KI_EDITED_FRAME);
KiTrapExitStub2(KiSystemCallReturn,         KI_SYSTEM_CALL_JUMP);
KiTrapExitStub (KiSystemCallSysExitReturn,  KI_FAST_SYSTEM_CALL_EXIT);
KiTrapExitStub (KiSystemCallTrapReturn,     KI_SYSTEM_CALL_EXIT);

//
// Generic Exit Routine
//
VOID
FORCEINLINE
DECLSPEC_NORETURN
KiExitTrap(IN PKTRAP_FRAME TrapFrame,
           IN UCHAR Skip)
{
    KTRAP_EXIT_SKIP_BITS SkipBits = { .Bits = Skip };
    PULONG ReturnStack;

    /* Debugging checks */
    KiExitTrapDebugChecks(TrapFrame, SkipBits);

    /* Restore the SEH handler chain */
    KeGetPcr()->Tib.ExceptionList = TrapFrame->ExceptionList;

    /* Check if the previous mode must be restored */
    if (__builtin_expect(!SkipBits.SkipPreviousMode, 0)) /* More INTS than SYSCALLs */
    {
        /* Restore it */
        KeGetCurrentThread()->PreviousMode = TrapFrame->PreviousPreviousMode;
    }

    /* Check if there are active debug registers */
    if (__builtin_expect(TrapFrame->Dr7 & ~DR7_RESERVED_MASK, 0))
    {
        /* Not handled yet */
        DbgPrint("Need Hardware Breakpoint Support!\n");
        DbgBreakPoint();
        while (TRUE);
    }

    /* Check if this was a V8086 trap */
    if (__builtin_expect(TrapFrame->EFlags & EFLAGS_V86_MASK, 0)) KiTrapReturn(TrapFrame);

    /* Check if the trap frame was edited */
    if (__builtin_expect(!(TrapFrame->SegCs & FRAME_EDITED), 0))
    {
        /*
         * An edited trap frame happens when we need to modify CS and/or ESP but
         * don't actually have a ring transition. This happens when a kernel-mode
         * caller wants to perform an NtContinue to another kernel address, such
         * as in the case of SEH (basically, a longjmp), or to a user address.
         *
         * Therefore, the CPU never saved CS/ESP on the stack because we did not
         * get a trap frame due to a ring transition (there was no interrupt).
         * Even if we didn't want to restore CS to a new value, a problem occurs
         * because a normal RET would not work if we restored ESP, since RET
         * would then try to read the return address off the new stack.
         *
         * The NT kernel solves this by adding 12 bytes of stack to the exiting
         * trap frame, in which EFLAGS, CS, and EIP are stored, and then saving
         * the ESP that's being requested into the ErrCode field. It will then
         * exit with an IRET. This fixes both issues, because it gives the stack
         * some space in which to hold the return address and then ends up on
         * the wanted stack, and it uses IRET, which allows a new CS to be loaded.
         */

        /* Set CS that is requested */
        TrapFrame->SegCs = TrapFrame->TempSegCs;

        /* First make space on requested stack */
        ReturnStack = (PULONG)(TrapFrame->TempEsp - 12);
        TrapFrame->ErrCode = (ULONG_PTR)ReturnStack;

        /* Now copy IRET frame */
        ReturnStack[0] = TrapFrame->Eip;
        ReturnStack[1] = TrapFrame->SegCs;
        ReturnStack[2] = TrapFrame->EFlags;

        /* Do special edited return */
        KiEditedTrapReturn(TrapFrame);
    }

    /* Check if this is a user trap */
    if (__builtin_expect(KiUserTrap(TrapFrame), 1)) /* Ring 3 is where we spend time */
    {
        /* Check if segments should be restored */
        if (!SkipBits.SkipSegments)
        {
            /* Restore segments */
            Ke386SetGs(TrapFrame->SegGs);
            Ke386SetEs(TrapFrame->SegEs);
            Ke386SetDs(TrapFrame->SegDs);
            Ke386SetFs(TrapFrame->SegFs);
        }

        /* Always restore FS since it goes from KPCR to TEB */
        Ke386SetFs(TrapFrame->SegFs);
    }

    /* Check for system call -- a system call skips volatiles! */
    if (__builtin_expect(SkipBits.SkipVolatiles, 0)) /* More INTs than SYSCALLs */
    {
        /* User or kernel call? */
        KiUserSystemCall(TrapFrame);

        /* Restore EFlags */
        __writeeflags(TrapFrame->EFlags);

        /* Call is kernel, so do a jump back since this wasn't a real INT */
        KiSystemCallReturn(TrapFrame);

        /* If we got here, this is SYSEXIT: are we stepping code? */
        if (!(TrapFrame->EFlags & EFLAGS_TF))
        {
            /* Restore user FS */
            Ke386SetFs(KGDT_R3_TEB | RPL_MASK);

            /* Remove interrupt flag */
            TrapFrame->EFlags &= ~EFLAGS_INTERRUPT_MASK;
            __writeeflags(TrapFrame->EFlags);

            /* Exit through SYSEXIT */
            KiSystemCallSysExitReturn(TrapFrame);
        }

        /* Exit through IRETD, either due to debugging or due to lack of SYSEXIT */
        KiSystemCallTrapReturn(TrapFrame);
    }

    /* Return from interrupt */
    KiTrapReturn(TrapFrame);
}

//
// Virtual 8086 Mode Optimized Trap Exit
//
VOID
FORCEINLINE
KiExitV86Trap(IN PKTRAP_FRAME TrapFrame)
{
    PKTHREAD Thread;
    KIRQL OldIrql;

    /* Get the thread */
    Thread = KeGetCurrentThread();
    while (TRUE)
    {
        /* Turn off the alerted state for kernel mode */
        Thread->Alerted[KernelMode] = FALSE;

        /* Are there pending user APCs? */
        if (__builtin_expect(!Thread->ApcState.UserApcPending, 1)) break;

        /* Raise to APC level and enable interrupts */
        OldIrql = KfRaiseIrql(APC_LEVEL);
        _enable();

        /* Deliver APCs */
        KiDeliverApc(UserMode, NULL, TrapFrame);

        /* Restore IRQL and disable interrupts once again */
        KfLowerIrql(OldIrql);
        _disable();

        /* Return if this isn't V86 mode anymore */
        if (__builtin_expect(!(TrapFrame->EFlags & EFLAGS_V86_MASK), 0)) return;
    }

    /* If we got here, we're still in a valid V8086 context, so quit it */
    if (__builtin_expect(TrapFrame->Dr7 & ~DR7_RESERVED_MASK, 0))
    {
        /* Not handled yet */
        DbgPrint("Need Hardware Breakpoint Support!\n");
        while (TRUE);
    }

    /* Return from interrupt */
    KiTrapReturn(TrapFrame);
}

//
// Virtual 8086 Mode Optimized Trap Entry
//
VOID
FORCEINLINE
KiEnterV86Trap(IN PKTRAP_FRAME TrapFrame)
{
    /* Save exception list */
    TrapFrame->ExceptionList = KeGetPcr()->Tib.ExceptionList;

    /* Save DR7 and check for debugging */
    TrapFrame->Dr7 = __readdr(7);
    if (__builtin_expect(TrapFrame->Dr7 & ~DR7_RESERVED_MASK, 0))
    {
        DbgPrint("Need Hardware Breakpoint Support!\n");
        while (TRUE);
    }
}

//
// Interrupt Trap Entry
//
VOID
FORCEINLINE
KiEnterInterruptTrap(IN PKTRAP_FRAME TrapFrame)
{
    /* Save exception list and terminate it */
    TrapFrame->ExceptionList = KeGetPcr()->Tib.ExceptionList;
    KeGetPcr()->Tib.ExceptionList = EXCEPTION_CHAIN_END;

    /* Flush DR7 and check for debugging */
    TrapFrame->Dr7 = 0;
    if (__builtin_expect(KeGetCurrentThread()->DispatcherHeader.DebugActive & 0xFF, 0))
    {
        DbgPrint("Need Hardware Breakpoint Support!\n");
        while (TRUE);
    }

    /* Set debug header */
    KiFillTrapFrameDebug(TrapFrame);
}

//
// Generic Trap Entry
//
VOID
FORCEINLINE
KiEnterTrap(IN PKTRAP_FRAME TrapFrame)
{
    /* Save exception list */
    TrapFrame->ExceptionList = KeGetPcr()->Tib.ExceptionList;

    /* Flush DR7 and check for debugging */
    TrapFrame->Dr7 = 0;
    if (__builtin_expect(KeGetCurrentThread()->DispatcherHeader.DebugActive & 0xFF, 0))
    {
        DbgPrint("Need Hardware Breakpoint Support!\n");
        while (TRUE);
    }

    /* Set debug header */
    KiFillTrapFrameDebug(TrapFrame);
}
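
//
// Editor's illustrative sketch (not part of the original header): a C-level
// trap handler would typically bracket its work with the entry/exit helpers
// above. The handler name is hypothetical, and KTRAP_EXIT_SKIP_BITS is assumed
// to expose the Skip* bits and the Bits byte used by KiExitTrap.
//
// VOID
// ExampleTrapHandler(IN PKTRAP_FRAME TrapFrame)
// {
//     KTRAP_EXIT_SKIP_BITS Skip = { .Bits = 0 };
//
//     /* Save the SEH chain and set up the debug fields */
//     KiEnterTrap(TrapFrame);
//
//     /* ... handle the trap here ... */
//
//     /* Skip the previous-mode restore on the way out, restore the rest */
//     Skip.SkipPreviousMode = TRUE;
//     KiExitTrap(TrapFrame, Skip.Bits);
// }
//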