[PERF]: Make all trap handlers, trap exit code, fatal exit code, and exception dispat...
[reactos.git] / reactos / ntoskrnl / include / internal / trap_x.h
1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: BSD - See COPYING.ARM in the top level directory
4  * FILE:            ntoskrnl/include/internal/trap_x.h
5 * PURPOSE: Internal Inlined Functions for the Trap Handling Code
6 * PROGRAMMERS: ReactOS Portable Systems Group
7 */
8
9 //
10 // Debug Macros
11 //
VOID
FORCEINLINE
KiDumpTrapFrame(IN PKTRAP_FRAME TrapFrame)
{
    /*
     * Print every field of the x86 KTRAP_FRAME through DPRINT1 for
     * post-mortem debugging (used by the exit-path sanity checks when the
     * debug magic is missing). The print order follows the frame layout:
     * debug/temp fields, debug registers, extra segments, volatiles,
     * previous-mode/SEH bookkeeping, nonvolatiles, and finally the
     * hardware-pushed portion (ErrCode..HardwareSegSs) and V86 segments.
     */
    /* Dump the whole thing */
    DPRINT1("DbgEbp: %x\n", TrapFrame->DbgEbp);
    DPRINT1("DbgEip: %x\n", TrapFrame->DbgEip);
    DPRINT1("DbgArgMark: %x\n", TrapFrame->DbgArgMark);
    DPRINT1("DbgArgPointer: %x\n", TrapFrame->DbgArgPointer);
    DPRINT1("TempSegCs: %x\n", TrapFrame->TempSegCs);
    DPRINT1("TempEsp: %x\n", TrapFrame->TempEsp);
    DPRINT1("Dr0: %x\n", TrapFrame->Dr0);
    DPRINT1("Dr1: %x\n", TrapFrame->Dr1);
    DPRINT1("Dr2: %x\n", TrapFrame->Dr2);
    DPRINT1("Dr3: %x\n", TrapFrame->Dr3);
    DPRINT1("Dr6: %x\n", TrapFrame->Dr6);
    DPRINT1("Dr7: %x\n", TrapFrame->Dr7);
    DPRINT1("SegGs: %x\n", TrapFrame->SegGs);
    DPRINT1("SegEs: %x\n", TrapFrame->SegEs);
    DPRINT1("SegDs: %x\n", TrapFrame->SegDs);
    DPRINT1("Edx: %x\n", TrapFrame->Edx);
    DPRINT1("Ecx: %x\n", TrapFrame->Ecx);
    DPRINT1("Eax: %x\n", TrapFrame->Eax);
    DPRINT1("PreviousPreviousMode: %x\n", TrapFrame->PreviousPreviousMode);
    DPRINT1("ExceptionList: %x\n", TrapFrame->ExceptionList);
    DPRINT1("SegFs: %x\n", TrapFrame->SegFs);
    DPRINT1("Edi: %x\n", TrapFrame->Edi);
    DPRINT1("Esi: %x\n", TrapFrame->Esi);
    DPRINT1("Ebx: %x\n", TrapFrame->Ebx);
    DPRINT1("Ebp: %x\n", TrapFrame->Ebp);
    DPRINT1("ErrCode: %x\n", TrapFrame->ErrCode);
    DPRINT1("Eip: %x\n", TrapFrame->Eip);
    DPRINT1("SegCs: %x\n", TrapFrame->SegCs);
    DPRINT1("EFlags: %x\n", TrapFrame->EFlags);
    DPRINT1("HardwareEsp: %x\n", TrapFrame->HardwareEsp);
    DPRINT1("HardwareSegSs: %x\n", TrapFrame->HardwareSegSs);
    DPRINT1("V86Es: %x\n", TrapFrame->V86Es);
    DPRINT1("V86Ds: %x\n", TrapFrame->V86Ds);
    DPRINT1("V86Fs: %x\n", TrapFrame->V86Fs);
    DPRINT1("V86Gs: %x\n", TrapFrame->V86Gs);
}
53
54 #if YDEBUG
FORCEINLINE
VOID
KiFillTrapFrameDebug(IN PKTRAP_FRAME TrapFrame)
{
    /*
     * Stamp the debug fields of a freshly built trap frame: the magic
     * marker 0xBADB0D00 lets the exit-path checks recognize a frame that
     * was built through this path, and snapshots of EDX/EIP/EBP are saved
     * so KiDumpTrapFrame can show them later.
     */
    /* Set the debug information */
    TrapFrame->DbgArgPointer = TrapFrame->Edx;
    TrapFrame->DbgArgMark = 0xBADB0D00;
    TrapFrame->DbgEip = TrapFrame->Eip;
    TrapFrame->DbgEbp = TrapFrame->Ebp;
}
65
66 FORCEINLINE
67 VOID
68 KiExitTrapDebugChecks(IN PKTRAP_FRAME TrapFrame,
69 IN KTRAP_STATE_BITS SkipBits)
70 {
71 /* Make sure interrupts are disabled */
72 if (__readeflags() & EFLAGS_INTERRUPT_MASK)
73 {
74 DPRINT1("Exiting with interrupts enabled: %lx\n", __readeflags());
75 while (TRUE);
76 }
77
78 /* Make sure this is a real trap frame */
79 if (TrapFrame->DbgArgMark != 0xBADB0D00)
80 {
81 DPRINT1("Exiting with an invalid trap frame? (No MAGIC in trap frame)\n");
82 KiDumpTrapFrame(TrapFrame);
83 while (TRUE);
84 }
85
86 /* Make sure we're not in user-mode or something */
87 if (Ke386GetFs() != KGDT_R0_PCR)
88 {
89 DPRINT1("Exiting with an invalid FS: %lx\n", Ke386GetFs());
90 while (TRUE);
91 }
92
93 /* Make sure we have a valid SEH chain */
94 if (KeGetPcr()->Tib.ExceptionList == 0)
95 {
96 DPRINT1("Exiting with NULL exception chain: %p\n", KeGetPcr()->Tib.ExceptionList);
97 while (TRUE);
98 }
99
100 /* Make sure we're restoring a valid SEH chain */
101 if (TrapFrame->ExceptionList == 0)
102 {
103 DPRINT1("Entered a trap with a NULL exception chain: %p\n", TrapFrame->ExceptionList);
104 while (TRUE);
105 }
106
107 /* If we're ignoring previous mode, make sure caller doesn't actually want it */
108 if ((SkipBits.SkipPreviousMode) && (TrapFrame->PreviousPreviousMode != -1))
109 {
110 DPRINT1("Exiting a trap witout restoring previous mode, yet previous mode seems valid: %lx", TrapFrame->PreviousPreviousMode);
111 while (TRUE);
112 }
113 }
114
FORCEINLINE
VOID
KiExitSystemCallDebugChecks(IN ULONG SystemCall,
                            IN PKTRAP_FRAME TrapFrame)
{
    /*
     * Sanity checks run when returning from a system call to user mode.
     * Bugchecks with IRQL_GT_ZERO_AT_SYSTEM_SERVICE if IRQL was left
     * elevated, and with APC_INDEX_MISMATCH if the thread is still
     * attached to another process or left APCs disabled. Kernel-mode
     * callers are exempt from both checks.
     */
    KIRQL OldIrql;

    /* Check if this was a user call */
    if (KiUserMode(TrapFrame))
    {
        /* Make sure we are not returning with elevated IRQL */
        OldIrql = KeGetCurrentIrql();
        if (OldIrql != PASSIVE_LEVEL)
        {
            /* Forcibly put us in a sane state */
            /* IRQL is reset and interrupts disabled before bugchecking so
               the bugcheck path itself runs in a predictable state */
            KeGetPcr()->CurrentIrql = PASSIVE_LEVEL;
            _disable();

            /* Fail */
            KeBugCheckEx(IRQL_GT_ZERO_AT_SYSTEM_SERVICE,
                         SystemCall,
                         OldIrql,
                         0,
                         0);
        }

        /* Make sure we're not attached and that APCs are not disabled */
        if ((KeGetCurrentThread()->ApcStateIndex != CurrentApcEnvironment) ||
            (KeGetCurrentThread()->CombinedApcDisable != 0))
        {
            /* Fail */
            KeBugCheckEx(APC_INDEX_MISMATCH,
                         SystemCall,
                         KeGetCurrentThread()->ApcStateIndex,
                         KeGetCurrentThread()->CombinedApcDisable,
                         0);
        }
    }
}
#else
/* Non-debug builds: all trap debug checks compile away to nothing */
#define KiExitTrapDebugChecks(x, y)
#define KiFillTrapFrameDebug(x)
#define KiExitSystemCallDebugChecks(x, y)
#endif
159
160 //
161 // Helper Code
162 //
163 BOOLEAN
164 FORCEINLINE
165 KiUserTrap(IN PKTRAP_FRAME TrapFrame)
166 {
167 /* Anything else but Ring 0 is Ring 3 */
168 return (TrapFrame->SegCs & MODE_MASK);
169 }
170
171 BOOLEAN
172 FORCEINLINE
173 KiVdmTrap(IN PKTRAP_FRAME TrapFrame)
174 {
175 /* Either the V8086 flag is on, or this is user-mode with a VDM */
176 return ((TrapFrame->EFlags & EFLAGS_V86_MASK) ||
177 ((KiUserTrap(TrapFrame)) && (PsGetCurrentProcess()->VdmObjects)));
178 }
179
VOID
FORCEINLINE
KiCheckForApcDelivery(IN PKTRAP_FRAME TrapFrame)
{
    /*
     * Deliver any pending user-mode APCs before returning from a trap.
     * Only runs for traps that came from V8086 or user mode; kernel-mode
     * traps return immediately. The loop re-checks UserApcPending after
     * each delivery because delivering an APC can queue further APCs.
     */
    PKTHREAD Thread;
    KIRQL OldIrql;

    /* Check for V8086 or user-mode trap */
    if ((TrapFrame->EFlags & EFLAGS_V86_MASK) ||
        (KiUserTrap(TrapFrame)))
    {
        /* Get the thread */
        Thread = KeGetCurrentThread();
        while (TRUE)
        {
            /* Turn off the alerted state for kernel mode */
            Thread->Alerted[KernelMode] = FALSE;

            /* Are there pending user APCs? */
            if (!Thread->ApcState.UserApcPending) break;

            /* Raise to APC level and enable interrupts */
            /* KiDeliverApc must run at APC_LEVEL with interrupts on */
            OldIrql = KfRaiseIrql(APC_LEVEL);
            _enable();

            /* Deliver APCs */
            KiDeliverApc(UserMode, NULL, TrapFrame);

            /* Restore IRQL and disable interrupts once again */
            KfLowerIrql(OldIrql);
            _disable();
        }
    }
}
214
VOID
FORCEINLINE
DECLSPEC_NORETURN
KiDispatchException0Args(IN NTSTATUS Code,
                         IN ULONG_PTR Address,
                         IN PKTRAP_FRAME TrapFrame)
{
    /* Helper for exceptions with no arguments; does not return */
    KiDispatchExceptionFromTrapFrame(Code, Address, 0, 0, 0, 0, TrapFrame);
}
225
VOID
FORCEINLINE
DECLSPEC_NORETURN
KiDispatchException1Args(IN NTSTATUS Code,
                         IN ULONG_PTR Address,
                         IN ULONG P1,
                         IN PKTRAP_FRAME TrapFrame)
{
    /* Helper for exceptions with one argument; does not return */
    KiDispatchExceptionFromTrapFrame(Code, Address, 1, P1, 0, 0, TrapFrame);
}
237
VOID
FORCEINLINE
DECLSPEC_NORETURN
KiDispatchException2Args(IN NTSTATUS Code,
                         IN ULONG_PTR Address,
                         IN ULONG P1,
                         IN ULONG P2,
                         IN PKTRAP_FRAME TrapFrame)
{
    /* Helper for exceptions with two arguments; does not return */
    KiDispatchExceptionFromTrapFrame(Code, Address, 2, P1, P2, 0, TrapFrame);
}
250
FORCEINLINE
VOID
DECLSPEC_NORETURN
KiSystemCallReturn(IN PKTRAP_FRAME TrapFrame)
{
    /*
     * Return from a system call that originated in kernel mode: point ESP
     * at the trap frame, reload the nonvolatiles (EBX/ESI/EDI/EBP) and
     * EAX, fetch the saved EIP into EDX, discard the frame up to the Esp
     * field, and jump straight back to the caller. No IRET is needed
     * because no ring transition took place.
     */
    /* Restore nonvolatiles, EAX, and do a "jump" back to the kernel caller */
    __asm__ __volatile__
    (
        "movl %0, %%esp\n"
        "movl %c[b](%%esp), %%ebx\n"
        "movl %c[s](%%esp), %%esi\n"
        "movl %c[i](%%esp), %%edi\n"
        "movl %c[p](%%esp), %%ebp\n"
        "movl %c[a](%%esp), %%eax\n"
        "movl %c[e](%%esp), %%edx\n"
        "addl $%c[v],%%esp\n" /* A WHOLE *KERNEL* frame since we're not IRET'ing */
        "jmp *%%edx\n"
        :
        : "r"(TrapFrame),
          [b] "i"(KTRAP_FRAME_EBX),
          [s] "i"(KTRAP_FRAME_ESI),
          [i] "i"(KTRAP_FRAME_EDI),
          [p] "i"(KTRAP_FRAME_EBP),
          [a] "i"(KTRAP_FRAME_EAX),
          [e] "i"(KTRAP_FRAME_EIP),
          [v] "i"(KTRAP_FRAME_ESP)
        : "%esp"
    );
    /* Never reached: the asm above jumps away. NOTE(review): exit(0) is
       presumably here only to satisfy DECLSPEC_NORETURN -- confirm */
    exit(0);
}
281
FORCEINLINE
VOID
DECLSPEC_NORETURN
KiSystemCallTrapReturn(IN PKTRAP_FRAME TrapFrame)
{
    /*
     * Exit a system call through a full IRET: restore the nonvolatiles
     * (EBX/ESI/EDI/EBP) plus EAX (the status return value), pop the frame
     * up to the hardware-pushed portion starting at Eip, and IRET. ECX
     * and EDX are deliberately not restored -- EAX is the only volatile
     * a system call preserves.
     */
    /* Regular interrupt exit, but we only restore EAX as a volatile */
    __asm__ __volatile__
    (
        "movl %0, %%esp\n"
        "movl %c[b](%%esp), %%ebx\n"
        "movl %c[s](%%esp), %%esi\n"
        "movl %c[i](%%esp), %%edi\n"
        "movl %c[p](%%esp), %%ebp\n"
        "movl %c[a](%%esp), %%eax\n"
        "addl $%c[e],%%esp\n"
        "iret\n"
        :
        : "r"(TrapFrame),
          [b] "i"(KTRAP_FRAME_EBX),
          [s] "i"(KTRAP_FRAME_ESI),
          [i] "i"(KTRAP_FRAME_EDI),
          [p] "i"(KTRAP_FRAME_EBP),
          [a] "i"(KTRAP_FRAME_EAX),
          [e] "i"(KTRAP_FRAME_EIP)
        : "%esp"
    );
    /* Never reached: IRET leaves this function. NOTE(review): exit(0) is
       presumably here only to satisfy DECLSPEC_NORETURN -- confirm */
    exit(0);
}
310
FORCEINLINE
VOID
DECLSPEC_NORETURN
KiSystemCallSysExitReturn(IN PKTRAP_FRAME TrapFrame)
{
    /*
     * Exit a system call through SYSEXIT: restore nonvolatiles and EAX,
     * then load the registers SYSEXIT requires -- saved EIP in EDX and
     * saved ESP in ECX -- pop the entire user frame (up to the V86Es
     * field), and execute STI;SYSEXIT. STI immediately before SYSEXIT
     * re-enables interrupts for the return to user mode.
     */
    /* Restore nonvolatiles, EAX, and do a SYSEXIT back to the user caller */
    __asm__ __volatile__
    (
        "movl %0, %%esp\n"
        "movl %c[s](%%esp), %%esi\n"
        "movl %c[b](%%esp), %%ebx\n"
        "movl %c[i](%%esp), %%edi\n"
        "movl %c[p](%%esp), %%ebp\n"
        "movl %c[a](%%esp), %%eax\n"
        "movl %c[e](%%esp), %%edx\n" /* SYSEXIT says EIP in EDX */
        "movl %c[x](%%esp), %%ecx\n" /* SYSEXIT says ESP in ECX */
        "addl $%c[v],%%esp\n" /* A WHOLE *USER* frame since we're not IRET'ing */
        "sti\nsysexit\n"
        :
        : "r"(TrapFrame),
          [b] "i"(KTRAP_FRAME_EBX),
          [s] "i"(KTRAP_FRAME_ESI),
          [i] "i"(KTRAP_FRAME_EDI),
          [p] "i"(KTRAP_FRAME_EBP),
          [a] "i"(KTRAP_FRAME_EAX),
          [e] "i"(KTRAP_FRAME_EIP),
          [x] "i"(KTRAP_FRAME_ESP),
          [v] "i"(KTRAP_FRAME_V86_ES)
        : "%esp"
    );
    /* Never reached: SYSEXIT leaves this function. NOTE(review): exit(0)
       is presumably here only to satisfy DECLSPEC_NORETURN -- confirm */
    exit(0);
}
343
FORCEINLINE
VOID
DECLSPEC_NORETURN
KiTrapReturn(IN PKTRAP_FRAME TrapFrame)
{
    /*
     * Full interrupt exit: restore every general-purpose register saved
     * in the trap frame (volatiles and nonvolatiles alike), pop the frame
     * up to the hardware-pushed portion starting at Eip, and IRET.
     */
    /* Regular interrupt exit */
    __asm__ __volatile__
    (
        "movl %0, %%esp\n"
        "movl %c[a](%%esp), %%eax\n"
        "movl %c[b](%%esp), %%ebx\n"
        "movl %c[c](%%esp), %%ecx\n"
        "movl %c[d](%%esp), %%edx\n"
        "movl %c[s](%%esp), %%esi\n"
        "movl %c[i](%%esp), %%edi\n"
        "movl %c[p](%%esp), %%ebp\n"
        "addl $%c[e],%%esp\n"
        "iret\n"
        :
        : "r"(TrapFrame),
          [a] "i"(KTRAP_FRAME_EAX),
          [b] "i"(KTRAP_FRAME_EBX),
          [c] "i"(KTRAP_FRAME_ECX),
          [d] "i"(KTRAP_FRAME_EDX),
          [s] "i"(KTRAP_FRAME_ESI),
          [i] "i"(KTRAP_FRAME_EDI),
          [p] "i"(KTRAP_FRAME_EBP),
          [e] "i"(KTRAP_FRAME_EIP)
        : "%esp"
    );
    /* Never reached: IRET leaves this function. NOTE(review): exit(0) is
       presumably here only to satisfy DECLSPEC_NORETURN -- confirm */
    exit(0);
}
376
FORCEINLINE
VOID
DECLSPEC_NORETURN
KiEditedTrapReturn(IN PKTRAP_FRAME TrapFrame)
{
    /*
     * Interrupt exit for an "edited" frame: like KiTrapReturn, but the
     * frame's ErrCode slot has been overwritten with a new stack pointer.
     * After restoring all GPRs, ESP is advanced only up to the ErrCode
     * slot, the edited ESP is loaded from there ("movl (%esp), %esp"),
     * and then IRET runs off that stack.
     */
    /* Regular interrupt exit */
    __asm__ __volatile__
    (
        "movl %0, %%esp\n"
        "movl %c[a](%%esp), %%eax\n"
        "movl %c[b](%%esp), %%ebx\n"
        "movl %c[c](%%esp), %%ecx\n"
        "movl %c[d](%%esp), %%edx\n"
        "movl %c[s](%%esp), %%esi\n"
        "movl %c[i](%%esp), %%edi\n"
        "movl %c[p](%%esp), %%ebp\n"
        "addl $%c[e],%%esp\n"
        "movl (%%esp), %%esp\n"
        "iret\n"
        :
        : "r"(TrapFrame),
          [a] "i"(KTRAP_FRAME_EAX),
          [b] "i"(KTRAP_FRAME_EBX),
          [c] "i"(KTRAP_FRAME_ECX),
          [d] "i"(KTRAP_FRAME_EDX),
          [s] "i"(KTRAP_FRAME_ESI),
          [i] "i"(KTRAP_FRAME_EDI),
          [p] "i"(KTRAP_FRAME_EBP),
          [e] "i"(KTRAP_FRAME_ERROR_CODE) /* We *WANT* the error code since ESP is there! */
        : "%esp"
    );
    /* Never reached: IRET leaves this function. NOTE(review): exit(0) is
       presumably here only to satisfy DECLSPEC_NORETURN -- confirm */
    exit(0);
}
410
NTSTATUS
FORCEINLINE
KiSystemCallTrampoline(IN PVOID Handler,
                       IN PVOID Arguments,
                       IN ULONG StackBytes)
{
    NTSTATUS Result;

    /*
     * This sequence does a RtlCopyMemory(Stack - StackBytes, Arguments, StackBytes)
     * and then calls the function associated with the system call.
     *
     * It's done in assembly for two reasons: we need to muck with the stack,
     * and the call itself restores the stack back for us. The only way to do
     * this in C is to do manual C handlers for every possible number of args on
     * the stack, and then have the handler issue a call by pointer. This is
     * wasteful since it'll basically push the values twice and require another
     * level of call indirection.
     *
     * The ARM kernel currently does this, but it should probably be changed
     * later to function like this as well.
     *
     */
    /*
     * NOTE(review): %1 (StackBytes, pinned to ECX) is declared as an
     * input-only operand but is modified by "shrl $2, %1" below, which
     * relies on the compiler not reusing that register afterwards --
     * verify the constraint on newer GCC versions. Also assumes
     * StackBytes is a multiple of 4 and that the direction flag is clear
     * for "rep movsd".
     */
    __asm__ __volatile__
    (
        "subl %1, %%esp\n"
        "movl %%esp, %%edi\n"
        "movl %2, %%esi\n"
        "shrl $2, %1\n"
        "rep movsd\n"
        "call *%3\n"
        "movl %%eax, %0\n"
        : "=r"(Result)
        : "c"(StackBytes),
          "d"(Arguments),
          "r"(Handler)
        : "%esp", "%esi", "%edi"
    );

    return Result;
}
452
NTSTATUS
FORCEINLINE
KiConvertToGuiThread(VOID)
{
    NTSTATUS Result;
    PVOID StackFrame;

    /*
     * Converting to a GUI thread safely updates ESP in-place as well as the
     * current Thread->TrapFrame and EBP when KeSwitchKernelStack is called.
     *
     * However, PsConvertToGuiThread "helpfully" restores EBP to the original
     * caller's value, since it is considered a nonvolatile register. As such,
     * as soon as we're back after the conversion and we try to store the result
     * which will probably be in some stack variable (EBP-based), we'll crash as
     * we are touching the de-allocated non-expanded stack.
     *
     * Thus we need a way to update our EBP before EBP is touched, and the only
     * way to guarantee this is to do the call itself in assembly, use the EAX
     * register to store the result, fixup EBP, and then let the C code continue
     * on its merry way.
     *
     */
    /*
     * The asm saves the frame size (EBP - ESP) before the call, then
     * rebuilds EBP as (new ESP + saved size) afterwards, so the frame
     * offset survives the stack switch. The NTSTATUS comes back in EAX.
     */
    __asm__ __volatile__
    (
        "movl %%ebp, %1\n"
        "subl %%esp, %1\n"
        "call _PsConvertToGuiThread@0\n"
        "addl %%esp, %1\n"
        "movl %1, %%ebp\n"
        "movl %%eax, %0\n"
        : "=r"(Result), "=r"(StackFrame)
        :
        : "%esp", "%ecx", "%edx"
    );

    return Result;
}
491
VOID
FORCEINLINE
KiSwitchToBootStack(IN ULONG_PTR InitialStack)
{
    /*
     * Switch to the boot stack and continue kernel initialization:
     * load the new ESP, reserve room for the NPX save area plus an
     * aligned trap frame, push the CR0 NPX bits (CR0_EM|CR0_TS|CR0_MP,
     * presumably consumed by KiSystemStartupBootStack -- confirm), and
     * jump there. The jump never returns here.
     */
    /* We have to switch to a new stack before continuing kernel initialization */
    __asm__ __volatile__
    (
        "movl %0, %%esp\n"
        "subl %1, %%esp\n"
        "pushl %2\n"
        "jmp _KiSystemStartupBootStack@0\n"
        :
        : "c"(InitialStack),
          "i"(NPX_FRAME_LENGTH + KTRAP_FRAME_ALIGN + KTRAP_FRAME_LENGTH),
          "i"(CR0_EM | CR0_TS | CR0_MP)
        : "%esp"
    );
}