Merge from amd64-branch:
[reactos.git] / reactos / ntoskrnl / include / internal / trap_x.h
1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/include/internal/trap_x.h
5 * PURPOSE: Internal Inlined Functions for the Trap Handling Code
6 * PROGRAMMERS: ReactOS Portable Systems Group
7 */
8
9 //
10 // Debug Macros
11 //
12 VOID
13 FORCEINLINE
14 KiDumpTrapFrame(IN PKTRAP_FRAME TrapFrame)
15 {
16 /* Dump the whole thing */
17 DPRINT1("DbgEbp: %x\n", TrapFrame->DbgEbp);
18 DPRINT1("DbgEip: %x\n", TrapFrame->DbgEip);
19 DPRINT1("DbgArgMark: %x\n", TrapFrame->DbgArgMark);
20 DPRINT1("DbgArgPointer: %x\n", TrapFrame->DbgArgPointer);
21 DPRINT1("TempSegCs: %x\n", TrapFrame->TempSegCs);
22 DPRINT1("TempEsp: %x\n", TrapFrame->TempEsp);
23 DPRINT1("Dr0: %x\n", TrapFrame->Dr0);
24 DPRINT1("Dr1: %x\n", TrapFrame->Dr1);
25 DPRINT1("Dr2: %x\n", TrapFrame->Dr2);
26 DPRINT1("Dr3: %x\n", TrapFrame->Dr3);
27 DPRINT1("Dr6: %x\n", TrapFrame->Dr6);
28 DPRINT1("Dr7: %x\n", TrapFrame->Dr7);
29 DPRINT1("SegGs: %x\n", TrapFrame->SegGs);
30 DPRINT1("SegEs: %x\n", TrapFrame->SegEs);
31 DPRINT1("SegDs: %x\n", TrapFrame->SegDs);
32 DPRINT1("Edx: %x\n", TrapFrame->Edx);
33 DPRINT1("Ecx: %x\n", TrapFrame->Ecx);
34 DPRINT1("Eax: %x\n", TrapFrame->Eax);
35 DPRINT1("PreviousPreviousMode: %x\n", TrapFrame->PreviousPreviousMode);
36 DPRINT1("ExceptionList: %x\n", TrapFrame->ExceptionList);
37 DPRINT1("SegFs: %x\n", TrapFrame->SegFs);
38 DPRINT1("Edi: %x\n", TrapFrame->Edi);
39 DPRINT1("Esi: %x\n", TrapFrame->Esi);
40 DPRINT1("Ebx: %x\n", TrapFrame->Ebx);
41 DPRINT1("Ebp: %x\n", TrapFrame->Ebp);
42 DPRINT1("ErrCode: %x\n", TrapFrame->ErrCode);
43 DPRINT1("Eip: %x\n", TrapFrame->Eip);
44 DPRINT1("SegCs: %x\n", TrapFrame->SegCs);
45 DPRINT1("EFlags: %x\n", TrapFrame->EFlags);
46 DPRINT1("HardwareEsp: %x\n", TrapFrame->HardwareEsp);
47 DPRINT1("HardwareSegSs: %x\n", TrapFrame->HardwareSegSs);
48 DPRINT1("V86Es: %x\n", TrapFrame->V86Es);
49 DPRINT1("V86Ds: %x\n", TrapFrame->V86Ds);
50 DPRINT1("V86Fs: %x\n", TrapFrame->V86Fs);
51 DPRINT1("V86Gs: %x\n", TrapFrame->V86Gs);
52 }
53
54 #if YDEBUG
55 FORCEINLINE
56 VOID
57 KiFillTrapFrameDebug(IN PKTRAP_FRAME TrapFrame)
58 {
59 /* Set the debug information */
60 TrapFrame->DbgArgPointer = TrapFrame->Edx;
61 TrapFrame->DbgArgMark = 0xBADB0D00;
62 TrapFrame->DbgEip = TrapFrame->Eip;
63 TrapFrame->DbgEbp = TrapFrame->Ebp;
64 }
65
66 FORCEINLINE
67 VOID
68 KiExitTrapDebugChecks(IN PKTRAP_FRAME TrapFrame,
69 IN KTRAP_STATE_BITS SkipBits)
70 {
71 /* Make sure interrupts are disabled */
72 if (__readeflags() & EFLAGS_INTERRUPT_MASK)
73 {
74 DPRINT1("Exiting with interrupts enabled: %lx\n", __readeflags());
75 while (TRUE);
76 }
77
78 /* Make sure this is a real trap frame */
79 if (TrapFrame->DbgArgMark != 0xBADB0D00)
80 {
81 DPRINT1("Exiting with an invalid trap frame? (No MAGIC in trap frame)\n");
82 KiDumpTrapFrame(TrapFrame);
83 while (TRUE);
84 }
85
86 /* Make sure we're not in user-mode or something */
87 if (Ke386GetFs() != KGDT_R0_PCR)
88 {
89 DPRINT1("Exiting with an invalid FS: %lx\n", Ke386GetFs());
90 while (TRUE);
91 }
92
93 /* Make sure we have a valid SEH chain */
94 if (KeGetPcr()->Tib.ExceptionList == 0)
95 {
96 DPRINT1("Exiting with NULL exception chain: %p\n", KeGetPcr()->Tib.ExceptionList);
97 while (TRUE);
98 }
99
100 /* Make sure we're restoring a valid SEH chain */
101 if (TrapFrame->ExceptionList == 0)
102 {
103 DPRINT1("Entered a trap with a NULL exception chain: %p\n", TrapFrame->ExceptionList);
104 while (TRUE);
105 }
106
107 /* If we're ignoring previous mode, make sure caller doesn't actually want it */
108 if ((SkipBits.SkipPreviousMode) && (TrapFrame->PreviousPreviousMode != -1))
109 {
110 DPRINT1("Exiting a trap witout restoring previous mode, yet previous mode seems valid: %lx", TrapFrame->PreviousPreviousMode);
111 while (TRUE);
112 }
113 }
114
115 FORCEINLINE
116 VOID
117 KiExitSystemCallDebugChecks(IN ULONG SystemCall,
118 IN PKTRAP_FRAME TrapFrame)
119 {
120 KIRQL OldIrql;
121
122 /* Check if this was a user call */
123 if (KiUserMode(TrapFrame))
124 {
125 /* Make sure we are not returning with elevated IRQL */
126 OldIrql = KeGetCurrentIrql();
127 if (OldIrql != PASSIVE_LEVEL)
128 {
129 /* Forcibly put us in a sane state */
130 KeGetPcr()->CurrentIrql = PASSIVE_LEVEL;
131 _disable();
132
133 /* Fail */
134 KeBugCheckEx(IRQL_GT_ZERO_AT_SYSTEM_SERVICE,
135 SystemCall,
136 OldIrql,
137 0,
138 0);
139 }
140
141 /* Make sure we're not attached and that APCs are not disabled */
142 if ((KeGetCurrentThread()->ApcStateIndex != CurrentApcEnvironment) ||
143 (KeGetCurrentThread()->CombinedApcDisable != 0))
144 {
145 /* Fail */
146 KeBugCheckEx(APC_INDEX_MISMATCH,
147 SystemCall,
148 KeGetCurrentThread()->ApcStateIndex,
149 KeGetCurrentThread()->CombinedApcDisable,
150 0);
151 }
152 }
153 }
154 #else
155 #define KiExitTrapDebugChecks(x, y)
156 #define KiFillTrapFrameDebug(x)
157 #define KiExitSystemCallDebugChecks(x, y)
158 #endif
159
160 //
161 // Helper Code
162 //
163 BOOLEAN
164 FORCEINLINE
165 KiUserTrap(IN PKTRAP_FRAME TrapFrame)
166 {
167 /* Anything else but Ring 0 is Ring 3 */
168 return (TrapFrame->SegCs != KGDT_R0_CODE);
169 }
170
171 BOOLEAN
172 FORCEINLINE
173 KiVdmTrap(IN PKTRAP_FRAME TrapFrame)
174 {
175 /* Either the V8086 flag is on, or this is user-mode with a VDM */
176 return ((TrapFrame->EFlags & EFLAGS_V86_MASK) ||
177 ((KiUserTrap(TrapFrame)) && (PsGetCurrentProcess()->VdmObjects)));
178 }
179
180 VOID
181 FORCEINLINE
182 KiCheckForApcDelivery(IN PKTRAP_FRAME TrapFrame)
183 {
184 PKTHREAD Thread;
185 KIRQL OldIrql;
186
187 /* Check for V8086 or user-mode trap */
188 if ((TrapFrame->EFlags & EFLAGS_V86_MASK) ||
189 (KiUserTrap(TrapFrame)))
190 {
191 /* Get the thread */
192 Thread = KeGetCurrentThread();
193 while (TRUE)
194 {
195 /* Turn off the alerted state for kernel mode */
196 Thread->Alerted[KernelMode] = FALSE;
197
198 /* Are there pending user APCs? */
199 if (!Thread->ApcState.UserApcPending) break;
200
201 /* Raise to APC level and enable interrupts */
202 OldIrql = KfRaiseIrql(APC_LEVEL);
203 _enable();
204
205 /* Deliver APCs */
206 KiDeliverApc(UserMode, NULL, TrapFrame);
207
208 /* Restore IRQL and disable interrupts once again */
209 KfLowerIrql(OldIrql);
210 _disable();
211 }
212 }
213 }
214
215 VOID
216 FORCEINLINE
217 KiDispatchException0Args(IN NTSTATUS Code,
218 IN ULONG_PTR Address,
219 IN PKTRAP_FRAME TrapFrame)
220 {
221 /* Helper for exceptions with no arguments */
222 KiDispatchExceptionFromTrapFrame(Code, Address, 0, 0, 0, 0, TrapFrame);
223 }
224
225 VOID
226 FORCEINLINE
227 KiDispatchException1Args(IN NTSTATUS Code,
228 IN ULONG_PTR Address,
229 IN ULONG P1,
230 IN PKTRAP_FRAME TrapFrame)
231 {
232 /* Helper for exceptions with no arguments */
233 KiDispatchExceptionFromTrapFrame(Code, Address, 1, P1, 0, 0, TrapFrame);
234 }
235
236 VOID
237 FORCEINLINE
238 KiDispatchException2Args(IN NTSTATUS Code,
239 IN ULONG_PTR Address,
240 IN ULONG P1,
241 IN ULONG P2,
242 IN PKTRAP_FRAME TrapFrame)
243 {
244 /* Helper for exceptions with no arguments */
245 KiDispatchExceptionFromTrapFrame(Code, Address, 2, P1, P2, 0, TrapFrame);
246 }
247
/*
 * Exit path for a system call made from kernel mode: no ring transition is
 * needed, so instead of IRET we restore the nonvolatile registers plus EAX
 * (the NTSTATUS result) and jump directly back to the saved EIP via EDX.
 * ESP is pointed at the trap frame itself, registers are loaded from their
 * KTRAP_FRAME_* byte offsets, and the final addl discards the whole
 * kernel-sized frame (up to the Esp member) before the jump. Never returns.
 */
FORCEINLINE
VOID
KiSystemCallReturn(IN PKTRAP_FRAME TrapFrame)
{
    /* Restore nonvolatiles, EAX, and do a "jump" back to the kernel caller */
    __asm__ __volatile__
    (
        "movl %0, %%esp\n"
        "movl %c[b](%%esp), %%ebx\n"
        "movl %c[s](%%esp), %%esi\n"
        "movl %c[i](%%esp), %%edi\n"
        "movl %c[p](%%esp), %%ebp\n"
        "movl %c[a](%%esp), %%eax\n"
        "movl %c[e](%%esp), %%edx\n"
        "addl $%c[v],%%esp\n" /* A WHOLE *KERNEL* frame since we're not IRET'ing */
        "jmp *%%edx\n"
        :
        : "r"(TrapFrame),
          [b] "i"(KTRAP_FRAME_EBX),
          [s] "i"(KTRAP_FRAME_ESI),
          [i] "i"(KTRAP_FRAME_EDI),
          [p] "i"(KTRAP_FRAME_EBP),
          [a] "i"(KTRAP_FRAME_EAX),
          [e] "i"(KTRAP_FRAME_EIP),
          [v] "i"(KTRAP_FRAME_ESP)
        : "%esp"
    );
}
276
/*
 * Exit path for a system call that must return with a full trap exit: ESP is
 * pointed at the trap frame, the nonvolatiles plus EAX (the only volatile
 * restored here — it carries the NTSTATUS result) are reloaded from their
 * KTRAP_FRAME_* offsets, then ESP is advanced to the Eip member so that IRET
 * pops Eip/SegCs/EFlags (and HardwareEsp/SegSs on an inter-ring return).
 * Never returns.
 */
FORCEINLINE
VOID
KiSystemCallTrapReturn(IN PKTRAP_FRAME TrapFrame)
{
    /* Regular interrupt exit, but we only restore EAX as a volatile */
    __asm__ __volatile__
    (
        "movl %0, %%esp\n"
        "movl %c[b](%%esp), %%ebx\n"
        "movl %c[s](%%esp), %%esi\n"
        "movl %c[i](%%esp), %%edi\n"
        "movl %c[p](%%esp), %%ebp\n"
        "movl %c[a](%%esp), %%eax\n"
        "addl $%c[e],%%esp\n" /* Leave ESP at the Eip member for IRET */
        "iret\n"
        :
        : "r"(TrapFrame),
          [b] "i"(KTRAP_FRAME_EBX),
          [s] "i"(KTRAP_FRAME_ESI),
          [i] "i"(KTRAP_FRAME_EDI),
          [p] "i"(KTRAP_FRAME_EBP),
          [a] "i"(KTRAP_FRAME_EAX),
          [e] "i"(KTRAP_FRAME_EIP)
        : "%esp"
    );
}
303
/*
 * Fast exit path back to user mode via SYSEXIT: restores nonvolatiles and
 * EAX (the NTSTATUS result), then loads EDX with the return EIP and ECX with
 * the user ESP, as the SYSEXIT instruction requires. ESP is advanced past
 * the entire user-sized frame (up to V86Es). STI immediately before SYSEXIT
 * re-enables interrupts; SYSEXIT itself does not restore EFlags.
 * Never returns.
 */
FORCEINLINE
VOID
KiSystemCallSysExitReturn(IN PKTRAP_FRAME TrapFrame)
{
    /* Restore nonvolatiles, EAX, and do a SYSEXIT back to the user caller */
    __asm__ __volatile__
    (
        "movl %0, %%esp\n"
        "movl %c[s](%%esp), %%esi\n"
        "movl %c[b](%%esp), %%ebx\n"
        "movl %c[i](%%esp), %%edi\n"
        "movl %c[p](%%esp), %%ebp\n"
        "movl %c[a](%%esp), %%eax\n"
        "movl %c[e](%%esp), %%edx\n" /* SYSEXIT says EIP in EDX */
        "movl %c[x](%%esp), %%ecx\n" /* SYSEXIT says ESP in ECX */
        "addl $%c[v],%%esp\n" /* A WHOLE *USER* frame since we're not IRET'ing */
        "sti\nsysexit\n"
        :
        : "r"(TrapFrame),
          [b] "i"(KTRAP_FRAME_EBX),
          [s] "i"(KTRAP_FRAME_ESI),
          [i] "i"(KTRAP_FRAME_EDI),
          [p] "i"(KTRAP_FRAME_EBP),
          [a] "i"(KTRAP_FRAME_EAX),
          [e] "i"(KTRAP_FRAME_EIP),
          [x] "i"(KTRAP_FRAME_ESP),
          [v] "i"(KTRAP_FRAME_V86_ES)
        : "%esp"
    );
}
334
/*
 * Full interrupt/trap exit: points ESP at the trap frame, restores ALL
 * general-purpose registers (volatiles and nonvolatiles) from their
 * KTRAP_FRAME_* offsets, then advances ESP to the Eip member so IRET pops
 * Eip/SegCs/EFlags (and the user stack on an inter-ring return).
 * Never returns.
 */
FORCEINLINE
VOID
KiTrapReturn(IN PKTRAP_FRAME TrapFrame)
{
    /* Regular interrupt exit */
    __asm__ __volatile__
    (
        "movl %0, %%esp\n"
        "movl %c[a](%%esp), %%eax\n"
        "movl %c[b](%%esp), %%ebx\n"
        "movl %c[c](%%esp), %%ecx\n"
        "movl %c[d](%%esp), %%edx\n"
        "movl %c[s](%%esp), %%esi\n"
        "movl %c[i](%%esp), %%edi\n"
        "movl %c[p](%%esp), %%ebp\n"
        "addl $%c[e],%%esp\n" /* Leave ESP at the Eip member for IRET */
        "iret\n"
        :
        : "r"(TrapFrame),
          [a] "i"(KTRAP_FRAME_EAX),
          [b] "i"(KTRAP_FRAME_EBX),
          [c] "i"(KTRAP_FRAME_ECX),
          [d] "i"(KTRAP_FRAME_EDX),
          [s] "i"(KTRAP_FRAME_ESI),
          [i] "i"(KTRAP_FRAME_EDI),
          [p] "i"(KTRAP_FRAME_EBP),
          [e] "i"(KTRAP_FRAME_EIP)
        : "%esp"
    );
}
365
/*
 * Trap exit for an "edited" frame: like KiTrapReturn, restores all GPRs, but
 * ESP is advanced only to the ErrCode member — the error-code slot has been
 * edited to hold a pointer to the place to resume the stack from — and then
 * "movl (%%esp), %%esp" loads that value as the new ESP before IRET.
 * Never returns.
 */
FORCEINLINE
VOID
KiEditedTrapReturn(IN PKTRAP_FRAME TrapFrame)
{
    /* Regular interrupt exit */
    __asm__ __volatile__
    (
        "movl %0, %%esp\n"
        "movl %c[a](%%esp), %%eax\n"
        "movl %c[b](%%esp), %%ebx\n"
        "movl %c[c](%%esp), %%ecx\n"
        "movl %c[d](%%esp), %%edx\n"
        "movl %c[s](%%esp), %%esi\n"
        "movl %c[i](%%esp), %%edi\n"
        "movl %c[p](%%esp), %%ebp\n"
        "addl $%c[e],%%esp\n"
        "movl (%%esp), %%esp\n" /* The edited ErrCode slot holds the new ESP */
        "iret\n"
        :
        : "r"(TrapFrame),
          [a] "i"(KTRAP_FRAME_EAX),
          [b] "i"(KTRAP_FRAME_EBX),
          [c] "i"(KTRAP_FRAME_ECX),
          [d] "i"(KTRAP_FRAME_EDX),
          [s] "i"(KTRAP_FRAME_ESI),
          [i] "i"(KTRAP_FRAME_EDI),
          [p] "i"(KTRAP_FRAME_EBP),
          [e] "i"(KTRAP_FRAME_ERROR_CODE) /* We *WANT* the error code since ESP is there! */
        : "%esp"
    );
}
397
398 NTSTATUS
399 FORCEINLINE
400 KiSystemCallTrampoline(IN PVOID Handler,
401 IN PVOID Arguments,
402 IN ULONG StackBytes)
403 {
404 NTSTATUS Result;
405
406 /*
407 * This sequence does a RtlCopyMemory(Stack - StackBytes, Arguments, StackBytes)
408 * and then calls the function associated with the system call.
409 *
410 * It's done in assembly for two reasons: we need to muck with the stack,
411 * and the call itself restores the stack back for us. The only way to do
412 * this in C is to do manual C handlers for every possible number of args on
413 * the stack, and then have the handler issue a call by pointer. This is
414 * wasteful since it'll basically push the values twice and require another
415 * level of call indirection.
416 *
417 * The ARM kernel currently does this, but it should probably be changed
418 * later to function like this as well.
419 *
420 */
421 __asm__ __volatile__
422 (
423 "subl %1, %%esp\n"
424 "movl %%esp, %%edi\n"
425 "movl %2, %%esi\n"
426 "shrl $2, %1\n"
427 "rep movsd\n"
428 "call *%3\n"
429 "movl %%eax, %0\n"
430 : "=r"(Result)
431 : "c"(StackBytes),
432 "d"(Arguments),
433 "r"(Handler)
434 : "%esp", "%esi", "%edi"
435 );
436
437 return Result;
438 }
439
440 NTSTATUS
441 FORCEINLINE
442 KiConvertToGuiThread(VOID)
443 {
444 NTSTATUS Result;
445 PVOID StackFrame;
446
447 /*
448 * Converting to a GUI thread safely updates ESP in-place as well as the
449 * current Thread->TrapFrame and EBP when KeSwitchKernelStack is called.
450 *
451 * However, PsConvertToGuiThread "helpfully" restores EBP to the original
452 * caller's value, since it is considered a nonvolatile register. As such,
453 * as soon as we're back after the conversion and we try to store the result
454 * which will probably be in some stack variable (EBP-based), we'll crash as
455 * we are touching the de-allocated non-expanded stack.
456 *
457 * Thus we need a way to update our EBP before EBP is touched, and the only
458 * way to guarantee this is to do the call itself in assembly, use the EAX
459 * register to store the result, fixup EBP, and then let the C code continue
460 * on its merry way.
461 *
462 */
463 __asm__ __volatile__
464 (
465 "movl %%ebp, %1\n"
466 "subl %%esp, %1\n"
467 "call _PsConvertToGuiThread@0\n"
468 "addl %%esp, %1\n"
469 "movl %1, %%ebp\n"
470 "movl %%eax, %0\n"
471 : "=r"(Result), "=r"(StackFrame)
472 :
473 : "%esp", "%ecx", "%edx"
474 );
475
476 return Result;
477 }
478
479 VOID
480 FORCEINLINE
481 KiSwitchToBootStack(IN ULONG_PTR InitialStack)
482 {
483 /* We have to switch to a new stack before continuing kernel initialization */
484 __asm__ __volatile__
485 (
486 "movl %0, %%esp\n"
487 "subl %1, %%esp\n"
488 "pushl %2\n"
489 "jmp _KiSystemStartupBootStack@0\n"
490 :
491 : "c"(InitialStack),
492 "i"(NPX_FRAME_LENGTH + KTRAP_FRAME_ALIGN + KTRAP_FRAME_LENGTH),
493 "i"(CR0_EM | CR0_TS | CR0_MP)
494 : "%esp"
495 );
496 }