/*
 * File cpu_x86_64.c
 *
 * Copyright (C) 1999, 2005 Alexandre Julliard
 * Copyright (C) 2009 Eric Pouech.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */

#include <assert.h>

#define NONAMELESSUNION
#define NONAMELESSSTRUCT
#include "ntstatus.h"
#define WIN32_NO_STATUS
#include "dbghelp_private.h"
#include "winternl.h"
#include "wine/debug.h"

WINE_DEFAULT_DEBUG_CHANNEL(dbghelp);

/* x86-64 unwind information, for PE modules, as described on MSDN */

typedef enum _UNWIND_OP_CODES
{
    UWOP_PUSH_NONVOL = 0,
    UWOP_ALLOC_LARGE,
    UWOP_ALLOC_SMALL,
    UWOP_SET_FPREG,
    UWOP_SAVE_NONVOL,
    UWOP_SAVE_NONVOL_FAR,
    UWOP_SAVE_XMM128,
    UWOP_SAVE_XMM128_FAR,
    UWOP_PUSH_MACHFRAME
} UNWIND_CODE_OPS;

typedef union _UNWIND_CODE
{
    struct
    {
        BYTE CodeOffset;
        BYTE UnwindOp : 4;
        BYTE OpInfo   : 4;
    };
    USHORT FrameOffset;
} UNWIND_CODE, *PUNWIND_CODE;

typedef struct _UNWIND_INFO
{
    BYTE Version       : 3;
    BYTE Flags         : 5;
    BYTE SizeOfProlog;
    BYTE CountOfCodes;
    BYTE FrameRegister : 4;
    BYTE FrameOffset   : 4;
    UNWIND_CODE UnwindCode[1]; /* actually CountOfCodes (aligned) */
    /*
     * union
     * {
     *     OPTIONAL ULONG ExceptionHandler;
     *     OPTIONAL ULONG FunctionEntry;
     * };
     * OPTIONAL ULONG ExceptionData[];
     */
} UNWIND_INFO, *PUNWIND_INFO;

#define GetUnwindCodeEntry(info, index) \
    ((info)->UnwindCode[index])

#define GetLanguageSpecificDataPtr(info) \
    ((PVOID)&GetUnwindCodeEntry((info),((info)->CountOfCodes + 1) & ~1))

#define GetExceptionHandler(base, info) \
    ((PEXCEPTION_HANDLER)((base) + *(PULONG)GetLanguageSpecificDataPtr(info)))

#define GetChainedFunctionEntry(base, info) \
    ((PRUNTIME_FUNCTION)((base) + *(PULONG)GetLanguageSpecificDataPtr(info)))

#define GetExceptionDataPtr(info) \
    ((PVOID)((PULONG)GetLanguageSpecificDataPtr(info) + 1))

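/* Fill an ADDRESS64 (flat mode) with the pc, stack or frame pointer taken from
 * the given CONTEXT; returns FALSE when the value is not available (non
 * x86-64 builds). */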
static unsigned x86_64_get_addr(HANDLE hThread, const CONTEXT* ctx,
                                enum cpu_addr ca, ADDRESS64* addr)
{
    addr->Mode = AddrModeFlat;
    switch (ca)
    {
#ifdef __x86_64__
    case cpu_addr_pc:    addr->Segment = ctx->SegCs; addr->Offset = ctx->Rip; return TRUE;
    case cpu_addr_stack: addr->Segment = ctx->SegSs; addr->Offset = ctx->Rsp; return TRUE;
    case cpu_addr_frame: addr->Segment = ctx->SegSs; addr->Offset = ctx->Rbp; return TRUE;
#endif
    default: addr->Mode = -1;
             return FALSE;
    }
}

enum st_mode {stm_start, stm_64bit, stm_done};

/* indexes in Reserved array */
#define __CurrentMode   0
#define __CurrentSwitch 1
#define __NextSwitch    2

#define curr_mode   (frame->Reserved[__CurrentMode])
#define curr_switch (frame->Reserved[__CurrentSwitch])
#define next_switch (frame->Reserved[__NextSwitch])

#ifdef __x86_64__
union handler_data
{
    RUNTIME_FUNCTION chain;
    ULONG handler;
};

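/* Debug helper: read a function's UNWIND_INFO from the debuggee and TRACE each
 * unwind code as the prolog instruction it describes, following chained infos. */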
static void dump_unwind_info(HANDLE hProcess, ULONG64 base, RUNTIME_FUNCTION *function)
{
    static const char * const reg_names[16] =
        { "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
          "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15" };

    union handler_data *handler_data;
    char buffer[sizeof(UNWIND_INFO) + 256 * sizeof(UNWIND_CODE)];
    UNWIND_INFO* info = (UNWIND_INFO*)buffer;
    unsigned int i, count;
    SIZE_T r;

    TRACE("**** func %x-%x\n", function->BeginAddress, function->EndAddress);
    for (;;)
    {
        if (function->UnwindData & 1)
        {
#if 0
            RUNTIME_FUNCTION *next = (RUNTIME_FUNCTION*)((char*)base + (function->UnwindData & ~1));
            TRACE("unwind info for function %p-%p chained to function %p-%p\n",
                  (char*)base + function->BeginAddress, (char*)base + function->EndAddress,
                  (char*)base + next->BeginAddress, (char*)base + next->EndAddress);
            function = next;
            continue;
#else
            FIXME("NOT SUPPORTED\n");
#endif
        }
        ReadProcessMemory(hProcess, (char*)base + function->UnwindData, info, sizeof(*info), &r);
        ReadProcessMemory(hProcess, (char*)base + function->UnwindData + FIELD_OFFSET(UNWIND_INFO, UnwindCode),
                          info->UnwindCode, 256 * sizeof(UNWIND_CODE), &r);
        TRACE("unwind info at %p flags %x prolog 0x%x bytes function %p-%p\n",
              info, info->Flags, info->SizeOfProlog,
              (char*)base + function->BeginAddress, (char*)base + function->EndAddress);

        if (info->FrameRegister)
            TRACE(" frame register %s offset 0x%x(%%rsp)\n",
                  reg_names[info->FrameRegister], info->FrameOffset * 16);

        for (i = 0; i < info->CountOfCodes; i++)
        {
            TRACE(" 0x%x: ", info->UnwindCode[i].CodeOffset);
            switch (info->UnwindCode[i].UnwindOp)
            {
            case UWOP_PUSH_NONVOL:
                TRACE("pushq %%%s\n", reg_names[info->UnwindCode[i].OpInfo]);
                break;
            case UWOP_ALLOC_LARGE:
                if (info->UnwindCode[i].OpInfo)
                {
                    count = *(DWORD*)&info->UnwindCode[i+1];
                    i += 2;
                }
                else
                {
                    count = *(USHORT*)&info->UnwindCode[i+1] * 8;
                    i++;
                }
                TRACE("subq $0x%x,%%rsp\n", count);
                break;
            case UWOP_ALLOC_SMALL:
                count = (info->UnwindCode[i].OpInfo + 1) * 8;
                TRACE("subq $0x%x,%%rsp\n", count);
                break;
            case UWOP_SET_FPREG:
                TRACE("leaq 0x%x(%%rsp),%s\n",
                      info->FrameOffset * 16, reg_names[info->FrameRegister]);
                break;
            case UWOP_SAVE_NONVOL:
                count = *(USHORT*)&info->UnwindCode[i+1] * 8;
                TRACE("movq %%%s,0x%x(%%rsp)\n", reg_names[info->UnwindCode[i].OpInfo], count);
                i++;
                break;
            case UWOP_SAVE_NONVOL_FAR:
                count = *(DWORD*)&info->UnwindCode[i+1];
                TRACE("movq %%%s,0x%x(%%rsp)\n", reg_names[info->UnwindCode[i].OpInfo], count);
                i += 2;
                break;
            case UWOP_SAVE_XMM128:
                count = *(USHORT*)&info->UnwindCode[i+1] * 16;
                TRACE("movaps %%xmm%u,0x%x(%%rsp)\n", info->UnwindCode[i].OpInfo, count);
                i++;
                break;
            case UWOP_SAVE_XMM128_FAR:
                count = *(DWORD*)&info->UnwindCode[i+1];
                TRACE("movaps %%xmm%u,0x%x(%%rsp)\n", info->UnwindCode[i].OpInfo, count);
                i += 2;
                break;
            case UWOP_PUSH_MACHFRAME:
                TRACE("PUSH_MACHFRAME %u\n", info->UnwindCode[i].OpInfo);
                break;
            default:
                FIXME("unknown code %u\n", info->UnwindCode[i].UnwindOp);
                break;
            }
        }

        handler_data = (union handler_data*)&info->UnwindCode[(info->CountOfCodes + 1) & ~1];
        if (info->Flags & UNW_FLAG_CHAININFO)
        {
            TRACE(" chained to function %p-%p\n",
                  (char*)base + handler_data->chain.BeginAddress,
                  (char*)base + handler_data->chain.EndAddress);
            function = &handler_data->chain;
            continue;
        }
        if (info->Flags & (UNW_FLAG_EHANDLER | UNW_FLAG_UHANDLER))
            TRACE(" handler %p data at %p\n",
                  (char*)base + handler_data->handler, &handler_data->handler + 1);
        break;
    }
}

/* highly derived from dlls/ntdll/signal_x86_64.c */
static ULONG64 get_int_reg(CONTEXT *context, int reg)
{
    return *(&context->Rax + reg);
}

static void set_int_reg(CONTEXT *context, int reg, ULONG64 val)
{
    *(&context->Rax + reg) = val;
}

static void set_float_reg(CONTEXT *context, int reg, M128A val)
{
    *(&context->u.s.Xmm0 + reg) = val;
}

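/* Number of UNWIND_CODE slots consumed by an opcode (the extra slots carry a
 * 16-bit or 32-bit offset operand). */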
static int get_opcode_size(UNWIND_CODE op)
{
    switch (op.UnwindOp)
    {
    case UWOP_ALLOC_LARGE:
        return 2 + (op.OpInfo != 0);
    case UWOP_SAVE_NONVOL:
    case UWOP_SAVE_XMM128:
        return 2;
    case UWOP_SAVE_NONVOL_FAR:
    case UWOP_SAVE_XMM128_FAR:
        return 3;
    default:
        return 1;
    }
}

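/* Heuristic check that pc points inside a function epilog: an optional add/lea
 * restoring %rsp, then pops of non-volatile registers, then a ret. */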
static BOOL is_inside_epilog(struct cpu_stack_walk* csw, DWORD64 pc)
{
    BYTE op0, op1, op2;

    if (!sw_read_mem(csw, pc, &op0, 1)) return FALSE;

    /* add or lea must be the first instruction, and it must have a rex.W prefix */
    if ((op0 & 0xf8) == 0x48)
    {
        if (!sw_read_mem(csw, pc + 1, &op1, 1)) return FALSE;
        switch (op1)
        {
        case 0x81: /* add $nnnn,%rsp */
            if (!sw_read_mem(csw, pc + 2, &op2, 1)) return FALSE;
            if (op0 == 0x48 && op2 == 0xc4)
            {
                pc += 7;
                break;
            }
            return FALSE;
        case 0x83: /* add $n,%rsp */
            if (!sw_read_mem(csw, pc + 2, &op2, 1)) return FALSE;
            if (op0 == 0x48 && op2 == 0xc4)
            {
                pc += 4;
                break;
            }
            return FALSE;
        case 0x8d: /* lea n(reg),%rsp */
            if (op0 & 0x06) return FALSE; /* rex.RX must be cleared */
            if (!sw_read_mem(csw, pc + 2, &op2, 1)) return FALSE;
            if (((op2 >> 3) & 7) != 4) return FALSE; /* dest reg must be %rsp */
            if ((op2 & 7) == 4) return FALSE; /* no SIB byte allowed */
            if ((op2 >> 6) == 1) /* 8-bit offset */
            {
                pc += 4;
                break;
            }
            if ((op2 >> 6) == 2) /* 32-bit offset */
            {
                pc += 7;
                break;
            }
            return FALSE;
        }
    }

    /* now check for various pop instructions */
    for (;;)
    {
        BYTE rex = 0;

        if (!sw_read_mem(csw, pc, &op0, 1)) return FALSE;
        if ((op0 & 0xf0) == 0x40)
        {
            rex = op0 & 0x0f; /* rex prefix */
            if (!sw_read_mem(csw, ++pc, &op0, 1)) return FALSE;
        }

        switch (op0)
        {
        case 0x58: /* pop %rax/%r8 */
        case 0x59: /* pop %rcx/%r9 */
        case 0x5a: /* pop %rdx/%r10 */
        case 0x5b: /* pop %rbx/%r11 */
        case 0x5c: /* pop %rsp/%r12 */
        case 0x5d: /* pop %rbp/%r13 */
        case 0x5e: /* pop %rsi/%r14 */
        case 0x5f: /* pop %rdi/%r15 */
            pc++;
            continue;
        case 0xc2: /* ret $nn */
        case 0xc3: /* ret */
            return TRUE;
        /* FIXME: add various jump instructions */
        }
        return FALSE;
    }
}

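/* Pop the return address from the top of the stack into Rip and advance
 * Rsp/AddrStack; used once the prolog has been virtually undone, or when no
 * unwind information is available. */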
static BOOL default_unwind(struct cpu_stack_walk* csw, LPSTACKFRAME64 frame, CONTEXT* context)
{
    if (!sw_read_mem(csw, frame->AddrStack.Offset,
                     &frame->AddrReturn.Offset, sizeof(DWORD64)))
    {
        WARN("Cannot read new frame offset %s\n", wine_dbgstr_longlong(frame->AddrStack.Offset));
        return FALSE;
    }
    context->Rip = frame->AddrReturn.Offset;
    frame->AddrStack.Offset += sizeof(DWORD64);
    context->Rsp += sizeof(DWORD64);
    return TRUE;
}

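/* Virtually undo the prolog described by the function's unwind info: read the
 * UNWIND_INFO from the debuggee, replay the inverse of each recorded prolog
 * operation on the context (following chained infos), then pop the return
 * address. */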
static BOOL interpret_function_table_entry(struct cpu_stack_walk* csw, LPSTACKFRAME64 frame,
                                           CONTEXT* context, RUNTIME_FUNCTION* function, DWORD64 base)
{
    char buffer[sizeof(UNWIND_INFO) + 256 * sizeof(UNWIND_CODE)];
    UNWIND_INFO* info = (UNWIND_INFO*)buffer;
    unsigned i;
    DWORD64 newframe, prolog_offset, off, value;
    M128A floatvalue;
    union handler_data handler_data;

    /* FIXME: we have some assumptions here */
    assert(context);
    if (context->Rsp != frame->AddrStack.Offset) FIXME("inconsistent Stack Pointer\n");
    if (context->Rip != frame->AddrPC.Offset) FIXME("inconsistent Instruction Pointer\n");
    dump_unwind_info(csw->hProcess, sw_module_base(csw, frame->AddrPC.Offset), frame->FuncTableEntry);
    newframe = context->Rsp;
    for (;;)
    {
        if (!sw_read_mem(csw, base + function->UnwindData, info, sizeof(*info)) ||
            !sw_read_mem(csw, base + function->UnwindData + FIELD_OFFSET(UNWIND_INFO, UnwindCode),
                         info->UnwindCode, info->CountOfCodes * sizeof(UNWIND_CODE)))
        {
            WARN("Couldn't read unwind_code at %lx\n", base + function->UnwindData);
            return FALSE;
        }

        if (info->Version != 1)
        {
            WARN("unknown unwind info version %u at %lx\n", info->Version, base + function->UnwindData);
            return FALSE;
        }

        if (info->FrameRegister)
            newframe = get_int_reg(context, info->FrameRegister) - info->FrameOffset * 16;

        /* check if in prolog */
        if (frame->AddrPC.Offset >= base + function->BeginAddress &&
            frame->AddrPC.Offset < base + function->BeginAddress + info->SizeOfProlog)
        {
            prolog_offset = frame->AddrPC.Offset - base - function->BeginAddress;
        }
        else
        {
            prolog_offset = ~0;
            if (is_inside_epilog(csw, frame->AddrPC.Offset))
            {
                FIXME("epilog management not fully done\n");
                /* interpret_epilog((const BYTE*)frame->AddrPC.Offset, context); */
                return TRUE;
            }
        }

        for (i = 0; i < info->CountOfCodes; i += get_opcode_size(info->UnwindCode[i]))
        {
            if (prolog_offset < info->UnwindCode[i].CodeOffset) continue; /* skip it */

            switch (info->UnwindCode[i].UnwindOp)
            {
            case UWOP_PUSH_NONVOL: /* pushq %reg */
                if (!sw_read_mem(csw, context->Rsp, &value, sizeof(DWORD64))) return FALSE;
                set_int_reg(context, info->UnwindCode[i].OpInfo, value);
                context->Rsp += sizeof(ULONG64);
                break;
            case UWOP_ALLOC_LARGE: /* subq $nn,%rsp */
                if (info->UnwindCode[i].OpInfo) context->Rsp += *(DWORD*)&info->UnwindCode[i+1];
                else context->Rsp += *(USHORT*)&info->UnwindCode[i+1] * 8;
                break;
            case UWOP_ALLOC_SMALL: /* subq $n,%rsp */
                context->Rsp += (info->UnwindCode[i].OpInfo + 1) * 8;
                break;
            case UWOP_SET_FPREG: /* leaq nn(%rsp),%framereg */
                context->Rsp = newframe;
                break;
            case UWOP_SAVE_NONVOL: /* movq %reg,n(%rsp) */
                off = newframe + *(USHORT*)&info->UnwindCode[i+1] * 8;
                if (!sw_read_mem(csw, off, &value, sizeof(DWORD64))) return FALSE;
                set_int_reg(context, info->UnwindCode[i].OpInfo, value);
                break;
            case UWOP_SAVE_NONVOL_FAR: /* movq %reg,nn(%rsp) */
                off = newframe + *(DWORD*)&info->UnwindCode[i+1];
                if (!sw_read_mem(csw, off, &value, sizeof(DWORD64))) return FALSE;
                set_int_reg(context, info->UnwindCode[i].OpInfo, value);
                break;
            case UWOP_SAVE_XMM128: /* movaps %xmmreg,n(%rsp) */
                off = newframe + *(USHORT*)&info->UnwindCode[i+1] * 16;
                if (!sw_read_mem(csw, off, &floatvalue, sizeof(M128A))) return FALSE;
                set_float_reg(context, info->UnwindCode[i].OpInfo, floatvalue);
                break;
            case UWOP_SAVE_XMM128_FAR: /* movaps %xmmreg,nn(%rsp) */
                off = newframe + *(DWORD*)&info->UnwindCode[i+1];
                if (!sw_read_mem(csw, off, &floatvalue, sizeof(M128A))) return FALSE;
                set_float_reg(context, info->UnwindCode[i].OpInfo, floatvalue);
                break;
            case UWOP_PUSH_MACHFRAME:
                FIXME("PUSH_MACHFRAME %u\n", info->UnwindCode[i].OpInfo);
                break;
            default:
                FIXME("unknown code %u\n", info->UnwindCode[i].UnwindOp);
                break;
            }
        }
        if (!(info->Flags & UNW_FLAG_CHAININFO)) break;
        if (!sw_read_mem(csw, base + function->UnwindData + FIELD_OFFSET(UNWIND_INFO, UnwindCode) +
                         ((info->CountOfCodes + 1) & ~1) * sizeof(UNWIND_CODE),
                         &handler_data, sizeof(handler_data))) return FALSE;
        function = &handler_data.chain; /* restart with the chained info */
    }
    frame->AddrStack.Offset = context->Rsp;
    return default_unwind(csw, frame, context);
}

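/* One StackWalk64 step for x86-64: validate/advance the frame, then unwind via
 * the PE function table entry, DWARF CFI, or the default scheme, updating the
 * STACKFRAME64 and CONTEXT for the caller's frame. */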
static BOOL x86_64_stack_walk(struct cpu_stack_walk* csw, LPSTACKFRAME64 frame, CONTEXT* context)
{
    DWORD64 base;
    DWORD_PTR cfa;
    unsigned deltapc = 0;

    /* sanity check */
    if (curr_mode >= stm_done) return FALSE;
    assert(!csw->is32);

    TRACE("Enter: PC=%s Frame=%s Return=%s Stack=%s Mode=%s\n",
          wine_dbgstr_addr(&frame->AddrPC),
          wine_dbgstr_addr(&frame->AddrFrame),
          wine_dbgstr_addr(&frame->AddrReturn),
          wine_dbgstr_addr(&frame->AddrStack),
          curr_mode == stm_start ? "start" : "64bit");

    if (curr_mode == stm_start)
    {
        if ((frame->AddrPC.Mode == AddrModeFlat) &&
            (frame->AddrFrame.Mode != AddrModeFlat))
        {
            WARN("Bad AddrPC.Mode / AddrFrame.Mode combination\n");
            goto done_err;
        }

        /* Init done */
        curr_mode = stm_64bit;
        curr_switch = 0;
        frame->AddrReturn.Mode = frame->AddrStack.Mode = AddrModeFlat;
        /* don't set up AddrStack on first call. Either the caller has set it up, or
         * we will get it in the next frame
         */
        memset(&frame->AddrBStore, 0, sizeof(frame->AddrBStore));
    }
    else
    {
        if (frame->AddrReturn.Offset == 0) goto done_err;
        frame->AddrPC = frame->AddrReturn;
        deltapc = 1;
    }

    if (!frame->AddrPC.Offset || !(base = sw_module_base(csw, frame->AddrPC.Offset))) goto done_err;
    frame->FuncTableEntry = sw_table_access(csw, frame->AddrPC.Offset);
    frame->AddrStack.Mode = frame->AddrFrame.Mode = frame->AddrReturn.Mode = AddrModeFlat;
    if (frame->FuncTableEntry)
    {
        if (!interpret_function_table_entry(csw, frame, context, frame->FuncTableEntry, base))
            goto done_err;
    }
    else if (dwarf2_virtual_unwind(csw, frame->AddrPC.Offset - deltapc, context, &cfa))
    {
        frame->AddrStack.Offset = context->Rsp = cfa;
        frame->AddrReturn.Offset = context->Rip;
        TRACE("next function rip=%016lx\n", context->Rip);
        TRACE(" rax=%016lx rbx=%016lx rcx=%016lx rdx=%016lx\n",
              context->Rax, context->Rbx, context->Rcx, context->Rdx);
        TRACE(" rsi=%016lx rdi=%016lx rbp=%016lx rsp=%016lx\n",
              context->Rsi, context->Rdi, context->Rbp, context->Rsp);
        TRACE(" r8=%016lx r9=%016lx r10=%016lx r11=%016lx\n",
              context->R8, context->R9, context->R10, context->R11);
        TRACE(" r12=%016lx r13=%016lx r14=%016lx r15=%016lx\n",
              context->R12, context->R13, context->R14, context->R15);
    }
    else if (!default_unwind(csw, frame, context)) goto done_err;

    memset(&frame->Params, 0, sizeof(frame->Params));

    frame->Far = TRUE;
    frame->Virtual = TRUE;

    TRACE("Leave: PC=%s Frame=%s Return=%s Stack=%s Mode=%s FuncTable=%p\n",
          wine_dbgstr_addr(&frame->AddrPC),
          wine_dbgstr_addr(&frame->AddrFrame),
          wine_dbgstr_addr(&frame->AddrReturn),
          wine_dbgstr_addr(&frame->AddrStack),
          curr_mode == stm_start ? "start" : "64bit",
          frame->FuncTableEntry);

    return TRUE;
done_err:
    curr_mode = stm_done;
    return FALSE;
}
#else
static BOOL x86_64_stack_walk(struct cpu_stack_walk* csw, LPSTACKFRAME64 frame, CONTEXT* context)
{
    return FALSE;
}
#endif

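/* Binary search of the module's IMAGE_DIRECTORY_ENTRY_EXCEPTION table for the
 * RUNTIME_FUNCTION entry covering addr. */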
static void* x86_64_find_runtime_function(struct module* module, DWORD64 addr)
{
#ifdef __x86_64__
    RUNTIME_FUNCTION* rtf;
    ULONG size;
    int min, max;

    rtf = (RUNTIME_FUNCTION*)pe_map_directory(module, IMAGE_DIRECTORY_ENTRY_EXCEPTION, &size);
    if (rtf) for (min = 0, max = size / sizeof(*rtf); min <= max; )
    {
        int pos = (min + max) / 2;
        if (addr < module->module.BaseOfImage + rtf[pos].BeginAddress) max = pos - 1;
        else if (addr >= module->module.BaseOfImage + rtf[pos].EndAddress) min = pos + 1;
        else
        {
            rtf += pos;
            while (rtf->UnwindData & 1) /* follow chained entry */
            {
                FIXME("RunTime_Function outside IMAGE_DIRECTORY_ENTRY_EXCEPTION unimplemented yet!\n");
                return NULL;
                /* we need to read into the other process */
                /* rtf = (RUNTIME_FUNCTION*)(module->module.BaseOfImage + (rtf->UnwindData & ~1)); */
            }
            return rtf;
        }
    }
#endif
    return NULL;
}

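/* Map a DWARF register number (x86-64 psABI numbering) onto the corresponding
 * CodeView CV_AMD64_* register constant. */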
static unsigned x86_64_map_dwarf_register(unsigned regno)
{
    unsigned reg;

    if (regno >= 17 && regno <= 24)
        reg = CV_AMD64_XMM0 + regno - 17;
    else if (regno >= 25 && regno <= 32)
        reg = CV_AMD64_XMM8 + regno - 25;
    else if (regno >= 33 && regno <= 40)
        reg = CV_AMD64_ST0 + regno - 33;
    else switch (regno)
    {
    case  0: reg = CV_AMD64_RAX;    break;
    case  1: reg = CV_AMD64_RDX;    break;
    case  2: reg = CV_AMD64_RCX;    break;
    case  3: reg = CV_AMD64_RBX;    break;
    case  4: reg = CV_AMD64_RSI;    break;
    case  5: reg = CV_AMD64_RDI;    break;
    case  6: reg = CV_AMD64_RBP;    break;
    case  7: reg = CV_AMD64_RSP;    break;
    case  8: reg = CV_AMD64_R8;     break;
    case  9: reg = CV_AMD64_R9;     break;
    case 10: reg = CV_AMD64_R10;    break;
    case 11: reg = CV_AMD64_R11;    break;
    case 12: reg = CV_AMD64_R12;    break;
    case 13: reg = CV_AMD64_R13;    break;
    case 14: reg = CV_AMD64_R14;    break;
    case 15: reg = CV_AMD64_R15;    break;
    case 16: reg = CV_AMD64_RIP;    break;
    case 49: reg = CV_AMD64_EFLAGS; break;
    case 50: reg = CV_AMD64_ES;     break;
    case 51: reg = CV_AMD64_CS;     break;
    case 52: reg = CV_AMD64_SS;     break;
    case 53: reg = CV_AMD64_DS;     break;
    case 54: reg = CV_AMD64_FS;     break;
    case 55: reg = CV_AMD64_GS;     break;
    case 62: reg = CV_AMD64_TR;     break;
    case 63: reg = CV_AMD64_LDTR;   break;
    case 64: reg = CV_AMD64_MXCSR;  break;
    case 65: reg = CV_AMD64_CTRL;   break;
    case 66: reg = CV_AMD64_STAT;   break;
    /*
     * 56-57 reserved
     * 58    %fs.base
     * 59    %gs.base
     * 60-61 reserved
     */
    default:
        FIXME("Don't know how to map register %d\n", regno);
        return 0;
    }
    return reg;
}

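/* Return a pointer to (and the size of) the CONTEXT field that backs a given
 * CV_AMD64_* register. */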
static void* x86_64_fetch_context_reg(CONTEXT* ctx, unsigned regno, unsigned* size)
{
#ifdef __x86_64__
    switch (regno)
    {
    case CV_AMD64_RAX: *size = sizeof(ctx->Rax); return &ctx->Rax;
    case CV_AMD64_RDX: *size = sizeof(ctx->Rdx); return &ctx->Rdx;
    case CV_AMD64_RCX: *size = sizeof(ctx->Rcx); return &ctx->Rcx;
    case CV_AMD64_RBX: *size = sizeof(ctx->Rbx); return &ctx->Rbx;
    case CV_AMD64_RSI: *size = sizeof(ctx->Rsi); return &ctx->Rsi;
    case CV_AMD64_RDI: *size = sizeof(ctx->Rdi); return &ctx->Rdi;
    case CV_AMD64_RBP: *size = sizeof(ctx->Rbp); return &ctx->Rbp;
    case CV_AMD64_RSP: *size = sizeof(ctx->Rsp); return &ctx->Rsp;
    case CV_AMD64_R8:  *size = sizeof(ctx->R8);  return &ctx->R8;
    case CV_AMD64_R9:  *size = sizeof(ctx->R9);  return &ctx->R9;
    case CV_AMD64_R10: *size = sizeof(ctx->R10); return &ctx->R10;
    case CV_AMD64_R11: *size = sizeof(ctx->R11); return &ctx->R11;
    case CV_AMD64_R12: *size = sizeof(ctx->R12); return &ctx->R12;
    case CV_AMD64_R13: *size = sizeof(ctx->R13); return &ctx->R13;
    case CV_AMD64_R14: *size = sizeof(ctx->R14); return &ctx->R14;
    case CV_AMD64_R15: *size = sizeof(ctx->R15); return &ctx->R15;
    case CV_AMD64_RIP: *size = sizeof(ctx->Rip); return &ctx->Rip;

    case CV_AMD64_XMM0 + 0: *size = sizeof(ctx->u.s.Xmm0 ); return &ctx->u.s.Xmm0;
    case CV_AMD64_XMM0 + 1: *size = sizeof(ctx->u.s.Xmm1 ); return &ctx->u.s.Xmm1;
    case CV_AMD64_XMM0 + 2: *size = sizeof(ctx->u.s.Xmm2 ); return &ctx->u.s.Xmm2;
    case CV_AMD64_XMM0 + 3: *size = sizeof(ctx->u.s.Xmm3 ); return &ctx->u.s.Xmm3;
    case CV_AMD64_XMM0 + 4: *size = sizeof(ctx->u.s.Xmm4 ); return &ctx->u.s.Xmm4;
    case CV_AMD64_XMM0 + 5: *size = sizeof(ctx->u.s.Xmm5 ); return &ctx->u.s.Xmm5;
    case CV_AMD64_XMM0 + 6: *size = sizeof(ctx->u.s.Xmm6 ); return &ctx->u.s.Xmm6;
    case CV_AMD64_XMM0 + 7: *size = sizeof(ctx->u.s.Xmm7 ); return &ctx->u.s.Xmm7;
    case CV_AMD64_XMM8 + 0: *size = sizeof(ctx->u.s.Xmm8 ); return &ctx->u.s.Xmm8;
    case CV_AMD64_XMM8 + 1: *size = sizeof(ctx->u.s.Xmm9 ); return &ctx->u.s.Xmm9;
    case CV_AMD64_XMM8 + 2: *size = sizeof(ctx->u.s.Xmm10); return &ctx->u.s.Xmm10;
    case CV_AMD64_XMM8 + 3: *size = sizeof(ctx->u.s.Xmm11); return &ctx->u.s.Xmm11;
    case CV_AMD64_XMM8 + 4: *size = sizeof(ctx->u.s.Xmm12); return &ctx->u.s.Xmm12;
    case CV_AMD64_XMM8 + 5: *size = sizeof(ctx->u.s.Xmm13); return &ctx->u.s.Xmm13;
    case CV_AMD64_XMM8 + 6: *size = sizeof(ctx->u.s.Xmm14); return &ctx->u.s.Xmm14;
    case CV_AMD64_XMM8 + 7: *size = sizeof(ctx->u.s.Xmm15); return &ctx->u.s.Xmm15;

    case CV_AMD64_ST0 + 0: *size = sizeof(ctx->u.s.Legacy[0]); return &ctx->u.s.Legacy[0];
    case CV_AMD64_ST0 + 1: *size = sizeof(ctx->u.s.Legacy[1]); return &ctx->u.s.Legacy[1];
    case CV_AMD64_ST0 + 2: *size = sizeof(ctx->u.s.Legacy[2]); return &ctx->u.s.Legacy[2];
    case CV_AMD64_ST0 + 3: *size = sizeof(ctx->u.s.Legacy[3]); return &ctx->u.s.Legacy[3];
    case CV_AMD64_ST0 + 4: *size = sizeof(ctx->u.s.Legacy[4]); return &ctx->u.s.Legacy[4];
    case CV_AMD64_ST0 + 5: *size = sizeof(ctx->u.s.Legacy[5]); return &ctx->u.s.Legacy[5];
    case CV_AMD64_ST0 + 6: *size = sizeof(ctx->u.s.Legacy[6]); return &ctx->u.s.Legacy[6];
    case CV_AMD64_ST0 + 7: *size = sizeof(ctx->u.s.Legacy[7]); return &ctx->u.s.Legacy[7];

    case CV_AMD64_EFLAGS: *size = sizeof(ctx->EFlags); return &ctx->EFlags;
    case CV_AMD64_ES:     *size = sizeof(ctx->SegEs);  return &ctx->SegEs;
    case CV_AMD64_CS:     *size = sizeof(ctx->SegCs);  return &ctx->SegCs;
    case CV_AMD64_SS:     *size = sizeof(ctx->SegSs);  return &ctx->SegSs;
    case CV_AMD64_DS:     *size = sizeof(ctx->SegDs);  return &ctx->SegDs;
    case CV_AMD64_FS:     *size = sizeof(ctx->SegFs);  return &ctx->SegFs;
    case CV_AMD64_GS:     *size = sizeof(ctx->SegGs);  return &ctx->SegGs;
    }
#endif
    FIXME("Unknown register %x\n", regno);
    return NULL;
}

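/* Printable name of a CV_AMD64_* register, for debug output. */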
static const char* x86_64_fetch_regname(unsigned regno)
{
    switch (regno)
    {
    case CV_AMD64_RAX: return "rax";
    case CV_AMD64_RDX: return "rdx";
    case CV_AMD64_RCX: return "rcx";
    case CV_AMD64_RBX: return "rbx";
    case CV_AMD64_RSI: return "rsi";
    case CV_AMD64_RDI: return "rdi";
    case CV_AMD64_RBP: return "rbp";
    case CV_AMD64_RSP: return "rsp";
    case CV_AMD64_R8:  return "r8";
    case CV_AMD64_R9:  return "r9";
    case CV_AMD64_R10: return "r10";
    case CV_AMD64_R11: return "r11";
    case CV_AMD64_R12: return "r12";
    case CV_AMD64_R13: return "r13";
    case CV_AMD64_R14: return "r14";
    case CV_AMD64_R15: return "r15";
    case CV_AMD64_RIP: return "rip";

    case CV_AMD64_XMM0 + 0: return "xmm0";
    case CV_AMD64_XMM0 + 1: return "xmm1";
    case CV_AMD64_XMM0 + 2: return "xmm2";
    case CV_AMD64_XMM0 + 3: return "xmm3";
    case CV_AMD64_XMM0 + 4: return "xmm4";
    case CV_AMD64_XMM0 + 5: return "xmm5";
    case CV_AMD64_XMM0 + 6: return "xmm6";
    case CV_AMD64_XMM0 + 7: return "xmm7";
    case CV_AMD64_XMM8 + 0: return "xmm8";
    case CV_AMD64_XMM8 + 1: return "xmm9";
    case CV_AMD64_XMM8 + 2: return "xmm10";
    case CV_AMD64_XMM8 + 3: return "xmm11";
    case CV_AMD64_XMM8 + 4: return "xmm12";
    case CV_AMD64_XMM8 + 5: return "xmm13";
    case CV_AMD64_XMM8 + 6: return "xmm14";
    case CV_AMD64_XMM8 + 7: return "xmm15";

    case CV_AMD64_ST0 + 0: return "st0";
    case CV_AMD64_ST0 + 1: return "st1";
    case CV_AMD64_ST0 + 2: return "st2";
    case CV_AMD64_ST0 + 3: return "st3";
    case CV_AMD64_ST0 + 4: return "st4";
    case CV_AMD64_ST0 + 5: return "st5";
    case CV_AMD64_ST0 + 6: return "st6";
    case CV_AMD64_ST0 + 7: return "st7";

    case CV_AMD64_EFLAGS: return "eflags";
    case CV_AMD64_ES: return "es";
    case CV_AMD64_CS: return "cs";
    case CV_AMD64_SS: return "ss";
    case CV_AMD64_DS: return "ds";
    case CV_AMD64_FS: return "fs";
    case CV_AMD64_GS: return "gs";
    }
    FIXME("Unknown register %x\n", regno);
    return NULL;
}

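/* CPU backend descriptor registered with dbghelp: machine type, address size
 * in bytes, and the operations implemented above. */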
struct cpu cpu_x86_64 = {
    IMAGE_FILE_MACHINE_AMD64,
    8,
    x86_64_get_addr,
    x86_64_stack_walk,
    x86_64_find_runtime_function,
    x86_64_map_dwarf_register,
    x86_64_fetch_context_reg,
    x86_64_fetch_regname,
};