aarch64-tdep.c
1/* Common target dependent code for GDB on AArch64 systems.
2
3 Copyright (C) 2009-2023 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21#include "defs.h"
22
23#include "frame.h"
24#include "gdbcmd.h"
25#include "gdbcore.h"
26#include "dis-asm.h"
27#include "regcache.h"
28#include "reggroups.h"
29#include "value.h"
30#include "arch-utils.h"
31#include "osabi.h"
32#include "frame-unwind.h"
33#include "frame-base.h"
34#include "trad-frame.h"
35#include "objfiles.h"
36#include "dwarf2.h"
37#include "dwarf2/frame.h"
38#include "gdbtypes.h"
39#include "prologue-value.h"
40#include "target-descriptions.h"
41#include "user-regs.h"
42#include "ax-gdb.h"
43#include "gdbsupport/selftest.h"
44
45#include "aarch64-tdep.h"
46#include "aarch64-ravenscar-thread.h"
 47
48#include "record.h"
49#include "record-full.h"
50#include "arch/aarch64-insn.h"
51#include "gdbarch.h"
52
53#include "opcode/aarch64.h"
54#include <algorithm>
55#include <unordered_map>
56
57/* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
58 four members. */
59#define HA_MAX_NUM_FLDS 4
60
61/* All possible aarch64 target descriptors. */
62static std::unordered_map <aarch64_features, target_desc *> tdesc_aarch64_map;
63
64/* The standard register names, and all the valid aliases for them. */
65static const struct
66{
67 const char *const name;
 68 int regnum;
 69} aarch64_register_aliases[] =
 70{
71 /* 64-bit register names. */
72 {"fp", AARCH64_FP_REGNUM},
73 {"lr", AARCH64_LR_REGNUM},
74 {"sp", AARCH64_SP_REGNUM},
75 /* specials */
76 {"ip0", AARCH64_X0_REGNUM + 16},
77 {"ip1", AARCH64_X0_REGNUM + 17}
78};
79
80/* The required core 'R' registers. */
81static const char *const aarch64_r_register_names[] =
82{
83 /* These registers must appear in consecutive RAW register number
84 order and they must begin with AARCH64_X0_REGNUM! */
85 "x0", "x1", "x2", "x3",
86 "x4", "x5", "x6", "x7",
87 "x8", "x9", "x10", "x11",
88 "x12", "x13", "x14", "x15",
89 "x16", "x17", "x18", "x19",
90 "x20", "x21", "x22", "x23",
91 "x24", "x25", "x26", "x27",
92 "x28", "x29", "x30", "sp",
93 "pc", "cpsr"
94};
95
96/* The FP/SIMD 'V' registers. */
97static const char *const aarch64_v_register_names[] =
98{
99 /* These registers must appear in consecutive RAW register number
100 order and they must begin with AARCH64_V0_REGNUM! */
101 "v0", "v1", "v2", "v3",
102 "v4", "v5", "v6", "v7",
103 "v8", "v9", "v10", "v11",
104 "v12", "v13", "v14", "v15",
105 "v16", "v17", "v18", "v19",
106 "v20", "v21", "v22", "v23",
107 "v24", "v25", "v26", "v27",
108 "v28", "v29", "v30", "v31",
109 "fpsr",
110 "fpcr"
111};
112
113/* The SVE 'Z' and 'P' registers. */
114static const char *const aarch64_sve_register_names[] =
115{
116 /* These registers must appear in consecutive RAW register number
117 order and they must begin with AARCH64_SVE_Z0_REGNUM! */
118 "z0", "z1", "z2", "z3",
119 "z4", "z5", "z6", "z7",
120 "z8", "z9", "z10", "z11",
121 "z12", "z13", "z14", "z15",
122 "z16", "z17", "z18", "z19",
123 "z20", "z21", "z22", "z23",
124 "z24", "z25", "z26", "z27",
125 "z28", "z29", "z30", "z31",
126 "fpsr", "fpcr",
127 "p0", "p1", "p2", "p3",
128 "p4", "p5", "p6", "p7",
129 "p8", "p9", "p10", "p11",
130 "p12", "p13", "p14", "p15",
131 "ffr", "vg"
132};
133
134static const char *const aarch64_pauth_register_names[] =
135{
136 /* Authentication mask for data pointer. */
137 "pauth_dmask",
138 /* Authentication mask for code pointer. */
139 "pauth_cmask"
140};
141
142static const char *const aarch64_mte_register_names[] =
143{
144 /* Tag Control Register. */
145 "tag_ctl"
146};
147
148/* AArch64 prologue cache structure. */
149struct aarch64_prologue_cache
150{
151 /* The program counter at the start of the function. It is used to
152 identify this frame as a prologue frame. */
153 CORE_ADDR func;
154
155 /* The program counter at the time this frame was created; i.e. where
156 this function was called from. It is used to identify this frame as a
157 stub frame. */
158 CORE_ADDR prev_pc;
159
160 /* The stack pointer at the time this frame was created; i.e. the
161 caller's stack pointer when this function was called. It is used
162 to identify this frame. */
163 CORE_ADDR prev_sp;
164
165 /* Is the target available to read from? */
 166 int available_p = 0;
 167
168 /* The frame base for this frame is just prev_sp - frame size.
169 FRAMESIZE is the distance from the frame pointer to the
170 initial stack pointer. */
 171 int framesize;
 172
173 /* The register used to hold the frame pointer for this frame. */
 174 int framereg;
 175
176 /* Saved register offsets. */
 177 trad_frame_saved_reg *saved_regs;
 178};
179
180static void
181show_aarch64_debug (struct ui_file *file, int from_tty,
182 struct cmd_list_element *c, const char *value)
183{
184 gdb_printf (file, _("AArch64 debugging is %s.\n"), value);
185}
186
187namespace {
188
189/* Abstract instruction reader. */
190
191class abstract_instruction_reader
192{
193public:
194 /* Read in one instruction. */
195 virtual ULONGEST read (CORE_ADDR memaddr, int len,
196 enum bfd_endian byte_order) = 0;
197};
198
199/* Instruction reader from real target. */
200
201class instruction_reader : public abstract_instruction_reader
202{
203 public:
204 ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
205 override
206 {
207 return read_code_unsigned_integer (memaddr, len, byte_order);
208 }
209};
210
211} // namespace
212
213/* If address signing is enabled, mask off the signature bits from the link
214 register, which is passed by value in ADDR, using the register values in
215 THIS_FRAME. */
216
217static CORE_ADDR
218aarch64_frame_unmask_lr (aarch64_gdbarch_tdep *tdep,
 219 frame_info_ptr this_frame, CORE_ADDR addr)
220{
221 if (tdep->has_pauth ()
222 && frame_unwind_register_unsigned (this_frame,
 223 tdep->ra_sign_state_regnum))
 224 {
225 int cmask_num = AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base);
226 CORE_ADDR cmask = frame_unwind_register_unsigned (this_frame, cmask_num);
227 addr = addr & ~cmask;
228
229 /* Record in the frame that the link register required unmasking. */
230 set_frame_previous_pc_masked (this_frame);
231 }
232
233 return addr;
234}
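/* A minimal illustrative sketch (not part of the original file): stripping a
   PAC signature, as done above, is just an AND-NOT with the code mask read
   from the pauth_cmask register.  The helper name and the mask value below
   are made up for the example.  */

static inline CORE_ADDR
strip_pac_bits_example (CORE_ADDR lr, CORE_ADDR cmask)
{
  /* Clear every bit that belongs to the pointer authentication code.  */
  return lr & ~cmask;
}

/* E.g. with cmask == 0x007f000000000000, a signed LR value of
   0x007a0000004005b4 unmasks to 0x00000000004005b4.  */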
235
236/* Implement the "get_pc_address_flags" gdbarch method. */
237
238static std::string
239aarch64_get_pc_address_flags (frame_info_ptr frame, CORE_ADDR pc)
240{
241 if (pc != 0 && get_frame_pc_masked (frame))
242 return "PAC";
243
244 return "";
245}
246
247/* Analyze a prologue, looking for a recognizable stack frame
248 and frame pointer. Scan until we encounter a store that could
249 clobber the stack frame unexpectedly, or an unknown instruction. */
250
251static CORE_ADDR
252aarch64_analyze_prologue (struct gdbarch *gdbarch,
 253 CORE_ADDR start, CORE_ADDR limit,
254 struct aarch64_prologue_cache *cache,
255 abstract_instruction_reader& reader)
256{
257 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
258 int i;
259
260 /* Whether the stack has been set. This should be true when we notice a SP
261 to FP move or if we are using the SP as the base register for storing
 262 data, in case the FP is omitted. */
263 bool seen_stack_set = false;
264
265 /* Track X registers and D registers in prologue. */
 266 pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];
 267
 268 for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
 269 regs[i] = pv_register (i, 0);
 270 pv_area stack (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));
 271
272 for (; start < limit; start += 4)
273 {
274 uint32_t insn;
275 aarch64_inst inst;
276
277 insn = reader.read (start, 4, byte_order_for_code);
278
279 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
280 break;
281
282 if (inst.opcode->iclass == addsub_imm
283 && (inst.opcode->op == OP_ADD
284 || strcmp ("sub", inst.opcode->name) == 0))
285 {
286 unsigned rd = inst.operands[0].reg.regno;
287 unsigned rn = inst.operands[1].reg.regno;
288
289 gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
290 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
291 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
292 gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);
293
294 if (inst.opcode->op == OP_ADD)
295 {
296 regs[rd] = pv_add_constant (regs[rn],
297 inst.operands[2].imm.value);
298 }
299 else
300 {
301 regs[rd] = pv_add_constant (regs[rn],
302 -inst.operands[2].imm.value);
303 }
304
305 /* Did we move SP to FP? */
306 if (rn == AARCH64_SP_REGNUM && rd == AARCH64_FP_REGNUM)
307 seen_stack_set = true;
308 }
309 else if (inst.opcode->iclass == addsub_ext
310 && strcmp ("sub", inst.opcode->name) == 0)
311 {
312 unsigned rd = inst.operands[0].reg.regno;
313 unsigned rn = inst.operands[1].reg.regno;
314 unsigned rm = inst.operands[2].reg.regno;
315
316 gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
317 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
318 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
319 gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_EXT);
320
321 regs[rd] = pv_subtract (regs[rn], regs[rm]);
322 }
323 else if (inst.opcode->iclass == branch_imm)
324 {
325 /* Stop analysis on branch. */
326 break;
327 }
328 else if (inst.opcode->iclass == condbranch)
329 {
330 /* Stop analysis on branch. */
331 break;
332 }
333 else if (inst.opcode->iclass == branch_reg)
334 {
335 /* Stop analysis on branch. */
336 break;
337 }
338 else if (inst.opcode->iclass == compbranch)
339 {
340 /* Stop analysis on branch. */
341 break;
342 }
343 else if (inst.opcode->op == OP_MOVZ)
344 {
345 unsigned rd = inst.operands[0].reg.regno;
346
347 gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
348 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
349 gdb_assert (inst.operands[1].type == AARCH64_OPND_HALF);
350 gdb_assert (inst.operands[1].shifter.kind == AARCH64_MOD_LSL);
351
352 /* If this shows up before we set the stack, keep going. Otherwise
353 stop the analysis. */
354 if (seen_stack_set)
355 break;
356
357 regs[rd] = pv_constant (inst.operands[1].imm.value
358 << inst.operands[1].shifter.amount);
359 }
360 else if (inst.opcode->iclass == log_shift
361 && strcmp (inst.opcode->name, "orr") == 0)
362 {
363 unsigned rd = inst.operands[0].reg.regno;
364 unsigned rn = inst.operands[1].reg.regno;
365 unsigned rm = inst.operands[2].reg.regno;
366
367 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
368 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
369 gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);
370
371 if (inst.operands[2].shifter.amount == 0
372 && rn == AARCH64_SP_REGNUM)
373 regs[rd] = regs[rm];
374 else
375 {
376 aarch64_debug_printf ("prologue analysis gave up "
377 "addr=%s opcode=0x%x (orr x register)",
378 core_addr_to_string_nz (start), insn);
379
380 break;
381 }
382 }
383 else if (inst.opcode->op == OP_STUR)
384 {
385 unsigned rt = inst.operands[0].reg.regno;
386 unsigned rn = inst.operands[1].addr.base_regno;
387 int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);
388
389 gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
390 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
391 gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
392 gdb_assert (!inst.operands[1].addr.offset.is_reg);
393
394 stack.store
395 (pv_add_constant (regs[rn], inst.operands[1].addr.offset.imm),
396 size, regs[rt]);
397
398 /* Are we storing with SP as a base? */
399 if (rn == AARCH64_SP_REGNUM)
400 seen_stack_set = true;
401 }
402 else if ((inst.opcode->iclass == ldstpair_off
403 || (inst.opcode->iclass == ldstpair_indexed
404 && inst.operands[2].addr.preind))
405 && strcmp ("stp", inst.opcode->name) == 0)
406 {
407 /* STP with addressing mode Pre-indexed and Base register. */
408 unsigned rt1;
409 unsigned rt2;
410 unsigned rn = inst.operands[2].addr.base_regno;
411 int32_t imm = inst.operands[2].addr.offset.imm;
412 int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);
413
414 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
415 || inst.operands[0].type == AARCH64_OPND_Ft);
416 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
417 || inst.operands[1].type == AARCH64_OPND_Ft2);
418 gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
419 gdb_assert (!inst.operands[2].addr.offset.is_reg);
420
421 /* If recording this store would invalidate the store area
422 (perhaps because rn is not known) then we should abandon
423 further prologue analysis. */
424 if (stack.store_would_trash (pv_add_constant (regs[rn], imm)))
425 break;
426
427 if (stack.store_would_trash (pv_add_constant (regs[rn], imm + 8)))
428 break;
429
430 rt1 = inst.operands[0].reg.regno;
431 rt2 = inst.operands[1].reg.regno;
432 if (inst.operands[0].type == AARCH64_OPND_Ft)
433 {
 434 rt1 += AARCH64_X_REGISTER_COUNT;
 435 rt2 += AARCH64_X_REGISTER_COUNT;
 436 }
437
438 stack.store (pv_add_constant (regs[rn], imm), size, regs[rt1]);
439 stack.store (pv_add_constant (regs[rn], imm + size), size, regs[rt2]);
440
441 if (inst.operands[2].addr.writeback)
442 regs[rn] = pv_add_constant (regs[rn], imm);
443
444 /* Ignore the instruction that allocates stack space and sets
445 the SP. */
446 if (rn == AARCH64_SP_REGNUM && !inst.operands[2].addr.writeback)
447 seen_stack_set = true;
448 }
449 else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate. */
450 || (inst.opcode->iclass == ldst_pos /* Unsigned immediate. */
451 && (inst.opcode->op == OP_STR_POS
452 || inst.opcode->op == OP_STRF_POS)))
453 && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
454 && strcmp ("str", inst.opcode->name) == 0)
455 {
456 /* STR (immediate) */
457 unsigned int rt = inst.operands[0].reg.regno;
458 int32_t imm = inst.operands[1].addr.offset.imm;
459 unsigned int rn = inst.operands[1].addr.base_regno;
460 int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);
461 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
462 || inst.operands[0].type == AARCH64_OPND_Ft);
463
464 if (inst.operands[0].type == AARCH64_OPND_Ft)
 465 rt += AARCH64_X_REGISTER_COUNT;
 466
467 stack.store (pv_add_constant (regs[rn], imm), size, regs[rt]);
468 if (inst.operands[1].addr.writeback)
469 regs[rn] = pv_add_constant (regs[rn], imm);
470
471 /* Are we storing with SP as a base? */
472 if (rn == AARCH64_SP_REGNUM)
473 seen_stack_set = true;
474 }
475 else if (inst.opcode->iclass == testbranch)
476 {
477 /* Stop analysis on branch. */
478 break;
479 }
480 else if (inst.opcode->iclass == ic_system)
481 {
 482 aarch64_gdbarch_tdep *tdep
 483 = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
484 int ra_state_val = 0;
485
486 if (insn == 0xd503233f /* paciasp. */
487 || insn == 0xd503237f /* pacibsp. */)
488 {
489 /* Return addresses are mangled. */
490 ra_state_val = 1;
491 }
492 else if (insn == 0xd50323bf /* autiasp. */
493 || insn == 0xd50323ff /* autibsp. */)
494 {
495 /* Return addresses are not mangled. */
496 ra_state_val = 0;
497 }
498 else if (IS_BTI (insn))
499 /* We don't need to do anything special for a BTI instruction. */
500 continue;
501 else
502 {
503 aarch64_debug_printf ("prologue analysis gave up addr=%s"
504 " opcode=0x%x (iclass)",
505 core_addr_to_string_nz (start), insn);
506 break;
507 }
508
509 if (tdep->has_pauth () && cache != nullptr)
510 {
511 int regnum = tdep->ra_sign_state_regnum;
512 cache->saved_regs[regnum].set_value (ra_state_val);
513 }
514 }
515 else
516 {
517 aarch64_debug_printf ("prologue analysis gave up addr=%s"
518 " opcode=0x%x",
519 core_addr_to_string_nz (start), insn);
520
521 break;
522 }
523 }
524
525 if (cache == NULL)
526 return start;
527
 528 if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
 529 {
530 /* Frame pointer is fp. Frame size is constant. */
 531 cache->framereg = AARCH64_FP_REGNUM;
 532 cache->framesize = -regs[AARCH64_FP_REGNUM].k;
533 }
 534 else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
 535 {
536 /* Try the stack pointer. */
537 cache->framesize = -regs[AARCH64_SP_REGNUM].k;
 538 cache->framereg = AARCH64_SP_REGNUM;
 539 }
540 else
541 {
542 /* We're just out of luck. We don't know where the frame is. */
543 cache->framereg = -1;
544 cache->framesize = 0;
545 }
546
547 for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
548 {
549 CORE_ADDR offset;
550
551 if (stack.find_reg (gdbarch, i, &offset))
552 cache->saved_regs[i].set_addr (offset);
553 }
554
555 for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
556 {
 557 int regnum = gdbarch_num_regs (gdbarch);
 558 CORE_ADDR offset;
559
 560 if (stack.find_reg (gdbarch, i + AARCH64_X_REGISTER_COUNT,
 561 &offset))
562 cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].set_addr (offset);
563 }
564
565 return start;
566}
567
568static CORE_ADDR
569aarch64_analyze_prologue (struct gdbarch *gdbarch,
 570 CORE_ADDR start, CORE_ADDR limit,
571 struct aarch64_prologue_cache *cache)
572{
573 instruction_reader reader;
574
575 return aarch64_analyze_prologue (gdbarch, start, limit, cache,
576 reader);
577}
578
579#if GDB_SELF_TEST
580
581namespace selftests {
582
583/* Instruction reader from manually cooked instruction sequences. */
584
585class instruction_reader_test : public abstract_instruction_reader
586{
587public:
588 template<size_t SIZE>
589 explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
590 : m_insns (insns), m_insns_size (SIZE)
591 {}
592
593 ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
594 override
595 {
596 SELF_CHECK (len == 4);
597 SELF_CHECK (memaddr % 4 == 0);
598 SELF_CHECK (memaddr / 4 < m_insns_size);
599
600 return m_insns[memaddr / 4];
601 }
602
603private:
604 const uint32_t *m_insns;
605 size_t m_insns_size;
606};
607
608static void
609aarch64_analyze_prologue_test (void)
610{
611 struct gdbarch_info info;
612
613 info.bfd_arch_info = bfd_scan_arch ("aarch64");
614
615 struct gdbarch *gdbarch = gdbarch_find_by_info (info);
616 SELF_CHECK (gdbarch != NULL);
617
618 struct aarch64_prologue_cache cache;
 619 cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);
 620
621 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
622
623 /* Test the simple prologue in which frame pointer is used. */
624 {
625 static const uint32_t insns[] = {
626 0xa9af7bfd, /* stp x29, x30, [sp,#-272]! */
627 0x910003fd, /* mov x29, sp */
628 0x97ffffe6, /* bl 0x400580 */
629 };
630 instruction_reader_test reader (insns);
631
632 CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
633 SELF_CHECK (end == 4 * 2);
634
635 SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
636 SELF_CHECK (cache.framesize == 272);
637
638 for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
639 {
640 if (i == AARCH64_FP_REGNUM)
641 SELF_CHECK (cache.saved_regs[i].addr () == -272);
642 else if (i == AARCH64_LR_REGNUM)
643 SELF_CHECK (cache.saved_regs[i].addr () == -264);
644 else
645 SELF_CHECK (cache.saved_regs[i].is_realreg ()
646 && cache.saved_regs[i].realreg () == i);
647 }
648
649 for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
650 {
651 int num_regs = gdbarch_num_regs (gdbarch);
652 int regnum = i + num_regs + AARCH64_D0_REGNUM;
653
654 SELF_CHECK (cache.saved_regs[regnum].is_realreg ()
655 && cache.saved_regs[regnum].realreg () == regnum);
656 }
657 }
658
659 /* Test a prologue in which STR is used and frame pointer is not
660 used. */
661 {
662 static const uint32_t insns[] = {
663 0xf81d0ff3, /* str x19, [sp, #-48]! */
664 0xb9002fe0, /* str w0, [sp, #44] */
665 0xf90013e1, /* str x1, [sp, #32]*/
666 0xfd000fe0, /* str d0, [sp, #24] */
667 0xaa0203f3, /* mov x19, x2 */
668 0xf94013e0, /* ldr x0, [sp, #32] */
669 };
670 instruction_reader_test reader (insns);
671
672 trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
673 CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
674
675 SELF_CHECK (end == 4 * 5);
676
677 SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
678 SELF_CHECK (cache.framesize == 48);
679
680 for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
681 {
682 if (i == 1)
683 SELF_CHECK (cache.saved_regs[i].addr () == -16);
684 else if (i == 19)
685 SELF_CHECK (cache.saved_regs[i].addr () == -48);
686 else
687 SELF_CHECK (cache.saved_regs[i].is_realreg ()
688 && cache.saved_regs[i].realreg () == i);
689 }
690
691 for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
692 {
693 int num_regs = gdbarch_num_regs (gdbarch);
694 int regnum = i + num_regs + AARCH64_D0_REGNUM;
695
696
697 if (i == 0)
698 SELF_CHECK (cache.saved_regs[regnum].addr () == -24);
699 else
700 SELF_CHECK (cache.saved_regs[regnum].is_realreg ()
701 && cache.saved_regs[regnum].realreg () == regnum);
702 }
703 }
704
705 /* Test handling of movz before setting the frame pointer. */
706 {
707 static const uint32_t insns[] = {
708 0xa9bf7bfd, /* stp x29, x30, [sp, #-16]! */
709 0x52800020, /* mov w0, #0x1 */
710 0x910003fd, /* mov x29, sp */
711 0x528000a2, /* mov w2, #0x5 */
712 0x97fffff8, /* bl 6e4 */
713 };
714
715 instruction_reader_test reader (insns);
716
717 trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
718 CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
719
720 /* We should stop at the 4th instruction. */
721 SELF_CHECK (end == (4 - 1) * 4);
722 SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
723 SELF_CHECK (cache.framesize == 16);
724 }
725
726 /* Test handling of movz/stp when using the stack pointer as frame
727 pointer. */
728 {
729 static const uint32_t insns[] = {
730 0xa9bc7bfd, /* stp x29, x30, [sp, #-64]! */
731 0x52800020, /* mov w0, #0x1 */
732 0x290207e0, /* stp w0, w1, [sp, #16] */
733 0xa9018fe2, /* stp x2, x3, [sp, #24] */
734 0x528000a2, /* mov w2, #0x5 */
735 0x97fffff8, /* bl 6e4 */
736 };
737
738 instruction_reader_test reader (insns);
739
740 trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
741 CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
742
743 /* We should stop at the 5th instruction. */
744 SELF_CHECK (end == (5 - 1) * 4);
745 SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
746 SELF_CHECK (cache.framesize == 64);
747 }
748
749 /* Test handling of movz/str when using the stack pointer as frame
750 pointer */
751 {
752 static const uint32_t insns[] = {
753 0xa9bc7bfd, /* stp x29, x30, [sp, #-64]! */
754 0x52800020, /* mov w0, #0x1 */
755 0xb9002be4, /* str w4, [sp, #40] */
756 0xf9001be5, /* str x5, [sp, #48] */
757 0x528000a2, /* mov w2, #0x5 */
758 0x97fffff8, /* bl 6e4 */
759 };
760
761 instruction_reader_test reader (insns);
762
763 trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
764 CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
765
766 /* We should stop at the 5th instruction. */
767 SELF_CHECK (end == (5 - 1) * 4);
768 SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
769 SELF_CHECK (cache.framesize == 64);
770 }
771
772 /* Test handling of movz/stur when using the stack pointer as frame
773 pointer. */
774 {
775 static const uint32_t insns[] = {
776 0xa9bc7bfd, /* stp x29, x30, [sp, #-64]! */
777 0x52800020, /* mov w0, #0x1 */
778 0xb80343e6, /* stur w6, [sp, #52] */
779 0xf80383e7, /* stur x7, [sp, #56] */
780 0x528000a2, /* mov w2, #0x5 */
781 0x97fffff8, /* bl 6e4 */
782 };
783
784 instruction_reader_test reader (insns);
785
786 trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
787 CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
788
789 /* We should stop at the 5th instruction. */
790 SELF_CHECK (end == (5 - 1) * 4);
791 SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
792 SELF_CHECK (cache.framesize == 64);
793 }
794
795 /* Test handling of movz when there is no frame pointer set or no stack
796 pointer used. */
797 {
798 static const uint32_t insns[] = {
799 0xa9bf7bfd, /* stp x29, x30, [sp, #-16]! */
800 0x52800020, /* mov w0, #0x1 */
801 0x528000a2, /* mov w2, #0x5 */
802 0x97fffff8, /* bl 6e4 */
803 };
804
805 instruction_reader_test reader (insns);
806
807 trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
808 CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
809
810 /* We should stop at the 4th instruction. */
811 SELF_CHECK (end == (4 - 1) * 4);
812 SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
813 SELF_CHECK (cache.framesize == 16);
814 }
815
816 /* Test a prologue in which there is a return address signing instruction. */
817 if (tdep->has_pauth ())
818 {
819 static const uint32_t insns[] = {
820 0xd503233f, /* paciasp */
821 0xa9bd7bfd, /* stp x29, x30, [sp, #-48]! */
822 0x910003fd, /* mov x29, sp */
823 0xf801c3f3, /* str x19, [sp, #28] */
824 0xb9401fa0, /* ldr x19, [x29, #28] */
825 };
826 instruction_reader_test reader (insns);
827
828 trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
829 CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache,
830 reader);
831
832 SELF_CHECK (end == 4 * 4);
833 SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
834 SELF_CHECK (cache.framesize == 48);
835
836 for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
837 {
838 if (i == 19)
839 SELF_CHECK (cache.saved_regs[i].addr () == -20);
840 else if (i == AARCH64_FP_REGNUM)
841 SELF_CHECK (cache.saved_regs[i].addr () == -48);
842 else if (i == AARCH64_LR_REGNUM)
843 SELF_CHECK (cache.saved_regs[i].addr () == -40);
844 else
845 SELF_CHECK (cache.saved_regs[i].is_realreg ()
846 && cache.saved_regs[i].realreg () == i);
847 }
848
849 if (tdep->has_pauth ())
850 {
851 int regnum = tdep->ra_sign_state_regnum;
852 SELF_CHECK (cache.saved_regs[regnum].is_value ());
853 }
854 }
855
856 /* Test a prologue with a BTI instruction. */
857 {
858 static const uint32_t insns[] = {
859 0xd503245f, /* bti */
860 0xa9bd7bfd, /* stp x29, x30, [sp, #-48]! */
861 0x910003fd, /* mov x29, sp */
862 0xf801c3f3, /* str x19, [sp, #28] */
863 0xb9401fa0, /* ldr x19, [x29, #28] */
864 };
865 instruction_reader_test reader (insns);
866
867 trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
868 CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache,
869 reader);
870
871 SELF_CHECK (end == 4 * 4);
872 SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
873 SELF_CHECK (cache.framesize == 48);
874
875 for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
876 {
877 if (i == 19)
878 SELF_CHECK (cache.saved_regs[i].addr () == -20);
879 else if (i == AARCH64_FP_REGNUM)
880 SELF_CHECK (cache.saved_regs[i].addr () == -48);
881 else if (i == AARCH64_LR_REGNUM)
882 SELF_CHECK (cache.saved_regs[i].addr () == -40);
883 else
884 SELF_CHECK (cache.saved_regs[i].is_realreg ()
885 && cache.saved_regs[i].realreg () == i);
886 }
887 }
888}
889} // namespace selftests
890#endif /* GDB_SELF_TEST */
891
892/* Implement the "skip_prologue" gdbarch method. */
893
894static CORE_ADDR
895aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
896{
897 CORE_ADDR func_addr, limit_pc;
898
899 /* See if we can determine the end of the prologue via the symbol
900 table. If so, then return either PC, or the PC after the
901 prologue, whichever is greater. */
902 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
903 {
904 CORE_ADDR post_prologue_pc
905 = skip_prologue_using_sal (gdbarch, func_addr);
906
907 if (post_prologue_pc != 0)
908 return std::max (pc, post_prologue_pc);
909 }
910
911 /* Can't determine prologue from the symbol table, need to examine
912 instructions. */
913
914 /* Find an upper limit on the function prologue using the debug
915 information. If the debug information could not be used to
916 provide that bound, then use an arbitrary large number as the
917 upper bound. */
918 limit_pc = skip_prologue_using_sal (gdbarch, pc);
919 if (limit_pc == 0)
920 limit_pc = pc + 128; /* Magic. */
921
922 /* Try disassembling prologue. */
923 return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
924}
925
926/* Scan the function prologue for THIS_FRAME and populate the prologue
927 cache CACHE. */
928
929static void
930aarch64_scan_prologue (frame_info_ptr this_frame,
 931 struct aarch64_prologue_cache *cache)
932{
933 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
934 CORE_ADDR prologue_start;
935 CORE_ADDR prologue_end;
936 CORE_ADDR prev_pc = get_frame_pc (this_frame);
937 struct gdbarch *gdbarch = get_frame_arch (this_frame);
938
939 cache->prev_pc = prev_pc;
940
941 /* Assume we do not find a frame. */
942 cache->framereg = -1;
943 cache->framesize = 0;
944
945 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
946 &prologue_end))
947 {
948 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
949
950 if (sal.line == 0)
951 {
952 /* No line info so use the current PC. */
953 prologue_end = prev_pc;
954 }
955 else if (sal.end < prologue_end)
956 {
957 /* The next line begins after the function end. */
958 prologue_end = sal.end;
959 }
960
961 prologue_end = std::min (prologue_end, prev_pc);
962 aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
963 }
964 else
965 {
966 CORE_ADDR frame_loc;
967
968 frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
969 if (frame_loc == 0)
970 return;
971
 972 cache->framereg = AARCH64_FP_REGNUM;
 973 cache->framesize = 16;
974 cache->saved_regs[29].set_addr (0);
975 cache->saved_regs[30].set_addr (8);
976 }
977}
978
979/* Fill in *CACHE with information about the prologue of *THIS_FRAME. This
980 function may throw an exception if the inferior's registers or memory is
981 not available. */
982
983static void
984aarch64_make_prologue_cache_1 (frame_info_ptr this_frame,
 985 struct aarch64_prologue_cache *cache)
986{
987 CORE_ADDR unwound_fp;
988 int reg;
989
990 aarch64_scan_prologue (this_frame, cache);
991
992 if (cache->framereg == -1)
993 return;
994
995 unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
996 if (unwound_fp == 0)
997 return;
998
999 cache->prev_sp = unwound_fp + cache->framesize;
1000
1001 /* Calculate actual addresses of saved registers using offsets
1002 determined by aarch64_analyze_prologue. */
1003 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
1004 if (cache->saved_regs[reg].is_addr ())
1005 cache->saved_regs[reg].set_addr (cache->saved_regs[reg].addr ()
1006 + cache->prev_sp);
1007
1008 cache->func = get_frame_func (this_frame);
1009
1010 cache->available_p = 1;
1011}
1012
1013/* Allocate and fill in *THIS_CACHE with information about the prologue of
 1014 *THIS_FRAME. Do not do this if *THIS_CACHE was already allocated.
1015 Return a pointer to the current aarch64_prologue_cache in
1016 *THIS_CACHE. */
1017
1018static struct aarch64_prologue_cache *
1019aarch64_make_prologue_cache (frame_info_ptr this_frame, void **this_cache)
1020{
1021 struct aarch64_prologue_cache *cache;
1022
1023 if (*this_cache != NULL)
1024 return (struct aarch64_prologue_cache *) *this_cache;
1025
 1026 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
 1027 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
1028 *this_cache = cache;
1029
1030 try
1031 {
1032 aarch64_make_prologue_cache_1 (this_frame, cache);
1033 }
1034 catch (const gdb_exception_error &ex)
1035 {
1036 if (ex.error != NOT_AVAILABLE_ERROR)
1037 throw;
1038 }
1039
1040 return cache;
1041}
1042
1043/* Implement the "stop_reason" frame_unwind method. */
1044
1045static enum unwind_stop_reason
1046aarch64_prologue_frame_unwind_stop_reason (frame_info_ptr this_frame,
 1047 void **this_cache)
1048{
1049 struct aarch64_prologue_cache *cache
1050 = aarch64_make_prologue_cache (this_frame, this_cache);
1051
1052 if (!cache->available_p)
1053 return UNWIND_UNAVAILABLE;
1054
1055 /* Halt the backtrace at "_start". */
1056 gdbarch *arch = get_frame_arch (this_frame);
1057 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (arch);
1058 if (cache->prev_pc <= tdep->lowest_pc)
1059 return UNWIND_OUTERMOST;
1060
1061 /* We've hit a wall, stop. */
1062 if (cache->prev_sp == 0)
1063 return UNWIND_OUTERMOST;
1064
1065 return UNWIND_NO_REASON;
1066}
1067
1068/* Our frame ID for a normal frame is the current function's starting
1069 PC and the caller's SP when we were called. */
1070
1071static void
1072aarch64_prologue_this_id (frame_info_ptr this_frame,
 1073 void **this_cache, struct frame_id *this_id)
1074{
1075 struct aarch64_prologue_cache *cache
1076 = aarch64_make_prologue_cache (this_frame, this_cache);
1077
1078 if (!cache->available_p)
1079 *this_id = frame_id_build_unavailable_stack (cache->func);
1080 else
1081 *this_id = frame_id_build (cache->prev_sp, cache->func);
1082}
1083
1084/* Implement the "prev_register" frame_unwind method. */
1085
1086static struct value *
1087aarch64_prologue_prev_register (frame_info_ptr this_frame,
 1088 void **this_cache, int prev_regnum)
1089{
1090 struct aarch64_prologue_cache *cache
1091 = aarch64_make_prologue_cache (this_frame, this_cache);
1092
1093 /* If we are asked to unwind the PC, then we need to return the LR
1094 instead. The prologue may save PC, but it will point into this
1095 frame's prologue, not the next frame's resume location. */
1096 if (prev_regnum == AARCH64_PC_REGNUM)
1097 {
1098 CORE_ADDR lr;
1099 struct gdbarch *gdbarch = get_frame_arch (this_frame);
 1100 aarch64_gdbarch_tdep *tdep
 1101 = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
1102
 1103 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
 1104
1105 if (tdep->has_pauth ()
1106 && cache->saved_regs[tdep->ra_sign_state_regnum].is_value ())
1107 lr = aarch64_frame_unmask_lr (tdep, this_frame, lr);
1108
1109 return frame_unwind_got_constant (this_frame, prev_regnum, lr);
1110 }
1111
1112 /* SP is generally not saved to the stack, but this frame is
1113 identified by the next frame's stack pointer at the time of the
1114 call. The value was already reconstructed into PREV_SP. */
1115 /*
1116 +----------+ ^
1117 | saved lr | |
1118 +->| saved fp |--+
1119 | | |
1120 | | | <- Previous SP
1121 | +----------+
1122 | | saved lr |
1123 +--| saved fp |<- FP
1124 | |
1125 | |<- SP
1126 +----------+ */
1127 if (prev_regnum == AARCH64_SP_REGNUM)
1128 return frame_unwind_got_constant (this_frame, prev_regnum,
1129 cache->prev_sp);
1130
1131 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
1132 prev_regnum);
1133}
1134
1135/* AArch64 prologue unwinder. */
1136static frame_unwind aarch64_prologue_unwind =
1137{
 1138 "aarch64 prologue",
 1139 NORMAL_FRAME,
 1140 aarch64_prologue_frame_unwind_stop_reason,
 1141 aarch64_prologue_this_id,
 1142 aarch64_prologue_prev_register,
 1143 NULL,
 1144 default_frame_sniffer
 1145};
1146
1147/* Allocate and fill in *THIS_CACHE with information about the prologue of
 1148 *THIS_FRAME. Do not do this if *THIS_CACHE was already allocated.
1149 Return a pointer to the current aarch64_prologue_cache in
1150 *THIS_CACHE. */
1151
1152static struct aarch64_prologue_cache *
1153aarch64_make_stub_cache (frame_info_ptr this_frame, void **this_cache)
1154{
1155 struct aarch64_prologue_cache *cache;
1156
1157 if (*this_cache != NULL)
1158 return (struct aarch64_prologue_cache *) *this_cache;
1159
 1160 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
 1161 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
1162 *this_cache = cache;
1163
1164 try
1165 {
1166 cache->prev_sp = get_frame_register_unsigned (this_frame,
 1167 AARCH64_SP_REGNUM);
 1168 cache->prev_pc = get_frame_pc (this_frame);
1169 cache->available_p = 1;
1170 }
1171 catch (const gdb_exception_error &ex)
1172 {
1173 if (ex.error != NOT_AVAILABLE_ERROR)
1174 throw;
1175 }
1176
1177 return cache;
1178}
1179
1180/* Implement the "stop_reason" frame_unwind method. */
1181
1182static enum unwind_stop_reason
1183aarch64_stub_frame_unwind_stop_reason (frame_info_ptr this_frame,
 1184 void **this_cache)
1185{
1186 struct aarch64_prologue_cache *cache
1187 = aarch64_make_stub_cache (this_frame, this_cache);
1188
1189 if (!cache->available_p)
1190 return UNWIND_UNAVAILABLE;
1191
1192 return UNWIND_NO_REASON;
1193}
1194
1195/* Our frame ID for a stub frame is the current SP and LR. */
1196
1197static void
1198aarch64_stub_this_id (frame_info_ptr this_frame,
 1199 void **this_cache, struct frame_id *this_id)
1200{
1201 struct aarch64_prologue_cache *cache
1202 = aarch64_make_stub_cache (this_frame, this_cache);
1203
1204 if (cache->available_p)
1205 *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
1206 else
1207 *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
1208}
1209
1210/* Implement the "sniffer" frame_unwind method. */
1211
1212static int
1213aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
 1214 frame_info_ptr this_frame,
1215 void **this_prologue_cache)
1216{
1217 CORE_ADDR addr_in_block;
1218 gdb_byte dummy[4];
1219
1220 addr_in_block = get_frame_address_in_block (this_frame);
1221 if (in_plt_section (addr_in_block)
 1222 /* We also use the stub unwinder if the target memory is unreadable
1223 to avoid having the prologue unwinder trying to read it. */
1224 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
1225 return 1;
1226
1227 return 0;
1228}
1229
1230/* AArch64 stub unwinder. */
1231static frame_unwind aarch64_stub_unwind =
1232{
 1233 "aarch64 stub",
 1234 NORMAL_FRAME,
 1235 aarch64_stub_frame_unwind_stop_reason,
 1236 aarch64_stub_this_id,
 1237 aarch64_prologue_prev_register,
 1238 NULL,
 1239 aarch64_stub_unwind_sniffer
 1240};
1241
1242/* Return the frame base address of *THIS_FRAME. */
1243
1244static CORE_ADDR
1245aarch64_normal_frame_base (frame_info_ptr this_frame, void **this_cache)
1246{
1247 struct aarch64_prologue_cache *cache
1248 = aarch64_make_prologue_cache (this_frame, this_cache);
1249
1250 return cache->prev_sp - cache->framesize;
1251}
1252
1253/* AArch64 default frame base information. */
1254static frame_base aarch64_normal_base =
1255{
 1256 &aarch64_prologue_unwind,
 1257 aarch64_normal_frame_base,
 1258 aarch64_normal_frame_base,
 1259 aarch64_normal_frame_base
 1260};
1261
1262/* Return the value of the REGNUM register in the previous frame of
1263 *THIS_FRAME. */
1264
1265static struct value *
1266aarch64_dwarf2_prev_register (frame_info_ptr this_frame,
 1267 void **this_cache, int regnum)
1268{
1269 gdbarch *arch = get_frame_arch (this_frame);
1270 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (arch);
1271 CORE_ADDR lr;
1272
1273 switch (regnum)
1274 {
1275 case AARCH64_PC_REGNUM:
 1276 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
 1277 lr = aarch64_frame_unmask_lr (tdep, this_frame, lr);
1278 return frame_unwind_got_constant (this_frame, regnum, lr);
1279
1280 default:
1281 internal_error (_("Unexpected register %d"), regnum);
1282 }
1283}
1284
1285static const unsigned char op_lit0 = DW_OP_lit0;
1286static const unsigned char op_lit1 = DW_OP_lit1;
1287
1288/* Implement the "init_reg" dwarf2_frame_ops method. */
1289
1290static void
1291aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
 1292 struct dwarf2_frame_state_reg *reg,
 1293 frame_info_ptr this_frame)
1294{
1295 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
1296
1297 switch (regnum)
1298 {
1299 case AARCH64_PC_REGNUM:
1300 reg->how = DWARF2_FRAME_REG_FN;
 1301 reg->loc.fn = aarch64_dwarf2_prev_register;
 1302 return;
1303
1304 case AARCH64_SP_REGNUM:
 1305 reg->how = DWARF2_FRAME_REG_CFA;
 1306 return;
1307 }
1308
1309 /* Init pauth registers. */
1310 if (tdep->has_pauth ())
1311 {
1312 if (regnum == tdep->ra_sign_state_regnum)
1313 {
1314 /* Initialize RA_STATE to zero. */
 1315 reg->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;
 1316 reg->loc.exp.start = &op_lit0;
1317 reg->loc.exp.len = 1;
1318 return;
1319 }
 1320 else if (regnum >= tdep->pauth_reg_base
 1321 && regnum < tdep->pauth_reg_base + tdep->pauth_reg_count)
 1322 {
 1323 reg->how = DWARF2_FRAME_REG_SAME_VALUE;
 1324 return;
1325 }
1326 }
1327}
1328
1329/* Implement the execute_dwarf_cfa_vendor_op method. */
1330
1331static bool
1332aarch64_execute_dwarf_cfa_vendor_op (struct gdbarch *gdbarch, gdb_byte op,
 1333 struct dwarf2_frame_state *fs)
1334{
1335 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
1336 struct dwarf2_frame_state_reg *ra_state;
1337
1338 if (op == DW_CFA_AARCH64_negate_ra_state)
1339 {
1340 /* On systems without pauth, treat as a nop. */
1341 if (!tdep->has_pauth ())
1342 return true;
1343
1344 /* Allocate RA_STATE column if it's not allocated yet. */
1345 fs->regs.alloc_regs (AARCH64_DWARF_RA_SIGN_STATE + 1);
1346
1347 /* Toggle the status of RA_STATE between 0 and 1. */
1348 ra_state = &(fs->regs.reg[AARCH64_DWARF_RA_SIGN_STATE]);
 1349 ra_state->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;
 1350
1351 if (ra_state->loc.exp.start == nullptr
1352 || ra_state->loc.exp.start == &op_lit0)
1353 ra_state->loc.exp.start = &op_lit1;
1354 else
1355 ra_state->loc.exp.start = &op_lit0;
1356
1357 ra_state->loc.exp.len = 1;
1358
1359 return true;
1360 }
1361
1362 return false;
1363}
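/* Illustrative sketch (assumption, not GDB code): DW_CFA_AARCH64_negate_ra_state
   carries no operand, so the handler above keeps RA_STATE as a one-bit value
   expressed by pointing the register's location expression at one of the two
   1-byte constants op_lit0/op_lit1 and flipping between them.  The helper
   below mirrors that toggle in isolation.  */

static inline const unsigned char *
toggle_ra_state_expr_example (const unsigned char *cur)
{
  /* An unset expression counts as "not signed", i.e. lit0.  */
  return (cur == nullptr || cur == &op_lit0) ? &op_lit1 : &op_lit0;
}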
1364
1365/* Used for matching BRK instructions for AArch64. */
1366static constexpr uint32_t BRK_INSN_MASK = 0xffe0001f;
1367static constexpr uint32_t BRK_INSN_BASE = 0xd4200000;
1368
1369/* Implementation of gdbarch_program_breakpoint_here_p for aarch64. */
1370
1371static bool
1372aarch64_program_breakpoint_here_p (gdbarch *gdbarch, CORE_ADDR address)
1373{
1374 const uint32_t insn_len = 4;
1375 gdb_byte target_mem[4];
1376
1377 /* Enable the automatic memory restoration from breakpoints while
1378 we read the memory. Otherwise we may find temporary breakpoints, ones
1379 inserted by GDB, and flag them as permanent breakpoints. */
1380 scoped_restore restore_memory
 1381 = make_scoped_restore_show_memory_breakpoints (0);
 1382
1383 if (target_read_memory (address, target_mem, insn_len) == 0)
1384 {
1385 uint32_t insn =
1386 (uint32_t) extract_unsigned_integer (target_mem, insn_len,
 1387 gdbarch_byte_order_for_code (gdbarch));
 1388
1389 /* Check if INSN is a BRK instruction pattern. There are multiple choices
1390 of such instructions with different immediate values. Different OS'
1391 may use a different variation, but they have the same outcome. */
1392 return ((insn & BRK_INSN_MASK) == BRK_INSN_BASE);
1393 }
1394
1395 return false;
1396}
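/* Illustrative sketch (not part of the original file): the mask/base pair
   above matches every "BRK #imm16" encoding, 0xd4200000 | (imm16 << 5),
   whichever immediate a given OS or runtime happens to use.  */

static constexpr bool
is_brk_example (uint32_t insn)
{
  return (insn & BRK_INSN_MASK) == BRK_INSN_BASE;
}

/* E.g. 0xd4200000 (brk #0) and 0xd43e0000 (brk #0xf000) both match, while
   0xd503201f (nop) does not.  */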
1397
1398/* When arguments must be pushed onto the stack, they go on in reverse
1399 order. The code below implements a FILO (stack) to do this. */
1400
1401struct stack_item_t
1402{
1403 /* Value to pass on stack. It can be NULL if this item is for stack
1404 padding. */
1405 const gdb_byte *data;
1406
1407 /* Size in bytes of value to pass on stack. */
1408 int len;
1409};
1410
1411/* Implement the gdbarch type alignment method, overrides the generic
1412 alignment algorithm for anything that is aarch64 specific. */
1413
1414static ULONGEST
1415aarch64_type_align (gdbarch *gdbarch, struct type *t)
1416{
1417 t = check_typedef (t);
1418 if (t->code () == TYPE_CODE_ARRAY && t->is_vector ())
1419 {
1420 /* Use the natural alignment for vector types (the same for
1421 scalar type), but the maximum alignment is 128-bit. */
1422 if (t->length () > 16)
1423 return 16;
1424 else
1425 return t->length ();
1426 }
1427
1428 /* Allow the common code to calculate the alignment. */
1429 return 0;
1430}
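/* Illustrative sketch (not part of the original file) of the rule above:
   vector types keep their natural, size-based alignment but are capped at
   16 bytes; all other types fall through to the generic alignment code.  */

static inline ULONGEST
vector_alignment_example (ULONGEST vector_length_bytes)
{
  return vector_length_bytes > 16 ? 16 : vector_length_bytes;
}

/* E.g. an 8-byte D-sized vector stays 8-byte aligned, a 16-byte Q-sized
   vector is 16-byte aligned, and a hypothetical 32-byte vector would still
   only require 16-byte alignment.  */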
1431
1432/* Worker function for aapcs_is_vfp_call_or_return_candidate.
1433
 1434 Return the number of registers required, or -1 on failure.
1435
1436 When encountering a base element, if FUNDAMENTAL_TYPE is not set then set it
1437 to the element, else fail if the type of this element does not match the
1438 existing value. */
1439
1440static int
1441aapcs_is_vfp_call_or_return_candidate_1 (struct type *type,
 1442 struct type **fundamental_type)
1443{
1444 if (type == nullptr)
1445 return -1;
1446
1447 switch (type->code ())
1448 {
1449 case TYPE_CODE_FLT:
1450 case TYPE_CODE_DECFLOAT:
1451 if (type->length () > 16)
1452 return -1;
1453
1454 if (*fundamental_type == nullptr)
1455 *fundamental_type = type;
1456 else if (type->length () != (*fundamental_type)->length ()
1457 || type->code () != (*fundamental_type)->code ())
1458 return -1;
1459
1460 return 1;
1461
1462 case TYPE_CODE_COMPLEX:
1463 {
 1464 struct type *target_type = check_typedef (type->target_type ());
 1465 if (target_type->length () > 16)
1466 return -1;
1467
1468 if (*fundamental_type == nullptr)
1469 *fundamental_type = target_type;
1470 else if (target_type->length () != (*fundamental_type)->length ()
1471 || target_type->code () != (*fundamental_type)->code ())
1472 return -1;
1473
1474 return 2;
1475 }
1476
1477 case TYPE_CODE_ARRAY:
1478 {
1479 if (type->is_vector ())
1480 {
1481 if (type->length () != 8 && type->length () != 16)
1482 return -1;
1483
1484 if (*fundamental_type == nullptr)
1485 *fundamental_type = type;
1486 else if (type->length () != (*fundamental_type)->length ()
1487 || type->code () != (*fundamental_type)->code ())
1488 return -1;
1489
1490 return 1;
1491 }
1492 else
1493 {
1494 struct type *target_type = type->target_type ();
 1495 int count = aapcs_is_vfp_call_or_return_candidate_1
 1496 (target_type, fundamental_type);
1497
1498 if (count == -1)
1499 return count;
1500
1501 count *= (type->length () / target_type->length ());
1502 return count;
1503 }
1504 }
1505
1506 case TYPE_CODE_STRUCT:
1507 case TYPE_CODE_UNION:
1508 {
1509 int count = 0;
1510
1511 for (int i = 0; i < type->num_fields (); i++)
1512 {
1513 /* Ignore any static fields. */
1514 if (field_is_static (&type->field (i)))
1515 continue;
1516
1517 struct type *member = check_typedef (type->field (i).type ());
1518
 1519 int sub_count = aapcs_is_vfp_call_or_return_candidate_1
 1520 (member, fundamental_type);
1521 if (sub_count == -1)
1522 return -1;
1523 count += sub_count;
1524 }
1525
1526 /* Ensure there is no padding between the fields (allowing for empty
1527 zero length structs) */
1528 int ftype_length = (*fundamental_type == nullptr)
1529 ? 0 : (*fundamental_type)->length ();
1530 if (count * ftype_length != type->length ())
1531 return -1;
1532
1533 return count;
1534 }
1535
1536 default:
1537 break;
1538 }
1539
1540 return -1;
1541}
1542
1543/* Return true if an argument, whose type is described by TYPE, can be passed or
1544 returned in simd/fp registers, providing enough parameter passing registers
1545 are available. This is as described in the AAPCS64.
1546
1547 Upon successful return, *COUNT returns the number of needed registers,
1548 *FUNDAMENTAL_TYPE contains the type of those registers.
1549
1550 Candidate as per the AAPCS64 5.4.2.C is either a:
1551 - float.
1552 - short-vector.
1553 - HFA (Homogeneous Floating-point Aggregate, 4.3.5.1). A Composite type where
1554 all the members are floats and has at most 4 members.
1555 - HVA (Homogeneous Short-vector Aggregate, 4.3.5.2). A Composite type where
1556 all the members are short vectors and has at most 4 members.
1557 - Complex (7.1.1)
1558
1559 Note that HFAs and HVAs can include nested structures and arrays. */
1560
1561static bool
1562aapcs_is_vfp_call_or_return_candidate (struct type *type, int *count,
 1563 struct type **fundamental_type)
1564{
1565 if (type == nullptr)
1566 return false;
1567
1568 *fundamental_type = nullptr;
1569
 1570 int ag_count = aapcs_is_vfp_call_or_return_candidate_1 (type,
 1571 fundamental_type);
1572
1573 if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS)
1574 {
1575 *count = ag_count;
1576 return true;
1577 }
1578 else
1579 return false;
1580}
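/* Illustrative examples (not from the original file) of C argument types the
   predicate above accepts or rejects under the AAPCS64 HFA/HVA rules.  */

struct hfa_two_doubles { double a, b; };	/* HFA: count 2, base type double.  */
struct hfa_four_floats { float a, b, c, d; };	/* HFA: count 4, base type float.  */
struct not_hfa_too_many { float a[5]; };	/* Rejected: 5 > HA_MAX_NUM_FLDS.  */
struct not_hfa_mixed { float a; double b; };	/* Rejected: mixed base types.  */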
1581
1582/* AArch64 function call information structure. */
1583struct aarch64_call_info
1584{
1585 /* the current argument number. */
1586 unsigned argnum = 0;
1587
1588 /* The next general purpose register number, equivalent to NGRN as
1589 described in the AArch64 Procedure Call Standard. */
1590 unsigned ngrn = 0;
1591
1592 /* The next SIMD and floating point register number, equivalent to
1593 NSRN as described in the AArch64 Procedure Call Standard. */
1594 unsigned nsrn = 0;
1595
1596 /* The next stacked argument address, equivalent to NSAA as
1597 described in the AArch64 Procedure Call Standard. */
1598 unsigned nsaa = 0;
1599
1600 /* Stack item vector. */
1601 std::vector<stack_item_t> si;
1602};
1603
1604/* Pass a value in a sequence of consecutive X registers. The caller
1605 is responsible for ensuring sufficient registers are available. */
1606
1607static void
1608pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
 1609 struct aarch64_call_info *info, struct type *type,
1610 struct value *arg)
1611{
1612 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1613 int len = type->length ();
1614 enum type_code typecode = type->code ();
1615 int regnum = AARCH64_X0_REGNUM + info->ngrn;
1616 const bfd_byte *buf = value_contents (arg).data ();
1617
1618 info->argnum++;
1619
1620 while (len > 0)
1621 {
1622 int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
1623 CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
1624 byte_order);
1625
1626
1627 /* Adjust sub-word struct/union args when big-endian. */
1628 if (byte_order == BFD_ENDIAN_BIG
1629 && partial_len < X_REGISTER_SIZE
1630 && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
1631 regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);
1632
1633 aarch64_debug_printf ("arg %d in %s = 0x%s", info->argnum,
 1634 gdbarch_register_name (gdbarch, regnum),
 1635 phex (regval, X_REGISTER_SIZE));
1636
 1637 regcache_cooked_write_unsigned (regcache, regnum, regval);
 1638 len -= partial_len;
1639 buf += partial_len;
1640 regnum++;
1641 }
1642}
1643
1644/* Attempt to marshall a value in a V register. Return 1 if
1645 successful, or 0 if insufficient registers are available. This
1646 function, unlike the equivalent pass_in_x() function does not
1647 handle arguments spread across multiple registers. */
1648
1649static int
1650pass_in_v (struct gdbarch *gdbarch,
 1651 struct regcache *regcache,
1652 struct aarch64_call_info *info,
1653 int len, const bfd_byte *buf)
1654{
1655 if (info->nsrn < 8)
1656 {
1657 int regnum = AARCH64_V0_REGNUM + info->nsrn;
1658 /* Enough space for a full vector register. */
1659 gdb_byte reg[register_size (gdbarch, regnum)];
1660 gdb_assert (len <= sizeof (reg));
1661
1662 info->argnum++;
1663 info->nsrn++;
1664
1665 memset (reg, 0, sizeof (reg));
1666 /* PCS C.1, the argument is allocated to the least significant
1667 bits of V register. */
1668 memcpy (reg, buf, len);
 1669 regcache->cooked_write (regnum, reg);
 1670
1671 aarch64_debug_printf ("arg %d in %s", info->argnum,
 1672 gdbarch_register_name (gdbarch, regnum));
 1673
1674 return 1;
1675 }
1676 info->nsrn = 8;
1677 return 0;
1678}
1679
1680/* Marshall an argument onto the stack. */
1681
1682static void
1683pass_on_stack (struct aarch64_call_info *info, struct type *type,
 1684 struct value *arg)
1685{
1686 const bfd_byte *buf = value_contents (arg).data ();
1687 int len = type->length ();
1688 int align;
1689 stack_item_t item;
1690
1691 info->argnum++;
1692
1693 align = type_align (type);
1694
1695 /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
1696 Natural alignment of the argument's type. */
1697 align = align_up (align, 8);
1698
1699 /* The AArch64 PCS requires at most doubleword alignment. */
1700 if (align > 16)
1701 align = 16;
1702
1703 aarch64_debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
1704 info->nsaa);
1705
1706 item.len = len;
1707 item.data = buf;
1708 info->si.push_back (item);
1709
1710 info->nsaa += len;
1711 if (info->nsaa & (align - 1))
1712 {
1713 /* Push stack alignment padding. */
1714 int pad = align - (info->nsaa & (align - 1));
1715
1716 item.len = pad;
1717 item.data = NULL;
1718
1719 info->si.push_back (item);
1720 info->nsaa += pad;
1721 }
1722}
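/* Illustrative sketch (not part of the original file) of the bookkeeping
   above: after appending an argument of LEN bytes at offset NSAA, the offset
   is rounded up to the argument's alignment (clamped to the 8..16 byte range)
   by pushing a data-less padding item.  ALIGN must be a power of two.  */

static inline unsigned
next_nsaa_example (unsigned nsaa, unsigned len, unsigned align)
{
  nsaa += len;
  if (nsaa & (align - 1))
    nsaa += align - (nsaa & (align - 1));	/* Size of the padding item.  */
  return nsaa;
}

/* E.g. next_nsaa_example (0, 12, 8) == 16: a 12-byte argument is followed by
   4 bytes of padding so the next slot starts 8-byte aligned.  */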
1723
1724/* Marshall an argument into a sequence of one or more consecutive X
1725 registers or, if insufficient X registers are available then onto
1726 the stack. */
1727
1728static void
1729pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
 1730 struct aarch64_call_info *info, struct type *type,
1731 struct value *arg)
1732{
1733 int len = type->length ();
1734 int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;
1735
1736 /* PCS C.13 - Pass in registers if we have enough spare */
1737 if (info->ngrn + nregs <= 8)
1738 {
1739 pass_in_x (gdbarch, regcache, info, type, arg);
1740 info->ngrn += nregs;
1741 }
1742 else
1743 {
1744 info->ngrn = 8;
1745 pass_on_stack (info, type, arg);
1746 }
1747}
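/* Illustrative sketch (not part of the original file): the register demand
   computed above is the argument length rounded up to whole 8-byte X
   registers; if that many consecutive registers are not free (NGRN + nregs
   > 8), the whole argument is diverted to the stack rather than split.  */

static inline int
x_regs_needed_example (int len)
{
  return (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;	/* X_REGISTER_SIZE is 8.  */
}

/* E.g. a 9..16 byte struct needs two X registers, so with NGRN == 7 it is
   passed entirely on the stack and NGRN is then pinned to 8.  */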
1748
1749/* Pass a value, which is of type arg_type, in a V register. Assumes the value is an
1750 aapcs_is_vfp_call_or_return_candidate and there are enough spare V
1751 registers. A return value of false is an error state as the value will have
1752 been partially passed to the stack. */
1753static bool
1754pass_in_v_vfp_candidate (struct gdbarch *gdbarch, struct regcache *regcache,
 1755 struct aarch64_call_info *info, struct type *arg_type,
1756 struct value *arg)
1757{
1758 switch (arg_type->code ())
1759 {
1760 case TYPE_CODE_FLT:
1761 case TYPE_CODE_DECFLOAT:
1762 return pass_in_v (gdbarch, regcache, info, arg_type->length (),
1763 value_contents (arg).data ());
1764 break;
1765
1766 case TYPE_CODE_COMPLEX:
1767 {
1768 const bfd_byte *buf = value_contents (arg).data ();
1769 struct type *target_type = check_typedef (arg_type->target_type ());
1770
1771 if (!pass_in_v (gdbarch, regcache, info, target_type->length (),
1772 buf))
1773 return false;
1774
1775 return pass_in_v (gdbarch, regcache, info, target_type->length (),
1776 buf + target_type->length ());
1777 }
1778
1779 case TYPE_CODE_ARRAY:
1780 if (arg_type->is_vector ())
1781 return pass_in_v (gdbarch, regcache, info, arg_type->length (),
1782 value_contents (arg).data ());
1783 /* fall through. */
1784
1785 case TYPE_CODE_STRUCT:
1786 case TYPE_CODE_UNION:
1787 for (int i = 0; i < arg_type->num_fields (); i++)
1788 {
1789 /* Don't include static fields. */
1790 if (field_is_static (&arg_type->field (i)))
1791 continue;
1792
1793 struct value *field = value_primitive_field (arg, 0, i, arg_type);
1794 struct type *field_type = check_typedef (value_type (field));
1795
1796 if (!pass_in_v_vfp_candidate (gdbarch, regcache, info, field_type,
1797 field))
1798 return false;
1799 }
1800 return true;
1801
1802 default:
1803 return false;
1804 }
1805}
1806
1807/* Implement the "push_dummy_call" gdbarch method. */
1808
1809static CORE_ADDR
1810aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
 1811 struct regcache *regcache, CORE_ADDR bp_addr,
1812 int nargs,
1813 struct value **args, CORE_ADDR sp,
1814 function_call_return_method return_method,
1815 CORE_ADDR struct_addr)
1816{
1817 int argnum;
1818 struct aarch64_call_info info;
1819
1820 /* We need to know what the type of the called function is in order
1821 to determine the number of named/anonymous arguments for the
1822 actual argument placement, and the return type in order to handle
1823 return value correctly.
1824
1825 The generic code above us views the decision of return in memory
 1826 or return in registers as a two stage process. The language
1827 handler is consulted first and may decide to return in memory (eg
1828 class with copy constructor returned by value), this will cause
1829 the generic code to allocate space AND insert an initial leading
1830 argument.
1831
1832 If the language code does not decide to pass in memory then the
1833 target code is consulted.
1834
1835 If the language code decides to pass in memory we want to move
1836 the pointer inserted as the initial argument from the argument
1837 list and into X8, the conventional AArch64 struct return pointer
1838 register. */
1839
1840 /* Set the return address. For the AArch64, the return breakpoint
1841 is always at BP_ADDR. */
 1842 regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);
 1843
1844 /* If we were given an initial argument for the return slot, lose it. */
1845 if (return_method == return_method_hidden_param)
1846 {
1847 args++;
1848 nargs--;
1849 }
1850
1851 /* The struct_return pointer occupies X8. */
1852 if (return_method != return_method_normal)
1853 {
1854 aarch64_debug_printf ("struct return in %s = 0x%s",
 1855 gdbarch_register_name
 1856 (gdbarch, AARCH64_STRUCT_RETURN_REGNUM),
 1857 paddress (gdbarch, struct_addr));
1858
 1859 regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
 1860 struct_addr);
1861 }
1862
1863 for (argnum = 0; argnum < nargs; argnum++)
1864 {
1865 struct value *arg = args[argnum];
1866 struct type *arg_type, *fundamental_type;
1867 int len, elements;
1868
1869 arg_type = check_typedef (value_type (arg));
1870 len = arg_type->length ();
1871
 1872 /* If arg can be passed in v registers as per the AAPCS64, then do so
 1873 if there are enough spare registers. */
1874 if (aapcs_is_vfp_call_or_return_candidate (arg_type, &elements,
1875 &fundamental_type))
1876 {
1877 if (info.nsrn + elements <= 8)
1878 {
1879 /* We know that we have sufficient registers available therefore
1880 this will never need to fallback to the stack. */
1881 if (!pass_in_v_vfp_candidate (gdbarch, regcache, &info, arg_type,
1882 arg))
1883 gdb_assert_not_reached ("Failed to push args");
1884 }
1885 else
1886 {
1887 info.nsrn = 8;
1888 pass_on_stack (&info, arg_type, arg);
1889 }
1890 continue;
1891 }
1892
1893 switch (arg_type->code ())
1894 {
1895 case TYPE_CODE_INT:
1896 case TYPE_CODE_BOOL:
1897 case TYPE_CODE_CHAR:
1898 case TYPE_CODE_RANGE:
1899 case TYPE_CODE_ENUM:
1900 if (len < 4 && !is_fixed_point_type (arg_type))
1901 {
1902 /* Promote to 32 bit integer. */
1903 if (arg_type->is_unsigned ())
1904 arg_type = builtin_type (gdbarch)->builtin_uint32;
1905 else
1906 arg_type = builtin_type (gdbarch)->builtin_int32;
1907 arg = value_cast (arg_type, arg);
1908 }
1909 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1910 break;
1911
1912 case TYPE_CODE_STRUCT:
1913 case TYPE_CODE_ARRAY:
1914 case TYPE_CODE_UNION:
1915 if (len > 16)
1916 {
1917 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1918 invisible reference. */
1919
1920 /* Allocate aligned storage. */
1921 sp = align_down (sp - len, 16);
1922
1923 /* Write the real data into the stack. */
1924 write_memory (sp, value_contents (arg).data (), len);
1925
1926 /* Construct the indirection. */
1927 arg_type = lookup_pointer_type (arg_type);
1928 arg = value_from_pointer (arg_type, sp);
1929 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1930 }
1931 else
1932 /* PCS C.15 / C.18 multiple values pass. */
1933 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1934 break;
1935
1936 default:
1937 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1938 break;
1939 }
1940 }
1941
1942 /* Make sure stack retains 16 byte alignment. */
1943 if (info.nsaa & 15)
1944 sp -= 16 - (info.nsaa & 15);
1945
1946 while (!info.si.empty ())
1947 {
1948 const stack_item_t &si = info.si.back ();
1949
1950 sp -= si.len;
1951 if (si.data != NULL)
1952 write_memory (sp, si.data, si.len);
1953 info.si.pop_back ();
1954 }
1955
1956 /* Finally, update the SP register. */
 1957 regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);
 1958
1959 return sp;
1960}
1961
1962/* Implement the "frame_align" gdbarch method. */
1963
1964static CORE_ADDR
1965aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1966{
1967 /* Align the stack to sixteen bytes. */
1968 return sp & ~(CORE_ADDR) 15;
1969}
1970
1971/* Return the type for an AdvSISD Q register. */
1972
1973static struct type *
1974aarch64_vnq_type (struct gdbarch *gdbarch)
1975{
1976 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
1977
1978 if (tdep->vnq_type == NULL)
1979 {
1980 struct type *t;
1981 struct type *elem;
1982
1983 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1984 TYPE_CODE_UNION);
1985
 1986 elem = builtin_type (gdbarch)->builtin_uint128;
 1987 append_composite_type_field (t, "u", elem);
1988
 1989 elem = builtin_type (gdbarch)->builtin_int128;
 1990 append_composite_type_field (t, "s", elem);
1991
1992 tdep->vnq_type = t;
1993 }
1994
1995 return tdep->vnq_type;
1996}
1997
1998/* Return the type for an AdvSISD D register. */
1999
2000static struct type *
2001aarch64_vnd_type (struct gdbarch *gdbarch)
2002{
2003 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2004
2005 if (tdep->vnd_type == NULL)
2006 {
2007 struct type *t;
2008 struct type *elem;
2009
2010 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
2011 TYPE_CODE_UNION);
2012
 2013 elem = builtin_type (gdbarch)->builtin_double;
 2014 append_composite_type_field (t, "f", elem);
2015
 2016 elem = builtin_type (gdbarch)->builtin_uint64;
 2017 append_composite_type_field (t, "u", elem);
2018
 2019 elem = builtin_type (gdbarch)->builtin_int64;
 2020 append_composite_type_field (t, "s", elem);
2021
2022 tdep->vnd_type = t;
2023 }
2024
2025 return tdep->vnd_type;
2026}
2027
2028/* Return the type for an AdvSISD S register. */
2029
2030static struct type *
2031aarch64_vns_type (struct gdbarch *gdbarch)
2032{
2033 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2034
2035 if (tdep->vns_type == NULL)
2036 {
2037 struct type *t;
2038 struct type *elem;
2039
2040 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
2041 TYPE_CODE_UNION);
2042
 2043 elem = builtin_type (gdbarch)->builtin_float;
 2044 append_composite_type_field (t, "f", elem);
2045
 2046 elem = builtin_type (gdbarch)->builtin_uint32;
 2047 append_composite_type_field (t, "u", elem);
2048
 2049 elem = builtin_type (gdbarch)->builtin_int32;
 2050 append_composite_type_field (t, "s", elem);
2051
2052 tdep->vns_type = t;
2053 }
2054
2055 return tdep->vns_type;
2056}
2057
2058/* Return the type for an AdvSISD H register. */
2059
2060static struct type *
2061aarch64_vnh_type (struct gdbarch *gdbarch)
2062{
2063 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2064
2065 if (tdep->vnh_type == NULL)
2066 {
2067 struct type *t;
2068 struct type *elem;
2069
2070 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
2071 TYPE_CODE_UNION);
2072
2074 append_composite_type_field (t, "bf", elem);
2075
2077 append_composite_type_field (t, "f", elem);
2078
2080 append_composite_type_field (t, "u", elem);
2081
2083 append_composite_type_field (t, "s", elem);
2084
2085 tdep->vnh_type = t;
2086 }
2087
2088 return tdep->vnh_type;
2089}
2090
2091/* Return the type for an AdvSISD B register. */
2092
2093static struct type *
2094aarch64_vnb_type (struct gdbarch *gdbarch)
2095{
2096 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2097
2098 if (tdep->vnb_type == NULL)
2099 {
2100 struct type *t;
2101 struct type *elem;
2102
2103 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
2104 TYPE_CODE_UNION);
2105
2107 append_composite_type_field (t, "u", elem);
2108
2110 append_composite_type_field (t, "s", elem);
2111
2112 tdep->vnb_type = t;
2113 }
2114
2115 return tdep->vnb_type;
2116}
2117
2118/* Return the type for an AdvSISD V register. */
2119
2120static struct type *
2121aarch64_vnv_type (struct gdbarch *gdbarch)
2122{
2123 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2124
2125 if (tdep->vnv_type == NULL)
2126 {
2127 /* The other AArch64 pseudo registers (Q,D,H,S,B) refer to a single value
 2128	 slice from the non-pseudo vector registers.  However, NEON V registers
 2129	 are always vector registers, and need to be constructed as such. */
2130 const struct builtin_type *bt = builtin_type (gdbarch);
2131
2132 struct type *t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnv",
2133 TYPE_CODE_UNION);
2134
2135 struct type *sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
2136 TYPE_CODE_UNION);
2143 append_composite_type_field (t, "d", sub);
2144
2145 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
2146 TYPE_CODE_UNION);
2153 append_composite_type_field (t, "s", sub);
2154
2155 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
2156 TYPE_CODE_UNION);
2157 append_composite_type_field (sub, "bf",
2165 append_composite_type_field (t, "h", sub);
2166
2167 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
2168 TYPE_CODE_UNION);
2172 init_vector_type (bt->builtin_int8, 16));
2173 append_composite_type_field (t, "b", sub);
2174
2175 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
2176 TYPE_CODE_UNION);
2181 append_composite_type_field (t, "q", sub);
2182
2183 tdep->vnv_type = t;
2184 }
2185
2186 return tdep->vnv_type;
2187}
2188
2189/* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
2190
2191static int
2192aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
2193{
2194 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2195
2196 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
2197 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
2198
2199 if (reg == AARCH64_DWARF_SP)
2200 return AARCH64_SP_REGNUM;
2201
2202 if (reg == AARCH64_DWARF_PC)
2203 return AARCH64_PC_REGNUM;
2204
2205 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
2206 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
2207
2208 if (reg == AARCH64_DWARF_SVE_VG)
2209 return AARCH64_SVE_VG_REGNUM;
2210
 2211 if (reg == AARCH64_DWARF_SVE_FFR)
 2212 return AARCH64_SVE_FFR_REGNUM;
 2213
 2214 if (reg >= AARCH64_DWARF_SVE_P0 && reg <= AARCH64_DWARF_SVE_P0 + 15)
 2215 return AARCH64_SVE_P0_REGNUM + reg - AARCH64_DWARF_SVE_P0;
 2216
 2217 if (reg >= AARCH64_DWARF_SVE_Z0 && reg <= AARCH64_DWARF_SVE_Z0 + 15)
 2218 return AARCH64_SVE_Z0_REGNUM + reg - AARCH64_DWARF_SVE_Z0;
 2219
2220 if (tdep->has_pauth ())
2221 {
2222 if (reg == AARCH64_DWARF_RA_SIGN_STATE)
2223 return tdep->ra_sign_state_regnum;
2224 }
2225
2226 return -1;
2227}
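/* Illustrative sketch, not part of aarch64-tdep.c: each check in the
   dwarf2_reg_to_regnum implementation above translates a contiguous block of
   DWARF register numbers into GDB's internal numbering by rebasing the offset
   within the block.  A generic standalone form of that translation (the
   helper name and parameters below are placeholders, not the real AArch64
   constants):  */

/* Map REG from a DWARF block [DWARF_BASE, DWARF_BASE + COUNT) onto a GDB
   block starting at GDB_BASE; return -1 when REG is outside the block.  */
static int
illustrative_map_reg_block (int reg, int dwarf_base, int count, int gdb_base)
{
  if (reg >= dwarf_base && reg < dwarf_base + count)
    return gdb_base + (reg - dwarf_base);
  return -1;
}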
2228
2229/* Implement the "print_insn" gdbarch method. */
2230
2231static int
2232aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
2233{
2234 info->symbols = NULL;
2235 return default_print_insn (memaddr, info);
2236}
2237
2238/* AArch64 BRK software debug mode instruction.
2239 Note that AArch64 code is always little-endian.
2240 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
2241constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
2242
2243typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;
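/* Illustrative sketch, not part of aarch64-tdep.c: AArch64 instructions are
   fetched little-endian, so the 32-bit BRK #0 encoding 0xd4200000 lands in
   memory as the byte sequence 00 00 20 d4, which is exactly what
   aarch64_default_breakpoint spells out above.  A standalone check (the
   function name is ours):  */

#include <cassert>
#include <cstdint>

static void
illustrative_brk_byte_order_check ()
{
  const uint32_t brk_insn = 0xd4200000;
  const uint8_t expected[4] = { 0x00, 0x00, 0x20, 0xd4 };
  for (int i = 0; i < 4; i++)
    assert (((brk_insn >> (8 * i)) & 0xff) == expected[i]);  /* LSB first.  */
}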
2244
2245/* Extract from an array REGS containing the (raw) register state a
2246 function return value of type TYPE, and copy that, in virtual
2247 format, into VALBUF. */
2248
2249static void
2250aarch64_extract_return_value (struct type *type, struct regcache *regs,
2251 gdb_byte *valbuf)
2252{
2253 struct gdbarch *gdbarch = regs->arch ();
2254 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2255 int elements;
2256 struct type *fundamental_type;
2257
2259 &fundamental_type))
2260 {
2261 int len = fundamental_type->length ();
2262
2263 for (int i = 0; i < elements; i++)
2264 {
2265 int regno = AARCH64_V0_REGNUM + i;
2266 /* Enough space for a full vector register. */
2267 gdb_byte buf[register_size (gdbarch, regno)];
2268 gdb_assert (len <= sizeof (buf));
2269
2271 ("read HFA or HVA return value element %d from %s",
2272 i + 1, gdbarch_register_name (gdbarch, regno));
2273
2274 regs->cooked_read (regno, buf);
2275
2276 memcpy (valbuf, buf, len);
2277 valbuf += len;
2278 }
2279 }
2280 else if (type->code () == TYPE_CODE_INT
2281 || type->code () == TYPE_CODE_CHAR
2282 || type->code () == TYPE_CODE_BOOL
2283 || type->code () == TYPE_CODE_PTR
2285 || type->code () == TYPE_CODE_ENUM)
2286 {
2287 /* If the type is a plain integer, then the access is
2288 straight-forward. Otherwise we have to play around a bit
2289 more. */
2290 int len = type->length ();
2291 int regno = AARCH64_X0_REGNUM;
2292 ULONGEST tmp;
2293
2294 while (len > 0)
2295 {
2296 /* By using store_unsigned_integer we avoid having to do
2297 anything special for small big-endian values. */
2298 regcache_cooked_read_unsigned (regs, regno++, &tmp);
2299 store_unsigned_integer (valbuf,
2300 (len > X_REGISTER_SIZE
2301 ? X_REGISTER_SIZE : len), byte_order, tmp);
2302 len -= X_REGISTER_SIZE;
2303 valbuf += X_REGISTER_SIZE;
2304 }
2305 }
2306 else
2307 {
2308 /* For a structure or union the behaviour is as if the value had
2309 been stored to word-aligned memory and then loaded into
2310 registers with 64-bit load instruction(s). */
2311 int len = type->length ();
2312 int regno = AARCH64_X0_REGNUM;
2313 bfd_byte buf[X_REGISTER_SIZE];
2314
2315 while (len > 0)
2316 {
2317 regs->cooked_read (regno++, buf);
2318 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2319 len -= X_REGISTER_SIZE;
2320 valbuf += X_REGISTER_SIZE;
2321 }
2322 }
2323}
2324
2325
2326/* Will a function return an aggregate type in memory or in a
2327 register? Return 0 if an aggregate type can be returned in a
2328 register, 1 if it must be returned in memory. */
2329
2330static int
2331aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
2332{
2334 int elements;
2335 struct type *fundamental_type;
2336
2338 &fundamental_type))
2339 {
2340 /* v0-v7 are used to return values and one register is allocated
 2341	 for one member.  However, an HFA or HVA has at most four members. */
2342 return 0;
2343 }
2344
2345 if (type->length () > 16
2346 || !language_pass_by_reference (type).trivially_copyable)
2347 {
2348 /* PCS B.6 Aggregates larger than 16 bytes are passed by
2349 invisible reference. */
2350
2351 return 1;
2352 }
2353
2354 return 0;
2355}
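/* Illustrative sketch, not part of aarch64-tdep.c: the decision above follows
   the AAPCS64 result-return rules.  An aggregate stays in registers when it
   is a Homogeneous Floating-point/short-vector Aggregate of at most four
   members, or when it is at most 16 bytes and trivially copyable; otherwise
   the caller passes a hidden result pointer in x8.  A simplified standalone
   model (the struct and function below are ours, and the HFA/HVA test is
   assumed to have been made elsewhere):  */

#include <cstddef>

struct illustrative_aggregate_info
{
  size_t size_in_bytes;
  bool is_hfa_or_hva;        /* At most four FP/vector members of one type.  */
  bool trivially_copyable;
};

static bool
illustrative_returned_in_memory (const illustrative_aggregate_info &info)
{
  if (info.is_hfa_or_hva)
    return false;            /* Returned in v0-v3, one register per member.  */
  return info.size_in_bytes > 16 || !info.trivially_copyable;
}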
2356
2357/* Write into appropriate registers a function return value of type
2358 TYPE, given in virtual format. */
2359
2360static void
2362 const gdb_byte *valbuf)
2363{
2364 struct gdbarch *gdbarch = regs->arch ();
2365 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2366 int elements;
2367 struct type *fundamental_type;
2368
2370 &fundamental_type))
2371 {
2372 int len = fundamental_type->length ();
2373
2374 for (int i = 0; i < elements; i++)
2375 {
2376 int regno = AARCH64_V0_REGNUM + i;
2377 /* Enough space for a full vector register. */
2378 gdb_byte tmpbuf[register_size (gdbarch, regno)];
2379 gdb_assert (len <= sizeof (tmpbuf));
2380
2382 ("write HFA or HVA return value element %d to %s",
2383 i + 1, gdbarch_register_name (gdbarch, regno));
2384
2385 memcpy (tmpbuf, valbuf,
2386 len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
2387 regs->cooked_write (regno, tmpbuf);
2388 valbuf += len;
2389 }
2390 }
2391 else if (type->code () == TYPE_CODE_INT
2392 || type->code () == TYPE_CODE_CHAR
2393 || type->code () == TYPE_CODE_BOOL
2394 || type->code () == TYPE_CODE_PTR
2396 || type->code () == TYPE_CODE_ENUM)
2397 {
2398 if (type->length () <= X_REGISTER_SIZE)
2399 {
2400 /* Values of one word or less are zero/sign-extended and
 2401	     returned in x0. */
2402 bfd_byte tmpbuf[X_REGISTER_SIZE];
2403 LONGEST val = unpack_long (type, valbuf);
2404
2405 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
2406 regs->cooked_write (AARCH64_X0_REGNUM, tmpbuf);
2407 }
2408 else
2409 {
2410 /* Integral values greater than one word are stored in
 2411	     consecutive registers starting with x0.  This will always
 2412	     be a multiple of the register size. */
2413 int len = type->length ();
2414 int regno = AARCH64_X0_REGNUM;
2415
2416 while (len > 0)
2417 {
2418 regs->cooked_write (regno++, valbuf);
2419 len -= X_REGISTER_SIZE;
2420 valbuf += X_REGISTER_SIZE;
2421 }
2422 }
2423 }
2424 else
2425 {
2426 /* For a structure or union the behaviour is as if the value had
2427 been stored to word-aligned memory and then loaded into
2428 registers with 64-bit load instruction(s). */
2429 int len = type->length ();
2430 int regno = AARCH64_X0_REGNUM;
2431 bfd_byte tmpbuf[X_REGISTER_SIZE];
2432
2433 while (len > 0)
2434 {
2435 memcpy (tmpbuf, valbuf,
2436 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2437 regs->cooked_write (regno++, tmpbuf);
2438 len -= X_REGISTER_SIZE;
2439 valbuf += X_REGISTER_SIZE;
2440 }
2441 }
2442}
2443
2444/* Implement the "return_value" gdbarch method. */
2445
2446static enum return_value_convention
2447aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2448 struct type *valtype, struct regcache *regcache,
2449 gdb_byte *readbuf, const gdb_byte *writebuf)
2450{
2451
2452 if (valtype->code () == TYPE_CODE_STRUCT
2453 || valtype->code () == TYPE_CODE_UNION
2454 || valtype->code () == TYPE_CODE_ARRAY)
2455 {
2456 if (aarch64_return_in_memory (gdbarch, valtype))
2457 {
2458 /* From the AAPCS64's Result Return section:
2459
2460 "Otherwise, the caller shall reserve a block of memory of
2461 sufficient size and alignment to hold the result. The address
2462 of the memory block shall be passed as an additional argument to
 2463	     the function in x8."  */
2464
2465 aarch64_debug_printf ("return value in memory");
2466
2467 if (readbuf)
2468 {
2469 CORE_ADDR addr;
2470
2472 read_memory (addr, readbuf, valtype->length ());
2473 }
2474
2476 }
2477 }
2478
2479 if (writebuf)
2480 aarch64_store_return_value (valtype, regcache, writebuf);
2481
2482 if (readbuf)
2483 aarch64_extract_return_value (valtype, regcache, readbuf);
2484
2485 aarch64_debug_printf ("return value in registers");
2486
2488}
2489
2490/* Implement the "get_longjmp_target" gdbarch method. */
2491
2492static int
2494{
2495 CORE_ADDR jb_addr;
2496 gdb_byte buf[X_REGISTER_SIZE];
2497 struct gdbarch *gdbarch = get_frame_arch (frame);
2498 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2499 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2500
2502
2503 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2505 return 0;
2506
2507 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2508 return 1;
2509}
2510
2511/* Implement the "gen_return_address" gdbarch method. */
2512
2513static void
2515 struct agent_expr *ax, struct axs_value *value,
2516 CORE_ADDR scope)
2517{
2519 value->kind = axs_lvalue_register;
2521}
2522
2523
2524/* Return TRUE if REGNUM is a W pseudo-register number. Return FALSE
2525 otherwise. */
2526
2527static bool
2529{
2530 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2531
2532 if (tdep->w_pseudo_base <= regnum
2533 && regnum < tdep->w_pseudo_base + tdep->w_pseudo_count)
2534 return true;
2535
2536 return false;
2537}
2538
2539/* Return the pseudo register name corresponding to register regnum. */
2540
2541static const char *
2542aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2543{
2544 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2545
2546 /* W pseudo-registers. Bottom halves of the X registers. */
2547 static const char *const w_name[] =
2548 {
2549 "w0", "w1", "w2", "w3",
2550 "w4", "w5", "w6", "w7",
2551 "w8", "w9", "w10", "w11",
2552 "w12", "w13", "w14", "w15",
2553 "w16", "w17", "w18", "w19",
2554 "w20", "w21", "w22", "w23",
2555 "w24", "w25", "w26", "w27",
2556 "w28", "w29", "w30",
2557 };
2558
2559 static const char *const q_name[] =
2560 {
2561 "q0", "q1", "q2", "q3",
2562 "q4", "q5", "q6", "q7",
2563 "q8", "q9", "q10", "q11",
2564 "q12", "q13", "q14", "q15",
2565 "q16", "q17", "q18", "q19",
2566 "q20", "q21", "q22", "q23",
2567 "q24", "q25", "q26", "q27",
2568 "q28", "q29", "q30", "q31",
2569 };
2570
2571 static const char *const d_name[] =
2572 {
2573 "d0", "d1", "d2", "d3",
2574 "d4", "d5", "d6", "d7",
2575 "d8", "d9", "d10", "d11",
2576 "d12", "d13", "d14", "d15",
2577 "d16", "d17", "d18", "d19",
2578 "d20", "d21", "d22", "d23",
2579 "d24", "d25", "d26", "d27",
2580 "d28", "d29", "d30", "d31",
2581 };
2582
2583 static const char *const s_name[] =
2584 {
2585 "s0", "s1", "s2", "s3",
2586 "s4", "s5", "s6", "s7",
2587 "s8", "s9", "s10", "s11",
2588 "s12", "s13", "s14", "s15",
2589 "s16", "s17", "s18", "s19",
2590 "s20", "s21", "s22", "s23",
2591 "s24", "s25", "s26", "s27",
2592 "s28", "s29", "s30", "s31",
2593 };
2594
2595 static const char *const h_name[] =
2596 {
2597 "h0", "h1", "h2", "h3",
2598 "h4", "h5", "h6", "h7",
2599 "h8", "h9", "h10", "h11",
2600 "h12", "h13", "h14", "h15",
2601 "h16", "h17", "h18", "h19",
2602 "h20", "h21", "h22", "h23",
2603 "h24", "h25", "h26", "h27",
2604 "h28", "h29", "h30", "h31",
2605 };
2606
2607 static const char *const b_name[] =
2608 {
2609 "b0", "b1", "b2", "b3",
2610 "b4", "b5", "b6", "b7",
2611 "b8", "b9", "b10", "b11",
2612 "b12", "b13", "b14", "b15",
2613 "b16", "b17", "b18", "b19",
2614 "b20", "b21", "b22", "b23",
2615 "b24", "b25", "b26", "b27",
2616 "b28", "b29", "b30", "b31",
2617 };
2618
2619 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2620
2621 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2622 return q_name[p_regnum - AARCH64_Q0_REGNUM];
2623
2624 if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2625 return d_name[p_regnum - AARCH64_D0_REGNUM];
2626
2627 if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2628 return s_name[p_regnum - AARCH64_S0_REGNUM];
2629
2630 if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2631 return h_name[p_regnum - AARCH64_H0_REGNUM];
2632
2633 if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2634 return b_name[p_regnum - AARCH64_B0_REGNUM];
2635
2636 /* W pseudo-registers? */
2638 return w_name[regnum - tdep->w_pseudo_base];
2639
2640 if (tdep->has_sve ())
2641 {
2642 static const char *const sve_v_name[] =
2643 {
2644 "v0", "v1", "v2", "v3",
2645 "v4", "v5", "v6", "v7",
2646 "v8", "v9", "v10", "v11",
2647 "v12", "v13", "v14", "v15",
2648 "v16", "v17", "v18", "v19",
2649 "v20", "v21", "v22", "v23",
2650 "v24", "v25", "v26", "v27",
2651 "v28", "v29", "v30", "v31",
2652 };
2653
2654 if (p_regnum >= AARCH64_SVE_V0_REGNUM
2656 return sve_v_name[p_regnum - AARCH64_SVE_V0_REGNUM];
2657 }
2658
2659 /* RA_STATE is used for unwinding only. Do not assign it a name - this
2660 prevents it from being read by methods such as
2661 mi_cmd_trace_frame_collected. */
2662 if (tdep->has_pauth () && regnum == tdep->ra_sign_state_regnum)
2663 return "";
2664
2665 internal_error (_("aarch64_pseudo_register_name: bad register number %d"),
2666 p_regnum);
2667}
2668
2669/* Implement the "pseudo_register_type" gdbarch method. */
2670
2671static struct type *
2672aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2673{
2674 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2675
2676 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2677
2678 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2679 return aarch64_vnq_type (gdbarch);
2680
2681 if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2682 return aarch64_vnd_type (gdbarch);
2683
2684 if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2685 return aarch64_vns_type (gdbarch);
2686
2687 if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2688 return aarch64_vnh_type (gdbarch);
2689
2690 if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2691 return aarch64_vnb_type (gdbarch);
2692
2693 if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
2695 return aarch64_vnv_type (gdbarch);
2696
2697 /* W pseudo-registers are 32-bit. */
2700
2701 if (tdep->has_pauth () && regnum == tdep->ra_sign_state_regnum)
2703
2704 internal_error (_("aarch64_pseudo_register_type: bad register number %d"),
2705 p_regnum);
2706}
2707
2708/* Implement the "pseudo_register_reggroup_p" gdbarch method. */
2709
2710static int
2712 const struct reggroup *group)
2713{
2714 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2715
2716 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2717
2718 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2719 return group == all_reggroup || group == vector_reggroup;
2720 else if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2721 return (group == all_reggroup || group == vector_reggroup
2722 || group == float_reggroup);
2723 else if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2724 return (group == all_reggroup || group == vector_reggroup
2725 || group == float_reggroup);
2726 else if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2727 return group == all_reggroup || group == vector_reggroup;
2728 else if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2729 return group == all_reggroup || group == vector_reggroup;
2730 else if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
2732 return group == all_reggroup || group == vector_reggroup;
2733 /* RA_STATE is used for unwinding only. Do not assign it to any groups. */
2734 if (tdep->has_pauth () && regnum == tdep->ra_sign_state_regnum)
2735 return 0;
2736
2737 return group == all_reggroup;
2738}
2739
2740/* Helper for aarch64_pseudo_read_value. */
2741
2742static struct value *
2744 readable_regcache *regcache, int regnum_offset,
2745 int regsize, struct value *result_value)
2746{
2747 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2748
2749 /* Enough space for a full vector register. */
2750 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2752
2753 if (regcache->raw_read (v_regnum, reg_buf) != REG_VALID)
2754 mark_value_bytes_unavailable (result_value, 0,
2755 value_type (result_value)->length ());
2756 else
2757 memcpy (value_contents_raw (result_value).data (), reg_buf, regsize);
2758
2759 return result_value;
2760 }
2761
2762/* Implement the "pseudo_register_read_value" gdbarch method. */
2763
2764static struct value *
2766 int regnum)
2767{
2768 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2769 struct value *result_value = allocate_value (register_type (gdbarch, regnum));
2770
2771 VALUE_LVAL (result_value) = lval_register;
2772 VALUE_REGNUM (result_value) = regnum;
2773
2775 {
2776 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2777 /* Default offset for little endian. */
2778 int offset = 0;
2779
2780 if (byte_order == BFD_ENDIAN_BIG)
2781 offset = 4;
2782
2783 /* Find the correct X register to extract the data from. */
2784 int x_regnum = AARCH64_X0_REGNUM + (regnum - tdep->w_pseudo_base);
2785 gdb_byte data[4];
2786
2787 /* Read the bottom 4 bytes of X. */
2788 if (regcache->raw_read_part (x_regnum, offset, 4, data) != REG_VALID)
2789 mark_value_bytes_unavailable (result_value, 0, 4);
2790 else
2791 memcpy (value_contents_raw (result_value).data (), data, 4);
2792
2793 return result_value;
2794 }
2795
2797
2801 Q_REGISTER_SIZE, result_value);
2802
2806 D_REGISTER_SIZE, result_value);
2807
2811 S_REGISTER_SIZE, result_value);
2812
2816 H_REGISTER_SIZE, result_value);
2817
2821 B_REGISTER_SIZE, result_value);
2822
2823 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2824 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2827 V_REGISTER_SIZE, result_value);
2828
2829 gdb_assert_not_reached ("regnum out of bound");
2830}
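/* Illustrative sketch, not part of aarch64-tdep.c: a W pseudo-register is the
   low 32 bits of the corresponding X register, and in the raw register buffer
   those bytes start at offset 0 on a little-endian target and offset 4 on a
   big-endian one; that is the offset computed in the W case above.  A
   standalone model of the extraction (names are ours):  */

#include <cstdint>
#include <cstring>

static void
illustrative_extract_w_from_x (const uint8_t x_raw[8], bool big_endian,
			       uint8_t w_raw[4])
{
  /* The numerically low half sits at the low addresses on little-endian
     targets and at the high addresses on big-endian targets.  */
  const int offset = big_endian ? 4 : 0;
  memcpy (w_raw, x_raw + offset, 4);
}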
2831
2832/* Helper for aarch64_pseudo_write. */
2833
2834static void
2836 int regnum_offset, int regsize, const gdb_byte *buf)
2837{
2838 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2839
2840 /* Enough space for a full vector register. */
2841 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2843
 2844	  /* Ensure the register buffer is zero.  We want GDB writes of the
 2845	     various 'scalar' pseudo registers to behave like architectural
 2846	     writes: register-width bytes are written and the remainder is set
 2847	     to zero. */
2848 memset (reg_buf, 0, register_size (gdbarch, AARCH64_V0_REGNUM));
2849
2850 memcpy (reg_buf, buf, regsize);
2851 regcache->raw_write (v_regnum, reg_buf);
2852}
2853
2854/* Implement the "pseudo_register_write" gdbarch method. */
2855
2856static void
2858 int regnum, const gdb_byte *buf)
2859{
2860 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2861
2863 {
2864 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2865 /* Default offset for little endian. */
2866 int offset = 0;
2867
2868 if (byte_order == BFD_ENDIAN_BIG)
2869 offset = 4;
2870
2871 /* Find the correct X register to extract the data from. */
2872 int x_regnum = AARCH64_X0_REGNUM + (regnum - tdep->w_pseudo_base);
2873
2874 /* First zero-out the contents of X. */
2875 ULONGEST zero = 0;
2876 regcache->raw_write (x_regnum, zero);
2877 /* Write to the bottom 4 bytes of X. */
2878 regcache->raw_write_part (x_regnum, offset, 4, buf);
2879 return;
2880 }
2881
2883
2887 buf);
2888
2892 buf);
2893
2897 buf);
2898
2902 buf);
2903
2907 buf);
2908
2909 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2910 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2913 V_REGISTER_SIZE, buf);
2914
2915 gdb_assert_not_reached ("regnum out of bound");
2916}
2917
2918/* Callback function for user_reg_add. */
2919
2920static struct value *
2922{
2923 const int *reg_p = (const int *) baton;
2924
2925 return value_of_register (*reg_p, frame);
2926}
2927
2928
2929/* Implement the "software_single_step" gdbarch method, needed to
2930 single step through atomic sequences on AArch64. */
2931
2932static std::vector<CORE_ADDR>
2934{
2935 struct gdbarch *gdbarch = regcache->arch ();
2936 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2937 const int insn_size = 4;
2938 const int atomic_sequence_length = 16; /* Instruction sequence length. */
2939 CORE_ADDR pc = regcache_read_pc (regcache);
2940 CORE_ADDR breaks[2] = { CORE_ADDR_MAX, CORE_ADDR_MAX };
2941 CORE_ADDR loc = pc;
2942 CORE_ADDR closing_insn = 0;
2943
2944 ULONGEST insn_from_memory;
2945 if (!safe_read_memory_unsigned_integer (loc, insn_size,
2946 byte_order_for_code,
2947 &insn_from_memory))
2948 {
 2949	 /* Assume we don't have an atomic sequence, as we couldn't read the
2950 instruction in this location. */
2951 return {};
2952 }
2953
2954 uint32_t insn = insn_from_memory;
2955 int index;
2956 int insn_count;
2957 int bc_insn_count = 0; /* Conditional branch instruction count. */
2958 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
2959 aarch64_inst inst;
2960
2961 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2962 return {};
2963
2964 /* Look for a Load Exclusive instruction which begins the sequence. */
2965 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
2966 return {};
2967
2968 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2969 {
2970 loc += insn_size;
2971
2972 if (!safe_read_memory_unsigned_integer (loc, insn_size,
2973 byte_order_for_code,
2974 &insn_from_memory))
2975 {
 2976	 /* Assume we don't have an atomic sequence, as we couldn't read the
2977 instruction in this location. */
2978 return {};
2979 }
2980
2981 insn = insn_from_memory;
2982 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2983 return {};
2984 /* Check if the instruction is a conditional branch. */
2985 if (inst.opcode->iclass == condbranch)
2986 {
2987 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
2988
2989 if (bc_insn_count >= 1)
2990 return {};
2991
2992 /* It is, so we'll try to set a breakpoint at the destination. */
2993 breaks[1] = loc + inst.operands[0].imm.value;
2994
2995 bc_insn_count++;
2996 last_breakpoint++;
2997 }
2998
2999 /* Look for the Store Exclusive which closes the atomic sequence. */
3000 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
3001 {
3002 closing_insn = loc;
3003 break;
3004 }
3005 }
3006
3007 /* We didn't find a closing Store Exclusive instruction, fall back. */
3008 if (!closing_insn)
3009 return {};
3010
3011 /* Insert breakpoint after the end of the atomic sequence. */
3012 breaks[0] = loc + insn_size;
3013
3014 /* Check for duplicated breakpoints, and also check that the second
3015 breakpoint is not within the atomic sequence. */
3016 if (last_breakpoint
3017 && (breaks[1] == breaks[0]
3018 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
3019 last_breakpoint = 0;
3020
3021 std::vector<CORE_ADDR> next_pcs;
3022
3023 /* Insert the breakpoint at the end of the sequence, and one at the
3024 destination of the conditional branch, if it exists. */
3025 for (index = 0; index <= last_breakpoint; index++)
3026 next_pcs.push_back (breaks[index]);
3027
3028 return next_pcs;
3029}
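/* Illustrative sketch, not part of aarch64-tdep.c: in the AArch64
   load/store-exclusive instruction class, bit 22 is the L (load) bit, so the
   single-step code above treats an ldstexcl instruction with bit 22 set as
   the LDXR-style start of an atomic sequence and one with bit 22 clear as the
   STXR-style end.  A standalone form of that bit test (the helper name is
   ours):  */

#include <cstdint>

static inline bool
illustrative_insn_bit (uint32_t insn, int bitpos)
{
  return (insn >> bitpos) & 1u;
}

/* Given an instruction already classified as ldstexcl by the opcode tables,
   illustrative_insn_bit (insn, 22) distinguishes the load-exclusive case.  */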
3030
3033{
 3034	/* True when a conditional instruction, such as B.COND, TBZ, etc.,
 3035	   is being displaced stepped. */
3036 bool cond = false;
3037
3038 /* PC adjustment offset after displaced stepping. If 0, then we don't
3039 write the PC back, assuming the PC is already the right address. */
3040 int32_t pc_adjust = 0;
3041};
3042
3043/* Data when visiting instructions for displaced stepping. */
3044
3046{
3048
 3049	/* The address at which the instruction will be executed. */
3050 CORE_ADDR new_addr;
3051 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
3053 /* Number of instructions in INSN_BUF. */
3054 unsigned insn_count;
3055 /* Registers when doing displaced stepping. */
3057
3059};
3060
3061/* Implementation of aarch64_insn_visitor method "b". */
3062
3063static void
3064aarch64_displaced_step_b (const int is_bl, const int32_t offset,
3065 struct aarch64_insn_data *data)
3066{
3067 struct aarch64_displaced_step_data *dsd
3068 = (struct aarch64_displaced_step_data *) data;
3069 int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
3070
3071 if (can_encode_int32 (new_offset, 28))
3072 {
3073 /* Emit B rather than BL, because executing BL on a new address
3074 will get the wrong address into LR. In order to avoid this,
3075 we emit B, and update LR if the instruction is BL. */
3076 emit_b (dsd->insn_buf, 0, new_offset);
3077 dsd->insn_count++;
3078 }
3079 else
3080 {
3081 /* Write NOP. */
3082 emit_nop (dsd->insn_buf);
3083 dsd->insn_count++;
3084 dsd->dsc->pc_adjust = offset;
3085 }
3086
3087 if (is_bl)
3088 {
3089 /* Update LR. */
3091 data->insn_addr + 4);
3092 }
3093}
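/* Illustrative sketch, not part of aarch64-tdep.c: the branch rewriting above
   emits a direct B only while the new PC-relative byte offset still fits the
   28-bit signed range of the B/BL encoding (a 26-bit immediate scaled by 4);
   otherwise it falls back to a NOP plus a later PC adjustment.  A standalone
   version of that range check (the helper name is ours):  */

#include <cstdint>

static bool
illustrative_fits_signed_bits (int64_t value, int bits)
{
  const int64_t limit = int64_t{1} << (bits - 1);
  return value >= -limit && value < limit;
}

/* For B/BL the byte offset must satisfy
   illustrative_fits_signed_bits (offset, 28), i.e. lie within +/- 128 MiB.  */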
3094
3095/* Implementation of aarch64_insn_visitor method "b_cond". */
3096
3097static void
3098aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
3099 struct aarch64_insn_data *data)
3100{
3101 struct aarch64_displaced_step_data *dsd
3102 = (struct aarch64_displaced_step_data *) data;
3103
 3104	  /* GDB has to fix up the PC after displaced stepping this instruction
 3105	     differently according to whether the condition is true or false.  Instead
 3106	     of checking COND against the condition flags, we can use
 3107	     the following instructions, and GDB can tell how to fix up the PC
 3108	     from the resulting PC value.
3109
3110 B.COND TAKEN ; If cond is true, then jump to TAKEN.
3111 INSN1 ;
3112 TAKEN:
3113 INSN2
3114 */
3115
3116 emit_bcond (dsd->insn_buf, cond, 8);
3117 dsd->dsc->cond = true;
3118 dsd->dsc->pc_adjust = offset;
3119 dsd->insn_count = 1;
3120}
3121
3122/* Compose an aarch64_register operand at run time.  If we know the
3123   register statically, we should make it a global as above instead of
3124   using this helper function. */
3125
3126static struct aarch64_register
3127aarch64_register (unsigned num, int is64)
3128{
3129 return (struct aarch64_register) { num, is64 };
3130}
3131
3132/* Implementation of aarch64_insn_visitor method "cb". */
3133
3134static void
3135aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
3136 const unsigned rn, int is64,
3137 struct aarch64_insn_data *data)
3138{
3139 struct aarch64_displaced_step_data *dsd
3140 = (struct aarch64_displaced_step_data *) data;
3141
3142 /* The offset is out of range for a compare and branch
3143 instruction. We can use the following instructions instead:
3144
3145 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
3146 INSN1 ;
3147 TAKEN:
3148 INSN2
3149 */
3150 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
3151 dsd->insn_count = 1;
3152 dsd->dsc->cond = true;
3153 dsd->dsc->pc_adjust = offset;
3154}
3155
3156/* Implementation of aarch64_insn_visitor method "tb". */
3157
3158static void
3159aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
3160 const unsigned rt, unsigned bit,
3161 struct aarch64_insn_data *data)
3162{
3163 struct aarch64_displaced_step_data *dsd
3164 = (struct aarch64_displaced_step_data *) data;
3165
3166 /* The offset is out of range for a test bit and branch
 3167	 instruction.  We can use the following instructions instead:
3168
3169 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
3170 INSN1 ;
3171 TAKEN:
3172 INSN2
3173
3174 */
3175 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
3176 dsd->insn_count = 1;
3177 dsd->dsc->cond = true;
3178 dsd->dsc->pc_adjust = offset;
3179}
3180
3181/* Implementation of aarch64_insn_visitor method "adr". */
3182
3183static void
3184aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
3185 const int is_adrp, struct aarch64_insn_data *data)
3186{
3187 struct aarch64_displaced_step_data *dsd
3188 = (struct aarch64_displaced_step_data *) data;
3189 /* We know exactly the address the ADR{P,} instruction will compute.
3190 We can just write it to the destination register. */
3191 CORE_ADDR address = data->insn_addr + offset;
3192
3193 if (is_adrp)
3194 {
3195 /* Clear the lower 12 bits of the offset to get the 4K page. */
3197 address & ~0xfff);
3198 }
3199 else
3201 address);
3202
3203 dsd->dsc->pc_adjust = 4;
3204 emit_nop (dsd->insn_buf);
3205 dsd->insn_count = 1;
3206}
3207
3208/* Implementation of aarch64_insn_visitor method "ldr_literal". */
3209
3210static void
3211aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
3212 const unsigned rt, const int is64,
3213 struct aarch64_insn_data *data)
3214{
3215 struct aarch64_displaced_step_data *dsd
3216 = (struct aarch64_displaced_step_data *) data;
3217 CORE_ADDR address = data->insn_addr + offset;
3218 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
3219
3221 address);
3222
3223 if (is_sw)
3224 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
3225 aarch64_register (rt, 1), zero);
3226 else
3227 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
3228 aarch64_register (rt, 1), zero);
3229
3230 dsd->dsc->pc_adjust = 4;
3231}
3232
3233/* Implementation of aarch64_insn_visitor method "others". */
3234
3235static void
3237 struct aarch64_insn_data *data)
3238{
3239 struct aarch64_displaced_step_data *dsd
3240 = (struct aarch64_displaced_step_data *) data;
3241
3242 uint32_t masked_insn = (insn & CLEAR_Rn_MASK);
3243 if (masked_insn == BLR)
3244 {
3245 /* Emit a BR to the same register and then update LR to the original
3246 address (similar to aarch64_displaced_step_b). */
3247 aarch64_emit_insn (dsd->insn_buf, insn & 0xffdfffff);
3249 data->insn_addr + 4);
3250 }
3251 else
3252 aarch64_emit_insn (dsd->insn_buf, insn);
3253 dsd->insn_count = 1;
3254
3255 if (masked_insn == RET || masked_insn == BR || masked_insn == BLR)
3256 dsd->dsc->pc_adjust = 0;
3257 else
3258 dsd->dsc->pc_adjust = 4;
3259}
3260
3261static const struct aarch64_insn_visitor visitor =
3262{
3270};
3271
3272/* Implement the "displaced_step_copy_insn" gdbarch method. */
3273
3276 CORE_ADDR from, CORE_ADDR to,
3277 struct regcache *regs)
3278{
3279 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
3280 struct aarch64_displaced_step_data dsd;
3281 aarch64_inst inst;
3282 ULONGEST insn_from_memory;
3283
3284 if (!safe_read_memory_unsigned_integer (from, 4, byte_order_for_code,
3285 &insn_from_memory))
3286 return nullptr;
3287
3288 uint32_t insn = insn_from_memory;
3289
3290 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
3291 return NULL;
3292
3293 /* Look for a Load Exclusive instruction which begins the sequence. */
3294 if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
3295 {
3296 /* We can't displaced step atomic sequences. */
3297 return NULL;
3298 }
3299
3300 std::unique_ptr<aarch64_displaced_step_copy_insn_closure> dsc
3302 dsd.base.insn_addr = from;
3303 dsd.new_addr = to;
3304 dsd.regs = regs;
3305 dsd.dsc = dsc.get ();
3306 dsd.insn_count = 0;
3308 (struct aarch64_insn_data *) &dsd);
3309 gdb_assert (dsd.insn_count <= AARCH64_DISPLACED_MODIFIED_INSNS);
3310
3311 if (dsd.insn_count != 0)
3312 {
3313 int i;
3314
3315 /* Instruction can be relocated to scratch pad. Copy
3316 relocated instruction(s) there. */
3317 for (i = 0; i < dsd.insn_count; i++)
3318 {
3319 displaced_debug_printf ("writing insn %.8x at %s",
3320 dsd.insn_buf[i],
3321 paddress (gdbarch, to + i * 4));
3322
3323 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
3324 (ULONGEST) dsd.insn_buf[i]);
3325 }
3326 }
3327 else
3328 {
3329 dsc = NULL;
3330 }
3331
 3332	 /* This is a workaround for a problem with g++ 4.8. */
3333 return displaced_step_copy_insn_closure_up (dsc.release ());
3334}
3335
3336/* Implement the "displaced_step_fixup" gdbarch method. */
3337
3338void
3341 CORE_ADDR from, CORE_ADDR to,
3342 struct regcache *regs)
3343{
3346
3347 ULONGEST pc;
3348
3350
3351 displaced_debug_printf ("PC after stepping: %s (was %s).",
3352 paddress (gdbarch, pc), paddress (gdbarch, to));
3353
3354 if (dsc->cond)
3355 {
3356 displaced_debug_printf ("[Conditional] pc_adjust before: %d",
3357 dsc->pc_adjust);
3358
3359 if (pc - to == 8)
3360 {
3361 /* Condition is true. */
3362 }
3363 else if (pc - to == 4)
3364 {
3365 /* Condition is false. */
3366 dsc->pc_adjust = 4;
3367 }
3368 else
3369 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
3370
3371 displaced_debug_printf ("[Conditional] pc_adjust after: %d",
3372 dsc->pc_adjust);
3373 }
3374
3375 displaced_debug_printf ("%s PC by %d",
3376 dsc->pc_adjust ? "adjusting" : "not adjusting",
3377 dsc->pc_adjust);
3378
3379 if (dsc->pc_adjust != 0)
3380 {
3381 /* Make sure the previous instruction was executed (that is, the PC
3382 has changed). If the PC didn't change, then discard the adjustment
3383 offset. Otherwise we may skip an instruction before its execution
3384 took place. */
3385 if ((pc - to) == 0)
3386 {
3387 displaced_debug_printf ("PC did not move. Discarding PC adjustment.");
3388 dsc->pc_adjust = 0;
3389 }
3390
3391 displaced_debug_printf ("fixup: set PC to %s:%d",
3392 paddress (gdbarch, from), dsc->pc_adjust);
3393
3395 from + dsc->pc_adjust);
3396 }
3397}
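/* Illustrative sketch, not part of aarch64-tdep.c: for the rewritten
   conditional instructions the scratch pad holds a single conditional branch
   (B.COND, CBZ/CBNZ or TBZ/TBNZ) with an offset of 8, so after the step the
   PC is either 4 bytes past the scratch address (the branch fell through:
   condition false) or 8 bytes past it (the branch was taken: condition true).
   That is the "pc - to" test above in a standalone form (names are ours):  */

#include <cstdint>

enum class illustrative_cond_result { taken, not_taken, unexpected };

static illustrative_cond_result
illustrative_classify_cond_step (uint64_t pc, uint64_t scratch_addr)
{
  switch (pc - scratch_addr)
    {
    case 8: return illustrative_cond_result::taken;
    case 4: return illustrative_cond_result::not_taken;
    default: return illustrative_cond_result::unexpected;
    }
}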
3398
3399/* Implement the "displaced_step_hw_singlestep" gdbarch method. */
3400
3401bool
3403{
3404 return true;
3405}
3406
3407/* Get the correct target description for the given FEATURES set.
3408   If FEATURES.vq is zero then it is assumed SVE is not supported.
3409   (It is not possible to set VQ to zero on an SVE system).
3410
3411   FEATURES.mte indicates the presence of the Memory Tagging Extension.
3412
3413   FEATURES.tls indicates the presence of the Thread Local Storage feature. */
3414
3415const target_desc *
3416aarch64_read_description (const aarch64_features &features)
3417{
3418 if (features.vq > AARCH64_MAX_SVE_VQ)
3419 error (_("VQ is %" PRIu64 ", maximum supported value is %d"), features.vq,
3421
3422 struct target_desc *tdesc = tdesc_aarch64_map[features];
3423
3424 if (tdesc == NULL)
3425 {
3427 tdesc_aarch64_map[features] = tdesc;
3428 }
3429
3430 return tdesc;
3431}
3432
3433/* Return the VQ used when creating the target description TDESC. */
3434
3435static uint64_t
3437{
3438 const struct tdesc_feature *feature_sve;
3439
3440 if (!tdesc_has_registers (tdesc))
3441 return 0;
3442
3443 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3444
3445 if (feature_sve == nullptr)
3446 return 0;
3447
3448 uint64_t vl = tdesc_register_bitsize (feature_sve,
3450 return sve_vq_from_vl (vl);
3451}
3452
3453/* Get the AArch64 features present in the given target description. */
3454
3457{
3458 aarch64_features features;
3459
3460 if (tdesc == nullptr)
3461 return features;
3462
3463 features.vq = aarch64_get_tdesc_vq (tdesc);
3464 features.pauth
3465 = (tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth") != nullptr);
3466 features.mte
3467 = (tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.mte") != nullptr);
3468
3469 const struct tdesc_feature *tls_feature
3470 = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.tls");
3471
3472 if (tls_feature != nullptr)
3473 {
3474 /* We have TLS registers. Find out how many. */
3475 if (tdesc_unnumbered_register (tls_feature, "tpidr2"))
3476 features.tls = 2;
3477 else
3478 features.tls = 1;
3479 }
3480
3481 return features;
3482}
3483
3484/* Implement the "cannot_store_register" gdbarch method. */
3485
3486static int
3488{
3489 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
3490
3491 if (!tdep->has_pauth ())
3492 return 0;
3493
3494 /* Pointer authentication registers are read-only. */
3497}
3498
3499/* Implement the stack_frame_destroyed_p gdbarch method. */
3500
3501static int
3503{
3504 CORE_ADDR func_start, func_end;
3505 if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
3506 return 0;
3507
3508 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
3509
3510 ULONGEST insn_from_memory;
3511 if (!safe_read_memory_unsigned_integer (pc, 4, byte_order_for_code,
3512 &insn_from_memory))
3513 return 0;
3514
3515 uint32_t insn = insn_from_memory;
3516
3517 aarch64_inst inst;
3518 if (aarch64_decode_insn (insn, &inst, 1, nullptr) != 0)
3519 return 0;
3520
3521 return streq (inst.opcode->name, "ret");
3522}
3523
3524/* Initialize the current architecture based on INFO. If possible,
3525 re-use an architecture from ARCHES, which is a list of
3526 architectures already created during this debugging session.
3527
3528 Called e.g. at program startup, when reading a core file, and when
3529 reading a binary file. */
3530
3531static struct gdbarch *
3533{
3534 const struct tdesc_feature *feature_core, *feature_fpu, *feature_sve;
3535 const struct tdesc_feature *feature_pauth;
3536 bool valid_p = true;
3537 int i, num_regs = 0, num_pseudo_regs = 0;
3538 int first_pauth_regnum = -1, ra_sign_state_offset = -1;
3539 int first_mte_regnum = -1, first_tls_regnum = -1;
3540 uint64_t vq = aarch64_get_tdesc_vq (info.target_desc);
3541
3542 if (vq > AARCH64_MAX_SVE_VQ)
3543 internal_error (_("VQ out of bounds: %s (max %d)"),
3544 pulongest (vq), AARCH64_MAX_SVE_VQ);
3545
3546 /* If there is already a candidate, use it. */
3547 for (gdbarch_list *best_arch = gdbarch_list_lookup_by_info (arches, &info);
3548 best_arch != nullptr;
3549 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
3550 {
3552 = gdbarch_tdep<aarch64_gdbarch_tdep> (best_arch->gdbarch);
3553 if (tdep && tdep->vq == vq)
3554 return best_arch->gdbarch;
3555 }
3556
 3557	 /* Ensure we always have a target description, and that it is for the given VQ
3558 value. */
3559 const struct target_desc *tdesc = info.target_desc;
3560 if (!tdesc_has_registers (tdesc))
3561 tdesc = aarch64_read_description ({});
3562 gdb_assert (tdesc);
3563
 3564	 feature_core = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
3565 feature_fpu = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
3566 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3567 feature_pauth = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth");
3568 const struct tdesc_feature *feature_mte
3569 = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.mte");
3570 const struct tdesc_feature *feature_tls
3571 = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.tls");
3572
3573 if (feature_core == nullptr)
3574 return nullptr;
3575
3577
3578 /* Validate the description provides the mandatory core R registers
3579 and allocate their numbers. */
3580 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
3581 valid_p &= tdesc_numbered_register (feature_core, tdesc_data.get (),
3584
3585 num_regs = AARCH64_X0_REGNUM + i;
3586
3587 /* Add the V registers. */
3588 if (feature_fpu != nullptr)
3589 {
3590 if (feature_sve != nullptr)
3591 error (_("Program contains both fpu and SVE features."));
3592
3593 /* Validate the description provides the mandatory V registers
3594 and allocate their numbers. */
3595 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
3596 valid_p &= tdesc_numbered_register (feature_fpu, tdesc_data.get (),
3599
3600 num_regs = AARCH64_V0_REGNUM + i;
3601 }
3602
3603 /* Add the SVE registers. */
3604 if (feature_sve != nullptr)
3605 {
3606 /* Validate the description provides the mandatory SVE registers
3607 and allocate their numbers. */
3608 for (i = 0; i < ARRAY_SIZE (aarch64_sve_register_names); i++)
3609 valid_p &= tdesc_numbered_register (feature_sve, tdesc_data.get (),
3612
3613 num_regs = AARCH64_SVE_Z0_REGNUM + i;
3614 num_pseudo_regs += 32; /* add the Vn register pseudos. */
3615 }
3616
3617 if (feature_fpu != nullptr || feature_sve != nullptr)
3618 {
3619 num_pseudo_regs += 32; /* add the Qn scalar register pseudos */
3620 num_pseudo_regs += 32; /* add the Dn scalar register pseudos */
3621 num_pseudo_regs += 32; /* add the Sn scalar register pseudos */
3622 num_pseudo_regs += 32; /* add the Hn scalar register pseudos */
3623 num_pseudo_regs += 32; /* add the Bn scalar register pseudos */
3624 }
3625
3626 /* Add the TLS register. */
3627 int tls_register_count = 0;
3628 if (feature_tls != nullptr)
3629 {
3630 first_tls_regnum = num_regs;
3631
3632 /* Look for the TLS registers. tpidr is required, but tpidr2 is
3633 optional. */
3634 valid_p
3635 = tdesc_numbered_register (feature_tls, tdesc_data.get (),
3636 first_tls_regnum, "tpidr");
3637
3638 if (valid_p)
3639 {
3640 tls_register_count++;
3641
3642 bool has_tpidr2
3643 = tdesc_numbered_register (feature_tls, tdesc_data.get (),
3644 first_tls_regnum + tls_register_count,
3645 "tpidr2");
3646
3647 /* Figure out how many TLS registers we have. */
3648 if (has_tpidr2)
3649 tls_register_count++;
3650
3651 num_regs += tls_register_count;
3652 }
3653 else
3654 {
3655 warning (_("Provided TLS register feature doesn't contain "
3656 "required tpidr register."));
3657 return nullptr;
3658 }
3659 }
3660
3661 /* Add the pauth registers. */
3662 if (feature_pauth != NULL)
3663 {
3664 first_pauth_regnum = num_regs;
3665 ra_sign_state_offset = num_pseudo_regs;
3666 /* Validate the descriptor provides the mandatory PAUTH registers and
3667 allocate their numbers. */
3668 for (i = 0; i < ARRAY_SIZE (aarch64_pauth_register_names); i++)
3669 valid_p &= tdesc_numbered_register (feature_pauth, tdesc_data.get (),
3670 first_pauth_regnum + i,
3672
3673 num_regs += i;
3674 num_pseudo_regs += 1; /* Count RA_STATE pseudo register. */
3675 }
3676
3677 /* Add the MTE registers. */
3678 if (feature_mte != NULL)
3679 {
3680 first_mte_regnum = num_regs;
3681 /* Validate the descriptor provides the mandatory MTE registers and
3682 allocate their numbers. */
3683 for (i = 0; i < ARRAY_SIZE (aarch64_mte_register_names); i++)
3684 valid_p &= tdesc_numbered_register (feature_mte, tdesc_data.get (),
3685 first_mte_regnum + i,
3687
3688 num_regs += i;
3689 }
3690 /* W pseudo-registers */
3691 int first_w_regnum = num_pseudo_regs;
3692 num_pseudo_regs += 31;
3693
3694 if (!valid_p)
3695 return nullptr;
3696
3697 /* AArch64 code is always little-endian. */
3698 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
3699
3701 struct gdbarch *gdbarch = gdbarch_alloc (&info, tdep);
3702
3703 /* This should be low enough for everything. */
3704 tdep->lowest_pc = 0x20;
3705 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
3706 tdep->jb_elt_size = 8;
3707 tdep->vq = vq;
3708 tdep->pauth_reg_base = first_pauth_regnum;
3709 tdep->ra_sign_state_regnum = -1;
3710 tdep->mte_reg_base = first_mte_regnum;
3711 tdep->tls_regnum_base = first_tls_regnum;
3712 tdep->tls_register_count = tls_register_count;
3713
3716
3717 /* Advance PC across function entry code. */
3719
3720 /* The stack grows downward. */
3722
3723 /* Breakpoint manipulation. */
3725 aarch64_breakpoint::kind_from_pc);
3727 aarch64_breakpoint::bp_from_kind);
3730
3731 /* Information about registers, etc. */
3734 set_gdbarch_num_regs (gdbarch, num_regs);
3735
3736 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
3744
3745 /* ABI */
3760
3761 /* Detect whether PC is at a point where the stack has been destroyed. */
3763
3764 /* Internal <-> external register number maps. */
3766
3767 /* Returning results. */
3769
3770 /* Disassembly. */
3772
3773 /* Virtual tables. */
3775
3776 /* Hook in the ABI-specific overrides, if they have been registered. */
3777 info.target_desc = tdesc;
3778 info.tdesc_data = tdesc_data.get ();
3780
3782 /* Register DWARF CFA vendor handler. */
3785
3786 /* Permanent/Program breakpoint handling. */
3789
3790 /* Add some default predicates. */
3794
3796
3797 /* Now we have tuned the configuration, set a few final things,
3798 based on what the OS ABI has told us. */
3799
3800 if (tdep->jb_pc >= 0)
3802
3804
3806
3807 tdesc_use_registers (gdbarch, tdesc, std::move (tdesc_data));
3808
3809 /* Fetch the updated number of registers after we're done adding all
3810 entries from features we don't explicitly care about. This is the case
3811 for bare metal debugging stubs that include a lot of system registers. */
3812 num_regs = gdbarch_num_regs (gdbarch);
3813
3814 /* With the number of real registers updated, setup the pseudo-registers and
3815 record their numbers. */
3816
3817 /* Setup W pseudo-register numbers. */
3818 tdep->w_pseudo_base = first_w_regnum + num_regs;
3819 tdep->w_pseudo_count = 31;
3820
3821 /* Pointer authentication pseudo-registers. */
3822 if (tdep->has_pauth ())
3823 tdep->ra_sign_state_regnum = ra_sign_state_offset + num_regs;
3824
3825 /* Add standard register aliases. */
3826 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
3830
3832
3833 return gdbarch;
3834}
3835
3836static void
3838{
3839 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
3840
3841 if (tdep == NULL)
3842 return;
3843
3844 gdb_printf (file, _("aarch64_dump_tdep: Lowest pc = 0x%s\n"),
3845 paddress (gdbarch, tdep->lowest_pc));
3846}
3847
3848#if GDB_SELF_TEST
3849namespace selftests
3850{
3851static void aarch64_process_record_test (void);
3852}
3853#endif
3854
3856void
3858{
3859 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
3861
3862 /* Debug this file's internals. */
3864Set AArch64 debugging."), _("\
3865Show AArch64 debugging."), _("\
3866When on, AArch64 specific debugging is enabled."),
3867 NULL,
3870
3871#if GDB_SELF_TEST
3872 selftests::register_test ("aarch64-analyze-prologue",
3873 selftests::aarch64_analyze_prologue_test);
3874 selftests::register_test ("aarch64-process-record",
3875 selftests::aarch64_process_record_test);
3876#endif
3877}
3878
3879/* AArch64 process record-replay related structures, defines etc. */
3880
3881#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
3882 do \
3883 { \
3884 unsigned int reg_len = LENGTH; \
3885 if (reg_len) \
3886 { \
3887 REGS = XNEWVEC (uint32_t, reg_len); \
 3888	 memcpy (&REGS[0], &RECORD_BUF[0], sizeof (uint32_t) * LENGTH); \
3889 } \
3890 } \
3891 while (0)
3892
3893#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
3894 do \
3895 { \
3896 unsigned int mem_len = LENGTH; \
3897 if (mem_len) \
3898 { \
3899 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
3900 memcpy(MEMS, &RECORD_BUF[0], \
3901 sizeof(struct aarch64_mem_r) * LENGTH); \
3902 } \
3903 } \
3904 while (0)
3905
3906/* AArch64 record/replay structures and enumerations. */
3907
3909{
3910 uint64_t len; /* Record length. */
3911 uint64_t addr; /* Memory address. */
3912};
3913
3915{
3920
3922{
3925 CORE_ADDR this_addr; /* Address of insn to be recorded. */
3926 uint32_t aarch64_insn; /* Insn to be recorded. */
3927 uint32_t mem_rec_count; /* Count of memory records. */
3928 uint32_t reg_rec_count; /* Count of register records. */
3929 uint32_t *aarch64_regs; /* Registers to be recorded. */
3930 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
3931};
3932
3933/* Record handler for data processing - register instructions. */
3934
3935static unsigned int
3937{
3938 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
3939 uint32_t record_buf[4];
3940
3941 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3942 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3943 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
3944
3945 if (!bit (aarch64_insn_r->aarch64_insn, 28))
3946 {
3947 uint8_t setflags;
3948
3949 /* Logical (shifted register). */
3950 if (insn_bits24_27 == 0x0a)
3951 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
3952 /* Add/subtract. */
3953 else if (insn_bits24_27 == 0x0b)
3954 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3955 else
3957
3958 record_buf[0] = reg_rd;
3959 aarch64_insn_r->reg_rec_count = 1;
3960 if (setflags)
3961 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3962 }
3963 else
3964 {
3965 if (insn_bits24_27 == 0x0b)
3966 {
3967 /* Data-processing (3 source). */
3968 record_buf[0] = reg_rd;
3969 aarch64_insn_r->reg_rec_count = 1;
3970 }
3971 else if (insn_bits24_27 == 0x0a)
3972 {
3973 if (insn_bits21_23 == 0x00)
3974 {
3975 /* Add/subtract (with carry). */
3976 record_buf[0] = reg_rd;
3977 aarch64_insn_r->reg_rec_count = 1;
3978 if (bit (aarch64_insn_r->aarch64_insn, 29))
3979 {
3980 record_buf[1] = AARCH64_CPSR_REGNUM;
3981 aarch64_insn_r->reg_rec_count = 2;
3982 }
3983 }
3984 else if (insn_bits21_23 == 0x02)
3985 {
3986 /* Conditional compare (register) and conditional compare
3987 (immediate) instructions. */
3988 record_buf[0] = AARCH64_CPSR_REGNUM;
3989 aarch64_insn_r->reg_rec_count = 1;
3990 }
3991 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
3992 {
3993 /* Conditional select. */
3994 /* Data-processing (2 source). */
3995 /* Data-processing (1 source). */
3996 record_buf[0] = reg_rd;
3997 aarch64_insn_r->reg_rec_count = 1;
3998 }
3999 else
4001 }
4002 }
4003
4004 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4005 record_buf);
4007}
4008
4009/* Record handler for data processing - immediate instructions. */
4010
4011static unsigned int
4013{
4014 uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
4015 uint32_t record_buf[4];
4016
4017 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
4018 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
4019 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
4020
4021 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
4022 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
4023 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
4024 {
4025 record_buf[0] = reg_rd;
4026 aarch64_insn_r->reg_rec_count = 1;
4027 }
4028 else if (insn_bits24_27 == 0x01)
4029 {
4030 /* Add/Subtract (immediate). */
4031 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
4032 record_buf[0] = reg_rd;
4033 aarch64_insn_r->reg_rec_count = 1;
4034 if (setflags)
4035 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
4036 }
4037 else if (insn_bits24_27 == 0x02 && !insn_bit23)
4038 {
4039 /* Logical (immediate). */
4040 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
4041 record_buf[0] = reg_rd;
4042 aarch64_insn_r->reg_rec_count = 1;
4043 if (setflags)
4044 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
4045 }
4046 else
4048
4049 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4050 record_buf);
4052}
4053
4054/* Record handler for branch, exception generation and system instructions. */
4055
4056static unsigned int
4058{
4059
4061 = gdbarch_tdep<aarch64_gdbarch_tdep> (aarch64_insn_r->gdbarch);
4062 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
4063 uint32_t record_buf[4];
4064
4065 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
4066 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
4067 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
4068
4069 if (insn_bits28_31 == 0x0d)
4070 {
4071 /* Exception generation instructions. */
4072 if (insn_bits24_27 == 0x04)
4073 {
4074 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
4075 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
4076 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
4077 {
4078 ULONGEST svc_number;
4079
4080 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
4081 &svc_number);
4082 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
4083 svc_number);
4084 }
4085 else
4087 }
4088 /* System instructions. */
4089 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
4090 {
4091 uint32_t reg_rt, reg_crn;
4092
4093 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
4094 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
4095
4096 /* Record rt in case of sysl and mrs instructions. */
4097 if (bit (aarch64_insn_r->aarch64_insn, 21))
4098 {
4099 record_buf[0] = reg_rt;
4100 aarch64_insn_r->reg_rec_count = 1;
4101 }
4102 /* Record cpsr for hint and msr(immediate) instructions. */
4103 else if (reg_crn == 0x02 || reg_crn == 0x04)
4104 {
4105 record_buf[0] = AARCH64_CPSR_REGNUM;
4106 aarch64_insn_r->reg_rec_count = 1;
4107 }
4108 }
4109 /* Unconditional branch (register). */
 4110	 else if ((insn_bits24_27 & 0x0e) == 0x06)
4111 {
4112 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
4113 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
4114 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
4115 }
4116 else
4118 }
4119 /* Unconditional branch (immediate). */
4120 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
4121 {
4122 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
4123 if (bit (aarch64_insn_r->aarch64_insn, 31))
4124 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
4125 }
4126 else
4127 /* Compare & branch (immediate), Test & branch (immediate) and
4128 Conditional branch (immediate). */
4129 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
4130
4131 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4132 record_buf);
4134}
4135
4136/* Record handler for advanced SIMD load and store instructions. */
4137
4138static unsigned int
4140{
4141 CORE_ADDR address;
4142 uint64_t addr_offset = 0;
4143 uint32_t record_buf[24];
4144 uint64_t record_buf_mem[24];
4145 uint32_t reg_rn, reg_rt;
4146 uint32_t reg_index = 0, mem_index = 0;
4147 uint8_t opcode_bits, size_bits;
4148
4149 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
4150 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
4151 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
4152 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
4153 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
4154
4155 if (record_debug)
4156 debug_printf ("Process record: Advanced SIMD load/store\n");
4157
4158 /* Load/store single structure. */
4159 if (bit (aarch64_insn_r->aarch64_insn, 24))
4160 {
4161 uint8_t sindex, scale, selem, esize, replicate = 0;
4162 scale = opcode_bits >> 2;
4163 selem = ((opcode_bits & 0x02) |
4164 bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
4165 switch (scale)
4166 {
4167 case 1:
4168 if (size_bits & 0x01)
4170 break;
4171 case 2:
4172 if ((size_bits >> 1) & 0x01)
4174 if (size_bits & 0x01)
4175 {
4176 if (!((opcode_bits >> 1) & 0x01))
4177 scale = 3;
4178 else
4180 }
4181 break;
4182 case 3:
4183 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
4184 {
4185 scale = size_bits;
4186 replicate = 1;
4187 break;
4188 }
4189 else
4191 default:
4192 break;
4193 }
4194 esize = 8 << scale;
4195 if (replicate)
4196 for (sindex = 0; sindex < selem; sindex++)
4197 {
4198 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
4199 reg_rt = (reg_rt + 1) % 32;
4200 }
4201 else
4202 {
4203 for (sindex = 0; sindex < selem; sindex++)
4204 {
4205 if (bit (aarch64_insn_r->aarch64_insn, 22))
4206 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
4207 else
4208 {
4209 record_buf_mem[mem_index++] = esize / 8;
4210 record_buf_mem[mem_index++] = address + addr_offset;
4211 }
4212 addr_offset = addr_offset + (esize / 8);
4213 reg_rt = (reg_rt + 1) % 32;
4214 }
4215 }
4216 }
4217 /* Load/store multiple structure. */
4218 else
4219 {
4220 uint8_t selem, esize, rpt, elements;
4221 uint8_t eindex, rindex;
4222
4223 esize = 8 << size_bits;
4224 if (bit (aarch64_insn_r->aarch64_insn, 30))
4225 elements = 128 / esize;
4226 else
4227 elements = 64 / esize;
4228
4229 switch (opcode_bits)
4230 {
4231 /*LD/ST4 (4 Registers). */
4232 case 0:
4233 rpt = 1;
4234 selem = 4;
4235 break;
4236 /*LD/ST1 (4 Registers). */
4237 case 2:
4238 rpt = 4;
4239 selem = 1;
4240 break;
4241 /*LD/ST3 (3 Registers). */
4242 case 4:
4243 rpt = 1;
4244 selem = 3;
4245 break;
4246 /*LD/ST1 (3 Registers). */
4247 case 6:
4248 rpt = 3;
4249 selem = 1;
4250 break;
4251 /*LD/ST1 (1 Register). */
4252 case 7:
4253 rpt = 1;
4254 selem = 1;
4255 break;
4256 /*LD/ST2 (2 Registers). */
4257 case 8:
4258 rpt = 1;
4259 selem = 2;
4260 break;
4261 /*LD/ST1 (2 Registers). */
4262 case 10:
4263 rpt = 2;
4264 selem = 1;
4265 break;
4266 default:
4267 return AARCH64_RECORD_UNSUPPORTED;
4268 break;
4269 }
4270 for (rindex = 0; rindex < rpt; rindex++)
4271 for (eindex = 0; eindex < elements; eindex++)
4272 {
4273 uint8_t reg_tt, sindex;
4274 reg_tt = (reg_rt + rindex) % 32;
4275 for (sindex = 0; sindex < selem; sindex++)
4276 {
4277 if (bit (aarch64_insn_r->aarch64_insn, 22))
4278 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
4279 else
4280 {
4281 record_buf_mem[mem_index++] = esize / 8;
4282 record_buf_mem[mem_index++] = address + addr_offset;
4283 }
4284 addr_offset = addr_offset + (esize / 8);
4285 reg_tt = (reg_tt + 1) % 32;
4286 }
4287 }
4288 }
4289
4290 if (bit (aarch64_insn_r->aarch64_insn, 23))
4291 record_buf[reg_index++] = reg_rn;
4292
4293 aarch64_insn_r->reg_rec_count = reg_index;
4294 aarch64_insn_r->mem_rec_count = mem_index / 2;
4295 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
4296 record_buf_mem);
4297 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4298 record_buf);
4299 return AARCH64_RECORD_SUCCESS;
4300 }
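/* Editorial sketch, not part of aarch64-tdep.c: the bookkeeping that
   aarch64_record_asimd_load_store performs above, for one concrete store of
   multiple structures.  For ST4 {v0.8B-v3.8B}, [x0] the decoded fields are
   size = 0, Q = 0, opcode = 0, giving rpt = 1 and selem = 4, so the handler
   emits 32 one-byte memory records.  The variable names are local to this
   sketch.  */

#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  uint8_t size_bits = 0, q_bit = 0;	/* 8-bit elements, 64-bit registers */
  uint8_t rpt = 1, selem = 4;		/* opcode 0 => ST4, one repeat */
  uint8_t esize = 8 << size_bits;	/* element size in bits */
  uint8_t elements = (q_bit ? 128 : 64) / esize;
  unsigned mem_records = (unsigned) rpt * elements * selem;

  printf ("%u memory records of %u byte(s) each\n",
	  mem_records, esize / 8);
  return 0;
}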
4301
4302/* Record handler for load and store instructions. */
4303
4304static unsigned int
4305 aarch64_record_load_store (aarch64_insn_decode_record *aarch64_insn_r)
4306 {
4307 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
4308 uint8_t insn_bit23, insn_bit21;
4309 uint8_t opc, size_bits, ld_flag, vector_flag;
4310 uint32_t reg_rn, reg_rt, reg_rt2;
4311 uint64_t datasize, offset;
4312 uint32_t record_buf[8];
4313 uint64_t record_buf_mem[8];
4314 CORE_ADDR address;
4315
4316 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
4317 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
4318 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
4319 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
4320 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
4321 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
4322 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
4323 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
4324 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
4325 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
4326 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
4327
4328 /* Load/store exclusive. */
4329 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
4330 {
4331 if (record_debug)
4332 debug_printf ("Process record: load/store exclusive\n");
4333
4334 if (ld_flag)
4335 {
4336 record_buf[0] = reg_rt;
4337 aarch64_insn_r->reg_rec_count = 1;
4338 if (insn_bit21)
4339 {
4340 record_buf[1] = reg_rt2;
4341 aarch64_insn_r->reg_rec_count = 2;
4342 }
4343 }
4344 else
4345 {
4346 if (insn_bit21)
4347 datasize = (8 << size_bits) * 2;
4348 else
4349 datasize = (8 << size_bits);
4350 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4351 &address);
4352 record_buf_mem[0] = datasize / 8;
4353 record_buf_mem[1] = address;
4354 aarch64_insn_r->mem_rec_count = 1;
4355 if (!insn_bit23)
4356 {
4357 /* Save register rs. */
4358 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
4359 aarch64_insn_r->reg_rec_count = 1;
4360 }
4361 }
4362 }
4363 /* Load register (literal) instructions decoding. */
4364 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
4365 {
4366 if (record_debug)
4367 debug_printf ("Process record: load register (literal)\n");
4368 if (vector_flag)
4369 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4370 else
4371 record_buf[0] = reg_rt;
4372 aarch64_insn_r->reg_rec_count = 1;
4373 }
4374 /* All types of load/store pair instructions decoding. */
4375 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
4376 {
4377 if (record_debug)
4378 debug_printf ("Process record: load/store pair\n");
4379
4380 if (ld_flag)
4381 {
4382 if (vector_flag)
4383 {
4384 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4385 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
4386 }
4387 else
4388 {
4389 record_buf[0] = reg_rt;
4390 record_buf[1] = reg_rt2;
4391 }
4392 aarch64_insn_r->reg_rec_count = 2;
4393 }
4394 else
4395 {
4396 uint16_t imm7_off;
4397 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
4398 if (!vector_flag)
4399 size_bits = size_bits >> 1;
4400 datasize = 8 << (2 + size_bits);
4401 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
4402 offset = offset << (2 + size_bits);
4403 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4404 &address);
4405 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
4406 {
4407 if (imm7_off & 0x40)
4408 address = address - offset;
4409 else
4410 address = address + offset;
4411 }
4412
4413 record_buf_mem[0] = datasize / 8;
4414 record_buf_mem[1] = address;
4415 record_buf_mem[2] = datasize / 8;
4416 record_buf_mem[3] = address + (datasize / 8);
4417 aarch64_insn_r->mem_rec_count = 2;
4418 }
4419 if (bit (aarch64_insn_r->aarch64_insn, 23))
4420 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
4421 }
4422 /* Load/store register (unsigned immediate) instructions. */
4423 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
4424 {
4425 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
4426 if (!(opc >> 1))
4427 {
4428 if (opc & 0x01)
4429 ld_flag = 0x01;
4430 else
4431 ld_flag = 0x0;
4432 }
4433 else
4434 {
4435 if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2)
4436 {
4437 /* PRFM (immediate) */
4438 return AARCH64_RECORD_SUCCESS;
4439 }
4440 else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2)
4441 {
4442 /* LDRSW (immediate) */
4443 ld_flag = 0x1;
4444 }
4445 else
4446 {
4447 if (opc & 0x01)
4448 ld_flag = 0x01;
4449 else
4450 ld_flag = 0x0;
4451 }
4452 }
4453
4454 if (record_debug)
4455 {
4456 debug_printf ("Process record: load/store (unsigned immediate):"
4457 " size %x V %d opc %x\n", size_bits, vector_flag,
4458 opc);
4459 }
4460
4461 if (!ld_flag)
4462 {
4463 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
4464 datasize = 8 << size_bits;
4465 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4466 &address);
4467 offset = offset << size_bits;
4468 address = address + offset;
4469
4470 record_buf_mem[0] = datasize >> 3;
4471 record_buf_mem[1] = address;
4472 aarch64_insn_r->mem_rec_count = 1;
4473 }
4474 else
4475 {
4476 if (vector_flag)
4477 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4478 else
4479 record_buf[0] = reg_rt;
4480 aarch64_insn_r->reg_rec_count = 1;
4481 }
4482 }
4483 /* Load/store register (register offset) instructions. */
4484 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
4485 && insn_bits10_11 == 0x02 && insn_bit21)
4486 {
4487 if (record_debug)
4488 debug_printf ("Process record: load/store (register offset)\n");
4489 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
4490 if (!(opc >> 1))
4491 if (opc & 0x01)
4492 ld_flag = 0x01;
4493 else
4494 ld_flag = 0x0;
4495 else
4496 if (size_bits != 0x03)
4497 ld_flag = 0x01;
4498 else
4499 return AARCH64_RECORD_UNKNOWN;
4500
4501 if (!ld_flag)
4502 {
4503 ULONGEST reg_rm_val;
4504
4505 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
4506 bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
4507 if (bit (aarch64_insn_r->aarch64_insn, 12))
4508 offset = reg_rm_val << size_bits;
4509 else
4510 offset = reg_rm_val;
4511 datasize = 8 << size_bits;
4512 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4513 &address);
4514 address = address + offset;
4515 record_buf_mem[0] = datasize >> 3;
4516 record_buf_mem[1] = address;
4517 aarch64_insn_r->mem_rec_count = 1;
4518 }
4519 else
4520 {
4521 if (vector_flag)
4522 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4523 else
4524 record_buf[0] = reg_rt;
4525 aarch64_insn_r->reg_rec_count = 1;
4526 }
4527 }
4528 /* Load/store register (immediate and unprivileged) instructions. */
4529 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
4530 && !insn_bit21)
4531 {
4532 if (record_debug)
4533 {
4534 debug_printf ("Process record: load/store "
4535 "(immediate and unprivileged)\n");
4536 }
4537 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
4538 if (!(opc >> 1))
4539 if (opc & 0x01)
4540 ld_flag = 0x01;
4541 else
4542 ld_flag = 0x0;
4543 else
4544 if (size_bits != 0x03)
4545 ld_flag = 0x01;
4546 else
4547 return AARCH64_RECORD_UNKNOWN;
4548
4549 if (!ld_flag)
4550 {
4551 uint16_t imm9_off;
4552 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
4553 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
4554 datasize = 8 << size_bits;
4555 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4556 &address);
4557 if (insn_bits10_11 != 0x01)
4558 {
4559 if (imm9_off & 0x0100)
4560 address = address - offset;
4561 else
4562 address = address + offset;
4563 }
4564 record_buf_mem[0] = datasize >> 3;
4565 record_buf_mem[1] = address;
4566 aarch64_insn_r->mem_rec_count = 1;
4567 }
4568 else
4569 {
4570 if (vector_flag)
4571 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4572 else
4573 record_buf[0] = reg_rt;
4574 aarch64_insn_r->reg_rec_count = 1;
4575 }
4576 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
4577 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
4578 }
4579 /* Advanced SIMD load/store instructions. */
4580 else
4581 return aarch64_record_asimd_load_store (aarch64_insn_r);
4582
4583 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
4584 record_buf_mem);
4585 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4586 record_buf);
4587 return AARCH64_RECORD_SUCCESS;
4588 }
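/* Editorial sketch, not part of aarch64-tdep.c: the imm7 arithmetic used by
   the load/store pair case above.  The field is two's complement, so the
   code converts it to a magnitude and applies the sign separately.  The
   values below describe a hypothetical "stp x0, x1, [sp, #-16]!".  */

#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  uint16_t imm7_off = 0x7e;	/* -2 encoded in the 7-bit field */
  uint8_t size_bits = 1;	/* 64-bit pair, after the non-vector >> 1 */
  uint64_t address = 0x1000;	/* pretend value read from the base register */

  uint64_t offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
  offset <<= (2 + size_bits);	/* scale by the access size: 2 << 3 = 16 */
  address = (imm7_off & 0x40) ? address - offset : address + offset;

  printf ("effective address: 0x%llx\n", (unsigned long long) address);
  return 0;
}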
4589
4590/* Record handler for data processing SIMD and floating point instructions. */
4591
4592static unsigned int
4593 aarch64_record_data_proc_simd_fp (aarch64_insn_decode_record *aarch64_insn_r)
4594 {
4595 uint8_t insn_bit21, opcode, rmode, reg_rd;
4596 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
4597 uint8_t insn_bits11_14;
4598 uint32_t record_buf[2];
4599
4600 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
4601 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
4602 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
4603 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
4604 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
4605 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
4606 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
4607 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
4608 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
4609
4610 if (record_debug)
4611 debug_printf ("Process record: data processing SIMD/FP: ");
4612
4613 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
4614 {
4615 /* Floating point - fixed point conversion instructions. */
4616 if (!insn_bit21)
4617 {
4618 if (record_debug)
4619 debug_printf ("FP - fixed point conversion");
4620
4621 if ((opcode >> 1) == 0x0 && rmode == 0x03)
4622 record_buf[0] = reg_rd;
4623 else
4624 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4625 }
4626 /* Floating point - conditional compare instructions. */
4627 else if (insn_bits10_11 == 0x01)
4628 {
4629 if (record_debug)
4630 debug_printf ("FP - conditional compare");
4631
4632 record_buf[0] = AARCH64_CPSR_REGNUM;
4633 }
4634 /* Floating point - data processing (2-source) and
4635 conditional select instructions. */
4636 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
4637 {
4638 if (record_debug)
4639 debug_printf ("FP - DP (2-source)");
4640
4641 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4642 }
4643 else if (insn_bits10_11 == 0x00)
4644 {
4645 /* Floating point - immediate instructions. */
4646 if ((insn_bits12_15 & 0x01) == 0x01
4647 || (insn_bits12_15 & 0x07) == 0x04)
4648 {
4649 if (record_debug)
4650 debug_printf ("FP - immediate");
4651 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4652 }
4653 /* Floating point - compare instructions. */
4654 else if ((insn_bits12_15 & 0x03) == 0x02)
4655 {
4656 if (record_debug)
4657 debug_printf ("FP - compare");
4658 record_buf[0] = AARCH64_CPSR_REGNUM;
4659 }
4660 /* Floating point - integer conversions instructions. */
4661 else if (insn_bits12_15 == 0x00)
4662 {
4663 /* Convert float to integer instruction. */
4664 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
4665 {
4666 if (record_debug)
4667 debug_printf ("float to int conversion");
4668
4669 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4670 }
4671 /* Convert integer to float instruction. */
4672 else if ((opcode >> 1) == 0x01 && !rmode)
4673 {
4674 if (record_debug)
4675 debug_printf ("int to float conversion");
4676
4677 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4678 }
4679 /* Move float to integer instruction. */
4680 else if ((opcode >> 1) == 0x03)
4681 {
4682 if (record_debug)
4683 debug_printf ("move float to int");
4684
4685 if (!(opcode & 0x01))
4686 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4687 else
4688 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4689 }
4690 else
4691 return AARCH64_RECORD_UNKNOWN;
4692 }
4693 else
4694 return AARCH64_RECORD_UNKNOWN;
4695 }
4696 else
4697 return AARCH64_RECORD_UNKNOWN;
4698 }
4699 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
4700 {
4701 if (record_debug)
4702 debug_printf ("SIMD copy");
4703
4704 /* Advanced SIMD copy instructions. */
4705 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
4706 && !bit (aarch64_insn_r->aarch64_insn, 15)
4707 && bit (aarch64_insn_r->aarch64_insn, 10))
4708 {
4709 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
4710 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4711 else
4712 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4713 }
4714 else
4715 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4716 }
4717 /* All remaining floating point or advanced SIMD instructions. */
4718 else
4719 {
4720 if (record_debug)
4721 debug_printf ("all remain");
4722
4723 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4724 }
4725
4726 if (record_debug)
4727 debug_printf ("\n");
4728
4729 /* Record the V/X register. */
4730 aarch64_insn_r->reg_rec_count++;
4731
4732 /* Some of these instructions may set bits in the FPSR, so record it
4733 too. */
4734 record_buf[1] = AARCH64_FPSR_REGNUM;
4735 aarch64_insn_r->reg_rec_count++;
4736
4737 gdb_assert (aarch64_insn_r->reg_rec_count == 2);
4738 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4739 record_buf);
4740 return AARCH64_RECORD_SUCCESS;
4741 }
4742
4743 /* Decode the instruction's type and invoke the matching record handler. */
4744
4745static unsigned int
4746 aarch64_record_decode_insn_handler (aarch64_insn_decode_record *aarch64_insn_r)
4747 {
4748 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
4749
4750 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
4751 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
4752 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
4753 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
4754
4755 /* Data processing - immediate instructions. */
4756 if (!ins_bit26 && !ins_bit27 && ins_bit28)
4757 return aarch64_record_data_proc_imm (aarch64_insn_r);
4758
4759 /* Branch, exception generation and system instructions. */
4760 if (ins_bit26 && !ins_bit27 && ins_bit28)
4761 return aarch64_record_branch_except_sys (aarch64_insn_r);
4762
4763 /* Load and store instructions. */
4764 if (!ins_bit25 && ins_bit27)
4765 return aarch64_record_load_store (aarch64_insn_r);
4766
4767 /* Data processing - register instructions. */
4768 if (ins_bit25 && !ins_bit26 && ins_bit27)
4769 return aarch64_record_data_proc_reg (aarch64_insn_r);
4770
4771 /* Data processing - SIMD and floating point instructions. */
4772 if (ins_bit25 && ins_bit26 && ins_bit27)
4773 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
4774
4775 return AARCH64_RECORD_UNSUPPORTED;
4776 }
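/* Editorial sketch, not part of aarch64-tdep.c: the top-level dispatch in
   aarch64_record_decode_insn_handler above, applied to the PRFM encoding the
   self test below uses (0xf9800020).  Bits 25-28 route it to the load/store
   handler, which then records nothing for PRFM.  */

#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  uint32_t insn = 0xf9800020;	/* prfm pldl1keep, [x1] */
  int b25 = (insn >> 25) & 1, b26 = (insn >> 26) & 1;
  int b27 = (insn >> 27) & 1, b28 = (insn >> 28) & 1;

  if (!b26 && !b27 && b28)
    puts ("data processing (immediate)");
  else if (b26 && !b27 && b28)
    puts ("branch, exception generation and system");
  else if (!b25 && b27)
    puts ("load and store");		/* PRFM lands here */
  else if (b25 && !b26 && b27)
    puts ("data processing (register)");
  else if (b25 && b26 && b27)
    puts ("data processing (SIMD and FP)");
  else
    puts ("unsupported");
  return 0;
}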
4777
4778/* Cleans up local record registers and memory allocations. */
4779
4780static void
4781 deallocate_reg_mem (aarch64_insn_decode_record *record)
4782 {
4783 xfree (record->aarch64_regs);
4784 xfree (record->aarch64_mems);
4785}
4786
4787#if GDB_SELF_TEST
4788namespace selftests {
4789
4790static void
4791aarch64_process_record_test (void)
4792{
4793 struct gdbarch_info info;
4794 uint32_t ret;
4795
4796 info.bfd_arch_info = bfd_scan_arch ("aarch64");
4797
4798 struct gdbarch *gdbarch = gdbarch_find_by_info (info);
4799 SELF_CHECK (gdbarch != NULL);
4800
4801 aarch64_insn_decode_record aarch64_record;
4802
4803 memset (&aarch64_record, 0, sizeof (aarch64_insn_decode_record));
4804 aarch64_record.regcache = NULL;
4805 aarch64_record.this_addr = 0;
4806 aarch64_record.gdbarch = gdbarch;
4807
4808 /* 20 00 80 f9 prfm pldl1keep, [x1] */
4809 aarch64_record.aarch64_insn = 0xf9800020;
4810 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4811 SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
4812 SELF_CHECK (aarch64_record.reg_rec_count == 0);
4813 SELF_CHECK (aarch64_record.mem_rec_count == 0);
4814
4815 deallocate_reg_mem (&aarch64_record);
4816}
4817
4818} // namespace selftests
4819#endif /* GDB_SELF_TEST */
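/* Editorial sketch, not part of aarch64-tdep.c: the self test above stores
   the opcode directly, while aarch64_process_record below reads four bytes
   from target memory and assembles them according to the gdbarch byte order
   (via extract_unsigned_integer).  For the little-endian byte sequence
   20 00 80 f9 that yields the same 0xf9800020.  */

#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  const uint8_t buf[4] = { 0x20, 0x00, 0x80, 0xf9 };	/* prfm pldl1keep, [x1] */
  uint32_t insn = 0;

  /* Little-endian assembly: the last byte is the most significant.  */
  for (int i = 3; i >= 0; i--)
    insn = (insn << 8) | buf[i];

  printf ("0x%08x\n", insn);
  return 0;
}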
4820
4821/* Parse the current instruction and record the values of the registers and
4822 memory that the current instruction will change to record_arch_list.
4823 Return -1 if something is wrong. */
4824
4825int
4826 aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
4827 CORE_ADDR insn_addr)
4828{
4829 uint32_t rec_no = 0;
4830 uint8_t insn_size = 4;
4831 uint32_t ret = 0;
4832 gdb_byte buf[insn_size];
4833 aarch64_insn_decode_record aarch64_record;
4834
4835 memset (&buf[0], 0, insn_size);
4836 memset (&aarch64_record, 0, sizeof (aarch64_insn_decode_record));
4837 target_read_memory (insn_addr, &buf[0], insn_size);
4838 aarch64_record.aarch64_insn
4839 = (uint32_t) extract_unsigned_integer (&buf[0],
4840 insn_size,
4841 gdbarch_byte_order (gdbarch));
4842 aarch64_record.regcache = regcache;
4843 aarch64_record.this_addr = insn_addr;
4844 aarch64_record.gdbarch = gdbarch;
4845
4846 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4847 if (ret == AARCH64_RECORD_UNSUPPORTED)
4848 {
4849 gdb_printf (gdb_stderr,
4850 _("Process record does not support instruction "
4851 "0x%0x at address %s.\n"),
4852 aarch64_record.aarch64_insn,
4853 paddress (gdbarch, insn_addr));
4854 ret = -1;
4855 }
4856
4857 if (0 == ret)
4858 {
4859 /* Record registers. */
4860 record_full_arch_list_add_reg (aarch64_record.regcache,
4861 AARCH64_PC_REGNUM);
4862 /* Always record register CPSR. */
4863 record_full_arch_list_add_reg (aarch64_record.regcache,
4864 AARCH64_CPSR_REGNUM);
4865 if (aarch64_record.aarch64_regs)
4866 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
4867 if (record_full_arch_list_add_reg (aarch64_record.regcache,
4868 aarch64_record.aarch64_regs[rec_no]))
4869 ret = -1;
4870
4871 /* Record memories. */
4872 if (aarch64_record.aarch64_mems)
4873 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
4874 if (record_full_arch_list_add_mem
4875 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
4876 aarch64_record.aarch64_mems[rec_no].len))
4877 ret = -1;
4878
4879 if (record_full_arch_list_add_end ())
4880 ret = -1;
4881 }
4882
4883 deallocate_reg_mem (&aarch64_record);
4884 return ret;
4885}
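/* Editorial note, not part of the original listing: aarch64_process_record is
   the per-instruction entry point used by GDB's record/replay support.  In
   this file it is registered from the gdbarch initialization routine,
   essentially via set_gdbarch_process_record (gdbarch, aarch64_process_record);
   that routine lies outside the portion of the file shown here.  */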