/* Doxygen/xref page chrome preserved from the extraction (not source code):
   "GDB (xrefs)" / "Loading..." / "Searching..." / "No Matches" /
   "record-btrace.c" / "Go to the documentation of this file."  */
1/* Branch trace support for GDB, the GNU debugger.
2
3 Copyright (C) 2013-2023 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22#include "defs.h"
23#include "record.h"
24#include "record-btrace.h"
25#include "gdbthread.h"
26#include "target.h"
27#include "gdbcmd.h"
28#include "disasm.h"
29#include "observable.h"
30#include "cli/cli-utils.h"
31#include "source.h"
32#include "ui-out.h"
33#include "symtab.h"
34#include "filenames.h"
35#include "regcache.h"
36#include "frame-unwind.h"
37#include "hashtab.h"
38#include "infrun.h"
39#include "gdbsupport/event-loop.h"
40#include "inf-loop.h"
41#include "inferior.h"
42#include <algorithm>
43#include "gdbarch.h"
44#include "cli/cli-style.h"
45#include "async-event.h"
46#include <forward_list>
47#include "objfiles.h"
48#include "interps.h"
49
51 "record-btrace",
52 N_("Branch tracing target"),
53 N_("Collect control-flow trace and provide the execution history.")
54};
55
56/* The target_ops of record-btrace. */
57
58class record_btrace_target final : public target_ops
59{
60public:
61 const target_info &info () const override
63
64 strata stratum () const override { return record_stratum; }
65
66 void close () override;
67 void async (bool) override;
68
69 void detach (inferior *inf, int from_tty) override
70 { record_detach (this, inf, from_tty); }
71
72 void disconnect (const char *, int) override;
73
74 void mourn_inferior () override
75 { record_mourn_inferior (this); }
76
77 void kill () override
78 { record_kill (this); }
79
80 enum record_method record_method (ptid_t ptid) override;
81
82 void stop_recording () override;
83 void info_record () override;
84
85 void insn_history (int size, gdb_disassembly_flags flags) override;
86 void insn_history_from (ULONGEST from, int size,
87 gdb_disassembly_flags flags) override;
88 void insn_history_range (ULONGEST begin, ULONGEST end,
89 gdb_disassembly_flags flags) override;
90 void call_history (int size, record_print_flags flags) override;
91 void call_history_from (ULONGEST begin, int size, record_print_flags flags)
92 override;
93 void call_history_range (ULONGEST begin, ULONGEST end, record_print_flags flags)
94 override;
95
96 bool record_is_replaying (ptid_t ptid) override;
97 bool record_will_replay (ptid_t ptid, int dir) override;
98 void record_stop_replaying () override;
99
101 const char *annex,
102 gdb_byte *readbuf,
103 const gdb_byte *writebuf,
104 ULONGEST offset, ULONGEST len,
105 ULONGEST *xfered_len) override;
106
107 int insert_breakpoint (struct gdbarch *,
108 struct bp_target_info *) override;
109 int remove_breakpoint (struct gdbarch *, struct bp_target_info *,
110 enum remove_bp_reason) override;
111
112 void fetch_registers (struct regcache *, int) override;
113
114 void store_registers (struct regcache *, int) override;
115 void prepare_to_store (struct regcache *) override;
116
117 const struct frame_unwind *get_unwinder () override;
118
119 const struct frame_unwind *get_tailcall_unwinder () override;
120
121 void resume (ptid_t, int, enum gdb_signal) override;
122 ptid_t wait (ptid_t, struct target_waitstatus *, target_wait_flags) override;
123
124 void stop (ptid_t) override;
125 void update_thread_list () override;
126 bool thread_alive (ptid_t ptid) override;
127 void goto_record_begin () override;
128 void goto_record_end () override;
129 void goto_record (ULONGEST insn) override;
130
131 bool can_execute_reverse () override;
132
133 bool stopped_by_sw_breakpoint () override;
134 bool supports_stopped_by_sw_breakpoint () override;
135
136 bool stopped_by_hw_breakpoint () override;
137 bool supports_stopped_by_hw_breakpoint () override;
138
140 void prepare_to_generate_core () override;
141 void done_generating_core () override;
142};
143
145
146/* Initialize the record-btrace target ops. */
147
148/* Token associated with a new-thread observer enabling branch tracing
149 for the new thread. */
150static const gdb::observers::token record_btrace_thread_observer_token {};
151
152/* Memory access types used in set/show record btrace replay-memory-access. */
153static const char replay_memory_access_read_only[] = "read-only";
154static const char replay_memory_access_read_write[] = "read-write";
155static const char *const replay_memory_access_types[] =
156{
159 NULL
160};
161
162/* The currently allowed replay memory access type. */
164
165/* The cpu state kinds. */
172
173/* The current cpu state. */
175
176/* The current cpu for trace decode. */
177static struct btrace_cpu record_btrace_cpu;
178
179/* Command lists for "set/show record btrace". */
182
183/* The execution direction of the last resume we got. See record-full.c. */
185
186/* The async event handler for reverse/replay execution. */
188
189/* A flag indicating that we are currently generating a core file. */
191
192/* The current branch trace configuration. */
193static struct btrace_config record_btrace_conf;
194
195/* Command list for "record btrace". */
197
198/* Command lists for "set/show record btrace bts". */
201
202/* Command lists for "set/show record btrace pt". */
205
206/* Command list for "set record btrace cpu". */
208
209/* Print a record-btrace debug message. Use do ... while (0) to avoid
210 ambiguities when used in if statements. */
211
212#define DEBUG(msg, args...) \
213 do \
214 { \
215 if (record_debug != 0) \
216 gdb_printf (gdb_stdlog, \
217 "[record-btrace] " msg "\n", ##args); \
218 } \
219 while (0)
220
221
222/* Return the cpu configured by the user. Returns NULL if the cpu was
223 configured as auto. */
224const struct btrace_cpu *
226{
228 {
229 case CS_AUTO:
230 return nullptr;
231
232 case CS_NONE:
233 record_btrace_cpu.vendor = CV_UNKNOWN;
234 /* Fall through. */
235 case CS_CPU:
236 return &record_btrace_cpu;
237 }
238
239 error (_("Internal error: bad record btrace cpu state."));
240}
241
242/* Update the branch trace for the current thread and return a pointer to its
243 thread_info.
244
245 Throws an error if there is no thread or no trace. This function never
246 returns NULL. */
247
248static struct thread_info *
250{
251 DEBUG ("require");
252
253 if (inferior_ptid == null_ptid)
254 error (_("No thread."));
255
257
259
261
262 if (btrace_is_empty (tp))
263 error (_("No trace."));
264
265 return tp;
266}
267
268/* Update the branch trace for the current thread and return a pointer to its
269 branch trace information struct.
270
271 Throws an error if there is no thread or no trace. This function never
272 returns NULL. */
273
274static struct btrace_thread_info *
276{
277 struct thread_info *tp;
278
279 tp = require_btrace_thread ();
280
281 return &tp->btrace;
282}
283
284/* The new thread observer. */
285
286static void
288{
289 /* Ignore this thread if its inferior is not recorded by us. */
291 if (rec != &record_btrace_ops)
292 return;
293
294 try
295 {
297 }
298 catch (const gdb_exception_error &error)
299 {
300 warning ("%s", error.what ());
301 }
302}
303
304/* Enable automatic tracing of new threads. */
305
306static void
308{
309 DEBUG ("attach thread observer");
310
313 "record-btrace");
314}
315
316/* Disable automatic tracing of new threads. */
317
318static void
320{
321 DEBUG ("detach thread observer");
322
324}
325
326/* The record-btrace async event handler function. */
327
328static void
333
334/* See record-btrace.h. */
335
336void
338{
339 const char *format;
340
342
344
347 NULL, "record-btrace");
349
350 format = btrace_format_short_string (record_btrace_conf.format);
351 interps_notify_record_changed (current_inferior (), 1, "btrace", format);
352}
353
354/* Disable btrace on a set of threads on scope exit. */
355
357{
359
361
363 {
364 for (thread_info *tp : m_threads)
365 btrace_disable (tp);
366 }
367
368 void add_thread (thread_info *thread)
369 {
370 m_threads.push_front (thread);
371 }
372
373 void discard ()
374 {
375 m_threads.clear ();
376 }
377
378private:
379 std::forward_list<thread_info *> m_threads;
380};
381
382/* Open target record-btrace. */
383
384static void
385record_btrace_target_open (const char *args, int from_tty)
386{
387 /* If we fail to enable btrace for one thread, disable it for the threads for
388 which it was successfully enabled. */
390
391 DEBUG ("open");
392
394
395 if (!target_has_execution ())
396 error (_("The program is not being run."));
397
399 if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
400 {
402
403 btrace_disable.add_thread (tp);
404 }
405
407
408 btrace_disable.discard ();
409}
410
411/* The stop_recording method of target record-btrace. */
412
413void
415{
416 DEBUG ("stop recording");
417
419
421 if (tp->btrace.target != NULL)
422 btrace_disable (tp);
423}
424
425/* The disconnect method of target record-btrace. */
426
427void
429 int from_tty)
430{
431 struct target_ops *beneath = this->beneath ();
432
433 /* Do not stop recording, just clean up GDB side. */
435
436 /* Forward disconnect. */
437 beneath->disconnect (args, from_tty);
438}
439
440/* The close method of target record-btrace. */
441
442void
444{
447
448 /* Make sure automatic recording gets disabled even if we did not stop
449 recording before closing the record-btrace target. */
451
452 /* We should have already stopped recording.
453 Tear down btrace in case we have not. */
455 btrace_teardown (tp);
456}
457
458/* The async method of target record-btrace. */
459
460void
470
/* Adjusts the size and returns a human readable size suffix.

   On return, *SIZE has been divided down by the returned unit (GB, MB or
   kB); an empty suffix means *SIZE was left unchanged (bytes).  */

static const char *
/* NOTE(review): the declarator line was lost in the extraction of this
   file.  The body reads and writes through a pointer parameter, so it is
   presumably `(unsigned int *size)` — confirm against upstream before
   relying on this.  */
{
  unsigned int sz;

  sz = *size;

  /* Only divide when SZ is an exact multiple of the unit, so the printed
     value stays exact rather than rounded.  */
  if ((sz & ((1u << 30) - 1)) == 0)
    {
      *size = sz >> 30;
      return "GB";
    }
  else if ((sz & ((1u << 20) - 1)) == 0)
    {
      *size = sz >> 20;
      return "MB";
    }
  else if ((sz & ((1u << 10) - 1)) == 0)
    {
      *size = sz >> 10;
      return "kB";
    }
  else
    return "";
}
498
499/* Print a BTS configuration. */
500
/* Print a BTS configuration.  Prints nothing for a zero-sized buffer.  */

static void
record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      /* NOTE(review): the line assigning SUFFIX (presumably a call to the
	 size-adjusting helper above) was lost in extraction; as shown,
	 SUFFIX is read uninitialized — restore from upstream.  */
      gdb_printf (_("Buffer size: %u%s.\n"), size, suffix);
    }
}
514
515/* Print an Intel Processor Trace configuration. */
516
/* Print an Intel Processor Trace configuration.  Prints nothing for a
   zero-sized buffer.  */

static void
record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      /* NOTE(review): the line assigning SUFFIX (presumably a call to the
	 size-adjusting helper above) was lost in extraction; as shown,
	 SUFFIX is read uninitialized — restore from upstream.  */
      gdb_printf (_("Buffer size: %u%s.\n"), size, suffix);
    }
}
530
531/* Print a branch tracing configuration. */
532
533static void
534record_btrace_print_conf (const struct btrace_config *conf)
535{
536 gdb_printf (_("Recording format: %s.\n"),
537 btrace_format_string (conf->format));
538
539 switch (conf->format)
540 {
541 case BTRACE_FORMAT_NONE:
542 return;
543
544 case BTRACE_FORMAT_BTS:
545 record_btrace_print_bts_conf (&conf->bts);
546 return;
547
548 case BTRACE_FORMAT_PT:
549 record_btrace_print_pt_conf (&conf->pt);
550 return;
551 }
552
553 internal_error (_("Unknown branch trace format."));
554}
555
556/* The info_record method of target record-btrace. */
557
558void
560{
561 struct btrace_thread_info *btinfo;
562 const struct btrace_config *conf;
563 struct thread_info *tp;
564 unsigned int insns, calls, gaps;
565
566 DEBUG ("info");
567
568 if (inferior_ptid == null_ptid)
569 error (_("No thread."));
570
571 tp = inferior_thread ();
572
574
575 btinfo = &tp->btrace;
576
577 conf = ::btrace_conf (btinfo);
578 if (conf != NULL)
580
582
583 insns = 0;
584 calls = 0;
585 gaps = 0;
586
587 if (!btrace_is_empty (tp))
588 {
589 struct btrace_call_iterator call;
590 struct btrace_insn_iterator insn;
591
592 btrace_call_end (&call, btinfo);
593 btrace_call_prev (&call, 1);
594 calls = btrace_call_number (&call);
595
596 btrace_insn_end (&insn, btinfo);
597 insns = btrace_insn_number (&insn);
598
599 /* If the last instruction is not a gap, it is the current instruction
600 that is not actually part of the record. */
601 if (btrace_insn_get (&insn) != NULL)
602 insns -= 1;
603
604 gaps = btinfo->ngaps;
605 }
606
607 gdb_printf (_("Recorded %u instructions in %u functions (%u gaps) "
608 "for thread %s (%s).\n"), insns, calls, gaps,
609 print_thread_id (tp),
610 target_pid_to_str (tp->ptid).c_str ());
611
612 if (btrace_is_replaying (tp))
613 gdb_printf (_("Replay in progress. At instruction %u.\n"),
614 btrace_insn_number (btinfo->replay));
615}
616
617/* Print a decode error. */
618
619static void
620btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
621 enum btrace_format format)
622{
623 const char *errstr = btrace_decode_error (format, errcode);
624
625 uiout->text (_("["));
626 /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT. */
627 if (!(format == BTRACE_FORMAT_PT && errcode > 0))
628 {
629 uiout->text (_("decode error ("));
630 uiout->field_signed ("errcode", errcode);
631 uiout->text (_("): "));
632 }
633 uiout->text (errstr);
634 uiout->text (_("]\n"));
635}
636
637/* A range of source lines. */
638
640{
641 /* The symtab this line is from. */
642 struct symtab *symtab;
643
644 /* The first line (inclusive). */
645 int begin;
646
647 /* The last line (exclusive). */
648 int end;
649};
650
651/* Construct a line range. */
652
653static struct btrace_line_range
655{
657
658 range.symtab = symtab;
659 range.begin = begin;
660 range.end = end;
661
662 return range;
663}
664
665/* Add a line to a line range. */
666
667static struct btrace_line_range
669{
670 if (range.end <= range.begin)
671 {
672 /* This is the first entry. */
673 range.begin = line;
674 range.end = line + 1;
675 }
676 else if (line < range.begin)
677 range.begin = line;
678 else if (range.end < line)
679 range.end = line;
680
681 return range;
682}
683
684/* Return non-zero if RANGE is empty, zero otherwise. */
685
686static int
688{
689 return range.end <= range.begin;
690}
691
692/* Return non-zero if LHS contains RHS, zero otherwise. */
693
694static int
696 struct btrace_line_range rhs)
697{
698 return ((lhs.symtab == rhs.symtab)
699 && (lhs.begin <= rhs.begin)
700 && (rhs.end <= lhs.end));
701}
702
703/* Find the line range associated with PC. */
704
705static struct btrace_line_range
707{
709 const linetable_entry *lines;
710 const linetable *ltable;
711 struct symtab *symtab;
712 int nlines, i;
713
715 if (symtab == NULL)
716 return btrace_mk_line_range (NULL, 0, 0);
717
718 ltable = symtab->linetable ();
719 if (ltable == NULL)
720 return btrace_mk_line_range (symtab, 0, 0);
721
722 nlines = ltable->nitems;
723 lines = ltable->item;
724 if (nlines <= 0)
725 return btrace_mk_line_range (symtab, 0, 0);
726
727 struct objfile *objfile = symtab->compunit ()->objfile ();
728 unrelocated_addr unrel_pc
729 = unrelocated_addr (pc - objfile->text_section_offset ());
730
732 for (i = 0; i < nlines - 1; i++)
733 {
734 /* The test of is_stmt here was added when the is_stmt field was
735 introduced to the 'struct linetable_entry' structure. This
736 ensured that this loop maintained the same behaviour as before we
737 introduced is_stmt. That said, it might be that we would be
738 better off not checking is_stmt here, this would lead to us
739 possibly adding more line numbers to the range. At the time this
740 change was made I was unsure how to test this so chose to go with
741 maintaining the existing experience. */
742 if (lines[i].unrelocated_pc () == unrel_pc && lines[i].line != 0
743 && lines[i].is_stmt)
744 range = btrace_line_range_add (range, lines[i].line);
745 }
746
747 return range;
748}
749
750/* Print source lines in LINES to UIOUT.
751
752 UI_ITEM_CHAIN is a cleanup chain for the last source line and the
753 instructions corresponding to that source line. When printing a new source
754 line, we do the cleanups for the open chain and open a new cleanup chain for
755 the new source line. If the source line range in LINES is not empty, this
756 function will leave the cleanup chain for the last printed source line open
757 so instructions can be added to it. */
758
759static void
760btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
761 gdb::optional<ui_out_emit_tuple> *src_and_asm_tuple,
762 gdb::optional<ui_out_emit_list> *asm_list,
763 gdb_disassembly_flags flags)
764{
765 print_source_lines_flags psl_flags;
766
768 psl_flags |= PRINT_SOURCE_LINES_FILENAME;
769
770 for (int line = lines.begin; line < lines.end; ++line)
771 {
772 asm_list->reset ();
773
774 src_and_asm_tuple->emplace (uiout, "src_and_asm_line");
775
776 print_source_lines (lines.symtab, line, line + 1, psl_flags);
777
778 asm_list->emplace (uiout, "line_asm_insn");
779 }
780}
781
782/* Disassemble a section of the recorded instruction trace. */
783
784static void
786 const struct btrace_thread_info *btinfo,
787 const struct btrace_insn_iterator *begin,
788 const struct btrace_insn_iterator *end,
789 gdb_disassembly_flags flags)
790{
791 DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags,
793
795
796 struct gdbarch *gdbarch = target_gdbarch ();
797 btrace_line_range last_lines = btrace_mk_line_range (NULL, 0, 0);
798
799 ui_out_emit_list list_emitter (uiout, "asm_insns");
800
801 gdb::optional<ui_out_emit_tuple> src_and_asm_tuple;
802 gdb::optional<ui_out_emit_list> asm_list;
803
805
806 for (btrace_insn_iterator it = *begin; btrace_insn_cmp (&it, end) != 0;
807 btrace_insn_next (&it, 1))
808 {
809 const struct btrace_insn *insn;
810
811 insn = btrace_insn_get (&it);
812
813 /* A NULL instruction indicates a gap in the trace. */
814 if (insn == NULL)
815 {
816 const struct btrace_config *conf;
817
818 conf = btrace_conf (btinfo);
819
820 /* We have trace so we must have a configuration. */
821 gdb_assert (conf != NULL);
822
823 uiout->field_fmt ("insn-number", "%u",
824 btrace_insn_number (&it));
825 uiout->text ("\t");
826
828 conf->format);
829 }
830 else
831 {
832 struct disasm_insn dinsn;
833
834 if ((flags & DISASSEMBLY_SOURCE) != 0)
835 {
836 struct btrace_line_range lines;
837
838 lines = btrace_find_line_range (insn->pc);
839 if (!btrace_line_range_is_empty (lines)
840 && !btrace_line_range_contains_range (last_lines, lines))
841 {
842 btrace_print_lines (lines, uiout, &src_and_asm_tuple, &asm_list,
843 flags);
844 last_lines = lines;
845 }
846 else if (!src_and_asm_tuple.has_value ())
847 {
848 gdb_assert (!asm_list.has_value ());
849
850 src_and_asm_tuple.emplace (uiout, "src_and_asm_line");
851
852 /* No source information. */
853 asm_list.emplace (uiout, "line_asm_insn");
854 }
855
856 gdb_assert (src_and_asm_tuple.has_value ());
857 gdb_assert (asm_list.has_value ());
858 }
859
860 memset (&dinsn, 0, sizeof (dinsn));
861 dinsn.number = btrace_insn_number (&it);
862 dinsn.addr = insn->pc;
863
864 if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
865 dinsn.is_speculative = 1;
866
867 disasm.pretty_print_insn (&dinsn, flags);
868 }
869 }
870}
871
872/* The insn_history method of target record-btrace. */
873
874void
876{
877 struct btrace_thread_info *btinfo;
878 struct btrace_insn_history *history;
879 struct btrace_insn_iterator begin, end;
880 struct ui_out *uiout;
881 unsigned int context, covered;
882
883 uiout = current_uiout;
884 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
885 context = abs (size);
886 if (context == 0)
887 error (_("Bad record instruction-history-size."));
888
889 btinfo = require_btrace ();
890 history = btinfo->insn_history;
891 if (history == NULL)
892 {
893 struct btrace_insn_iterator *replay;
894
895 DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);
896
897 /* If we're replaying, we start at the replay position. Otherwise, we
898 start at the tail of the trace. */
899 replay = btinfo->replay;
900 if (replay != NULL)
901 begin = *replay;
902 else
903 btrace_insn_end (&begin, btinfo);
904
905 /* We start from here and expand in the requested direction. Then we
906 expand in the other direction, as well, to fill up any remaining
907 context. */
908 end = begin;
909 if (size < 0)
910 {
911 /* We want the current position covered, as well. */
912 covered = btrace_insn_next (&end, 1);
913 covered += btrace_insn_prev (&begin, context - covered);
914 covered += btrace_insn_next (&end, context - covered);
915 }
916 else
917 {
918 covered = btrace_insn_next (&end, context);
919 covered += btrace_insn_prev (&begin, context - covered);
920 }
921 }
922 else
923 {
924 begin = history->begin;
925 end = history->end;
926
927 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size,
928 btrace_insn_number (&begin), btrace_insn_number (&end));
929
930 if (size < 0)
931 {
932 end = begin;
933 covered = btrace_insn_prev (&begin, context);
934 }
935 else
936 {
937 begin = end;
938 covered = btrace_insn_next (&end, context);
939 }
940 }
941
942 if (covered > 0)
943 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
944 else
945 {
946 if (size < 0)
947 gdb_printf (_("At the start of the branch trace record.\n"));
948 else
949 gdb_printf (_("At the end of the branch trace record.\n"));
950 }
951
952 btrace_set_insn_history (btinfo, &begin, &end);
953}
954
955/* The insn_history_range method of target record-btrace. */
956
957void
959 gdb_disassembly_flags flags)
960{
961 struct btrace_thread_info *btinfo;
962 struct btrace_insn_iterator begin, end;
963 struct ui_out *uiout;
964 unsigned int low, high;
965 int found;
966
967 uiout = current_uiout;
968 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
969 low = from;
970 high = to;
971
972 DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high);
973
974 /* Check for wrap-arounds. */
975 if (low != from || high != to)
976 error (_("Bad range."));
977
978 if (high < low)
979 error (_("Bad range."));
980
981 btinfo = require_btrace ();
982
983 found = btrace_find_insn_by_number (&begin, btinfo, low);
984 if (found == 0)
985 error (_("Range out of bounds."));
986
987 found = btrace_find_insn_by_number (&end, btinfo, high);
988 if (found == 0)
989 {
990 /* Silently truncate the range. */
991 btrace_insn_end (&end, btinfo);
992 }
993 else
994 {
995 /* We want both begin and end to be inclusive. */
996 btrace_insn_next (&end, 1);
997 }
998
999 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
1000 btrace_set_insn_history (btinfo, &begin, &end);
1001}
1002
1003/* The insn_history_from method of target record-btrace. */
1004
1005void
1007 gdb_disassembly_flags flags)
1008{
1009 ULONGEST begin, end, context;
1010
1011 context = abs (size);
1012 if (context == 0)
1013 error (_("Bad record instruction-history-size."));
1014
1015 if (size < 0)
1016 {
1017 end = from;
1018
1019 if (from < context)
1020 begin = 0;
1021 else
1022 begin = from - context + 1;
1023 }
1024 else
1025 {
1026 begin = from;
1027 end = from + context - 1;
1028
1029 /* Check for wrap-around. */
1030 if (end < begin)
1031 end = ULONGEST_MAX;
1032 }
1033
1034 insn_history_range (begin, end, flags);
1035}
1036
1037/* Print the instruction number range for a function call history line. */
1038
1039static void
1041 const struct btrace_function *bfun)
1042{
1043 unsigned int begin, end, size;
1044
1045 size = bfun->insn.size ();
1046 gdb_assert (size > 0);
1047
1048 begin = bfun->insn_offset;
1049 end = begin + size - 1;
1050
1051 uiout->field_unsigned ("insn begin", begin);
1052 uiout->text (",");
1053 uiout->field_unsigned ("insn end", end);
1054}
1055
1056/* Compute the lowest and highest source line for the instructions in BFUN
1057 and return them in PBEGIN and PEND.
1058 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
1059 result from inlining or macro expansion. */
1060
1061static void
1063 int *pbegin, int *pend)
1064{
1065 struct symtab *symtab;
1066 struct symbol *sym;
1067 int begin, end;
1068
1069 begin = INT_MAX;
1070 end = INT_MIN;
1071
1072 sym = bfun->sym;
1073 if (sym == NULL)
1074 goto out;
1075
1076 symtab = sym->symtab ();
1077
1078 for (const btrace_insn &insn : bfun->insn)
1079 {
1080 struct symtab_and_line sal;
1081
1082 sal = find_pc_line (insn.pc, 0);
1083 if (sal.symtab != symtab || sal.line == 0)
1084 continue;
1085
1086 begin = std::min (begin, sal.line);
1087 end = std::max (end, sal.line);
1088 }
1089
1090 out:
1091 *pbegin = begin;
1092 *pend = end;
1093}
1094
1095/* Print the source line information for a function call history line. */
1096
1097static void
1099 const struct btrace_function *bfun)
1100{
1101 struct symbol *sym;
1102 int begin, end;
1103
1104 sym = bfun->sym;
1105 if (sym == NULL)
1106 return;
1107
1108 uiout->field_string ("file",
1111
1112 btrace_compute_src_line_range (bfun, &begin, &end);
1113 if (end < begin)
1114 return;
1115
1116 uiout->text (":");
1117 uiout->field_signed ("min line", begin);
1118
1119 if (end == begin)
1120 return;
1121
1122 uiout->text (",");
1123 uiout->field_signed ("max line", end);
1124}
1125
1126/* Get the name of a branch trace function. */
1127
1128static const char *
1130{
1131 struct minimal_symbol *msym;
1132 struct symbol *sym;
1133
1134 if (bfun == NULL)
1135 return "??";
1136
1137 msym = bfun->msym;
1138 sym = bfun->sym;
1139
1140 if (sym != NULL)
1141 return sym->print_name ();
1142 else if (msym != NULL)
1143 return msym->print_name ();
1144 else
1145 return "??";
1146}
1147
1148/* Disassemble a section of the recorded function trace. */
1149
1150static void
1152 const struct btrace_thread_info *btinfo,
1153 const struct btrace_call_iterator *begin,
1154 const struct btrace_call_iterator *end,
1155 int int_flags)
1156{
1157 struct btrace_call_iterator it;
1158 record_print_flags flags = (enum record_print_flag) int_flags;
1159
1160 DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
1161 btrace_call_number (end));
1162
1163 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
1164 {
1165 const struct btrace_function *bfun;
1166 struct minimal_symbol *msym;
1167 struct symbol *sym;
1168
1169 bfun = btrace_call_get (&it);
1170 sym = bfun->sym;
1171 msym = bfun->msym;
1172
1173 /* Print the function index. */
1174 uiout->field_unsigned ("index", bfun->number);
1175 uiout->text ("\t");
1176
1177 /* Indicate gaps in the trace. */
1178 if (bfun->errcode != 0)
1179 {
1180 const struct btrace_config *conf;
1181
1182 conf = btrace_conf (btinfo);
1183
1184 /* We have trace so we must have a configuration. */
1185 gdb_assert (conf != NULL);
1186
1187 btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);
1188
1189 continue;
1190 }
1191
1192 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
1193 {
1194 int level = bfun->level + btinfo->level, i;
1195
1196 for (i = 0; i < level; ++i)
1197 uiout->text (" ");
1198 }
1199
1200 if (sym != NULL)
1201 uiout->field_string ("function", sym->print_name (),
1203 else if (msym != NULL)
1204 uiout->field_string ("function", msym->print_name (),
1206 else if (!uiout->is_mi_like_p ())
1207 uiout->field_string ("function", "??",
1209
1210 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
1211 {
1212 uiout->text (_("\tinst "));
1213 btrace_call_history_insn_range (uiout, bfun);
1214 }
1215
1216 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
1217 {
1218 uiout->text (_("\tat "));
1219 btrace_call_history_src_line (uiout, bfun);
1220 }
1221
1222 uiout->text ("\n");
1223 }
1224}
1225
1226/* The call_history method of target record-btrace. */
1227
1228void
1230{
1231 struct btrace_thread_info *btinfo;
1232 struct btrace_call_history *history;
1233 struct btrace_call_iterator begin, end;
1234 struct ui_out *uiout;
1235 unsigned int context, covered;
1236
1237 uiout = current_uiout;
1238 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
1239 context = abs (size);
1240 if (context == 0)
1241 error (_("Bad record function-call-history-size."));
1242
1243 btinfo = require_btrace ();
1244 history = btinfo->call_history;
1245 if (history == NULL)
1246 {
1247 struct btrace_insn_iterator *replay;
1248
1249 DEBUG ("call-history (0x%x): %d", (int) flags, size);
1250
1251 /* If we're replaying, we start at the replay position. Otherwise, we
1252 start at the tail of the trace. */
1253 replay = btinfo->replay;
1254 if (replay != NULL)
1255 {
1256 begin.btinfo = btinfo;
1257 begin.index = replay->call_index;
1258 }
1259 else
1260 btrace_call_end (&begin, btinfo);
1261
1262 /* We start from here and expand in the requested direction. Then we
1263 expand in the other direction, as well, to fill up any remaining
1264 context. */
1265 end = begin;
1266 if (size < 0)
1267 {
1268 /* We want the current position covered, as well. */
1269 covered = btrace_call_next (&end, 1);
1270 covered += btrace_call_prev (&begin, context - covered);
1271 covered += btrace_call_next (&end, context - covered);
1272 }
1273 else
1274 {
1275 covered = btrace_call_next (&end, context);
1276 covered += btrace_call_prev (&begin, context- covered);
1277 }
1278 }
1279 else
1280 {
1281 begin = history->begin;
1282 end = history->end;
1283
1284 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags, size,
1285 btrace_call_number (&begin), btrace_call_number (&end));
1286
1287 if (size < 0)
1288 {
1289 end = begin;
1290 covered = btrace_call_prev (&begin, context);
1291 }
1292 else
1293 {
1294 begin = end;
1295 covered = btrace_call_next (&end, context);
1296 }
1297 }
1298
1299 if (covered > 0)
1300 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1301 else
1302 {
1303 if (size < 0)
1304 gdb_printf (_("At the start of the branch trace record.\n"));
1305 else
1306 gdb_printf (_("At the end of the branch trace record.\n"));
1307 }
1308
1309 btrace_set_call_history (btinfo, &begin, &end);
1310}
1311
1312/* The call_history_range method of target record-btrace. */
1313
1314void
1316 record_print_flags flags)
1317{
1318 struct btrace_thread_info *btinfo;
1319 struct btrace_call_iterator begin, end;
1320 struct ui_out *uiout;
1321 unsigned int low, high;
1322 int found;
1323
1324 uiout = current_uiout;
1325 ui_out_emit_tuple tuple_emitter (uiout, "func history");
1326 low = from;
1327 high = to;
1328
1329 DEBUG ("call-history (0x%x): [%u; %u)", (int) flags, low, high);
1330
1331 /* Check for wrap-arounds. */
1332 if (low != from || high != to)
1333 error (_("Bad range."));
1334
1335 if (high < low)
1336 error (_("Bad range."));
1337
1338 btinfo = require_btrace ();
1339
1340 found = btrace_find_call_by_number (&begin, btinfo, low);
1341 if (found == 0)
1342 error (_("Range out of bounds."));
1343
1344 found = btrace_find_call_by_number (&end, btinfo, high);
1345 if (found == 0)
1346 {
1347 /* Silently truncate the range. */
1348 btrace_call_end (&end, btinfo);
1349 }
1350 else
1351 {
1352 /* We want both begin and end to be inclusive. */
1353 btrace_call_next (&end, 1);
1354 }
1355
1356 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1357 btrace_set_call_history (btinfo, &begin, &end);
1358}
1359
1360/* The call_history_from method of target record-btrace. */
1361
/* NOTE(review): the extraction dropped the method's name line; upstream
   this is record_btrace_target::call_history_from (ULONGEST from, int size,
   record_print_flags flags) -- confirm against the original file.  */
void
 record_print_flags flags)
{
  ULONGEST begin, end, context;

  /* SIZE's sign selects the direction: a negative SIZE requests the
     |SIZE| records up to and including FROM, a positive SIZE the SIZE
     records starting at FROM.  */
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  if (size < 0)
    {
      end = from;

      /* Clamp at the start of the trace rather than wrapping around.  */
      if (from < context)
	begin = 0;
      else
	begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
	end = ULONGEST_MAX;
    }

  call_history_range ( begin, end, flags);
}
1393
1394/* The record_method method of target record-btrace. */
1395
1396enum record_method
1398{
1400 thread_info *const tp = proc_target->find_thread (ptid);
1401
1402 if (tp == NULL)
1403 error (_("No thread."));
1404
1405 if (tp->btrace.target == NULL)
1406 return RECORD_METHOD_NONE;
1407
1408 return RECORD_METHOD_BTRACE;
1409}
1410
1411/* The record_is_replaying method of target record-btrace. */
1412
1413bool
1415{
1417 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
1418 if (btrace_is_replaying (tp))
1419 return true;
1420
1421 return false;
1422}
1423
1424/* The record_will_replay method of target record-btrace. */
1425
1426bool
1428{
1429 return dir == EXEC_REVERSE || record_is_replaying (ptid);
1430}
1431
1432/* The xfer_partial method of target record-btrace. */
1433
1436 const char *annex, gdb_byte *readbuf,
1437 const gdb_byte *writebuf, ULONGEST offset,
1438 ULONGEST len, ULONGEST *xfered_len)
1439{
1440 /* Filter out requests that don't make sense during replay. */
1444 {
1445 switch (object)
1446 {
1448 {
1449 const struct target_section *section;
1450
1451 /* We do not allow writing memory in general. */
1452 if (writebuf != NULL)
1453 {
1454 *xfered_len = len;
1456 }
1457
1458 /* We allow reading readonly memory. */
1459 section = target_section_by_addr (this, offset);
1460 if (section != NULL)
1461 {
1462 /* Check if the section we found is readonly. */
1463 if ((bfd_section_flags (section->the_bfd_section)
1464 & SEC_READONLY) != 0)
1465 {
1466 /* Truncate the request to fit into this section. */
1467 len = std::min (len, section->endaddr - offset);
1468 break;
1469 }
1470 }
1471
1472 *xfered_len = len;
1474 }
1475 }
1476 }
1477
1478 /* Forward the request. */
1479 return this->beneath ()->xfer_partial (object, annex, readbuf, writebuf,
1480 offset, len, xfered_len);
1481}
1482
1483/* The insert_breakpoint method of target record-btrace. */
1484
1485int
1487 struct bp_target_info *bp_tgt)
1488{
1489 const char *old;
1490 int ret;
1491
1492 /* Inserting breakpoints requires accessing memory. Allow it for the
1493 duration of this function. */
1496
1497 ret = 0;
1498 try
1499 {
1500 ret = this->beneath ()->insert_breakpoint (gdbarch, bp_tgt);
1501 }
1502 catch (const gdb_exception &except)
1503 {
1505 throw;
1506 }
1508
1509 return ret;
1510}
1511
1512/* The remove_breakpoint method of target record-btrace. */
1513
1514int
1516 struct bp_target_info *bp_tgt,
1517 enum remove_bp_reason reason)
1518{
1519 const char *old;
1520 int ret;
1521
1522 /* Removing breakpoints requires accessing memory. Allow it for the
1523 duration of this function. */
1526
1527 ret = 0;
1528 try
1529 {
1530 ret = this->beneath ()->remove_breakpoint (gdbarch, bp_tgt, reason);
1531 }
1532 catch (const gdb_exception &except)
1533 {
1535 throw;
1536 }
1538
1539 return ret;
1540}
1541
1542/* The fetch_registers method of target record-btrace. */
1543
1544void
1546{
1547 btrace_insn_iterator *replay = nullptr;
1548
1549 /* Thread-db may ask for a thread's registers before GDB knows about the
1550 thread. We forward the request to the target beneath in this
1551 case. */
1552 thread_info *tp
1554 if (tp != nullptr)
1555 replay = tp->btrace.replay;
1556
1557 if (replay != nullptr && !record_btrace_generating_corefile)
1558 {
1559 const struct btrace_insn *insn;
1560 struct gdbarch *gdbarch;
1561 int pcreg;
1562
1563 gdbarch = regcache->arch ();
1564 pcreg = gdbarch_pc_regnum (gdbarch);
1565 if (pcreg < 0)
1566 return;
1567
1568 /* We can only provide the PC register. */
1569 if (regno >= 0 && regno != pcreg)
1570 return;
1571
1572 insn = btrace_insn_get (replay);
1573 gdb_assert (insn != NULL);
1574
1575 regcache->raw_supply (regno, &insn->pc);
1576 }
1577 else
1578 this->beneath ()->fetch_registers (regcache, regno);
1579}
1580
1581/* The store_registers method of target record-btrace. */
1582
1583void
1585{
1588 error (_("Cannot write registers while replaying."));
1589
1590 gdb_assert (may_write_registers);
1591
1592 this->beneath ()->store_registers (regcache, regno);
1593}
1594
1595/* The prepare_to_store method of target record-btrace. */
1596
1597void
1599{
1602 return;
1603
1604 this->beneath ()->prepare_to_store (regcache);
1605}
1606
1607/* The branch trace frame cache. */
1608
1610{
1611 /* The thread. */
1613
1614 /* The frame info. */
1616
1617 /* The branch trace function segment. */
1618 const struct btrace_function *bfun;
1619};
1620
1621/* A struct btrace_frame_cache hash table indexed by NEXT. */
1622
1623static htab_t bfcache;
1624
1625/* hash_f for htab_create_alloc of bfcache. */
1626
1627static hashval_t
1628bfcache_hash (const void *arg)
1629{
1630 const struct btrace_frame_cache *cache
1631 = (const struct btrace_frame_cache *) arg;
1632
1633 return htab_hash_pointer (cache->frame);
1634}
1635
1636/* eq_f for htab_create_alloc of bfcache. */
1637
1638static int
1639bfcache_eq (const void *arg1, const void *arg2)
1640{
1641 const struct btrace_frame_cache *cache1
1642 = (const struct btrace_frame_cache *) arg1;
1643 const struct btrace_frame_cache *cache2
1644 = (const struct btrace_frame_cache *) arg2;
1645
1646 return cache1->frame == cache2->frame;
1647}
1648
1649/* Create a new btrace frame cache. */
1650
static struct btrace_frame_cache *
{
  struct btrace_frame_cache *cache;
  void **slot;

  /* NOTE(review): the extraction dropped the line naming this function
     (upstream: bfcache_new (frame_info_ptr frame)) and the line
     allocating CACHE -- confirm against the original file; as shown,
     CACHE is written through uninitialized.  */
  cache->frame = frame.get ();

  /* Register the new cache in BFCACHE; the frame must not have an
     entry yet.  */
  slot = htab_find_slot (bfcache, cache, INSERT);
  gdb_assert (*slot == NULL);
  *slot = cache;

  return cache;
}
1666
1667/* Extract the branch trace function from a branch trace frame. */
1668
static const struct btrace_function *
{
  const struct btrace_frame_cache *cache;
  struct btrace_frame_cache pattern;
  void **slot;

  /* NOTE(review): the extraction dropped the line naming this function;
     upstream this is btrace_get_frame_function (frame_info_ptr frame)
     -- confirm against the original file.  */
  pattern.frame = frame.get ();

  /* Only frames created by our sniffers have an entry in BFCACHE; any
     other frame yields NULL.  */
  slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
  if (slot == NULL)
    return NULL;

  cache = (const struct btrace_frame_cache *) *slot;
  return cache->bfun;
}
1685
1686/* Implement stop_reason method for record_btrace_frame_unwind. */
1687
static enum unwind_stop_reason
 void **this_cache)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;

  /* NOTE(review): the extraction dropped the line naming this function
     and its first parameter (upstream:
     record_btrace_frame_unwind_stop_reason (frame_info_ptr this_frame,
     void **this_cache)) -- confirm against the original file.  */
  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  /* A function segment without a caller link (up == 0) marks the limit
     of the recorded trace; we cannot unwind past it.  */
  if (bfun->up == 0)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}
1704
1705/* Implement this_id method for record_btrace_frame_unwind. */
1706
1707static void
1708record_btrace_frame_this_id (frame_info_ptr this_frame, void **this_cache,
1709 struct frame_id *this_id)
1710{
1711 const struct btrace_frame_cache *cache;
1712 const struct btrace_function *bfun;
1713 struct btrace_call_iterator it;
1714 CORE_ADDR code, special;
1715
1716 cache = (const struct btrace_frame_cache *) *this_cache;
1717
1718 bfun = cache->bfun;
1719 gdb_assert (bfun != NULL);
1720
1721 while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
1722 bfun = btrace_call_get (&it);
1723
1724 code = get_frame_func (this_frame);
1725 special = bfun->number;
1726
1727 *this_id = frame_id_build_unavailable_stack_special (code, special);
1728
1729 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1730 btrace_get_bfun_name (cache->bfun),
1731 core_addr_to_string_nz (this_id->code_addr),
1732 core_addr_to_string_nz (this_id->special_addr));
1733}
1734
1735/* Implement prev_register method for record_btrace_frame_unwind. */
1736
1737static struct value *
1739 void **this_cache,
1740 int regnum)
1741{
1742 const struct btrace_frame_cache *cache;
1743 const struct btrace_function *bfun, *caller;
1744 struct btrace_call_iterator it;
1745 struct gdbarch *gdbarch;
1746 CORE_ADDR pc;
1747 int pcreg;
1748
1749 gdbarch = get_frame_arch (this_frame);
1750 pcreg = gdbarch_pc_regnum (gdbarch);
1751 if (pcreg < 0 || regnum != pcreg)
1752 throw_error (NOT_AVAILABLE_ERROR,
1753 _("Registers are not available in btrace record history"));
1754
1755 cache = (const struct btrace_frame_cache *) *this_cache;
1756 bfun = cache->bfun;
1757 gdb_assert (bfun != NULL);
1758
1759 if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
1760 throw_error (NOT_AVAILABLE_ERROR,
1761 _("No caller in btrace record history"));
1762
1763 caller = btrace_call_get (&it);
1764
1765 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1766 pc = caller->insn.front ().pc;
1767 else
1768 {
1769 pc = caller->insn.back ().pc;
1770 pc += gdb_insn_length (gdbarch, pc);
1771 }
1772
1773 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1775 core_addr_to_string_nz (pc));
1776
1777 return frame_unwind_got_address (this_frame, regnum, pc);
1778}
1779
1780/* Implement sniffer method for record_btrace_frame_unwind. */
1781
static int
 frame_info_ptr this_frame,
 void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  frame_info_ptr next;

  /* NOTE(review): the extraction dropped the line naming this function;
     upstream this is record_btrace_frame_sniffer (const struct
     frame_unwind *self, ...) -- confirm against the original file.  */

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = inferior_thread ();

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      /* Innermost frame: take the function segment at the current
	 replay position, if this thread is replaying.  */
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
	bfun = &replay->btinfo->functions[replay->call_index];
    }
  else
    {
      /* Outer frame: this frame is the caller of the btrace frame
	 below, reached via a regular call.  Tail calls are left to the
	 tailcall sniffer.  */
      const struct btrace_function *callee;
      struct btrace_call_iterator it;

      callee = btrace_get_frame_function (next);
      if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
	return 0;

      if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
	return 0;

      bfun = btrace_call_get (&it);
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
1834
1835/* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1836
static int
 frame_info_ptr this_frame,
 void **this_cache)
{
  const struct btrace_function *bfun, *callee;
  struct btrace_frame_cache *cache;
  struct btrace_call_iterator it;
  frame_info_ptr next;
  struct thread_info *tinfo;

  /* NOTE(review): the extraction dropped the line naming this function;
     upstream this is record_btrace_tailcall_frame_sniffer (const struct
     frame_unwind *self, ...) -- confirm against the original file.  */

  /* Tail-call frames are never innermost: we only synthesize one above
     a frame whose function segment was entered via a tail call.  */
  next = get_next_frame (this_frame);
  if (next == NULL)
    return 0;

  callee = btrace_get_frame_function (next);
  if (callee == NULL)
    return 0;

  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
    return 0;

  tinfo = inferior_thread ();
  if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
    return 0;

  bfun = btrace_call_get (&it);

  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tinfo;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
1876
static void
{
  struct btrace_frame_cache *cache;
  void **slot;

  /* NOTE(review): the extraction dropped the line naming this function
     (upstream: record_btrace_frame_dealloc_cache (frame_info *self,
     void *this_cache)) -- confirm against the original file.  */
  cache = (struct btrace_frame_cache *) this_cache;

  /* The frame must have been registered by one of our sniffers; the
     lookup only serves the assertion below.  */
  slot = htab_find_slot (bfcache, cache, NO_INSERT);
  gdb_assert (slot != NULL);

  htab_remove_elt (bfcache, cache);
}
1890
1891/* btrace recording does not store previous memory content, neither the stack
1892 frames content. Any unwinding would return erroneous results as the stack
1893 contents no longer matches the changed PC value restored from history.
1894 Therefore this unwinder reports any possibly unwound registers as
1895 <unavailable>. */
1896
1908
1920
1921/* Implement the get_unwinder method. */
1922
1923const struct frame_unwind *
1928
1929/* Implement the get_tailcall_unwinder method. */
1930
1931const struct frame_unwind *
1936
1937/* Return a human-readable string for FLAG. */
1938
1939static const char *
1940btrace_thread_flag_to_str (btrace_thread_flags flag)
1941{
1942 switch (flag)
1943 {
1944 case BTHR_STEP:
1945 return "step";
1946
1947 case BTHR_RSTEP:
1948 return "reverse-step";
1949
1950 case BTHR_CONT:
1951 return "cont";
1952
1953 case BTHR_RCONT:
1954 return "reverse-cont";
1955
1956 case BTHR_STOP:
1957 return "stop";
1958 }
1959
1960 return "<invalid>";
1961}
1962
1963/* Indicate that TP should be resumed according to FLAG. */
1964
1965static void
1967 enum btrace_thread_flag flag)
1968{
1969 struct btrace_thread_info *btinfo;
1970
1971 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
1972 tp->ptid.to_string ().c_str (), flag,
1974
1975 btinfo = &tp->btrace;
1976
1977 /* Fetch the latest branch trace. */
1979
1980 /* A resume request overwrites a preceding resume or stop request. */
1981 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
1982 btinfo->flags |= flag;
1983}
1984
1985/* Get the current frame for TP. */
1986
1987static struct frame_id
1989{
1990 /* Set current thread, which is implicitly used by
1991 get_current_frame. */
1992 scoped_restore_current_thread restore_thread;
1993
1994 switch_to_thread (tp);
1995
1996 process_stratum_target *proc_target = tp->inf->process_target ();
1997
1998 /* Clear the executing flag to allow changes to the current frame.
1999 We are not actually running, yet. We just started a reverse execution
2000 command or a record goto command.
2001 For the latter, EXECUTING is false and this has no effect.
2002 For the former, EXECUTING is true and we're in wait, about to
2003 move the thread. Since we need to recompute the stack, we temporarily
2004 set EXECUTING to false. */
2005 bool executing = tp->executing ();
2006 set_executing (proc_target, inferior_ptid, false);
2007 SCOPE_EXIT
2008 {
2009 set_executing (proc_target, inferior_ptid, executing);
2010 };
2011 return get_frame_id (get_current_frame ());
2012}
2013
2014/* Start replaying a thread. */
2015
2016static struct btrace_insn_iterator *
2018{
2019 struct btrace_insn_iterator *replay;
2020 struct btrace_thread_info *btinfo;
2021
2022 btinfo = &tp->btrace;
2023 replay = NULL;
2024
2025 /* We can't start replaying without trace. */
2026 if (btinfo->functions.empty ())
2027 error (_("No trace."));
2028
2029 /* GDB stores the current frame_id when stepping in order to detects steps
2030 into subroutines.
2031 Since frames are computed differently when we're replaying, we need to
2032 recompute those stored frames and fix them up so we can still detect
2033 subroutines after we started replaying. */
2034 try
2035 {
2036 struct frame_id frame_id;
2037 int upd_step_frame_id, upd_step_stack_frame_id;
2038
2039 /* The current frame without replaying - computed via normal unwind. */
2041
2042 /* Check if we need to update any stepping-related frame id's. */
2043 upd_step_frame_id = (frame_id == tp->control.step_frame_id);
2044 upd_step_stack_frame_id = (frame_id == tp->control.step_stack_frame_id);
2045
2046 /* We start replaying at the end of the branch trace. This corresponds
2047 to the current instruction. */
2048 replay = XNEW (struct btrace_insn_iterator);
2049 btrace_insn_end (replay, btinfo);
2050
2051 /* Skip gaps at the end of the trace. */
2052 while (btrace_insn_get (replay) == NULL)
2053 {
2054 unsigned int steps;
2055
2056 steps = btrace_insn_prev (replay, 1);
2057 if (steps == 0)
2058 error (_("No trace."));
2059 }
2060
2061 /* We're not replaying, yet. */
2062 gdb_assert (btinfo->replay == NULL);
2063 btinfo->replay = replay;
2064
2065 /* Make sure we're not using any stale registers. */
2067
2068 /* The current frame with replaying - computed via btrace unwind. */
2070
2071 /* Replace stepping related frames where necessary. */
2072 if (upd_step_frame_id)
2074 if (upd_step_stack_frame_id)
2076 }
2077 catch (const gdb_exception &except)
2078 {
2079 xfree (btinfo->replay);
2080 btinfo->replay = NULL;
2081
2083
2084 throw;
2085 }
2086
2087 return replay;
2088}
2089
2090/* Stop replaying a thread. */
2091
2092static void
2094{
2095 struct btrace_thread_info *btinfo;
2096
2097 btinfo = &tp->btrace;
2098
2099 xfree (btinfo->replay);
2100 btinfo->replay = NULL;
2101
2102 /* Make sure we're not leaving any stale registers. */
2104}
2105
2106/* Stop replaying TP if it is at the end of its execution history. */
2107
2108static void
2110{
2111 struct btrace_insn_iterator *replay, end;
2112 struct btrace_thread_info *btinfo;
2113
2114 btinfo = &tp->btrace;
2115 replay = btinfo->replay;
2116
2117 if (replay == NULL)
2118 return;
2119
2120 btrace_insn_end (&end, btinfo);
2121
2122 if (btrace_insn_cmp (replay, &end) == 0)
2124}
2125
2126/* The resume method of target record-btrace. */
2127
2128void
2129record_btrace_target::resume (ptid_t ptid, int step, enum gdb_signal signal)
2130{
2131 enum btrace_thread_flag flag, cflag;
2132
2133 DEBUG ("resume %s: %s%s", ptid.to_string ().c_str (),
2134 ::execution_direction == EXEC_REVERSE ? "reverse-" : "",
2135 step ? "step" : "cont");
2136
2137 /* Store the execution direction of the last resume.
2138
2139 If there is more than one resume call, we have to rely on infrun
2140 to not change the execution direction in-between. */
2142
2143 /* As long as we're not replaying, just forward the request.
2144
2145 For non-stop targets this means that no thread is replaying. In order to
2146 make progress, we may need to explicitly move replaying threads to the end
2147 of their execution history. */
2149 && !record_is_replaying (minus_one_ptid))
2150 {
2151 this->beneath ()->resume (ptid, step, signal);
2152 return;
2153 }
2154
2155 /* Compute the btrace thread flag for the requested move. */
2157 {
2158 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2159 cflag = BTHR_RCONT;
2160 }
2161 else
2162 {
2163 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2164 cflag = BTHR_CONT;
2165 }
2166
2167 /* We just indicate the resume intent here. The actual stepping happens in
2168 record_btrace_wait below.
2169
2170 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2171
2173
2174 if (!target_is_non_stop_p ())
2175 {
2176 gdb_assert (inferior_ptid.matches (ptid));
2177
2178 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
2179 {
2180 if (tp->ptid.matches (inferior_ptid))
2181 record_btrace_resume_thread (tp, flag);
2182 else
2183 record_btrace_resume_thread (tp, cflag);
2184 }
2185 }
2186 else
2187 {
2188 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
2189 record_btrace_resume_thread (tp, flag);
2190 }
2191
2192 /* Async support. */
2193 if (target_can_async_p ())
2194 {
2195 target_async (true);
2197 }
2198}
2199
2200/* Cancel resuming TP. */
2201
2202static void
2204{
2205 btrace_thread_flags flags;
2206
2207 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2208 if (flags == 0)
2209 return;
2210
2211 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2212 print_thread_id (tp),
2213 tp->ptid.to_string ().c_str (), flags.raw (),
2215
2216 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
2218}
2219
2220/* Return a target_waitstatus indicating that we ran out of history. */
2221
2222static struct target_waitstatus
2224{
2226
2228
2229 return status;
2230}
2231
2232/* Return a target_waitstatus indicating that a step finished. */
2233
2234static struct target_waitstatus
2236{
2238
2239 status.set_stopped (GDB_SIGNAL_TRAP);
2240
2241 return status;
2242}
2243
2244/* Return a target_waitstatus indicating that a thread was stopped as
2245 requested. */
2246
2247static struct target_waitstatus
2249{
2251
2252 status.set_stopped (GDB_SIGNAL_0);
2253
2254 return status;
2255}
2256
2257/* Return a target_waitstatus indicating a spurious stop. */
2258
2259static struct target_waitstatus
2261{
2263
2265
2266 return status;
2267}
2268
2269/* Return a target_waitstatus indicating that the thread was not resumed. */
2270
2271static struct target_waitstatus
2273{
2275
2277
2278 return status;
2279}
2280
2281/* Return a target_waitstatus indicating that we should wait again. */
2282
2283static struct target_waitstatus
2285{
2287
2288 status.set_ignore ();
2289
2290 return status;
2291}
2292
2293/* Clear the record histories. */
2294
2295static void
2297{
2298 xfree (btinfo->insn_history);
2299 xfree (btinfo->call_history);
2300
2301 btinfo->insn_history = NULL;
2302 btinfo->call_history = NULL;
2303}
2304
2305/* Check whether TP's current replay position is at a breakpoint. */
2306
2307static int
2309{
2310 struct btrace_insn_iterator *replay;
2311 struct btrace_thread_info *btinfo;
2312 const struct btrace_insn *insn;
2313
2314 btinfo = &tp->btrace;
2315 replay = btinfo->replay;
2316
2317 if (replay == NULL)
2318 return 0;
2319
2320 insn = btrace_insn_get (replay);
2321 if (insn == NULL)
2322 return 0;
2323
2324 return record_check_stopped_by_breakpoint (tp->inf->aspace, insn->pc,
2325 &btinfo->stop_reason);
2326}
2327
2328/* Step one instruction in forward direction. */
2329
2330static struct target_waitstatus
2332{
2333 struct btrace_insn_iterator *replay, end, start;
2334 struct btrace_thread_info *btinfo;
2335
2336 btinfo = &tp->btrace;
2337 replay = btinfo->replay;
2338
2339 /* We're done if we're not replaying. */
2340 if (replay == NULL)
2341 return btrace_step_no_history ();
2342
2343 /* Check if we're stepping a breakpoint. */
2345 return btrace_step_stopped ();
2346
2347 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2348 jump back to the instruction at which we started. */
2349 start = *replay;
2350 do
2351 {
2352 unsigned int steps;
2353
2354 /* We will bail out here if we continue stepping after reaching the end
2355 of the execution history. */
2356 steps = btrace_insn_next (replay, 1);
2357 if (steps == 0)
2358 {
2359 *replay = start;
2360 return btrace_step_no_history ();
2361 }
2362 }
2363 while (btrace_insn_get (replay) == NULL);
2364
2365 /* Determine the end of the instruction trace. */
2366 btrace_insn_end (&end, btinfo);
2367
2368 /* The execution trace contains (and ends with) the current instruction.
2369 This instruction has not been executed, yet, so the trace really ends
2370 one instruction earlier. */
2371 if (btrace_insn_cmp (replay, &end) == 0)
2372 return btrace_step_no_history ();
2373
2374 return btrace_step_spurious ();
2375}
2376
2377/* Step one instruction in backward direction. */
2378
2379static struct target_waitstatus
2381{
2382 struct btrace_insn_iterator *replay, start;
2383 struct btrace_thread_info *btinfo;
2384
2385 btinfo = &tp->btrace;
2386 replay = btinfo->replay;
2387
2388 /* Start replaying if we're not already doing so. */
2389 if (replay == NULL)
2391
2392 /* If we can't step any further, we reached the end of the history.
2393 Skip gaps during replay. If we end up at a gap (at the beginning of
2394 the trace), jump back to the instruction at which we started. */
2395 start = *replay;
2396 do
2397 {
2398 unsigned int steps;
2399
2400 steps = btrace_insn_prev (replay, 1);
2401 if (steps == 0)
2402 {
2403 *replay = start;
2404 return btrace_step_no_history ();
2405 }
2406 }
2407 while (btrace_insn_get (replay) == NULL);
2408
2409 /* Check if we're stepping a breakpoint.
2410
2411 For reverse-stepping, this check is after the step. There is logic in
2412 infrun.c that handles reverse-stepping separately. See, for example,
2413 proceed and adjust_pc_after_break.
2414
2415 This code assumes that for reverse-stepping, PC points to the last
2416 de-executed instruction, whereas for forward-stepping PC points to the
2417 next to-be-executed instruction. */
2419 return btrace_step_stopped ();
2420
2421 return btrace_step_spurious ();
2422}
2423
2424/* Step a single thread. */
2425
2426static struct target_waitstatus
2428{
2429 struct btrace_thread_info *btinfo;
2431 btrace_thread_flags flags;
2432
2433 btinfo = &tp->btrace;
2434
2435 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2436 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
2437
2438 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
2439 tp->ptid.to_string ().c_str (), flags.raw (),
2441
2442 /* We can't step without an execution history. */
2443 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2444 return btrace_step_no_history ();
2445
2446 switch (flags)
2447 {
2448 default:
2449 internal_error (_("invalid stepping type."));
2450
2451 case BTHR_STOP:
2453
2454 case BTHR_STEP:
2456 if (status.kind () != TARGET_WAITKIND_SPURIOUS)
2457 break;
2458
2459 return btrace_step_stopped ();
2460
2461 case BTHR_RSTEP:
2463 if (status.kind () != TARGET_WAITKIND_SPURIOUS)
2464 break;
2465
2466 return btrace_step_stopped ();
2467
2468 case BTHR_CONT:
2470 if (status.kind () != TARGET_WAITKIND_SPURIOUS)
2471 break;
2472
2473 btinfo->flags |= flags;
2474 return btrace_step_again ();
2475
2476 case BTHR_RCONT:
2478 if (status.kind () != TARGET_WAITKIND_SPURIOUS)
2479 break;
2480
2481 btinfo->flags |= flags;
2482 return btrace_step_again ();
2483 }
2484
2485 /* We keep threads moving at the end of their execution history. The wait
2486 method will stop the thread for whom the event is reported. */
2487 if (status.kind () == TARGET_WAITKIND_NO_HISTORY)
2488 btinfo->flags |= flags;
2489
2490 return status;
2491}
2492
2493/* Announce further events if necessary. */
2494
2495static void
2497 (const std::vector<thread_info *> &moving,
2498 const std::vector<thread_info *> &no_history)
2499{
2500 bool more_moving = !moving.empty ();
2501 bool more_no_history = !no_history.empty ();;
2502
2503 if (!more_moving && !more_no_history)
2504 return;
2505
2506 if (more_moving)
2507 DEBUG ("movers pending");
2508
2509 if (more_no_history)
2510 DEBUG ("no-history pending");
2511
2513}
2514
2515/* The wait method of target record-btrace. */
2516
2517ptid_t
2519 target_wait_flags options)
2520{
2521 std::vector<thread_info *> moving;
2522 std::vector<thread_info *> no_history;
2523
2524 /* Clear this, if needed we'll re-mark it below. */
2526
2527 DEBUG ("wait %s (0x%x)", ptid.to_string ().c_str (),
2528 (unsigned) options);
2529
2530 /* As long as we're not replaying, just forward the request. */
2532 && !record_is_replaying (minus_one_ptid))
2533 {
2534 return this->beneath ()->wait (ptid, status, options);
2535 }
2536
2537 /* Keep a work list of moving threads. */
2539 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
2540 if ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0)
2541 moving.push_back (tp);
2542
2543 if (moving.empty ())
2544 {
2546
2547 DEBUG ("wait ended by %s: %s", null_ptid.to_string ().c_str (),
2548 status->to_string ().c_str ());
2549
2550 return null_ptid;
2551 }
2552
2553 /* Step moving threads one by one, one step each, until either one thread
2554 reports an event or we run out of threads to step.
2555
2556 When stepping more than one thread, chances are that some threads reach
2557 the end of their execution history earlier than others. If we reported
2558 this immediately, all-stop on top of non-stop would stop all threads and
2559 resume the same threads next time. And we would report the same thread
2560 having reached the end of its execution history again.
2561
2562 In the worst case, this would starve the other threads. But even if other
2563 threads would be allowed to make progress, this would result in far too
2564 many intermediate stops.
2565
2566 We therefore delay the reporting of "no execution history" until we have
2567 nothing else to report. By this time, all threads should have moved to
2568 either the beginning or the end of their execution history. There will
2569 be a single user-visible stop. */
2570 struct thread_info *eventing = NULL;
2571 while ((eventing == NULL) && !moving.empty ())
2572 {
2573 for (unsigned int ix = 0; eventing == NULL && ix < moving.size ();)
2574 {
2575 thread_info *tp = moving[ix];
2576
2578
2579 switch (status->kind ())
2580 {
2582 ix++;
2583 break;
2584
2586 no_history.push_back (ordered_remove (moving, ix));
2587 break;
2588
2589 default:
2590 eventing = unordered_remove (moving, ix);
2591 break;
2592 }
2593 }
2594 }
2595
2596 if (eventing == NULL)
2597 {
2598 /* We started with at least one moving thread. This thread must have
2599 either stopped or reached the end of its execution history.
2600
2601 In the former case, EVENTING must not be NULL.
2602 In the latter case, NO_HISTORY must not be empty. */
2603 gdb_assert (!no_history.empty ());
2604
2605 /* We kept threads moving at the end of their execution history. Stop
2606 EVENTING now that we are going to report its stop. */
2607 eventing = unordered_remove (no_history, 0);
2608 eventing->btrace.flags &= ~BTHR_MOVE;
2609
2611 }
2612
2613 gdb_assert (eventing != NULL);
2614
2615 /* We kept threads replaying at the end of their execution history. Stop
2616 replaying EVENTING now that we are going to report its stop. */
2618
2619 /* Stop all other threads. */
2620 if (!target_is_non_stop_p ())
2621 {
2624 }
2625
2626 /* In async mode, we need to announce further events. */
2627 if (target_is_async_p ())
2628 record_btrace_maybe_mark_async_event (moving, no_history);
2629
2630 /* Start record histories anew from the current position. */
2632
2633 /* We moved the replay position but did not update registers. */
2634 registers_changed_thread (eventing);
2635
2636 DEBUG ("wait ended by thread %s (%s): %s",
2637 print_thread_id (eventing),
2638 eventing->ptid.to_string ().c_str (),
2639 status->to_string ().c_str ());
2640
2641 return eventing->ptid;
2642}
2643
2644/* The stop method of target record-btrace. */
2645
2646void
2648{
2649 DEBUG ("stop %s", ptid.to_string ().c_str ());
2650
2651 /* As long as we're not replaying, just forward the request. */
2653 && !record_is_replaying (minus_one_ptid))
2654 {
2655 this->beneath ()->stop (ptid);
2656 }
2657 else
2658 {
2659 process_stratum_target *proc_target
2661
2662 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
2663 {
2664 tp->btrace.flags &= ~BTHR_MOVE;
2665 tp->btrace.flags |= BTHR_STOP;
2666 }
2667 }
2668 }
2669
2670/* The can_execute_reverse method of target record-btrace. */
2671
2672bool
2674{
2675 return true;
2676}
2677
2678/* The stopped_by_sw_breakpoint method of target record-btrace. */
2679
2680bool
2682{
2683 if (record_is_replaying (minus_one_ptid))
2684 {
2685 struct thread_info *tp = inferior_thread ();
2686
2687 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2688 }
2689
2690 return this->beneath ()->stopped_by_sw_breakpoint ();
2691}
2692
2693/* The supports_stopped_by_sw_breakpoint method of target
2694 record-btrace. */
2695
2696bool
2698{
2699 if (record_is_replaying (minus_one_ptid))
2700 return true;
2701
2702 return this->beneath ()->supports_stopped_by_sw_breakpoint ();
2703}
2704
2705/* The stopped_by_sw_breakpoint method of target record-btrace. */
2706
2707bool
2709{
2710 if (record_is_replaying (minus_one_ptid))
2711 {
2712 struct thread_info *tp = inferior_thread ();
2713
2714 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2715 }
2716
2717 return this->beneath ()->stopped_by_hw_breakpoint ();
2718}
2719
2720/* The supports_stopped_by_hw_breakpoint method of target
2721 record-btrace. */
2722
2723bool
2725{
2726 if (record_is_replaying (minus_one_ptid))
2727 return true;
2728
2729 return this->beneath ()->supports_stopped_by_hw_breakpoint ();
2730}
2731
2732/* The update_thread_list method of target record-btrace. */
2733
2734void
2736{
2737 /* We don't add or remove threads during replay. */
2738 if (record_is_replaying (minus_one_ptid))
2739 return;
2740
2741 /* Forward the request. */
2742 this->beneath ()->update_thread_list ();
2743}
2744
2745/* The thread_alive method of target record-btrace. */
2746
2747bool
2749{
2750 /* We don't add or remove threads during replay. */
2751 if (record_is_replaying (minus_one_ptid))
2752 return true;
2753
2754 /* Forward the request. */
2755 return this->beneath ()->thread_alive (ptid);
2756}
2757
2758/* Set the replay branch trace instruction iterator. If IT is NULL, replay
2759 is stopped. */
2760
2761static void
2763 const struct btrace_insn_iterator *it)
2764{
2765 struct btrace_thread_info *btinfo;
2766
2767 btinfo = &tp->btrace;
2768
2769 if (it == NULL)
2771 else
2772 {
2773 if (btinfo->replay == NULL)
2775 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2776 return;
2777
2778 *btinfo->replay = *it;
2780 }
2781
2782 /* Start anew from the new replay position. */
2784
2787}
2788
2789/* The goto_record_begin method of target record-btrace. */
2790
2791void
2793{
2794 struct thread_info *tp;
2795 struct btrace_insn_iterator begin;
2796
2797 tp = require_btrace_thread ();
2798
2799 btrace_insn_begin (&begin, &tp->btrace);
2800
2801 /* Skip gaps at the beginning of the trace. */
2802 while (btrace_insn_get (&begin) == NULL)
2803 {
2804 unsigned int steps;
2805
2806 steps = btrace_insn_next (&begin, 1);
2807 if (steps == 0)
2808 error (_("No trace."));
2809 }
2810
2811 record_btrace_set_replay (tp, &begin);
2812}
2813
2814/* The goto_record_end method of target record-btrace. */
2815
2816void
2818{
2819 struct thread_info *tp;
2820
2821 tp = require_btrace_thread ();
2822
2823 record_btrace_set_replay (tp, NULL);
2824}
2825
2826/* The goto_record method of target record-btrace. */
2827
2828void
2830{
2831 struct thread_info *tp;
2832 struct btrace_insn_iterator it;
2833 unsigned int number;
2834 int found;
2835
2836 number = insn;
2837
2838 /* Check for wrap-arounds. */
2839 if (number != insn)
2840 error (_("Instruction number out of range."));
2841
2842 tp = require_btrace_thread ();
2843
2844 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2845
2846 /* Check if the instruction could not be found or is a gap. */
2847 if (found == 0 || btrace_insn_get (&it) == NULL)
2848 error (_("No such instruction."));
2849
2850 record_btrace_set_replay (tp, &it);
2851}
2852
2853/* The record_stop_replaying method of target record-btrace. */
2854
2855void
2861
2862/* The execution_direction target method. */
2863
2869
2870/* The prepare_to_generate_core target method. */
2871
2872void
2877
2878/* The done_generating_core target method. */
2879
2880void
2885
2886/* Start recording in BTS format. */
2887
2888static void
2889cmd_record_btrace_bts_start (const char *args, int from_tty)
2890{
2891 if (args != NULL && *args != 0)
2892 error (_("Invalid argument."));
2893
2894 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2895
2896 try
2897 {
2898 execute_command ("target record-btrace", from_tty);
2899 }
2900 catch (const gdb_exception &exception)
2901 {
2902 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2903 throw;
2904 }
2905}
2906
2907/* Start recording in Intel Processor Trace format. */
2908
2909static void
2910cmd_record_btrace_pt_start (const char *args, int from_tty)
2911{
2912 if (args != NULL && *args != 0)
2913 error (_("Invalid argument."));
2914
2915 record_btrace_conf.format = BTRACE_FORMAT_PT;
2916
2917 try
2918 {
2919 execute_command ("target record-btrace", from_tty);
2920 }
2921 catch (const gdb_exception &exception)
2922 {
2923 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2924 throw;
2925 }
2926}
2927
2928/* Alias for "target record". */
2929
2930static void
2931cmd_record_btrace_start (const char *args, int from_tty)
2932{
2933 if (args != NULL && *args != 0)
2934 error (_("Invalid argument."));
2935
2936 record_btrace_conf.format = BTRACE_FORMAT_PT;
2937
2938 try
2939 {
2940 execute_command ("target record-btrace", from_tty);
2941 }
2942 catch (const gdb_exception_error &exception)
2943 {
2944 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2945
2946 try
2947 {
2948 execute_command ("target record-btrace", from_tty);
2949 }
2950 catch (const gdb_exception &ex)
2951 {
2952 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2953 throw;
2954 }
2955 }
2956}
2957
2958/* The "show record btrace replay-memory-access" command. */
2959
2960static void
2961cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2962 struct cmd_list_element *c, const char *value)
2963{
2964 gdb_printf (file, _("Replay memory access is %s.\n"),
2966}
2967
2968/* The "set record btrace cpu none" command. */
2969
2970static void
2971cmd_set_record_btrace_cpu_none (const char *args, int from_tty)
2972{
2973 if (args != nullptr && *args != 0)
2974 error (_("Trailing junk: '%s'."), args);
2975
2977}
2978
2979/* The "set record btrace cpu auto" command. */
2980
2981static void
2982cmd_set_record_btrace_cpu_auto (const char *args, int from_tty)
2983{
2984 if (args != nullptr && *args != 0)
2985 error (_("Trailing junk: '%s'."), args);
2986
2988}
2989
2990/* The "set record btrace cpu" command. */
2991
2992static void
2993cmd_set_record_btrace_cpu (const char *args, int from_tty)
2994{
2995 if (args == nullptr)
2996 args = "";
2997
2998 /* We use a hard-coded vendor string for now. */
2999 unsigned int family, model, stepping;
3000 int l1, l2, matches = sscanf (args, "intel: %u/%u%n/%u%n", &family,
3001 &model, &l1, &stepping, &l2);
3002 if (matches == 3)
3003 {
3004 if (strlen (args) != l2)
3005 error (_("Trailing junk: '%s'."), args + l2);
3006 }
3007 else if (matches == 2)
3008 {
3009 if (strlen (args) != l1)
3010 error (_("Trailing junk: '%s'."), args + l1);
3011
3012 stepping = 0;
3013 }
3014 else
3015 error (_("Bad format. See \"help set record btrace cpu\"."));
3016
3017 if (USHRT_MAX < family)
3018 error (_("Cpu family too big."));
3019
3020 if (UCHAR_MAX < model)
3021 error (_("Cpu model too big."));
3022
3023 if (UCHAR_MAX < stepping)
3024 error (_("Cpu stepping too big."));
3025
3026 record_btrace_cpu.vendor = CV_INTEL;
3027 record_btrace_cpu.family = family;
3028 record_btrace_cpu.model = model;
3029 record_btrace_cpu.stepping = stepping;
3030
3032}
3033
3034/* The "show record btrace cpu" command. */
3035
3036static void
3037cmd_show_record_btrace_cpu (const char *args, int from_tty)
3038{
3039 if (args != nullptr && *args != 0)
3040 error (_("Trailing junk: '%s'."), args);
3041
3043 {
3044 case CS_AUTO:
3045 gdb_printf (_("btrace cpu is 'auto'.\n"));
3046 return;
3047
3048 case CS_NONE:
3049 gdb_printf (_("btrace cpu is 'none'.\n"));
3050 return;
3051
3052 case CS_CPU:
3053 switch (record_btrace_cpu.vendor)
3054 {
3055 case CV_INTEL:
3056 if (record_btrace_cpu.stepping == 0)
3057 gdb_printf (_("btrace cpu is 'intel: %u/%u'.\n"),
3058 record_btrace_cpu.family,
3059 record_btrace_cpu.model);
3060 else
3061 gdb_printf (_("btrace cpu is 'intel: %u/%u/%u'.\n"),
3062 record_btrace_cpu.family,
3063 record_btrace_cpu.model,
3064 record_btrace_cpu.stepping);
3065 return;
3066 }
3067 }
3068
3069 error (_("Internal error: bad cpu state."));
3070}
3071
3072/* The "record bts buffer-size" show value function. */
3073
3074static void
3075show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3076 struct cmd_list_element *c,
3077 const char *value)
3078{
3079 gdb_printf (file, _("The record/replay bts buffer size is %s.\n"),
3080 value);
3081}
3082
3083/* The "record pt buffer-size" show value function. */
3084
3085static void
3086show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3087 struct cmd_list_element *c,
3088 const char *value)
3089{
3090 gdb_printf (file, _("The record/replay pt buffer size is %s.\n"),
3091 value);
3092}
3093
3094/* Initialize btrace commands. */
3095
3097void
3099{
3100 cmd_list_element *record_btrace_cmd
3102 _("Start branch trace recording."),
3104 add_alias_cmd ("b", record_btrace_cmd, class_obscure, 1, &record_cmdlist);
3105
3106 cmd_list_element *record_btrace_bts_cmd
3108 _("\
3109Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3110The processor stores a from/to record for each branch into a cyclic buffer.\n\
3111This format may not be available on all processors."),
3113 add_alias_cmd ("bts", record_btrace_bts_cmd, class_obscure, 1,
3115
3116 cmd_list_element *record_btrace_pt_cmd
3118 _("\
3119Start branch trace recording in Intel Processor Trace format.\n\n\
3120This format may not be available on all processors."),
3122 add_alias_cmd ("pt", record_btrace_pt_cmd, class_obscure, 1, &record_cmdlist);
3123
3125 _("Set record options."),
3126 _("Show record options."),
3130
3131 add_setshow_enum_cmd ("replay-memory-access", no_class,
3133Set what memory accesses are allowed during replay."), _("\
3134Show what memory accesses are allowed during replay."),
3135 _("Default is READ-ONLY.\n\n\
3136The btrace record target does not trace data.\n\
3137The memory therefore corresponds to the live target and not \
3138to the current replay position.\n\n\
3139When READ-ONLY, allow accesses to read-only memory during replay.\n\
3140When READ-WRITE, allow accesses to read-only and read-write memory during \
3141replay."),
3145
3147 _("\
3148Set the cpu to be used for trace decode.\n\n\
3149The format is \"VENDOR:IDENTIFIER\" or \"none\" or \"auto\" (default).\n\
3150For vendor \"intel\" the format is \"FAMILY/MODEL[/STEPPING]\".\n\n\
3151When decoding branch trace, enable errata workarounds for the specified cpu.\n\
3152The default is \"auto\", which uses the cpu on which the trace was recorded.\n\
3153When GDB does not support that cpu, this option can be used to enable\n\
3154workarounds for a similar cpu that GDB supports.\n\n\
3155When set to \"none\", errata workarounds are disabled."),
3157 1,
3159
3161Automatically determine the cpu to be used for trace decode."),
3163
3165Do not enable errata workarounds for trace decode."),
3167
3169Show the cpu to be used for trace decode."),
3171
3173 _("Set record btrace bts options."),
3174 _("Show record btrace bts options."),
3179
3180 add_setshow_uinteger_cmd ("buffer-size", no_class,
3181 &record_btrace_conf.bts.size,
3182 _("Set the record/replay bts buffer size."),
3183 _("Show the record/replay bts buffer size."), _("\
3184When starting recording request a trace buffer of this size. \
3185The actual buffer size may differ from the requested size. \
3186Use \"info record\" to see the actual buffer size.\n\n\
3187Bigger buffers allow longer recording but also take more time to process \
3188the recorded execution trace.\n\n\
3189The trace buffer size may not be changed while recording."), NULL,
3193
3195 _("Set record btrace pt options."),
3196 _("Show record btrace pt options."),
3201
3202 add_setshow_uinteger_cmd ("buffer-size", no_class,
3203 &record_btrace_conf.pt.size,
3204 _("Set the record/replay pt buffer size."),
3205 _("Show the record/replay pt buffer size."), _("\
3206Bigger buffers allow longer recording but also take more time to process \
3207the recorded execution.\n\
3208The actual buffer size may differ from the requested size. Use \"info record\" \
3209to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3212
3214
3215 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3216 xcalloc, xfree);
3217
3218 record_btrace_conf.bts.size = 64 * 1024;
3219 record_btrace_conf.pt.size = 16 * 1024;
3220}
int regnum
void xfree(void *)
int code
Definition ada-lex.l:670
void * xcalloc(size_t number, size_t size)
Definition alloc.c:85
struct gdbarch * target_gdbarch(void)
void mark_async_event_handler(async_event_handler *async_handler_ptr)
async_event_handler * create_async_event_handler(async_event_handler_func *proc, gdb_client_data client_data, const char *name)
void clear_async_event_handler(async_event_handler *async_handler_ptr)
void delete_async_event_handler(async_event_handler **async_handler_ptr)
remove_bp_reason
Definition breakpoint.h:64
void btrace_enable(struct thread_info *tp, const struct btrace_config *conf)
Definition btrace.c:1606
const struct btrace_function * btrace_call_get(const struct btrace_call_iterator *it)
Definition btrace.c:2324
unsigned int btrace_call_prev(struct btrace_call_iterator *it, unsigned int stride)
Definition btrace.c:2412
unsigned int btrace_call_next(struct btrace_call_iterator *it, unsigned int stride)
Definition btrace.c:2376
unsigned int btrace_call_number(const struct btrace_call_iterator *it)
Definition btrace.c:2335
void btrace_insn_end(struct btrace_insn_iterator *it, const struct btrace_thread_info *btinfo)
Definition btrace.c:2075
int btrace_insn_cmp(const struct btrace_insn_iterator *lhs, const struct btrace_insn_iterator *rhs)
Definition btrace.c:2241
int btrace_find_insn_by_number(struct btrace_insn_iterator *it, const struct btrace_thread_info *btinfo, unsigned int number)
Definition btrace.c:2255
unsigned int btrace_insn_prev(struct btrace_insn_iterator *it, unsigned int stride)
Definition btrace.c:2183
const char * btrace_decode_error(enum btrace_format format, int errcode)
Definition btrace.c:1857
int btrace_find_call_by_number(struct btrace_call_iterator *it, const struct btrace_thread_info *btinfo, unsigned int number)
Definition btrace.c:2456
void btrace_set_insn_history(struct btrace_thread_info *btinfo, const struct btrace_insn_iterator *begin, const struct btrace_insn_iterator *end)
Definition btrace.c:2473
void btrace_disable(struct thread_info *tp)
Definition btrace.c:1664
void btrace_fetch(struct thread_info *tp, const struct btrace_cpu *cpu)
Definition btrace.c:1906
void btrace_set_call_history(struct btrace_thread_info *btinfo, const struct btrace_call_iterator *begin, const struct btrace_call_iterator *end)
Definition btrace.c:2487
const struct btrace_config * btrace_conf(const struct btrace_thread_info *btinfo)
Definition btrace.c:1653
unsigned int btrace_insn_next(struct btrace_insn_iterator *it, unsigned int stride)
Definition btrace.c:2101
const struct btrace_insn * btrace_insn_get(const struct btrace_insn_iterator *it)
Definition btrace.c:2022
void btrace_teardown(struct thread_info *tp)
Definition btrace.c:1684
unsigned int btrace_insn_number(const struct btrace_insn_iterator *it)
Definition btrace.c:2053
int btrace_insn_get_error(const struct btrace_insn_iterator *it)
Definition btrace.c:2045
int btrace_is_empty(struct thread_info *tp)
Definition btrace.c:2511
void btrace_insn_begin(struct btrace_insn_iterator *it, const struct btrace_thread_info *btinfo)
Definition btrace.c:2061
void btrace_call_end(struct btrace_call_iterator *it, const struct btrace_thread_info *btinfo)
Definition btrace.c:2363
int btrace_is_replaying(struct thread_info *tp)
Definition btrace.c:2503
int btrace_call_cmp(const struct btrace_call_iterator *lhs, const struct btrace_call_iterator *rhs)
Definition btrace.c:2446
@ BFUN_UP_LINKS_TO_RET
Definition btrace.h:90
@ BFUN_UP_LINKS_TO_TAILCALL
Definition btrace.h:94
@ BTRACE_INSN_FLAG_SPECULATIVE
Definition btrace.h:62
btrace_thread_flag
Definition btrace.h:232
@ BTHR_CONT
Definition btrace.h:240
@ BTHR_RSTEP
Definition btrace.h:237
@ BTHR_STEP
Definition btrace.h:234
@ BTHR_MOVE
Definition btrace.h:246
@ BTHR_RCONT
Definition btrace.h:243
@ BTHR_STOP
Definition btrace.h:249
ui_file_style style() const
Definition cli-style.c:169
int pretty_print_insn(const struct disasm_insn *insn, gdb_disassembly_flags flags)
Definition disasm.c:362
int unpush_target(struct target_ops *t)
Definition inferior.c:96
void push_target(struct target_ops *t)
Definition inferior.h:406
struct process_stratum_target * process_target()
Definition inferior.h:449
struct address_space * aspace
Definition inferior.h:579
inf_non_exited_threads_range non_exited_threads()
Definition inferior.h:481
target_ops * target_at(enum strata stratum)
Definition inferior.h:453
thread_info * find_thread(ptid_t ptid)
void stop_recording() override
void call_history_range(ULONGEST begin, ULONGEST end, record_print_flags flags) override
void async(bool) override
void fetch_registers(struct regcache *, int) override
enum record_method record_method(ptid_t ptid) override
void record_stop_replaying() override
void info_record() override
ptid_t wait(ptid_t, struct target_waitstatus *, target_wait_flags) override
enum exec_direction_kind execution_direction() override
bool stopped_by_hw_breakpoint() override
void insn_history(int size, gdb_disassembly_flags flags) override
bool stopped_by_sw_breakpoint() override
void close() override
void goto_record_end() override
void prepare_to_store(struct regcache *) override
void mourn_inferior() override
void call_history_from(ULONGEST begin, int size, record_print_flags flags) override
void detach(inferior *inf, int from_tty) override
void update_thread_list() override
bool supports_stopped_by_hw_breakpoint() override
void goto_record_begin() override
void store_registers(struct regcache *, int) override
bool supports_stopped_by_sw_breakpoint() override
void insn_history_from(ULONGEST from, int size, gdb_disassembly_flags flags) override
bool can_execute_reverse() override
int insert_breakpoint(struct gdbarch *, struct bp_target_info *) override
bool record_is_replaying(ptid_t ptid) override
const struct frame_unwind * get_tailcall_unwinder() override
enum target_xfer_status xfer_partial(enum target_object object, const char *annex, gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST offset, ULONGEST len, ULONGEST *xfered_len) override
void kill() override
bool record_will_replay(ptid_t ptid, int dir) override
strata stratum() const override
const struct frame_unwind * get_unwinder() override
bool thread_alive(ptid_t ptid) override
int remove_breakpoint(struct gdbarch *, struct bp_target_info *, enum remove_bp_reason) override
void prepare_to_generate_core() override
void goto_record(ULONGEST insn) override
void call_history(int size, record_print_flags flags) override
void resume(ptid_t, int, enum gdb_signal) override
void insn_history_range(ULONGEST begin, ULONGEST end, gdb_disassembly_flags flags) override
void done_generating_core() override
const target_info & info() const override
void disconnect(const char *, int) override
gdbarch * arch() const
Definition regcache.c:231
void raw_supply(int regnum, const void *buf) override
Definition regcache.c:1062
ptid_t ptid() const
Definition regcache.h:408
ptid_t ptid
Definition gdbthread.h:259
struct inferior * inf
Definition gdbthread.h:301
void set_stop_pc(CORE_ADDR stop_pc)
Definition gdbthread.h:372
thread_control_state control
Definition gdbthread.h:343
void begin(ui_out_type type, const char *id)
Definition ui-out.c:399
void field_string(const char *fldname, const char *string, const ui_file_style &style=ui_file_style())
Definition ui-out.c:511
void field_fmt(const char *fldname, const char *format,...) ATTRIBUTE_PRINTF(3
Definition ui-out.c:525
void field_signed(const char *fldname, LONGEST value)
Definition ui-out.c:437
void text(const char *string)
Definition ui-out.c:566
bool is_mi_like_p() const
Definition ui-out.c:810
void field_unsigned(const char *fldname, ULONGEST value)
Definition ui-out.c:464
void end(ui_out_type type)
Definition ui-out.c:429
struct cmd_list_element * add_alias_cmd(const char *name, cmd_list_element *target, enum command_class theclass, int abbrev_flag, struct cmd_list_element **list)
Definition cli-decode.c:294
struct cmd_list_element * add_cmd(const char *name, enum command_class theclass, const char *doc, struct cmd_list_element **list)
Definition cli-decode.c:233
set_show_commands add_setshow_uinteger_cmd(const char *name, enum command_class theclass, unsigned int *var, const literal_def *extra_literals, const char *set_doc, const char *show_doc, const char *help_doc, cmd_func_ftype *set_func, show_value_ftype *show_func, struct cmd_list_element **set_list, struct cmd_list_element **show_list)
set_show_commands add_setshow_enum_cmd(const char *name, enum command_class theclass, const char *const *enumlist, const char **var, const char *set_doc, const char *show_doc, const char *help_doc, cmd_func_ftype *set_func, show_value_ftype *show_func, struct cmd_list_element **set_list, struct cmd_list_element **show_list)
Definition cli-decode.c:688
set_show_commands add_setshow_prefix_cmd(const char *name, command_class theclass, const char *set_doc, const char *show_doc, cmd_list_element **set_subcommands_list, cmd_list_element **show_subcommands_list, cmd_list_element **set_list, cmd_list_element **show_list)
Definition cli-decode.c:428
struct cmd_list_element * add_prefix_cmd(const char *name, enum command_class theclass, cmd_simple_func_ftype *fun, const char *doc, struct cmd_list_element **subcommands, int allow_unknown, struct cmd_list_element **list)
Definition cli-decode.c:357
cli_style_option function_name_style
cli_style_option file_name_style
int number_is_in_list(const char *list, int number)
Definition cli-utils.c:348
@ class_obscure
Definition command.h:64
@ class_support
Definition command.h:58
@ no_class
Definition command.h:53
@ DISASSEMBLY_SOURCE
@ DISASSEMBLY_FILENAME
@ DISASSEMBLY_SPECULATIVE
int gdb_insn_length(struct gdbarch *gdbarch, CORE_ADDR addr)
Definition disasm.c:1230
struct value * frame_unwind_got_address(frame_info_ptr frame, int regnum, CORE_ADDR addr)
frame_info_ptr get_next_frame(frame_info_ptr this_frame)
Definition frame.c:2068
struct frame_id frame_id_build_unavailable_stack_special(CORE_ADDR code_addr, CORE_ADDR special_addr)
Definition frame.c:722
struct gdbarch * get_frame_arch(frame_info_ptr this_frame)
Definition frame.c:3027
CORE_ADDR get_frame_func(frame_info_ptr this_frame)
Definition frame.c:1098
frame_info_ptr get_selected_frame(const char *message)
Definition frame.c:1888
frame_info_ptr get_current_frame(void)
Definition frame.c:1670
struct frame_id get_frame_id(frame_info_ptr fi)
Definition frame.c:631
@ SRC_AND_LOC
Definition frame.h:811
@ TAILCALL_FRAME
Definition frame.h:195
@ NORMAL_FRAME
Definition frame.h:187
void print_stack_frame(frame_info_ptr, int print_level, enum print_what print_what, int set_current_sal)
Definition stack.c:353
unwind_stop_reason
Definition frame.h:653
#define FRAME_OBSTACK_ZALLOC(TYPE)
Definition frame.h:825
int gdbarch_pc_regnum(struct gdbarch *gdbarch)
Definition gdbarch.c:2054
void execute_command(const char *, int)
Definition top.c:459
all_non_exited_threads_range all_non_exited_threads(process_stratum_target *proc_target=nullptr, ptid_t filter_ptid=minus_one_ptid)
Definition gdbthread.h:753
void validate_registers_access(void)
Definition thread.c:958
void set_executing(process_stratum_target *targ, ptid_t ptid, bool executing)
Definition thread.c:908
struct thread_info * inferior_thread(void)
Definition thread.c:85
void switch_to_thread(struct thread_info *thr)
Definition thread.c:1360
const char * print_thread_id(struct thread_info *thr)
Definition thread.c:1470
mach_port_t kern_return_t mach_port_t mach_msg_type_name_t msgportsPoly mach_port_t kern_return_t pid_t pid mach_port_t kern_return_t mach_port_t task mach_port_t kern_return_t int flags
Definition gnu-nat.c:1861
mach_port_t mach_port_t name mach_port_t mach_port_t name kern_return_t int status
Definition gnu-nat.c:1790
size_t size
Definition go32-nat.c:239
void inferior_event_handler(enum inferior_event_type event_type)
Definition inf-loop.c:37
ptid_t inferior_ptid
Definition infcmd.c:74
struct inferior * current_inferior(void)
Definition inferior.c:55
exec_direction_kind
Definition infrun.h:112
@ EXEC_REVERSE
Definition infrun.h:114
@ EXEC_FORWARD
Definition infrun.h:113
void interps_notify_record_changed(inferior *inf, int started, const char *method, const char *format)
Definition interps.c:474
static const char * range
Definition language.c:96
observable< struct thread_info * > new_thread
void _initialize_record_btrace()
static void record_btrace_print_conf(const struct btrace_config *conf)
static struct btrace_line_range btrace_line_range_add(struct btrace_line_range range, int line)
static struct cmd_list_element * set_record_btrace_pt_cmdlist
static struct cmd_list_element * show_record_btrace_pt_cmdlist
static struct btrace_line_range btrace_find_line_range(CORE_ADDR pc)
static const char replay_memory_access_read_only[]
static const char * btrace_thread_flag_to_str(btrace_thread_flags flag)
static struct target_waitstatus btrace_step_again(void)
static void btrace_compute_src_line_range(const struct btrace_function *bfun, int *pbegin, int *pend)
static int bfcache_eq(const void *arg1, const void *arg2)
static void record_btrace_maybe_mark_async_event(const std::vector< thread_info * > &moving, const std::vector< thread_info * > &no_history)
static void record_btrace_stop_replaying_at_end(struct thread_info *tp)
static void record_btrace_set_replay(struct thread_info *tp, const struct btrace_insn_iterator *it)
static struct thread_info * require_btrace_thread(void)
static void cmd_record_btrace_pt_start(const char *args, int from_tty)
static void cmd_show_record_btrace_cpu(const char *args, int from_tty)
static struct async_event_handler * record_btrace_async_inferior_event_handler
static void cmd_set_record_btrace_cpu_auto(const char *args, int from_tty)
static void record_btrace_cancel_resume(struct thread_info *tp)
static void record_btrace_auto_disable(void)
static void record_btrace_stop_replaying(struct thread_info *tp)
static const gdb::observers::token record_btrace_thread_observer_token
static void btrace_call_history(struct ui_out *uiout, const struct btrace_thread_info *btinfo, const struct btrace_call_iterator *begin, const struct btrace_call_iterator *end, int int_flags)
static void btrace_print_lines(struct btrace_line_range lines, struct ui_out *uiout, gdb::optional< ui_out_emit_tuple > *src_and_asm_tuple, gdb::optional< ui_out_emit_list > *asm_list, gdb_disassembly_flags flags)
static hashval_t bfcache_hash(const void *arg)
static record_btrace_target record_btrace_ops
static struct btrace_cpu record_btrace_cpu
static enum record_btrace_cpu_state_kind record_btrace_cpu_state
record_btrace_cpu_state_kind
@ CS_CPU
@ CS_AUTO
@ CS_NONE
const struct frame_unwind record_btrace_frame_unwind
static void cmd_show_replay_memory_access(struct ui_file *file, int from_tty, struct cmd_list_element *c, const char *value)
static void record_btrace_clear_histories(struct btrace_thread_info *btinfo)
static struct cmd_list_element * show_record_btrace_cmdlist
static void cmd_set_record_btrace_cpu(const char *args, int from_tty)
static struct btrace_insn_iterator * record_btrace_start_replaying(struct thread_info *tp)
static const char * replay_memory_access
static struct btrace_line_range btrace_mk_line_range(struct symtab *symtab, int begin, int end)
#define DEBUG(msg, args...)
static void btrace_insn_history(struct ui_out *uiout, const struct btrace_thread_info *btinfo, const struct btrace_insn_iterator *begin, const struct btrace_insn_iterator *end, gdb_disassembly_flags flags)
static void btrace_call_history_insn_range(struct ui_out *uiout, const struct btrace_function *bfun)
static void record_btrace_print_bts_conf(const struct btrace_config_bts *conf)
static struct target_waitstatus btrace_step_spurious(void)
static struct value * record_btrace_frame_prev_register(frame_info_ptr this_frame, void **this_cache, int regnum)
static void cmd_set_record_btrace_cpu_none(const char *args, int from_tty)
static const char replay_memory_access_read_write[]
static void cmd_record_btrace_bts_start(const char *args, int from_tty)
static struct target_waitstatus btrace_step_stopped(void)
static struct target_waitstatus record_btrace_single_step_backward(struct thread_info *tp)
const struct btrace_cpu * record_btrace_get_cpu(void)
static void cmd_record_btrace_start(const char *args, int from_tty)
static struct target_waitstatus record_btrace_single_step_forward(struct thread_info *tp)
static struct target_waitstatus btrace_step_stopped_on_request(void)
static void record_btrace_print_pt_conf(const struct btrace_config_pt *conf)
static void btrace_call_history_src_line(struct ui_out *uiout, const struct btrace_function *bfun)
static void btrace_ui_out_decode_error(struct ui_out *uiout, int errcode, enum btrace_format format)
static enum exec_direction_kind record_btrace_resume_exec_dir
static struct cmd_list_element * show_record_btrace_bts_cmdlist
static struct frame_id get_thread_current_frame_id(struct thread_info *tp)
static int record_btrace_generating_corefile
static void record_btrace_handle_async_inferior_event(gdb_client_data data)
const struct frame_unwind record_btrace_tailcall_frame_unwind
void record_btrace_push_target(void)
static struct target_waitstatus record_btrace_step_thread(struct thread_info *tp)
static int record_btrace_frame_sniffer(const struct frame_unwind *self, frame_info_ptr this_frame, void **this_cache)
static struct cmd_list_element * set_record_btrace_cmdlist
static void record_btrace_resume_thread(struct thread_info *tp, enum btrace_thread_flag flag)
static void record_btrace_target_open(const char *args, int from_tty)
static int btrace_line_range_contains_range(struct btrace_line_range lhs, struct btrace_line_range rhs)
static int record_btrace_tailcall_frame_sniffer(const struct frame_unwind *self, frame_info_ptr this_frame, void **this_cache)
static struct btrace_thread_info * require_btrace(void)
static struct btrace_config record_btrace_conf
static void record_btrace_frame_this_id(frame_info_ptr this_frame, void **this_cache, struct frame_id *this_id)
static struct target_waitstatus btrace_step_no_resumed(void)
static int btrace_line_range_is_empty(struct btrace_line_range range)
static void record_btrace_on_new_thread(struct thread_info *tp)
static void show_record_pt_buffer_size_value(struct ui_file *file, int from_tty, struct cmd_list_element *c, const char *value)
static const char * btrace_get_bfun_name(const struct btrace_function *bfun)
static struct target_waitstatus btrace_step_no_history(void)
static void record_btrace_frame_dealloc_cache(frame_info *self, void *this_cache)
static void show_record_bts_buffer_size_value(struct ui_file *file, int from_tty, struct cmd_list_element *c, const char *value)
static htab_t bfcache
static const char *const replay_memory_access_types[]
static struct cmd_list_element * record_btrace_cmdlist
static const char * record_btrace_adjust_size(unsigned int *size)
static struct cmd_list_element * set_record_btrace_cpu_cmdlist
static void record_btrace_auto_enable(void)
static enum unwind_stop_reason record_btrace_frame_unwind_stop_reason(frame_info_ptr this_frame, void **this_cache)
static const target_info record_btrace_target_info
static struct btrace_frame_cache * bfcache_new(frame_info_ptr frame)
static const struct btrace_function * btrace_get_frame_function(frame_info_ptr frame)
static struct cmd_list_element * set_record_btrace_bts_cmdlist
static int record_btrace_replay_at_breakpoint(struct thread_info *tp)
void record_detach(struct target_ops *t, inferior *inf, int from_tty)
Definition record.c:191
struct cmd_list_element * set_record_cmdlist
Definition record.c:53
struct cmd_list_element * show_record_cmdlist
Definition record.c:54
struct cmd_list_element * record_cmdlist
Definition record.c:51
void record_preopen(void)
Definition record.c:87
void record_mourn_inferior(struct target_ops *t)
Definition record.c:206
void record_kill(struct target_ops *t)
Definition record.c:222
int record_check_stopped_by_breakpoint(const address_space *aspace, CORE_ADDR pc, enum target_stop_reason *reason)
Definition record.c:238
record_print_flag
Definition record.h:57
@ RECORD_PRINT_SRC_LINE
Definition record.h:59
@ RECORD_PRINT_INSN_RANGE
Definition record.h:62
@ RECORD_PRINT_INDENT_CALLS
Definition record.h:65
record_method
Definition record.h:44
@ RECORD_METHOD_BTRACE
Definition record.h:52
@ RECORD_METHOD_NONE
Definition record.h:46
CORE_ADDR regcache_read_pc(struct regcache *regcache)
Definition regcache.c:1333
void registers_changed_thread(thread_info *thread)
Definition regcache.c:574
struct regcache * get_current_regcache(void)
Definition regcache.c:429
#define enable()
Definition ser-go32.c:239
const char * symtab_to_filename_for_display(struct symtab *symtab)
Definition source.c:1269
void print_source_lines(struct symtab *s, int line, int stopline, print_source_lines_flags flags)
Definition source.c:1465
@ PRINT_SOURCE_LINES_FILENAME
Definition source.h:141
struct btrace_call_iterator begin
Definition btrace.h:226
struct btrace_call_iterator end
Definition btrace.h:227
unsigned int index
Definition btrace.h:209
const struct btrace_thread_info * btinfo
Definition btrace.h:206
const struct btrace_function * bfun
struct thread_info * tp
btrace_function_flags flags
Definition btrace.h:186
unsigned int up
Definition btrace.h:155
struct minimal_symbol * msym
Definition btrace.h:142
unsigned int prev
Definition btrace.h:149
unsigned int number
Definition btrace.h:175
std::vector< btrace_insn > insn
Definition btrace.h:160
struct symbol * sym
Definition btrace.h:143
unsigned int insn_offset
Definition btrace.h:170
struct btrace_insn_iterator begin
Definition btrace.h:217
struct btrace_insn_iterator end
Definition btrace.h:218
unsigned int call_index
Definition btrace.h:196
const struct btrace_thread_info * btinfo
Definition btrace.h:193
btrace_insn_flags flags
Definition btrace.h:81
CORE_ADDR pc
Definition btrace.h:72
struct symtab * symtab
unsigned int ngaps
Definition btrace.h:339
std::vector< btrace_function > functions
Definition btrace.h:331
btrace_thread_flags flags
Definition btrace.h:342
struct btrace_insn_iterator * replay
Definition btrace.h:353
struct btrace_call_history * call_history
Definition btrace.h:348
enum target_stop_reason stop_reason
Definition btrace.h:356
struct btrace_insn_history * insn_history
Definition btrace.h:345
struct objfile * objfile() const
Definition symtab.h:1788
unsigned int is_speculative
Definition disasm.h:337
unsigned int number
Definition disasm.h:334
CORE_ADDR addr
Definition disasm.h:331
CORE_ADDR code_addr
Definition frame-id.h:83
CORE_ADDR special_addr
Definition frame-id.h:95
const char * print_name() const
Definition symtab.h:475
Definition gnu-nat.c:153
Definition symtab.h:1596
int nitems
Definition symtab.h:1656
struct linetable_entry item[1]
Definition symtab.h:1661
CORE_ADDR text_section_offset() const
Definition objfiles.h:482
Definition value.h:90
void add_thread(thread_info *thread)
DISABLE_COPY_AND_ASSIGN(scoped_btrace_disable)
scoped_btrace_disable()=default
std::forward_list< thread_info * > m_threads
struct symtab * symtab
Definition symtab.h:1457
struct symtab * symtab
Definition symtab.h:2328
CORE_ADDR end
Definition symtab.h:2338
struct compunit_symtab * compunit() const
Definition symtab.h:1677
const struct linetable * linetable() const
Definition symtab.h:1687
virtual ptid_t wait(ptid_t, struct target_waitstatus *, target_wait_flags options) TARGET_DEFAULT_FUNC(default_target_wait)
virtual int remove_breakpoint(struct gdbarch *, struct bp_target_info *, enum remove_bp_reason) TARGET_DEFAULT_NORETURN(noprocess())
virtual void fetch_registers(struct regcache *, int) TARGET_DEFAULT_IGNORE()
virtual bool stopped_by_sw_breakpoint() TARGET_DEFAULT_RETURN(false)
target_ops * beneath() const
Definition target.c:3041
virtual void store_registers(struct regcache *, int) TARGET_DEFAULT_NORETURN(noprocess())
virtual enum target_xfer_status xfer_partial(enum target_object object, const char *annex, gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST offset, ULONGEST len, ULONGEST *xfered_len) TARGET_DEFAULT_RETURN(TARGET_XFER_E_IO)
virtual gdb::array_view< const_gdb_byte > virtual thread_info_to_thread_handle(struct thread_info *) TARGET_DEFAULT_RETURN(gdb voi stop)(ptid_t) TARGET_DEFAULT_IGNORE()
Definition target.h:689
virtual bool supports_stopped_by_sw_breakpoint() TARGET_DEFAULT_RETURN(false)
virtual bool stopped_by_hw_breakpoint() TARGET_DEFAULT_RETURN(false)
virtual void async(bool) TARGET_DEFAULT_NORETURN(tcomplain())
virtual bool supports_stopped_by_hw_breakpoint() TARGET_DEFAULT_RETURN(false)
virtual void resume(ptid_t, int TARGET_DEBUG_PRINTER(target_debug_print_step), enum gdb_signal) TARGET_DEFAULT_NORETURN(noprocess())
virtual void update_thread_list() TARGET_DEFAULT_IGNORE()
virtual void prepare_to_store(struct regcache *) TARGET_DEFAULT_NORETURN(noprocess())
virtual bool thread_alive(ptid_t ptid) TARGET_DEFAULT_RETURN(false)
virtual void disconnect(const char *, int) TARGET_DEFAULT_NORETURN(tcomplain())
virtual int insert_breakpoint(struct gdbarch *, struct bp_target_info *) TARGET_DEFAULT_NORETURN(noprocess())
virtual const struct btrace_config * btrace_conf(const struct btrace_target_info *) TARGET_DEFAULT_RETURN(NULL)
CORE_ADDR endaddr
struct bfd_section * the_bfd_section
target_waitstatus & set_spurious()
Definition waitstatus.h:300
target_waitstatus & set_no_resumed()
Definition waitstatus.h:321
target_waitstatus & set_stopped(gdb_signal sig)
Definition waitstatus.h:230
target_waitstatus & set_ignore()
Definition waitstatus.h:307
target_waitstatus & set_no_history()
Definition waitstatus.h:314
Definition value.h:130
struct symtab * find_pc_line_symtab(CORE_ADDR pc)
Definition symtab.c:3317
struct symtab_and_line find_pc_line(CORE_ADDR pc, int notcurrent)
Definition symtab.c:3295
bool target_is_async_p()
Definition target.c:402
void target_async(bool enable)
Definition target.c:4337
const struct target_section * target_section_by_addr(struct target_ops *target, CORE_ADDR addr)
Definition target.c:1393
bool target_can_async_p()
Definition target.c:384
bool target_has_execution(inferior *inf)
Definition target.c:201
void add_target(const target_info &t, target_open_ftype *func, completer_ftype *completer)
Definition target.c:868
bool target_is_non_stop_p()
Definition target.c:4394
std::string target_pid_to_str(ptid_t ptid)
Definition target.c:2623
@ INF_REG_EVENT
Definition target.h:134
target_xfer_status
Definition target.h:219
@ TARGET_XFER_UNAVAILABLE
Definition target.h:227
bool may_write_registers
target_object
Definition target.h:143
@ TARGET_OBJECT_MEMORY
Definition target.h:147
strata
Definition target.h:94
@ record_stratum
Definition target.h:99
#define current_uiout
Definition ui-out.h:40
void gdb_printf(struct ui_file *stream, const char *format,...)
Definition utils.c:1886
@ TARGET_WAITKIND_SPURIOUS
Definition waitstatus.h:78
@ TARGET_WAITKIND_NO_HISTORY
Definition waitstatus.h:93
@ TARGET_WAITKIND_IGNORE
Definition waitstatus.h:89
@ TARGET_STOPPED_BY_SW_BREAKPOINT
Definition waitstatus.h:434
@ TARGET_STOPPED_BY_HW_BREAKPOINT
Definition waitstatus.h:437