GDB (xrefs)
Loading...
Searching...
No Matches
btrace.c
Go to the documentation of this file.
1/* Branch trace support for GDB, the GNU debugger.
2
3 Copyright (C) 2013-2023 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22#include "defs.h"
23#include "btrace.h"
24#include "gdbthread.h"
25#include "inferior.h"
26#include "target.h"
27#include "record.h"
28#include "symtab.h"
29#include "disasm.h"
30#include "source.h"
31#include "filenames.h"
32#include "regcache.h"
33#include "gdbsupport/rsp-low.h"
34#include "gdbcmd.h"
35#include "cli/cli-utils.h"
36#include "gdbarch.h"
37
38/* For maintenance commands. */
39#include "record-btrace.h"
40
41#include <inttypes.h>
42#include <ctype.h>
43#include <algorithm>
44
45/* Command lists for btrace maintenance commands. */
51
/* Control whether to skip PAD packets when computing the packet history.  */
static bool maint_btrace_pt_skip_pad = true;

/* Append the current PC of TP to its branch trace (defined below).  */
static void btrace_add_pc (struct thread_info *tp);
56
57/* Print a record debug message. Use do ... while (0) to avoid ambiguities
58 when used in if statements. */
59
60#define DEBUG(msg, args...) \
61 do \
62 { \
63 if (record_debug != 0) \
64 gdb_printf (gdb_stdlog, \
65 "[btrace] " msg "\n", ##args); \
66 } \
67 while (0)
68
69#define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
70
71/* Return the function name of a recorded function segment for printing.
72 This function never returns NULL. */
73
74static const char *
76{
77 struct minimal_symbol *msym;
78 struct symbol *sym;
79
80 msym = bfun->msym;
81 sym = bfun->sym;
82
83 if (sym != NULL)
84 return sym->print_name ();
85
86 if (msym != NULL)
87 return msym->print_name ();
88
89 return "<unknown>";
90}
91
92/* Return the file name of a recorded function segment for printing.
93 This function never returns NULL. */
94
95static const char *
97{
98 struct symbol *sym;
99 const char *filename;
100
101 sym = bfun->sym;
102
103 if (sym != NULL)
104 filename = symtab_to_filename_for_display (sym->symtab ());
105 else
106 filename = "<unknown>";
107
108 return filename;
109}
110
111/* Return a string representation of the address of an instruction.
112 This function never returns NULL. */
113
114static const char *
116{
117 if (insn == NULL)
118 return "<nil>";
119
120 return core_addr_to_string_nz (insn->pc);
121}
122
123/* Print an ftrace debug status message. */
124
125static void
126ftrace_debug (const struct btrace_function *bfun, const char *prefix)
127{
128 const char *fun, *file;
129 unsigned int ibegin, iend;
130 int level;
131
132 fun = ftrace_print_function_name (bfun);
133 file = ftrace_print_filename (bfun);
134 level = bfun->level;
135
136 ibegin = bfun->insn_offset;
137 iend = ibegin + bfun->insn.size ();
138
139 DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, insn = [%u; %u)",
140 prefix, fun, file, level, ibegin, iend);
141}
142
143/* Return the number of instructions in a given function call segment. */
144
145static unsigned int
147{
148 if (bfun == NULL)
149 return 0;
150
151 /* A gap is always counted as one instruction. */
152 if (bfun->errcode != 0)
153 return 1;
154
155 return bfun->insn.size ();
156}
157
158/* Return the function segment with the given NUMBER or NULL if no such segment
159 exists. BTINFO is the branch trace information for the current thread. */
160
161static struct btrace_function *
163 unsigned int number)
164{
165 if (number == 0 || number > btinfo->functions.size ())
166 return NULL;
167
168 return &btinfo->functions[number - 1];
169}
170
171/* A const version of the function above. */
172
173static const struct btrace_function *
175 unsigned int number)
176{
177 if (number == 0 || number > btinfo->functions.size ())
178 return NULL;
179
180 return &btinfo->functions[number - 1];
181}
182
183/* Return non-zero if BFUN does not match MFUN and FUN,
184 return zero otherwise. */
185
186static int
188 const struct minimal_symbol *mfun,
189 const struct symbol *fun)
190{
191 struct minimal_symbol *msym;
192 struct symbol *sym;
193
194 msym = bfun->msym;
195 sym = bfun->sym;
196
197 /* If the minimal symbol changed, we certainly switched functions. */
198 if (mfun != NULL && msym != NULL
199 && strcmp (mfun->linkage_name (), msym->linkage_name ()) != 0)
200 return 1;
201
202 /* If the symbol changed, we certainly switched functions. */
203 if (fun != NULL && sym != NULL)
204 {
205 const char *bfname, *fname;
206
207 /* Check the function name. */
208 if (strcmp (fun->linkage_name (), sym->linkage_name ()) != 0)
209 return 1;
210
211 /* Check the location of those functions, as well. */
212 bfname = symtab_to_fullname (sym->symtab ());
213 fname = symtab_to_fullname (fun->symtab ());
214 if (filename_cmp (fname, bfname) != 0)
215 return 1;
216 }
217
218 /* If we lost symbol information, we switched functions. */
219 if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
220 return 1;
221
222 /* If we gained symbol information, we switched functions. */
223 if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
224 return 1;
225
226 return 0;
227}
228
229/* Allocate and initialize a new branch trace function segment at the end of
230 the trace.
231 BTINFO is the branch trace information for the current thread.
232 MFUN and FUN are the symbol information we have for this function.
233 This invalidates all struct btrace_function pointer currently held. */
234
235static struct btrace_function *
237 struct minimal_symbol *mfun,
238 struct symbol *fun)
239{
240 int level;
241 unsigned int number, insn_offset;
242
243 if (btinfo->functions.empty ())
244 {
245 /* Start counting NUMBER and INSN_OFFSET at one. */
246 level = 0;
247 number = 1;
248 insn_offset = 1;
249 }
250 else
251 {
252 const struct btrace_function *prev = &btinfo->functions.back ();
253 level = prev->level;
254 number = prev->number + 1;
255 insn_offset = prev->insn_offset + ftrace_call_num_insn (prev);
256 }
257
258 btinfo->functions.emplace_back (mfun, fun, number, insn_offset, level);
259 return &btinfo->functions.back ();
260}
261
262/* Update the UP field of a function segment. */
263
264static void
266 struct btrace_function *caller,
267 btrace_function_flags flags)
268{
269 if (bfun->up != 0)
270 ftrace_debug (bfun, "updating caller");
271
272 bfun->up = caller->number;
273 bfun->flags = flags;
274
275 ftrace_debug (bfun, "set caller");
276 ftrace_debug (caller, "..to");
277}
278
279/* Fix up the caller for all segments of a function. */
280
281static void
283 struct btrace_function *bfun,
284 struct btrace_function *caller,
285 btrace_function_flags flags)
286{
287 unsigned int prev, next;
288
289 prev = bfun->prev;
290 next = bfun->next;
291 ftrace_update_caller (bfun, caller, flags);
292
293 /* Update all function segments belonging to the same function. */
294 for (; prev != 0; prev = bfun->prev)
295 {
296 bfun = ftrace_find_call_by_number (btinfo, prev);
297 ftrace_update_caller (bfun, caller, flags);
298 }
299
300 for (; next != 0; next = bfun->next)
301 {
302 bfun = ftrace_find_call_by_number (btinfo, next);
303 ftrace_update_caller (bfun, caller, flags);
304 }
305}
306
307/* Add a new function segment for a call at the end of the trace.
308 BTINFO is the branch trace information for the current thread.
309 MFUN and FUN are the symbol information we have for this function. */
310
311static struct btrace_function *
313 struct minimal_symbol *mfun,
314 struct symbol *fun)
315{
316 const unsigned int length = btinfo->functions.size ();
317 struct btrace_function *bfun = ftrace_new_function (btinfo, mfun, fun);
318
319 bfun->up = length;
320 bfun->level += 1;
321
322 ftrace_debug (bfun, "new call");
323
324 return bfun;
325}
326
327/* Add a new function segment for a tail call at the end of the trace.
328 BTINFO is the branch trace information for the current thread.
329 MFUN and FUN are the symbol information we have for this function. */
330
331static struct btrace_function *
333 struct minimal_symbol *mfun,
334 struct symbol *fun)
335{
336 const unsigned int length = btinfo->functions.size ();
337 struct btrace_function *bfun = ftrace_new_function (btinfo, mfun, fun);
338
339 bfun->up = length;
340 bfun->level += 1;
342
343 ftrace_debug (bfun, "new tail call");
344
345 return bfun;
346}
347
348/* Return the caller of BFUN or NULL if there is none. This function skips
349 tail calls in the call chain. BTINFO is the branch trace information for
350 the current thread. */
351static struct btrace_function *
353 struct btrace_function *bfun)
354{
355 for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
356 if ((bfun->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
357 return ftrace_find_call_by_number (btinfo, bfun->up);
358
359 return NULL;
360}
361
362/* Find the innermost caller in the back trace of BFUN with MFUN/FUN
363 symbol information. BTINFO is the branch trace information for the current
364 thread. */
365
366static struct btrace_function *
368 struct btrace_function *bfun,
369 struct minimal_symbol *mfun,
370 struct symbol *fun)
371{
372 for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
373 {
374 /* Skip functions with incompatible symbol information. */
375 if (ftrace_function_switched (bfun, mfun, fun))
376 continue;
377
378 /* This is the function segment we're looking for. */
379 break;
380 }
381
382 return bfun;
383}
384
385/* Find the innermost caller in the back trace of BFUN, skipping all
386 function segments that do not end with a call instruction (e.g.
387 tail calls ending with a jump). BTINFO is the branch trace information for
388 the current thread. */
389
390static struct btrace_function *
392 struct btrace_function *bfun)
393{
394 for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
395 {
396 /* Skip gaps. */
397 if (bfun->errcode != 0)
398 continue;
399
400 btrace_insn &last = bfun->insn.back ();
401
402 if (last.iclass == BTRACE_INSN_CALL)
403 break;
404 }
405
406 return bfun;
407}
408
409/* Add a continuation segment for a function into which we return at the end of
410 the trace.
411 BTINFO is the branch trace information for the current thread.
412 MFUN and FUN are the symbol information we have for this function. */
413
414static struct btrace_function *
416 struct minimal_symbol *mfun,
417 struct symbol *fun)
418{
419 struct btrace_function *prev, *bfun, *caller;
420
421 bfun = ftrace_new_function (btinfo, mfun, fun);
422 prev = ftrace_find_call_by_number (btinfo, bfun->number - 1);
423
424 /* It is important to start at PREV's caller. Otherwise, we might find
425 PREV itself, if PREV is a recursive function. */
426 caller = ftrace_find_call_by_number (btinfo, prev->up);
427 caller = ftrace_find_caller (btinfo, caller, mfun, fun);
428 if (caller != NULL)
429 {
430 /* The caller of PREV is the preceding btrace function segment in this
431 function instance. */
432 gdb_assert (caller->next == 0);
433
434 caller->next = bfun->number;
435 bfun->prev = caller->number;
436
437 /* Maintain the function level. */
438 bfun->level = caller->level;
439
440 /* Maintain the call stack. */
441 bfun->up = caller->up;
442 bfun->flags = caller->flags;
443
444 ftrace_debug (bfun, "new return");
445 }
446 else
447 {
448 /* We did not find a caller. This could mean that something went
449 wrong or that the call is simply not included in the trace. */
450
451 /* Let's search for some actual call. */
452 caller = ftrace_find_call_by_number (btinfo, prev->up);
453 caller = ftrace_find_call (btinfo, caller);
454 if (caller == NULL)
455 {
456 /* There is no call in PREV's back trace. We assume that the
457 branch trace did not include it. */
458
459 /* Let's find the topmost function and add a new caller for it.
460 This should handle a series of initial tail calls. */
461 while (prev->up != 0)
462 prev = ftrace_find_call_by_number (btinfo, prev->up);
463
464 bfun->level = prev->level - 1;
465
466 /* Fix up the call stack for PREV. */
468
469 ftrace_debug (bfun, "new return - no caller");
470 }
471 else
472 {
473 /* There is a call in PREV's back trace to which we should have
474 returned but didn't. Let's start a new, separate back trace
475 from PREV's level. */
476 bfun->level = prev->level - 1;
477
478 /* We fix up the back trace for PREV but leave other function segments
479 on the same level as they are.
480 This should handle things like schedule () correctly where we're
481 switching contexts. */
482 prev->up = bfun->number;
483 prev->flags = BFUN_UP_LINKS_TO_RET;
484
485 ftrace_debug (bfun, "new return - unknown caller");
486 }
487 }
488
489 return bfun;
490}
491
492/* Add a new function segment for a function switch at the end of the trace.
493 BTINFO is the branch trace information for the current thread.
494 MFUN and FUN are the symbol information we have for this function. */
495
496static struct btrace_function *
498 struct minimal_symbol *mfun,
499 struct symbol *fun)
500{
501 struct btrace_function *prev, *bfun;
502
503 /* This is an unexplained function switch. We can't really be sure about the
504 call stack, yet the best I can think of right now is to preserve it. */
505 bfun = ftrace_new_function (btinfo, mfun, fun);
506 prev = ftrace_find_call_by_number (btinfo, bfun->number - 1);
507 bfun->up = prev->up;
508 bfun->flags = prev->flags;
509
510 ftrace_debug (bfun, "new switch");
511
512 return bfun;
513}
514
515/* Add a new function segment for a gap in the trace due to a decode error at
516 the end of the trace.
517 BTINFO is the branch trace information for the current thread.
518 ERRCODE is the format-specific error code. */
519
520static struct btrace_function *
522 std::vector<unsigned int> &gaps)
523{
524 struct btrace_function *bfun;
525
526 if (btinfo->functions.empty ())
527 bfun = ftrace_new_function (btinfo, NULL, NULL);
528 else
529 {
530 /* We hijack the previous function segment if it was empty. */
531 bfun = &btinfo->functions.back ();
532 if (bfun->errcode != 0 || !bfun->insn.empty ())
533 bfun = ftrace_new_function (btinfo, NULL, NULL);
534 }
535
536 bfun->errcode = errcode;
537 gaps.push_back (bfun->number);
538
539 ftrace_debug (bfun, "new gap");
540
541 return bfun;
542}
543
544/* Update the current function segment at the end of the trace in BTINFO with
545 respect to the instruction at PC. This may create new function segments.
546 Return the chronologically latest function segment, never NULL. */
547
548static struct btrace_function *
549ftrace_update_function (struct btrace_thread_info *btinfo, CORE_ADDR pc)
550{
551 struct bound_minimal_symbol bmfun;
552 struct minimal_symbol *mfun;
553 struct symbol *fun;
554 struct btrace_function *bfun;
555
556 /* Try to determine the function we're in. We use both types of symbols
557 to avoid surprises when we sometimes get a full symbol and sometimes
558 only a minimal symbol. */
559 fun = find_pc_function (pc);
560 bmfun = lookup_minimal_symbol_by_pc (pc);
561 mfun = bmfun.minsym;
562
563 if (fun == NULL && mfun == NULL)
564 DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));
565
566 /* If we didn't have a function, we create one. */
567 if (btinfo->functions.empty ())
568 return ftrace_new_function (btinfo, mfun, fun);
569
570 /* If we had a gap before, we create a function. */
571 bfun = &btinfo->functions.back ();
572 if (bfun->errcode != 0)
573 return ftrace_new_function (btinfo, mfun, fun);
574
575 /* Check the last instruction, if we have one.
576 We do this check first, since it allows us to fill in the call stack
577 links in addition to the normal flow links. */
578 btrace_insn *last = NULL;
579 if (!bfun->insn.empty ())
580 last = &bfun->insn.back ();
581
582 if (last != NULL)
583 {
584 switch (last->iclass)
585 {
587 {
588 const char *fname;
589
590 /* On some systems, _dl_runtime_resolve returns to the resolved
591 function instead of jumping to it. From our perspective,
592 however, this is a tailcall.
593 If we treated it as return, we wouldn't be able to find the
594 resolved function in our stack back trace. Hence, we would
595 lose the current stack back trace and start anew with an empty
596 back trace. When the resolved function returns, we would then
597 create a stack back trace with the same function names but
598 different frame id's. This will confuse stepping. */
599 fname = ftrace_print_function_name (bfun);
600 if (strcmp (fname, "_dl_runtime_resolve") == 0)
601 return ftrace_new_tailcall (btinfo, mfun, fun);
602
603 return ftrace_new_return (btinfo, mfun, fun);
604 }
605
606 case BTRACE_INSN_CALL:
607 /* Ignore calls to the next instruction. They are used for PIC. */
608 if (last->pc + last->size == pc)
609 break;
610
611 return ftrace_new_call (btinfo, mfun, fun);
612
613 case BTRACE_INSN_JUMP:
614 {
615 CORE_ADDR start;
616
617 start = get_pc_function_start (pc);
618
619 /* A jump to the start of a function is (typically) a tail call. */
620 if (start == pc)
621 return ftrace_new_tailcall (btinfo, mfun, fun);
622
623 /* Some versions of _Unwind_RaiseException use an indirect
624 jump to 'return' to the exception handler of the caller
625 handling the exception instead of a return. Let's restrict
626 this heuristic to that and related functions. */
627 const char *fname = ftrace_print_function_name (bfun);
628 if (strncmp (fname, "_Unwind_", strlen ("_Unwind_")) == 0)
629 {
630 struct btrace_function *caller
631 = ftrace_find_call_by_number (btinfo, bfun->up);
632 caller = ftrace_find_caller (btinfo, caller, mfun, fun);
633 if (caller != NULL)
634 return ftrace_new_return (btinfo, mfun, fun);
635 }
636
637 /* If we can't determine the function for PC, we treat a jump at
638 the end of the block as tail call if we're switching functions
639 and as an intra-function branch if we don't. */
640 if (start == 0 && ftrace_function_switched (bfun, mfun, fun))
641 return ftrace_new_tailcall (btinfo, mfun, fun);
642
643 break;
644 }
645 }
646 }
647
648 /* Check if we're switching functions for some other reason. */
649 if (ftrace_function_switched (bfun, mfun, fun))
650 {
651 DEBUG_FTRACE ("switching from %s in %s at %s",
654 ftrace_print_filename (bfun));
655
656 return ftrace_new_switch (btinfo, mfun, fun);
657 }
658
659 return bfun;
660}
661
662/* Add the instruction at PC to BFUN's instructions. */
663
664static void
666{
667 bfun->insn.push_back (insn);
668
669 if (record_debug > 1)
670 ftrace_debug (bfun, "update insn");
671}
672
673/* Classify the instruction at PC. */
674
675static enum btrace_insn_class
676ftrace_classify_insn (struct gdbarch *gdbarch, CORE_ADDR pc)
677{
678 enum btrace_insn_class iclass;
679
680 iclass = BTRACE_INSN_OTHER;
681 try
682 {
684 iclass = BTRACE_INSN_CALL;
685 else if (gdbarch_insn_is_ret (gdbarch, pc))
686 iclass = BTRACE_INSN_RETURN;
687 else if (gdbarch_insn_is_jump (gdbarch, pc))
688 iclass = BTRACE_INSN_JUMP;
689 }
690 catch (const gdb_exception_error &error)
691 {
692 }
693
694 return iclass;
695}
696
697/* Try to match the back trace at LHS to the back trace at RHS. Returns the
698 number of matching function segments or zero if the back traces do not
699 match. BTINFO is the branch trace information for the current thread. */
700
701static int
703 struct btrace_function *lhs,
704 struct btrace_function *rhs)
705{
706 int matches;
707
708 for (matches = 0; lhs != NULL && rhs != NULL; ++matches)
709 {
710 if (ftrace_function_switched (lhs, rhs->msym, rhs->sym))
711 return 0;
712
713 lhs = ftrace_get_caller (btinfo, lhs);
714 rhs = ftrace_get_caller (btinfo, rhs);
715 }
716
717 return matches;
718}
719
720/* Add ADJUSTMENT to the level of BFUN and succeeding function segments.
721 BTINFO is the branch trace information for the current thread. */
722
723static void
725 struct btrace_function *bfun, int adjustment)
726{
727 if (adjustment == 0)
728 return;
729
730 DEBUG_FTRACE ("fixup level (%+d)", adjustment);
731 ftrace_debug (bfun, "..bfun");
732
733 while (bfun != NULL)
734 {
735 bfun->level += adjustment;
736 bfun = ftrace_find_call_by_number (btinfo, bfun->number + 1);
737 }
738}
739
740/* Recompute the global level offset. Traverse the function trace and compute
741 the global level offset as the negative of the minimal function level. */
742
743static void
745{
746 int level = INT_MAX;
747
748 if (btinfo == NULL)
749 return;
750
751 if (btinfo->functions.empty ())
752 return;
753
754 unsigned int length = btinfo->functions.size() - 1;
755 for (unsigned int i = 0; i < length; ++i)
756 level = std::min (level, btinfo->functions[i].level);
757
758 /* The last function segment contains the current instruction, which is not
759 really part of the trace. If it contains just this one instruction, we
760 ignore the segment. */
761 struct btrace_function *last = &btinfo->functions.back();
762 if (last->insn.size () != 1)
763 level = std::min (level, last->level);
764
765 DEBUG_FTRACE ("setting global level offset: %d", -level);
766 btinfo->level = -level;
767}
768
769/* Connect the function segments PREV and NEXT in a bottom-to-top walk as in
770 ftrace_connect_backtrace. BTINFO is the branch trace information for the
771 current thread. */
772
773static void
775 struct btrace_function *prev,
776 struct btrace_function *next)
777{
778 DEBUG_FTRACE ("connecting...");
779 ftrace_debug (prev, "..prev");
780 ftrace_debug (next, "..next");
781
782 /* The function segments are not yet connected. */
783 gdb_assert (prev->next == 0);
784 gdb_assert (next->prev == 0);
785
786 prev->next = next->number;
787 next->prev = prev->number;
788
789 /* We may have moved NEXT to a different function level. */
790 ftrace_fixup_level (btinfo, next, prev->level - next->level);
791
792 /* If we run out of back trace for one, let's use the other's. */
793 if (prev->up == 0)
794 {
795 const btrace_function_flags flags = next->flags;
796
797 next = ftrace_find_call_by_number (btinfo, next->up);
798 if (next != NULL)
799 {
800 DEBUG_FTRACE ("using next's callers");
801 ftrace_fixup_caller (btinfo, prev, next, flags);
802 }
803 }
804 else if (next->up == 0)
805 {
806 const btrace_function_flags flags = prev->flags;
807
808 prev = ftrace_find_call_by_number (btinfo, prev->up);
809 if (prev != NULL)
810 {
811 DEBUG_FTRACE ("using prev's callers");
812 ftrace_fixup_caller (btinfo, next, prev, flags);
813 }
814 }
815 else
816 {
817 /* PREV may have a tailcall caller, NEXT can't. If it does, fixup the up
818 link to add the tail callers to NEXT's back trace.
819
820 This removes NEXT->UP from NEXT's back trace. It will be added back
821 when connecting NEXT and PREV's callers - provided they exist.
822
823 If PREV's back trace consists of a series of tail calls without an
824 actual call, there will be no further connection and NEXT's caller will
825 be removed for good. To catch this case, we handle it here and connect
826 the top of PREV's back trace to NEXT's caller. */
827 if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
828 {
829 struct btrace_function *caller;
830 btrace_function_flags next_flags, prev_flags;
831
832 /* We checked NEXT->UP above so CALLER can't be NULL. */
833 caller = ftrace_find_call_by_number (btinfo, next->up);
834 next_flags = next->flags;
835 prev_flags = prev->flags;
836
837 DEBUG_FTRACE ("adding prev's tail calls to next");
838
839 prev = ftrace_find_call_by_number (btinfo, prev->up);
840 ftrace_fixup_caller (btinfo, next, prev, prev_flags);
841
842 for (; prev != NULL; prev = ftrace_find_call_by_number (btinfo,
843 prev->up))
844 {
845 /* At the end of PREV's back trace, continue with CALLER. */
846 if (prev->up == 0)
847 {
848 DEBUG_FTRACE ("fixing up link for tailcall chain");
849 ftrace_debug (prev, "..top");
850 ftrace_debug (caller, "..up");
851
852 ftrace_fixup_caller (btinfo, prev, caller, next_flags);
853
854 /* If we skipped any tail calls, this may move CALLER to a
855 different function level.
856
857 Note that changing CALLER's level is only OK because we
858 know that this is the last iteration of the bottom-to-top
859 walk in ftrace_connect_backtrace.
860
861 Otherwise we will fix up CALLER's level when we connect it
862 to PREV's caller in the next iteration. */
863 ftrace_fixup_level (btinfo, caller,
864 prev->level - caller->level - 1);
865 break;
866 }
867
868 /* There's nothing to do if we find a real call. */
869 if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
870 {
871 DEBUG_FTRACE ("will fix up link in next iteration");
872 break;
873 }
874 }
875 }
876 }
877}
878
879/* Connect function segments on the same level in the back trace at LHS and RHS.
880 The back traces at LHS and RHS are expected to match according to
881 ftrace_match_backtrace. BTINFO is the branch trace information for the
882 current thread. */
883
884static void
886 struct btrace_function *lhs,
887 struct btrace_function *rhs)
888{
889 while (lhs != NULL && rhs != NULL)
890 {
891 struct btrace_function *prev, *next;
892
893 gdb_assert (!ftrace_function_switched (lhs, rhs->msym, rhs->sym));
894
895 /* Connecting LHS and RHS may change the up link. */
896 prev = lhs;
897 next = rhs;
898
899 lhs = ftrace_get_caller (btinfo, lhs);
900 rhs = ftrace_get_caller (btinfo, rhs);
901
902 ftrace_connect_bfun (btinfo, prev, next);
903 }
904}
905
906/* Bridge the gap between two function segments left and right of a gap if their
907 respective back traces match in at least MIN_MATCHES functions. BTINFO is
908 the branch trace information for the current thread.
909
910 Returns non-zero if the gap could be bridged, zero otherwise. */
911
912static int
914 struct btrace_function *lhs, struct btrace_function *rhs,
915 int min_matches)
916{
917 struct btrace_function *best_l, *best_r, *cand_l, *cand_r;
918 int best_matches;
919
920 DEBUG_FTRACE ("checking gap at insn %u (req matches: %d)",
921 rhs->insn_offset - 1, min_matches);
922
923 best_matches = 0;
924 best_l = NULL;
925 best_r = NULL;
926
927 /* We search the back traces of LHS and RHS for valid connections and connect
928 the two function segments that give the longest combined back trace. */
929
930 for (cand_l = lhs; cand_l != NULL;
931 cand_l = ftrace_get_caller (btinfo, cand_l))
932 for (cand_r = rhs; cand_r != NULL;
933 cand_r = ftrace_get_caller (btinfo, cand_r))
934 {
935 int matches;
936
937 matches = ftrace_match_backtrace (btinfo, cand_l, cand_r);
938 if (best_matches < matches)
939 {
940 best_matches = matches;
941 best_l = cand_l;
942 best_r = cand_r;
943 }
944 }
945
946 /* We need at least MIN_MATCHES matches. */
947 gdb_assert (min_matches > 0);
948 if (best_matches < min_matches)
949 return 0;
950
951 DEBUG_FTRACE ("..matches: %d", best_matches);
952
953 /* We will fix up the level of BEST_R and succeeding function segments such
954 that BEST_R's level matches BEST_L's when we connect BEST_L to BEST_R.
955
956 This will ignore the level of RHS and following if BEST_R != RHS. I.e. if
957 BEST_R is a successor of RHS in the back trace of RHS (phases 1 and 3).
958
959 To catch this, we already fix up the level here where we can start at RHS
960 instead of at BEST_R. We will ignore the level fixup when connecting
961 BEST_L to BEST_R as they will already be on the same level. */
962 ftrace_fixup_level (btinfo, rhs, best_l->level - best_r->level);
963
964 ftrace_connect_backtrace (btinfo, best_l, best_r);
965
966 return best_matches;
967}
968
969/* Try to bridge gaps due to overflow or decode errors by connecting the
970 function segments that are separated by the gap. */
971
972static void
973btrace_bridge_gaps (struct thread_info *tp, std::vector<unsigned int> &gaps)
974{
975 struct btrace_thread_info *btinfo = &tp->btrace;
976 std::vector<unsigned int> remaining;
977 int min_matches;
978
979 DEBUG ("bridge gaps");
980
981 /* We require a minimum amount of matches for bridging a gap. The number of
982 required matches will be lowered with each iteration.
983
984 The more matches the higher our confidence that the bridging is correct.
985 For big gaps or small traces, however, it may not be feasible to require a
986 high number of matches. */
987 for (min_matches = 5; min_matches > 0; --min_matches)
988 {
989 /* Let's try to bridge as many gaps as we can. In some cases, we need to
990 skip a gap and revisit it again after we closed later gaps. */
991 while (!gaps.empty ())
992 {
993 for (const unsigned int number : gaps)
994 {
995 struct btrace_function *gap, *lhs, *rhs;
996 int bridged;
997
998 gap = ftrace_find_call_by_number (btinfo, number);
999
1000 /* We may have a sequence of gaps if we run from one error into
1001 the next as we try to re-sync onto the trace stream. Ignore
1002 all but the leftmost gap in such a sequence.
1003
1004 Also ignore gaps at the beginning of the trace. */
1005 lhs = ftrace_find_call_by_number (btinfo, gap->number - 1);
1006 if (lhs == NULL || lhs->errcode != 0)
1007 continue;
1008
1009 /* Skip gaps to the right. */
1010 rhs = ftrace_find_call_by_number (btinfo, gap->number + 1);
1011 while (rhs != NULL && rhs->errcode != 0)
1012 rhs = ftrace_find_call_by_number (btinfo, rhs->number + 1);
1013
1014 /* Ignore gaps at the end of the trace. */
1015 if (rhs == NULL)
1016 continue;
1017
1018 bridged = ftrace_bridge_gap (btinfo, lhs, rhs, min_matches);
1019
1020 /* Keep track of gaps we were not able to bridge and try again.
1021 If we just pushed them to the end of GAPS we would risk an
1022 infinite loop in case we simply cannot bridge a gap. */
1023 if (bridged == 0)
1024 remaining.push_back (number);
1025 }
1026
1027 /* Let's see if we made any progress. */
1028 if (remaining.size () == gaps.size ())
1029 break;
1030
1031 gaps.clear ();
1032 gaps.swap (remaining);
1033 }
1034
1035 /* We get here if either GAPS is empty or if GAPS equals REMAINING. */
1036 if (gaps.empty ())
1037 break;
1038
1039 remaining.clear ();
1040 }
1041
1042 /* We may omit this in some cases. Not sure it is worth the extra
1043 complication, though. */
1045}
1046
/* Compute the function branch trace from BTS trace.

   Walks the raw BTS blocks in BTRACE (stored newest-first) in chronological
   order and builds TP's function segments; decode problems are recorded as
   gaps and their segment numbers appended to GAPS.  */

static void
/* NOTE(review): the line naming this function was dropped from this listing;
   the call in btrace_compute_ftrace_1 names it btrace_compute_ftrace_bts --
   confirm against the original file.  */
			   const struct btrace_data_bts *btrace,
			   std::vector<unsigned int> &gaps)
{
  /* We may end up doing target calls that require the current thread to be TP,
     for example reading memory through gdb_insn_length.  Make sure TP is the
     current thread.  */
  scoped_restore_current_thread restore_thread;
  switch_to_thread (tp);

  struct btrace_thread_info *btinfo;
  struct gdbarch *gdbarch;
  unsigned int blk;
  int level;

  /* NOTE(review): GDBARCH is declared above but never assigned in this
     listing; an initialization statement appears to be missing here.  */
  btinfo = &tp->btrace;
  blk = btrace->blocks->size ();

  /* Start a fresh level numbering on an empty trace; otherwise continue
     from the existing global level offset.  */
  if (btinfo->functions.empty ())
    level = INT_MAX;
  else
    level = -btinfo->level;

  /* Blocks are stored newest-first; iterate from the oldest block.  */
  while (blk != 0)
    {
      CORE_ADDR pc;

      blk -= 1;

      const btrace_block &block = btrace->blocks->at (blk);
      pc = block.begin;

      for (;;)
	{
	  struct btrace_function *bfun;
	  struct btrace_insn insn;
	  int size;

	  /* We should hit the end of the block.  Warn if we went too far.  */
	  if (block.end < pc)
	    {
	      /* Indicate the gap in the trace.  */
	      bfun = ftrace_new_gap (btinfo, BDE_BTS_OVERFLOW, gaps);

	      warning (_("Recorded trace may be corrupted at instruction "
			 "%u (pc = %s)."), bfun->insn_offset - 1,
		       core_addr_to_string_nz (pc));

	      break;
	    }

	  bfun = ftrace_update_function (btinfo, pc);

	  /* Maintain the function level offset.
	     For all but the last block, we do it here.  */
	  if (blk != 0)
	    level = std::min (level, bfun->level);

	  size = 0;
	  try
	    {
	      /* NOTE(review): the statement computing SIZE (presumably via
		 gdb_insn_length, mentioned in the comment at the top of this
		 function) is missing from this listing.  */
	    }
	  catch (const gdb_exception_error &error)
	    {
	      /* Leave SIZE at 0; the check below turns this into a gap.  */
	    }

	  insn.pc = pc;
	  insn.size = size;
	  /* NOTE(review): a statement initializing another INSN field
	     appears to be missing from this listing.  */
	  insn.flags = 0;

	  ftrace_update_insns (bfun, insn);

	  /* We're done once we pushed the instruction at the end.  */
	  if (block.end == pc)
	    break;

	  /* We can't continue if we fail to compute the size.  */
	  if (size <= 0)
	    {
	      /* Indicate the gap in the trace.  We just added INSN so we're
		 not at the beginning.  */
	      bfun = ftrace_new_gap (btinfo, BDE_BTS_INSN_SIZE, gaps);

	      warning (_("Recorded trace may be incomplete at instruction %u "
			 "(pc = %s)."), bfun->insn_offset - 1,
		       core_addr_to_string_nz (pc));

	      break;
	    }

	  pc += size;

	  /* Maintain the function level offset.
	     For the last block, we do it here to not consider the last
	     instruction.
	     Since the last instruction corresponds to the current instruction
	     and is not really part of the execution history, it shouldn't
	     affect the level.  */
	  if (blk == 0)
	    level = std::min (level, bfun->level);
	}
    }

  /* LEVEL is the minimal function level of all btrace function segments.
     Define the global level offset to -LEVEL so all function levels are
     normalized to start at zero.  */
  btinfo->level = -level;
}
1161
1162#if defined (HAVE_LIBIPT)
1163
1164static enum btrace_insn_class
1165pt_reclassify_insn (enum pt_insn_class iclass)
1166{
1167 switch (iclass)
1168 {
1169 case ptic_call:
1170 return BTRACE_INSN_CALL;
1171
1172 case ptic_return:
1173 return BTRACE_INSN_RETURN;
1174
1175 case ptic_jump:
1176 return BTRACE_INSN_JUMP;
1177
1178 default:
1179 return BTRACE_INSN_OTHER;
1180 }
1181}
1182
/* Return the btrace instruction flags for INSN.  */

static btrace_insn_flags
pt_btrace_insn_flags (const struct pt_insn &insn)
{
  btrace_insn_flags flags = 0;

  if (insn.speculative)
    /* NOTE(review): the statement setting the speculative flag appears to
       be missing from this listing; as shown, the `if' would instead guard
       the return below.  Confirm against the original file.  */

  return flags;
}
1195
1196/* Return the btrace instruction for INSN. */
1197
1198static btrace_insn
1199pt_btrace_insn (const struct pt_insn &insn)
1200{
1201 return {(CORE_ADDR) insn.ip, (gdb_byte) insn.size,
1202 pt_reclassify_insn (insn.iclass),
1203 pt_btrace_insn_flags (insn)};
1204}
1205
/* Drain pending instruction decode events from DECODER (libipt-v2).

   BTINFO is the branch trace of the decoded thread.  Gaps caused by disabled
   tracing or overflows are appended to GAPS.  STATUS is the last libipt
   status; the updated status is returned.  */

static int
handle_pt_insn_events (struct btrace_thread_info *btinfo,
		       struct pt_insn_decoder *decoder,
		       std::vector<unsigned int> &gaps, int status)
{
#if defined (HAVE_PT_INSN_EVENT)
  while ((status & pts_event_pending) != 0)
    {
      struct pt_event ev;

      status = pt_insn_event (decoder, &ev, sizeof (ev));
      if (status < 0)
	break;

      if (ev.type == ptev_enabled)
	{
	  /* Pure status updates do not indicate anything new.  */
	  if (ev.status_update != 0)
	    continue;

	  /* A non-resumed enable means the trace is not contiguous, except
	     when tracing just started.  */
	  if (ev.variant.enabled.resumed == 0 && !btinfo->functions.empty ())
	    {
	      struct btrace_function *bfun
		= ftrace_new_gap (btinfo, BDE_PT_DISABLED, gaps);
	      uint64_t offset;

	      pt_insn_get_offset (decoder, &offset);

	      warning (_("Non-contiguous trace at instruction %u (offset = 0x%"
			 PRIx64 ")."), bfun->insn_offset - 1, offset);
	    }
	}
      else if (ev.type == ptev_overflow)
	{
	  struct btrace_function *bfun
	    = ftrace_new_gap (btinfo, BDE_PT_OVERFLOW, gaps);
	  uint64_t offset;

	  pt_insn_get_offset (decoder, &offset);

	  warning (_("Overflow at instruction %u (offset = 0x%" PRIx64 ")."),
		   bfun->insn_offset - 1, offset);
	}
    }
#endif /* defined (HAVE_PT_INSN_EVENT) */

  return status;
}
1260
/* Handle events indicated by flags in INSN (libipt-v1).

   Trace gaps implied by the ENABLED/RESYNCED instruction flags are appended
   to BTINFO and their segment numbers to GAPS.  DECODER is only queried for
   the current trace offset used in warnings.  */

static void
handle_pt_insn_event_flags (struct btrace_thread_info *btinfo,
			    struct pt_insn_decoder *decoder,
			    const struct pt_insn &insn,
			    std::vector<unsigned int> &gaps)
{
#if defined (HAVE_STRUCT_PT_INSN_ENABLED)
  /* Tracing is disabled and re-enabled each time we enter the kernel.  Most
     times, we continue from the same instruction we stopped before.  This is
     indicated via the RESUMED instruction flag.  The ENABLED instruction flag
     means that we continued from some other instruction.  Indicate this as a
     trace gap except when tracing just started.  */
  if (insn.enabled && !btinfo->functions.empty ())
    {
      struct btrace_function *bfun
	= ftrace_new_gap (btinfo, BDE_PT_DISABLED, gaps);
      uint64_t offset;

      pt_insn_get_offset (decoder, &offset);

      warning (_("Non-contiguous trace at instruction %u (offset = 0x%" PRIx64
		 ", pc = 0x%" PRIx64 ")."), bfun->insn_offset - 1, offset,
	       insn.ip);
    }
#endif /* defined (HAVE_STRUCT_PT_INSN_ENABLED) */

#if defined (HAVE_STRUCT_PT_INSN_RESYNCED)
  /* The RESYNCED flag marks a resynchronization after an overflow; report
     it as an overflow gap.  */
  if (insn.resynced)
    {
      struct btrace_function *bfun
	= ftrace_new_gap (btinfo, BDE_PT_OVERFLOW, gaps);
      uint64_t offset;

      pt_insn_get_offset (decoder, &offset);

      warning (_("Overflow at instruction %u (offset = 0x%" PRIx64 ", pc = 0x%"
		 PRIx64 ")."), bfun->insn_offset - 1, offset, insn.ip);
    }
#endif /* defined (HAVE_STRUCT_PT_INSN_RESYNCED) */
}
1306
1307/* Add function branch trace to BTINFO using DECODER. */
1308
1309static void
1310ftrace_add_pt (struct btrace_thread_info *btinfo,
1311 struct pt_insn_decoder *decoder,
1312 int *plevel,
1313 std::vector<unsigned int> &gaps)
1314{
1315 struct btrace_function *bfun;
1316 uint64_t offset;
1317 int status;
1318
1319 for (;;)
1320 {
1321 struct pt_insn insn;
1322
1323 status = pt_insn_sync_forward (decoder);
1324 if (status < 0)
1325 {
1326 if (status != -pte_eos)
1327 warning (_("Failed to synchronize onto the Intel Processor "
1328 "Trace stream: %s."), pt_errstr (pt_errcode (status)));
1329 break;
1330 }
1331
1332 for (;;)
1333 {
1334 /* Handle events from the previous iteration or synchronization. */
1335 status = handle_pt_insn_events (btinfo, decoder, gaps, status);
1336 if (status < 0)
1337 break;
1338
1339 status = pt_insn_next (decoder, &insn, sizeof(insn));
1340 if (status < 0)
1341 break;
1342
1343 /* Handle events indicated by flags in INSN. */
1344 handle_pt_insn_event_flags (btinfo, decoder, insn, gaps);
1345
1346 bfun = ftrace_update_function (btinfo, insn.ip);
1347
1348 /* Maintain the function level offset. */
1349 *plevel = std::min (*plevel, bfun->level);
1350
1351 ftrace_update_insns (bfun, pt_btrace_insn (insn));
1352 }
1353
1354 if (status == -pte_eos)
1355 break;
1356
1357 /* Indicate the gap in the trace. */
1358 bfun = ftrace_new_gap (btinfo, status, gaps);
1359
1360 pt_insn_get_offset (decoder, &offset);
1361
1362 warning (_("Decode error (%d) at instruction %u (offset = 0x%" PRIx64
1363 ", pc = 0x%" PRIx64 "): %s."), status, bfun->insn_offset - 1,
1364 offset, insn.ip, pt_errstr (pt_errcode (status)));
1365 }
1366}
1367
1368/* A callback function to allow the trace decoder to read the inferior's
1369 memory. */
1370
1371static int
1372btrace_pt_readmem_callback (gdb_byte *buffer, size_t size,
1373 const struct pt_asid *asid, uint64_t pc,
1374 void *context)
1375{
1376 int result, errcode;
1377
1378 result = (int) size;
1379 try
1380 {
1381 errcode = target_read_code ((CORE_ADDR) pc, buffer, size);
1382 if (errcode != 0)
1383 result = -pte_nomap;
1384 }
1385 catch (const gdb_exception_error &error)
1386 {
1387 result = -pte_nomap;
1388 }
1389
1390 return result;
1391}
1392
1393/* Translate the vendor from one enum to another. */
1394
1395static enum pt_cpu_vendor
1396pt_translate_cpu_vendor (enum btrace_cpu_vendor vendor)
1397{
1398 switch (vendor)
1399 {
1400 default:
1401 return pcv_unknown;
1402
1403 case CV_INTEL:
1404 return pcv_intel;
1405 }
1406}
1407
1408/* Finalize the function branch trace after decode. */
1409
1410static void btrace_finalize_ftrace_pt (struct pt_insn_decoder *decoder,
1411 struct thread_info *tp, int level)
1412{
1413 pt_insn_free_decoder (decoder);
1414
1415 /* LEVEL is the minimal function level of all btrace function segments.
1416 Define the global level offset to -LEVEL so all function levels are
1417 normalized to start at zero. */
1418 tp->btrace.level = -level;
1419
1420 /* Add a single last instruction entry for the current PC.
1421 This allows us to compute the backtrace at the current PC using both
1422 standard unwind and btrace unwind.
1423 This extra entry is ignored by all record commands. */
1424 btrace_add_pc (tp);
1425}
1426
1427/* Compute the function branch trace from Intel Processor Trace
1428 format. */
1429
1430static void
1432 const struct btrace_data_pt *btrace,
1433 std::vector<unsigned int> &gaps)
1434{
1435 /* We may end up doing target calls that require the current thread to be TP,
1436 for example reading memory through btrace_pt_readmem_callback. Make sure
1437 TP is the current thread. */
1438 scoped_restore_current_thread restore_thread;
1439 switch_to_thread (tp);
1440
1441 struct btrace_thread_info *btinfo;
1442 struct pt_insn_decoder *decoder;
1443 struct pt_config config;
1444 int level, errcode;
1445
1446 if (btrace->size == 0)
1447 return;
1448
1449 btinfo = &tp->btrace;
1450 if (btinfo->functions.empty ())
1451 level = INT_MAX;
1452 else
1453 level = -btinfo->level;
1454
1455 pt_config_init(&config);
1456 config.begin = btrace->data;
1457 config.end = btrace->data + btrace->size;
1458
1459 /* We treat an unknown vendor as 'no errata'. */
1460 if (btrace->config.cpu.vendor != CV_UNKNOWN)
1461 {
1462 config.cpu.vendor
1463 = pt_translate_cpu_vendor (btrace->config.cpu.vendor);
1464 config.cpu.family = btrace->config.cpu.family;
1465 config.cpu.model = btrace->config.cpu.model;
1466 config.cpu.stepping = btrace->config.cpu.stepping;
1467
1468 errcode = pt_cpu_errata (&config.errata, &config.cpu);
1469 if (errcode < 0)
1470 error (_("Failed to configure the Intel Processor Trace "
1471 "decoder: %s."), pt_errstr (pt_errcode (errcode)));
1472 }
1473
1474 decoder = pt_insn_alloc_decoder (&config);
1475 if (decoder == NULL)
1476 error (_("Failed to allocate the Intel Processor Trace decoder."));
1477
1478 try
1479 {
1480 struct pt_image *image;
1481
1482 image = pt_insn_get_image(decoder);
1483 if (image == NULL)
1484 error (_("Failed to configure the Intel Processor Trace decoder."));
1485
1486 errcode = pt_image_set_callback(image, btrace_pt_readmem_callback, NULL);
1487 if (errcode < 0)
1488 error (_("Failed to configure the Intel Processor Trace decoder: "
1489 "%s."), pt_errstr (pt_errcode (errcode)));
1490
1491 ftrace_add_pt (btinfo, decoder, &level, gaps);
1492 }
1493 catch (const gdb_exception &error)
1494 {
1495 /* Indicate a gap in the trace if we quit trace processing. */
1496 if (error.reason == RETURN_QUIT && !btinfo->functions.empty ())
1497 ftrace_new_gap (btinfo, BDE_PT_USER_QUIT, gaps);
1498
1499 btrace_finalize_ftrace_pt (decoder, tp, level);
1500
1501 throw;
1502 }
1503
1504 btrace_finalize_ftrace_pt (decoder, tp, level);
1505}
1506
1507#else /* defined (HAVE_LIBIPT) */
1508
1509static void
1511 const struct btrace_data_pt *btrace,
1512 std::vector<unsigned int> &gaps)
1513{
1514 internal_error (_("Unexpected branch trace format."));
1515}
1516
1517#endif /* defined (HAVE_LIBIPT) */
1518
1519/* Compute the function branch trace from a block branch trace BTRACE for
1520 a thread given by BTINFO. If CPU is not NULL, overwrite the cpu in the
1521 branch trace configuration. This is currently only used for the PT
1522 format. */
1523
1524static void
1526 struct btrace_data *btrace,
1527 const struct btrace_cpu *cpu,
1528 std::vector<unsigned int> &gaps)
1529{
1530 DEBUG ("compute ftrace");
1531
1532 switch (btrace->format)
1533 {
1534 case BTRACE_FORMAT_NONE:
1535 return;
1536
1537 case BTRACE_FORMAT_BTS:
1538 btrace_compute_ftrace_bts (tp, &btrace->variant.bts, gaps);
1539 return;
1540
1541 case BTRACE_FORMAT_PT:
1542 /* Overwrite the cpu we use for enabling errata workarounds. */
1543 if (cpu != nullptr)
1544 btrace->variant.pt.config.cpu = *cpu;
1545
1546 btrace_compute_ftrace_pt (tp, &btrace->variant.pt, gaps);
1547 return;
1548 }
1549
1550 internal_error (_("Unknown branch trace format."));
1551}
1552
1553static void
1554btrace_finalize_ftrace (struct thread_info *tp, std::vector<unsigned int> &gaps)
1555{
1556 if (!gaps.empty ())
1557 {
1558 tp->btrace.ngaps += gaps.size ();
1559 btrace_bridge_gaps (tp, gaps);
1560 }
1561}
1562
1563static void
1564btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace,
1565 const struct btrace_cpu *cpu)
1566{
1567 std::vector<unsigned int> gaps;
1568
1569 try
1570 {
1571 btrace_compute_ftrace_1 (tp, btrace, cpu, gaps);
1572 }
1573 catch (const gdb_exception &error)
1574 {
1575 btrace_finalize_ftrace (tp, gaps);
1576
1577 throw;
1578 }
1579
1580 btrace_finalize_ftrace (tp, gaps);
1581}
1582
/* Add an entry for the current PC.

   Wraps TP's current PC in a single-block BTS trace and runs the regular
   trace computation on it.  */

static void
/* NOTE(review): the line naming this function was dropped from this
   listing; the forward declaration at the top of the file declares
   btrace_add_pc (struct thread_info *tp).  */
{
  struct btrace_data btrace;
  struct regcache *regcache;
  CORE_ADDR pc;

  /* NOTE(review): the statements initializing REGCACHE and PC (from TP's
     register cache) appear to be missing from this listing.  */

  btrace.format = BTRACE_FORMAT_BTS;
  btrace.variant.bts.blocks = new std::vector<btrace_block>;

  /* A degenerate block covering just the current PC.  */
  btrace.variant.bts.blocks->emplace_back (pc, pc);

  btrace_compute_ftrace (tp, &btrace, NULL);
}
1602
1603/* See btrace.h. */
1604
1605void
1606btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
1607{
1608 if (tp->btrace.target != NULL)
1609 error (_("Recording already enabled on thread %s (%s)."),
1610 print_thread_id (tp), target_pid_to_str (tp->ptid).c_str ());
1611
1612#if !defined (HAVE_LIBIPT)
1613 if (conf->format == BTRACE_FORMAT_PT)
1614 error (_("Intel Processor Trace support was disabled at compile time."));
1615#endif /* !defined (HAVE_LIBIPT) */
1616
1617 DEBUG ("enable thread %s (%s)", print_thread_id (tp),
1618 tp->ptid.to_string ().c_str ());
1619
1620 tp->btrace.target = target_enable_btrace (tp, conf);
1621
1622 if (tp->btrace.target == NULL)
1623 error (_("Failed to enable recording on thread %s (%s)."),
1624 print_thread_id (tp), target_pid_to_str (tp->ptid).c_str ());
1625
1626 /* We need to undo the enable in case of errors. */
1627 try
1628 {
1629 /* Add an entry for the current PC so we start tracing from where we
1630 enabled it.
1631
1632 If we can't access TP's registers, TP is most likely running. In this
1633 case, we can't really say where tracing was enabled so it should be
1634 safe to simply skip this step.
1635
1636 This is not relevant for BTRACE_FORMAT_PT since the trace will already
1637 start at the PC at which tracing was enabled. */
1638 if (conf->format != BTRACE_FORMAT_PT
1640 btrace_add_pc (tp);
1641 }
1642 catch (const gdb_exception &exception)
1643 {
1644 btrace_disable (tp);
1645
1646 throw;
1647 }
1648}
1649
1650/* See btrace.h. */
1651
1652const struct btrace_config *
1653btrace_conf (const struct btrace_thread_info *btinfo)
1654{
1655 if (btinfo->target == NULL)
1656 return NULL;
1657
1658 return target_btrace_conf (btinfo->target);
1659}
1660
1661/* See btrace.h. */
1662
void
/* NOTE(review): the line naming this function was dropped from this
   listing; it is called as btrace_disable (tp) from btrace_enable's error
   path (see btrace.h).  */
{
  struct btrace_thread_info *btp = &tp->btrace;

  /* Unlike the teardown path below, disabling without recording enabled
     is an error.  */
  if (btp->target == NULL)
    error (_("Recording not enabled on thread %s (%s)."),
	   print_thread_id (tp), target_pid_to_str (tp->ptid).c_str ());

  DEBUG ("disable thread %s (%s)", print_thread_id (tp),
	 tp->ptid.to_string ().c_str ());

  /* NOTE(review): the target call actually disabling recording appears to
     be missing from this listing.  */
  btp->target = NULL;

  btrace_clear (tp);
}
1680
1681/* See btrace.h. */
1682
void
/* NOTE(review): the line naming this function was dropped from this
   listing (see btrace.h; per the DEBUG message this is the teardown
   path).  */
{
  struct btrace_thread_info *btp = &tp->btrace;

  /* Teardown silently ignores threads without recording.  */
  if (btp->target == NULL)
    return;

  DEBUG ("teardown thread %s (%s)", print_thread_id (tp),
	 tp->ptid.to_string ().c_str ());

  /* NOTE(review): the target call actually tearing down recording appears
     to be missing from this listing.  */
  btp->target = NULL;

  btrace_clear (tp);
}
1699
/* Stitch branch trace in BTS format.  Extend TP's existing trace with the
   delta trace in BTRACE, adjusting BTRACE so that the regular trace
   computation can append the remainder.  Return 0 on success, -1 if the
   caller should fall back to a full trace read.  */

static int
btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *last_bfun;
  btrace_block *first_new_block;

  btinfo = &tp->btrace;
  gdb_assert (!btinfo->functions.empty ());
  gdb_assert (!btrace->blocks->empty ());

  last_bfun = &btinfo->functions.back ();

  /* If the existing trace ends with a gap, we just glue the traces
     together. We need to drop the last (i.e. chronologically first) block
     of the new trace, though, since we can't fill in the start address.*/
  if (last_bfun->insn.empty ())
    {
      btrace->blocks->pop_back ();
      return 0;
    }

  /* Beware that block trace starts with the most recent block, so the
     chronologically first block in the new trace is the last block in
     the new trace's block vector.  */
  first_new_block = &btrace->blocks->back ();
  /* LAST_INSN is a reference into LAST_BFUN's vector; it must not be used
     after the pop_back further down.  */
  const btrace_insn &last_insn = last_bfun->insn.back ();

  /* If the current PC at the end of the block is the same as in our current
     trace, there are two explanations:
     1. we executed the instruction and some branch brought us back.
     2. we have not made any progress.
     In the first case, the delta trace vector should contain at least two
     entries.
     In the second case, the delta trace vector should contain exactly one
     entry for the partial block containing the current PC. Remove it.  */
  if (first_new_block->end == last_insn.pc && btrace->blocks->size () == 1)
    {
      btrace->blocks->pop_back ();
      return 0;
    }

  DEBUG ("stitching %s to %s", ftrace_print_insn_addr (&last_insn),
	 core_addr_to_string_nz (first_new_block->end));

  /* Do a simple sanity check to make sure we don't accidentally end up
     with a bad block. This should not occur in practice.  */
  if (first_new_block->end < last_insn.pc)
    {
      warning (_("Error while trying to read delta trace. Falling back to "
		 "a full read."));
      return -1;
    }

  /* We adjust the last block to start at the end of our current trace.  */
  gdb_assert (first_new_block->begin == 0);
  first_new_block->begin = last_insn.pc;

  /* We simply pop the last insn so we can insert it again as part of
     the normal branch trace computation.
     Since instruction iterators are based on indices in the instructions
     vector, we don't leave any pointers dangling.  */
  DEBUG ("pruning insn at %s for stitching",
	 ftrace_print_insn_addr (&last_insn));

  last_bfun->insn.pop_back ();

  /* The instructions vector may become empty temporarily if this has
     been the only instruction in this function segment.
     This violates the invariant but will be remedied shortly by
     btrace_compute_ftrace when we add the new trace.  */

  /* The only case where this would hurt is if the entire trace consisted
     of just that one instruction. If we remove it, we might turn the now
     empty btrace function segment into a gap. But we don't want gaps at
     the beginning. To avoid this, we remove the entire old trace.  */
  if (last_bfun->number == 1 && last_bfun->insn.empty ())
    btrace_clear (tp);

  return 0;
}
1783
1784/* Adjust the block trace in order to stitch old and new trace together.
1785 BTRACE is the new delta trace between the last and the current stop.
1786 TP is the traced thread.
   May modify BTRACE as well as the existing trace in TP.
1788 Return 0 on success, -1 otherwise. */
1789
1790static int
1791btrace_stitch_trace (struct btrace_data *btrace, struct thread_info *tp)
1792{
1793 /* If we don't have trace, there's nothing to do. */
1794 if (btrace->empty ())
1795 return 0;
1796
1797 switch (btrace->format)
1798 {
1799 case BTRACE_FORMAT_NONE:
1800 return 0;
1801
1802 case BTRACE_FORMAT_BTS:
1803 return btrace_stitch_bts (&btrace->variant.bts, tp);
1804
1805 case BTRACE_FORMAT_PT:
1806 /* Delta reads are not supported. */
1807 return -1;
1808 }
1809
1810 internal_error (_("Unknown branch trace format."));
1811}
1812
1813/* Clear the branch trace histories in BTINFO. */
1814
1815static void
1817{
1818 xfree (btinfo->insn_history);
1819 xfree (btinfo->call_history);
1820 xfree (btinfo->replay);
1821
1822 btinfo->insn_history = NULL;
1823 btinfo->call_history = NULL;
1824 btinfo->replay = NULL;
1825}
1826
1827/* Clear the branch trace maintenance histories in BTINFO. */
1828
1829static void
1831{
1832 switch (btinfo->data.format)
1833 {
1834 default:
1835 break;
1836
1837 case BTRACE_FORMAT_BTS:
1838 btinfo->maint.variant.bts.packet_history.begin = 0;
1839 btinfo->maint.variant.bts.packet_history.end = 0;
1840 break;
1841
1842#if defined (HAVE_LIBIPT)
1843 case BTRACE_FORMAT_PT:
1844 delete btinfo->maint.variant.pt.packets;
1845
1846 btinfo->maint.variant.pt.packets = NULL;
1847 btinfo->maint.variant.pt.packet_history.begin = 0;
1848 btinfo->maint.variant.pt.packet_history.end = 0;
1849 break;
1850#endif /* defined (HAVE_LIBIPT) */
1851 }
1852}
1853
1854/* See btrace.h. */
1855
1856const char *
1857btrace_decode_error (enum btrace_format format, int errcode)
1858{
1859 switch (format)
1860 {
1861 case BTRACE_FORMAT_BTS:
1862 switch (errcode)
1863 {
1864 case BDE_BTS_OVERFLOW:
1865 return _("instruction overflow");
1866
1867 case BDE_BTS_INSN_SIZE:
1868 return _("unknown instruction");
1869
1870 default:
1871 break;
1872 }
1873 break;
1874
1875#if defined (HAVE_LIBIPT)
1876 case BTRACE_FORMAT_PT:
1877 switch (errcode)
1878 {
1879 case BDE_PT_USER_QUIT:
1880 return _("trace decode cancelled");
1881
1882 case BDE_PT_DISABLED:
1883 return _("disabled");
1884
1885 case BDE_PT_OVERFLOW:
1886 return _("overflow");
1887
1888 default:
1889 if (errcode < 0)
1890 return pt_errstr (pt_errcode (errcode));
1891 break;
1892 }
1893 break;
1894#endif /* defined (HAVE_LIBIPT) */
1895
1896 default:
1897 break;
1898 }
1899
1900 return _("unknown");
1901}
1902
1903/* See btrace.h. */
1904
1905void
1906btrace_fetch (struct thread_info *tp, const struct btrace_cpu *cpu)
1907{
1908 struct btrace_thread_info *btinfo;
1909 struct btrace_target_info *tinfo;
1910 struct btrace_data btrace;
1911 int errcode;
1912
1913 DEBUG ("fetch thread %s (%s)", print_thread_id (tp),
1914 tp->ptid.to_string ().c_str ());
1915
1916 btinfo = &tp->btrace;
1917 tinfo = btinfo->target;
1918 if (tinfo == NULL)
1919 return;
1920
1921 /* There's no way we could get new trace while replaying.
1922 On the other hand, delta trace would return a partial record with the
1923 current PC, which is the replay PC, not the last PC, as expected. */
1924 if (btinfo->replay != NULL)
1925 return;
1926
1927 /* With CLI usage, TP is always the current thread when we get here.
1928 However, since we can also store a gdb.Record object in Python
1929 referring to a different thread than the current one, we need to
1930 temporarily set the current thread. */
1931 scoped_restore_current_thread restore_thread;
1932 switch_to_thread (tp);
1933
1934 /* We should not be called on running or exited threads. */
1935 gdb_assert (can_access_registers_thread (tp));
1936
1937 /* Let's first try to extend the trace we already have. */
1938 if (!btinfo->functions.empty ())
1939 {
1940 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
1941 if (errcode == 0)
1942 {
1943 /* Success. Let's try to stitch the traces together. */
1944 errcode = btrace_stitch_trace (&btrace, tp);
1945 }
1946 else
1947 {
1948 /* We failed to read delta trace. Let's try to read new trace. */
1949 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);
1950
1951 /* If we got any new trace, discard what we have. */
1952 if (errcode == 0 && !btrace.empty ())
1953 btrace_clear (tp);
1954 }
1955
1956 /* If we were not able to read the trace, we start over. */
1957 if (errcode != 0)
1958 {
1959 btrace_clear (tp);
1960 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
1961 }
1962 }
1963 else
1964 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
1965
1966 /* If we were not able to read the branch trace, signal an error. */
1967 if (errcode != 0)
1968 error (_("Failed to read branch trace."));
1969
1970 /* Compute the trace, provided we have any. */
1971 if (!btrace.empty ())
1972 {
1973 /* Store the raw trace data. The stored data will be cleared in
1974 btrace_clear, so we always append the new trace. */
1975 btrace_data_append (&btinfo->data, &btrace);
1976 btrace_maint_clear (btinfo);
1977
1978 btrace_clear_history (btinfo);
1979 btrace_compute_ftrace (tp, &btrace, cpu);
1980 }
1981}
1982
1983/* See btrace.h. */
1984
void
/* NOTE(review): the line naming this function was dropped from this
   listing; it is called as btrace_clear (tp) throughout this file.  */
{
  struct btrace_thread_info *btinfo;

  DEBUG ("clear thread %s (%s)", print_thread_id (tp),
	 tp->ptid.to_string ().c_str ());

  /* Make sure btrace frames that may hold a pointer into the branch
     trace data are destroyed.  */
  /* NOTE(review): the statement implementing the comment above (flushing
     the frame cache) appears to be missing from this listing.  */

  btinfo = &tp->btrace;

  btinfo->functions.clear ();
  btinfo->ngaps = 0;

  /* Must clear the maint data before - it depends on BTINFO->DATA.  */
  btrace_maint_clear (btinfo);
  btinfo->data.clear ();
  btrace_clear_history (btinfo);
}
2007
2008/* See btrace.h. */
2009
void
/* NOTE(review): the line naming this function was dropped from this
   listing (see btrace.h; per the DEBUG message below this is the
   objfile-free hook).  */
{
  DEBUG ("free objfile");

  /* Traces may reference symbols of the objfile being freed; drop all
     traces.  */
  for (thread_info *tp : all_non_exited_threads ())
    btrace_clear (tp);
}
2018
2019/* See btrace.h. */
2020
const struct btrace_insn *
/* NOTE(review): the line naming this function was dropped from this
   listing (see btrace.h; it returns the instruction IT points to, or NULL
   for a gap).  */
{
  const struct btrace_function *bfun;
  unsigned int index, end;

  index = it->insn_index;
  bfun = &it->btinfo->functions[it->call_index];

  /* Check if the iterator points to a gap in the trace.  */
  if (bfun->errcode != 0)
    return NULL;

  /* The index is within the bounds of this function's instruction vector.  */
  end = bfun->insn.size ();
  gdb_assert (0 < end);
  gdb_assert (index < end);

  return &bfun->insn[index];
}
2041
2042/* See btrace.h. */
2043
int
/* NOTE(review): the line naming this function was dropped from this
   listing (see btrace.h; it reports the error code of the segment IT
   points to).  */
{
  /* A non-zero errcode marks the segment as a decode gap.  */
  return it->btinfo->functions[it->call_index].errcode;
}
2049
2050/* See btrace.h. */
2051
unsigned int
/* NOTE(review): the line naming this function was dropped from this
   listing (see btrace.h; it reports the global number of the instruction
   IT points to).  */
{
  /* Global number = the segment's first-instruction offset plus the index
     within that segment.  */
  return it->btinfo->functions[it->call_index].insn_offset + it->insn_index;
}
2057
2058/* See btrace.h. */
2059
void
/* NOTE(review): the lines naming this function and its first parameter (a
   struct btrace_insn_iterator *it, written below) were dropped from this
   listing.  */
			const struct btrace_thread_info *btinfo)
{
  if (btinfo->functions.empty ())
    error (_("No trace."));

  /* Point IT at the first instruction of the first function segment.  */
  it->btinfo = btinfo;
  it->call_index = 0;
  it->insn_index = 0;
}
2071
2072/* See btrace.h. */
2073
void
/* NOTE(review): the lines naming this function and its first parameter (a
   struct btrace_insn_iterator *it, written below) were dropped from this
   listing.  */
		      const struct btrace_thread_info *btinfo)
{
  const struct btrace_function *bfun;
  unsigned int length;

  if (btinfo->functions.empty ())
    error (_("No trace."));

  bfun = &btinfo->functions.back ();
  length = bfun->insn.size ();

  /* The last function may either be a gap or it contains the current
     instruction, which is one past the end of the execution trace; ignore
     it.  */
  if (length > 0)
    length -= 1;

  /* Point IT one past the last traced instruction.  */
  it->btinfo = btinfo;
  it->call_index = bfun->number - 1;
  it->insn_index = length;
}
2097
2098/* See btrace.h. */
2099
/* Advance IT by at most STRIDE instructions (an empty segment, i.e. a gap,
   counts as one instruction) and return the number of instructions actually
   stepped.  If the end of the trace is reached, IT is left on the last
   instruction of the final segment.  */

unsigned int
btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = &it->btinfo->functions[it->call_index];
  steps = 0;
  index = it->insn_index;

  while (stride != 0)
    {
      unsigned int end, space, adv;

      end = bfun->insn.size ();

      /* An empty function segment represents a gap in the trace. We count
	 it as one instruction.  */
      if (end == 0)
	{
	  const struct btrace_function *next;

	  next = ftrace_find_call_by_number (it->btinfo, bfun->number + 1);
	  if (next == NULL)
	    break;

	  stride -= 1;
	  steps += 1;

	  bfun = next;
	  index = 0;

	  continue;
	}

      gdb_assert (0 < end);
      gdb_assert (index < end);

      /* Compute the number of instructions remaining in this segment.  */
      space = end - index;

      /* Advance the iterator as far as possible within this segment.  */
      adv = std::min (space, stride);
      stride -= adv;
      index += adv;
      steps += adv;

      /* Move to the next function if we're at the end of this one.  */
      if (index == end)
	{
	  const struct btrace_function *next;

	  next = ftrace_find_call_by_number (it->btinfo, bfun->number + 1);
	  if (next == NULL)
	    {
	      /* We stepped past the last function.

		 Let's adjust the index to point to the last instruction in
		 the previous function.  */
	      index -= 1;
	      steps -= 1;
	      break;
	    }

	  /* We now point to the first instruction in the new function.  */
	  bfun = next;
	  index = 0;
	}

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->call_index = bfun->number - 1;
  it->insn_index = index;

  return steps;
}
2179
2180/* See btrace.h. */
2181
2182unsigned int
2183btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
2184{
2185 const struct btrace_function *bfun;
2186 unsigned int index, steps;
2187
2188 bfun = &it->btinfo->functions[it->call_index];
2189 steps = 0;
2190 index = it->insn_index;
2191
2192 while (stride != 0)
2193 {
2194 unsigned int adv;
2195
2196 /* Move to the previous function if we're at the start of this one. */
2197 if (index == 0)
2198 {
2199 const struct btrace_function *prev;
2200
2201 prev = ftrace_find_call_by_number (it->btinfo, bfun->number - 1);
2202 if (prev == NULL)
2203 break;
2204
2205 /* We point to one after the last instruction in the new function. */
2206 bfun = prev;
2207 index = bfun->insn.size ();
2208
2209 /* An empty function segment represents a gap in the trace. We count
2210 it as one instruction. */
2211 if (index == 0)
2212 {
2213 stride -= 1;
2214 steps += 1;
2215
2216 continue;
2217 }
2218 }
2219
2220 /* Advance the iterator as far as possible within this segment. */
2221 adv = std::min (index, stride);
2222
2223 stride -= adv;
2224 index -= adv;
2225 steps += adv;
2226
2227 /* We did make progress. */
2228 gdb_assert (adv > 0);
2229 }
2230
2231 /* Update the iterator. */
2232 it->call_index = bfun->number - 1;
2233 it->insn_index = index;
2234
2235 return steps;
2236}
2237
2238/* See btrace.h. */
2239
int
/* NOTE(review): the lines naming this function and its first parameter (a
   const struct btrace_insn_iterator *lhs, mirroring RHS) were dropped from
   this listing.  */
		const struct btrace_insn_iterator *rhs)
{
  /* Both iterators must walk the same branch trace.  */
  gdb_assert (lhs->btinfo == rhs->btinfo);

  /* Order by function segment first, then by instruction within it.  */
  if (lhs->call_index != rhs->call_index)
    return lhs->call_index - rhs->call_index;

  return lhs->insn_index - rhs->insn_index;
}
2251
2252/* See btrace.h. */
2253
int
/* NOTE(review): the lines naming this function and its first parameter (a
   struct btrace_insn_iterator *it, written below) were dropped from this
   listing.  */
			     const struct btrace_thread_info *btinfo,
			     unsigned int number)
{
  const struct btrace_function *bfun;
  unsigned int upper, lower;

  /* An empty trace contains no instruction to find.  */
  if (btinfo->functions.empty ())
    return 0;

  /* Reject numbers below the first segment's range ...  */
  lower = 0;
  bfun = &btinfo->functions[lower];
  if (number < bfun->insn_offset)
    return 0;

  /* ... and above the last segment's range.  */
  upper = btinfo->functions.size () - 1;
  bfun = &btinfo->functions[upper];
  if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
    return 0;

  /* Binary search for the segment containing NUMBER.
     We assume that there are no holes in the numbering.  */
  for (;;)
    {
      const unsigned int average = lower + (upper - lower) / 2;

      bfun = &btinfo->functions[average];

      if (number < bfun->insn_offset)
	{
	  upper = average - 1;
	  continue;
	}

      if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
	{
	  lower = average + 1;
	  continue;
	}

      break;
    }

  /* Position IT on the found instruction.  */
  it->btinfo = btinfo;
  it->call_index = bfun->number - 1;
  it->insn_index = number - bfun->insn_offset;
  return 1;
}
2302
2303/* Returns true if the recording ends with a function segment that
2304 contains only a single (i.e. the current) instruction. */
2305
2306static bool
2308{
2309 const btrace_function *bfun;
2310
2311 if (btinfo->functions.empty ())
2312 return false;
2313
2314 bfun = &btinfo->functions.back ();
2315 if (bfun->errcode != 0)
2316 return false;
2317
2318 return ftrace_call_num_insn (bfun) == 1;
2319}
2320
2321/* See btrace.h. */
2322
2323const struct btrace_function *
2325{
2326 if (it->index >= it->btinfo->functions.size ())
2327 return NULL;
2328
2329 return &it->btinfo->functions[it->index];
2330}
2331
2332/* See btrace.h. */
2333
2334unsigned int
2336{
2337 const unsigned int length = it->btinfo->functions.size ();
2338
2339 /* If the last function segment contains only a single instruction (i.e. the
2340 current instruction), skip it. */
2341 if ((it->index == length) && btrace_ends_with_single_insn (it->btinfo))
2342 return length;
2343
2344 return it->index + 1;
2345}
2346
2347/* See btrace.h. */
2348
2349void
2351 const struct btrace_thread_info *btinfo)
2352{
2353 if (btinfo->functions.empty ())
2354 error (_("No trace."));
2355
2356 it->btinfo = btinfo;
2357 it->index = 0;
2358}
2359
2360/* See btrace.h. */
2361
2362void
2364 const struct btrace_thread_info *btinfo)
2365{
2366 if (btinfo->functions.empty ())
2367 error (_("No trace."));
2368
2369 it->btinfo = btinfo;
2370 it->index = btinfo->functions.size ();
2371}
2372
2373/* See btrace.h. */
2374
2375unsigned int
2376btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
2377{
2378 const unsigned int length = it->btinfo->functions.size ();
2379
2380 if (it->index + stride < length - 1)
2381 /* Default case: Simply advance the iterator. */
2382 it->index += stride;
2383 else if (it->index + stride == length - 1)
2384 {
2385 /* We land exactly at the last function segment. If it contains only one
2386 instruction (i.e. the current instruction) it is not actually part of
2387 the trace. */
2389 it->index = length;
2390 else
2391 it->index = length - 1;
2392 }
2393 else
2394 {
2395 /* We land past the last function segment and have to adjust the stride.
2396 If the last function segment contains only one instruction (i.e. the
2397 current instruction) it is not actually part of the trace. */
2399 stride = length - it->index - 1;
2400 else
2401 stride = length - it->index;
2402
2403 it->index = length;
2404 }
2405
2406 return stride;
2407}
2408
2409/* See btrace.h. */
2410
2411unsigned int
2412btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
2413{
2414 const unsigned int length = it->btinfo->functions.size ();
2415 int steps = 0;
2416
2417 gdb_assert (it->index <= length);
2418
2419 if (stride == 0 || it->index == 0)
2420 return 0;
2421
2422 /* If we are at the end, the first step is a special case. If the last
2423 function segment contains only one instruction (i.e. the current
2424 instruction) it is not actually part of the trace. To be able to step
2425 over this instruction, we need at least one more function segment. */
2426 if ((it->index == length) && (length > 1))
2427 {
2429 it->index = length - 2;
2430 else
2431 it->index = length - 1;
2432
2433 steps = 1;
2434 stride -= 1;
2435 }
2436
2437 stride = std::min (stride, it->index);
2438
2439 it->index -= stride;
2440 return steps + stride;
2441}
2442
2443/* See btrace.h. */
2444
2445int
2447 const struct btrace_call_iterator *rhs)
2448{
2449 gdb_assert (lhs->btinfo == rhs->btinfo);
2450 return (int) (lhs->index - rhs->index);
2451}
2452
2453/* See btrace.h. */
2454
2455int
2457 const struct btrace_thread_info *btinfo,
2458 unsigned int number)
2459{
2460 const unsigned int length = btinfo->functions.size ();
2461
2462 if ((number == 0) || (number > length))
2463 return 0;
2464
2465 it->btinfo = btinfo;
2466 it->index = number - 1;
2467 return 1;
2468}
2469
2470/* See btrace.h. */
2471
2472void
2474 const struct btrace_insn_iterator *begin,
2475 const struct btrace_insn_iterator *end)
2476{
2477 if (btinfo->insn_history == NULL)
2478 btinfo->insn_history = XCNEW (struct btrace_insn_history);
2479
2480 btinfo->insn_history->begin = *begin;
2481 btinfo->insn_history->end = *end;
2482}
2483
2484/* See btrace.h. */
2485
2486void
2488 const struct btrace_call_iterator *begin,
2489 const struct btrace_call_iterator *end)
2490{
2491 gdb_assert (begin->btinfo == end->btinfo);
2492
2493 if (btinfo->call_history == NULL)
2494 btinfo->call_history = XCNEW (struct btrace_call_history);
2495
2496 btinfo->call_history->begin = *begin;
2497 btinfo->call_history->end = *end;
2498}
2499
2500/* See btrace.h. */
2501
2502int
2504{
2505 return tp->btrace.replay != NULL;
2506}
2507
2508/* See btrace.h. */
2509
2510int
2512{
2513 struct btrace_insn_iterator begin, end;
2514 struct btrace_thread_info *btinfo;
2515
2516 btinfo = &tp->btrace;
2517
2518 if (btinfo->functions.empty ())
2519 return 1;
2520
2521 btrace_insn_begin (&begin, btinfo);
2522 btrace_insn_end (&end, btinfo);
2523
2524 return btrace_insn_cmp (&begin, &end) == 0;
2525}
2526
2527#if defined (HAVE_LIBIPT)
2528
2529/* Print a single packet. */
2530
/* Print a single Intel PT packet in human-readable form.  One case per
   libipt packet type; unknown types fall through to the default and are
   printed raw.  */

static void
pt_print_packet (const struct pt_packet *packet)
{
  switch (packet->type)
    {
    default:
      /* Unknown packet type - print the raw type number.  */
      gdb_printf (("[??: %x]"), packet->type);
      break;

    case ppt_psb:
      gdb_printf (("psb"));
      break;

    case ppt_psbend:
      gdb_printf (("psbend"));
      break;

    case ppt_pad:
      gdb_printf (("pad"));
      break;

    /* The four IP-carrying packets share the same payload layout:
       compression mode followed by the (possibly compressed) address.  */
    case ppt_tip:
      gdb_printf (("tip %u: 0x%" PRIx64 ""),
		  packet->payload.ip.ipc,
		  packet->payload.ip.ip);
      break;

    case ppt_tip_pge:
      gdb_printf (("tip.pge %u: 0x%" PRIx64 ""),
		  packet->payload.ip.ipc,
		  packet->payload.ip.ip);
      break;

    case ppt_tip_pgd:
      gdb_printf (("tip.pgd %u: 0x%" PRIx64 ""),
		  packet->payload.ip.ipc,
		  packet->payload.ip.ip);
      break;

    case ppt_fup:
      gdb_printf (("fup %u: 0x%" PRIx64 ""),
		  packet->payload.ip.ipc,
		  packet->payload.ip.ip);
      break;

    /* Taken/not-taken packets: bit count plus the bit payload.  */
    case ppt_tnt_8:
      gdb_printf (("tnt-8 %u: 0x%" PRIx64 ""),
		  packet->payload.tnt.bit_size,
		  packet->payload.tnt.payload);
      break;

    case ppt_tnt_64:
      gdb_printf (("tnt-64 %u: 0x%" PRIx64 ""),
		  packet->payload.tnt.bit_size,
		  packet->payload.tnt.payload);
      break;

    case ppt_pip:
      gdb_printf (("pip %" PRIx64 "%s"), packet->payload.pip.cr3,
		  packet->payload.pip.nr ? (" nr") : (""));
      break;

    case ppt_tsc:
      gdb_printf (("tsc %" PRIx64 ""), packet->payload.tsc.tsc);
      break;

    case ppt_cbr:
      gdb_printf (("cbr %u"), packet->payload.cbr.ratio);
      break;

    case ppt_mode:
      /* MODE packets carry a leaf selector; print per-leaf details.  */
      switch (packet->payload.mode.leaf)
	{
	default:
	  gdb_printf (("mode %u"), packet->payload.mode.leaf);
	  break;

	case pt_mol_exec:
	  gdb_printf (("mode.exec%s%s"),
		      packet->payload.mode.bits.exec.csl
		      ? (" cs.l") : (""),
		      packet->payload.mode.bits.exec.csd
		      ? (" cs.d") : (""));
	  break;

	case pt_mol_tsx:
	  gdb_printf (("mode.tsx%s%s"),
		      packet->payload.mode.bits.tsx.intx
		      ? (" intx") : (""),
		      packet->payload.mode.bits.tsx.abrt
		      ? (" abrt") : (""));
	  break;
	}
      break;

    case ppt_ovf:
      gdb_printf (("ovf"));
      break;

    case ppt_stop:
      gdb_printf (("stop"));
      break;

    case ppt_vmcs:
      gdb_printf (("vmcs %" PRIx64 ""), packet->payload.vmcs.base);
      break;

    case ppt_tma:
      gdb_printf (("tma %x %x"), packet->payload.tma.ctc,
		  packet->payload.tma.fc);
      break;

    case ppt_mtc:
      gdb_printf (("mtc %x"), packet->payload.mtc.ctc);
      break;

    case ppt_cyc:
      gdb_printf (("cyc %" PRIx64 ""), packet->payload.cyc.value);
      break;

    case ppt_mnt:
      gdb_printf (("mnt %" PRIx64 ""), packet->payload.mnt.payload);
      break;
    }
}
2656
2657/* Decode packets into MAINT using DECODER. */
2658
/* Decode packets into MAINT using DECODER.

   Outer loop: synchronize onto the next PSB in the trace.  Inner loop:
   decode packets until the decoder reports an error.  A decode error other
   than end-of-stream is recorded as a packet itself and reported as a
   warning; decoding then resumes at the next synchronization point.  */

static void
btrace_maint_decode_pt (struct btrace_maint_info *maint,
			struct pt_packet_decoder *decoder)
{
  int errcode;

  /* Allocate the packet vector lazily on first use.  */
  if (maint->variant.pt.packets == NULL)
    maint->variant.pt.packets = new std::vector<btrace_pt_packet>;

  for (;;)
    {
      struct btrace_pt_packet packet;

      /* Find the next synchronization point; a negative return means
	 there is none (e.g. end of trace).  */
      errcode = pt_pkt_sync_forward (decoder);
      if (errcode < 0)
	break;

      for (;;)
	{
	  /* Record the packet's offset before decoding it so errors can
	     be attributed to a trace position.  */
	  pt_pkt_get_offset (decoder, &packet.offset);

	  errcode = pt_pkt_next (decoder, &packet.packet,
				 sizeof(packet.packet));
	  if (errcode < 0)
	    break;

	  /* Optionally drop PAD packets, controlled by the
	     "maint set btrace pt skip-pad" setting.  */
	  if (maint_btrace_pt_skip_pad == 0 || packet.packet.type != ppt_pad)
	    {
	      packet.errcode = pt_errcode (errcode);
	      maint->variant.pt.packets->push_back (packet);
	    }
	}

      /* End of stream is the normal way out.  */
      if (errcode == -pte_eos)
	break;

      /* Any other decode error is kept in the history (so the user can
	 see where decoding failed) and reported.  */
      packet.errcode = pt_errcode (errcode);
      maint->variant.pt.packets->push_back (packet);

      warning (_("Error at trace offset 0x%" PRIx64 ": %s."),
	       packet.offset, pt_errstr (packet.errcode));
    }

  /* ERRCODE here is the last pt_pkt_sync_forward result.  */
  if (errcode != -pte_eos)
    warning (_("Failed to synchronize onto the Intel Processor Trace "
	       "stream: %s."), pt_errstr (pt_errcode (errcode)));
}
2706
2707/* Update the packet history in BTINFO. */
2708
/* Update the packet history in BTINFO by decoding the raw Intel PT data
   into BTINFO->maint.  No-op if there is no trace data.  */

static void
btrace_maint_update_pt_packets (struct btrace_thread_info *btinfo)
{
  struct pt_packet_decoder *decoder;
  const struct btrace_cpu *cpu;
  struct btrace_data_pt *pt;
  struct pt_config config;
  int errcode;

  pt = &btinfo->data.variant.pt;

  /* Nothing to do if there is no trace.  */
  if (pt->size == 0)
    return;

  memset (&config, 0, sizeof(config));

  config.size = sizeof (config);
  config.begin = pt->data;
  config.end = pt->data + pt->size;

  /* Prefer a user-specified CPU (maint set btrace pt cpu) over the one
     recorded with the trace.  */
  cpu = record_btrace_get_cpu ();
  if (cpu == nullptr)
    cpu = &pt->config.cpu;

  /* We treat an unknown vendor as 'no errata'.  */
  if (cpu->vendor != CV_UNKNOWN)
    {
      config.cpu.vendor = pt_translate_cpu_vendor (cpu->vendor);
      config.cpu.family = cpu->family;
      config.cpu.model = cpu->model;
      config.cpu.stepping = cpu->stepping;

      /* Enable CPU-specific errata workarounds in the decoder.  */
      errcode = pt_cpu_errata (&config.errata, &config.cpu);
      if (errcode < 0)
	error (_("Failed to configure the Intel Processor Trace "
		 "decoder: %s."), pt_errstr (pt_errcode (errcode)));
    }

  decoder = pt_pkt_alloc_decoder (&config);
  if (decoder == NULL)
    error (_("Failed to allocate the Intel Processor Trace decoder."));

  /* The decoder is a raw C resource: free it on both the exception and
     the normal path.  */
  try
    {
      btrace_maint_decode_pt (&btinfo->maint, decoder);
    }
  catch (const gdb_exception &except)
    {
      pt_pkt_free_decoder (decoder);

      if (except.reason < 0)
	throw;
    }

  pt_pkt_free_decoder (decoder);
}
2766
#endif /* defined (HAVE_LIBIPT) */
2768
2769/* Update the packet maintenance information for BTINFO and store the
2770 low and high bounds into BEGIN and END, respectively.
2771 Store the current iterator state into FROM and TO. */
2772
2773static void
2775 unsigned int *begin, unsigned int *end,
2776 unsigned int *from, unsigned int *to)
2777{
2778 switch (btinfo->data.format)
2779 {
2780 default:
2781 *begin = 0;
2782 *end = 0;
2783 *from = 0;
2784 *to = 0;
2785 break;
2786
2787 case BTRACE_FORMAT_BTS:
2788 /* Nothing to do - we operate directly on BTINFO->DATA. */
2789 *begin = 0;
2790 *end = btinfo->data.variant.bts.blocks->size ();
2791 *from = btinfo->maint.variant.bts.packet_history.begin;
2792 *to = btinfo->maint.variant.bts.packet_history.end;
2793 break;
2794
2795#if defined (HAVE_LIBIPT)
2796 case BTRACE_FORMAT_PT:
2797 if (btinfo->maint.variant.pt.packets == nullptr)
2798 btinfo->maint.variant.pt.packets = new std::vector<btrace_pt_packet>;
2799
2800 if (btinfo->maint.variant.pt.packets->empty ())
2801 btrace_maint_update_pt_packets (btinfo);
2802
2803 *begin = 0;
2804 *end = btinfo->maint.variant.pt.packets->size ();
2805 *from = btinfo->maint.variant.pt.packet_history.begin;
2806 *to = btinfo->maint.variant.pt.packet_history.end;
2807 break;
2808#endif /* defined (HAVE_LIBIPT) */
2809 }
2810}
2811
2812/* Print packets in BTINFO from BEGIN (inclusive) until END (exclusive) and
2813 update the current iterator position. */
2814
2815static void
2817 unsigned int begin, unsigned int end)
2818{
2819 switch (btinfo->data.format)
2820 {
2821 default:
2822 break;
2823
2824 case BTRACE_FORMAT_BTS:
2825 {
2826 const std::vector<btrace_block> &blocks
2827 = *btinfo->data.variant.bts.blocks;
2828 unsigned int blk;
2829
2830 for (blk = begin; blk < end; ++blk)
2831 {
2832 const btrace_block &block = blocks.at (blk);
2833
2834 gdb_printf ("%u\tbegin: %s, end: %s\n", blk,
2835 core_addr_to_string_nz (block.begin),
2836 core_addr_to_string_nz (block.end));
2837 }
2838
2839 btinfo->maint.variant.bts.packet_history.begin = begin;
2840 btinfo->maint.variant.bts.packet_history.end = end;
2841 }
2842 break;
2843
2844#if defined (HAVE_LIBIPT)
2845 case BTRACE_FORMAT_PT:
2846 {
2847 const std::vector<btrace_pt_packet> &packets
2848 = *btinfo->maint.variant.pt.packets;
2849 unsigned int pkt;
2850
2851 for (pkt = begin; pkt < end; ++pkt)
2852 {
2853 const struct btrace_pt_packet &packet = packets.at (pkt);
2854
2855 gdb_printf ("%u\t", pkt);
2856 gdb_printf ("0x%" PRIx64 "\t", packet.offset);
2857
2858 if (packet.errcode == pte_ok)
2859 pt_print_packet (&packet.packet);
2860 else
2861 gdb_printf ("[error: %s]", pt_errstr (packet.errcode));
2862
2863 gdb_printf ("\n");
2864 }
2865
2866 btinfo->maint.variant.pt.packet_history.begin = begin;
2867 btinfo->maint.variant.pt.packet_history.end = end;
2868 }
2869 break;
2870#endif /* defined (HAVE_LIBIPT) */
2871 }
2872}
2873
2874/* Read a number from an argument string. */
2875
2876static unsigned int
2877get_uint (const char **arg)
2878{
2879 const char *begin, *pos;
2880 char *end;
2881 unsigned long number;
2882
2883 begin = *arg;
2884 pos = skip_spaces (begin);
2885
2886 if (!isdigit (*pos))
2887 error (_("Expected positive number, got: %s."), pos);
2888
2889 number = strtoul (pos, &end, 10);
2890 if (number > UINT_MAX)
2891 error (_("Number too big."));
2892
2893 *arg += (end - begin);
2894
2895 return (unsigned int) number;
2896}
2897
2898/* Read a context size from an argument string. */
2899
2900static int
2901get_context_size (const char **arg)
2902{
2903 const char *pos = skip_spaces (*arg);
2904
2905 if (!isdigit (*pos))
2906 error (_("Expected positive number, got: %s."), pos);
2907
2908 char *end;
2909 long result = strtol (pos, &end, 10);
2910 *arg = end;
2911 return result;
2912}
2913
2914/* Complain about junk at the end of an argument string. */
2915
2916static void
2917no_chunk (const char *arg)
2918{
2919 if (*arg != 0)
2920 error (_("Junk after argument: %s."), arg);
2921}
2922
2923/* The "maintenance btrace packet-history" command. */
2924
2925static void
2926maint_btrace_packet_history_cmd (const char *arg, int from_tty)
2927{
2928 struct btrace_thread_info *btinfo;
2929 unsigned int size, begin, end, from, to;
2930
2932 if (tp == NULL)
2933 error (_("No thread."));
2934
2935 size = 10;
2936 btinfo = &tp->btrace;
2937
2938 btrace_maint_update_packets (btinfo, &begin, &end, &from, &to);
2939 if (begin == end)
2940 {
2941 gdb_printf (_("No trace.\n"));
2942 return;
2943 }
2944
2945 if (arg == NULL || *arg == 0 || strcmp (arg, "+") == 0)
2946 {
2947 from = to;
2948
2949 if (end - from < size)
2950 size = end - from;
2951 to = from + size;
2952 }
2953 else if (strcmp (arg, "-") == 0)
2954 {
2955 to = from;
2956
2957 if (to - begin < size)
2958 size = to - begin;
2959 from = to - size;
2960 }
2961 else
2962 {
2963 from = get_uint (&arg);
2964 if (end <= from)
2965 error (_("'%u' is out of range."), from);
2966
2967 arg = skip_spaces (arg);
2968 if (*arg == ',')
2969 {
2970 arg = skip_spaces (++arg);
2971
2972 if (*arg == '+')
2973 {
2974 arg += 1;
2975 size = get_context_size (&arg);
2976
2977 no_chunk (arg);
2978
2979 if (end - from < size)
2980 size = end - from;
2981 to = from + size;
2982 }
2983 else if (*arg == '-')
2984 {
2985 arg += 1;
2986 size = get_context_size (&arg);
2987
2988 no_chunk (arg);
2989
2990 /* Include the packet given as first argument. */
2991 from += 1;
2992 to = from;
2993
2994 if (to - begin < size)
2995 size = to - begin;
2996 from = to - size;
2997 }
2998 else
2999 {
3000 to = get_uint (&arg);
3001
3002 /* Include the packet at the second argument and silently
3003 truncate the range. */
3004 if (to < end)
3005 to += 1;
3006 else
3007 to = end;
3008
3009 no_chunk (arg);
3010 }
3011 }
3012 else
3013 {
3014 no_chunk (arg);
3015
3016 if (end - from < size)
3017 size = end - from;
3018 to = from + size;
3019 }
3020
3021 dont_repeat ();
3022 }
3023
3024 btrace_maint_print_packets (btinfo, from, to);
3025}
3026
3027/* The "maintenance btrace clear-packet-history" command. */
3028
3029static void
3030maint_btrace_clear_packet_history_cmd (const char *args, int from_tty)
3031{
3032 if (args != NULL && *args != 0)
3033 error (_("Invalid argument."));
3034
3035 if (inferior_ptid == null_ptid)
3036 error (_("No thread."));
3037
3039 btrace_thread_info *btinfo = &tp->btrace;
3040
3041 /* Must clear the maint data before - it depends on BTINFO->DATA. */
3042 btrace_maint_clear (btinfo);
3043 btinfo->data.clear ();
3044}
3045
3046/* The "maintenance btrace clear" command. */
3047
3048static void
3049maint_btrace_clear_cmd (const char *args, int from_tty)
3050{
3051 if (args != NULL && *args != 0)
3052 error (_("Invalid argument."));
3053
3054 if (inferior_ptid == null_ptid)
3055 error (_("No thread."));
3056
3058 btrace_clear (tp);
3059}
3060
3061/* The "maintenance info btrace" command. */
3062
3063static void
3064maint_info_btrace_cmd (const char *args, int from_tty)
3065{
3066 struct btrace_thread_info *btinfo;
3067 const struct btrace_config *conf;
3068
3069 if (args != NULL && *args != 0)
3070 error (_("Invalid argument."));
3071
3072 if (inferior_ptid == null_ptid)
3073 error (_("No thread."));
3074
3076
3077 btinfo = &tp->btrace;
3078
3079 conf = btrace_conf (btinfo);
3080 if (conf == NULL)
3081 error (_("No btrace configuration."));
3082
3083 gdb_printf (_("Format: %s.\n"),
3084 btrace_format_string (conf->format));
3085
3086 switch (conf->format)
3087 {
3088 default:
3089 break;
3090
3091 case BTRACE_FORMAT_BTS:
3092 gdb_printf (_("Number of packets: %zu.\n"),
3093 btinfo->data.variant.bts.blocks->size ());
3094 break;
3095
3096#if defined (HAVE_LIBIPT)
3097 case BTRACE_FORMAT_PT:
3098 {
3099 struct pt_version version;
3100
3101 version = pt_library_version ();
3102 gdb_printf (_("Version: %u.%u.%u%s.\n"), version.major,
3103 version.minor, version.build,
3104 version.ext != NULL ? version.ext : "");
3105
3106 btrace_maint_update_pt_packets (btinfo);
3107 gdb_printf (_("Number of packets: %zu.\n"),
3108 ((btinfo->maint.variant.pt.packets == nullptr)
3109 ? 0 : btinfo->maint.variant.pt.packets->size ()));
3110 }
3111 break;
3112#endif /* defined (HAVE_LIBIPT) */
3113 }
3114}
3115
3116/* The "maint show btrace pt skip-pad" show value function. */
3117
static void
show_maint_btrace_pt_skip_pad (struct ui_file *file, int from_tty,
			       struct cmd_list_element *c,
			       const char *value)
{
  /* VALUE is the current setting rendered as a string by the set/show
     machinery; just echo it.  */
  gdb_printf (file, _("Skip PAD packets is %s.\n"), value);
}
3125
3126
3127/* Initialize btrace maintenance commands. */
3128
3129void _initialize_btrace ();
3130void
3132{
3134 _("Info about branch tracing data."), &maintenanceinfolist);
3135
3137 _("Branch tracing maintenance commands."),
3139
3141 _("Set branch tracing specific variables."),
3142 _("Show branch tracing specific variables."),
3147
3149 _("Set Intel Processor Trace specific variables."),
3150 _("Show Intel Processor Trace specific variables."),
3155
3158Set whether PAD packets should be skipped in the btrace packet history."), _("\
3159Show whether PAD packets should be skipped in the btrace packet history."),_("\
3160When enabled, PAD packets are ignored in the btrace packet history."),
3164
3166 _("Print the raw branch tracing data.\n\
3167With no argument, print ten more packets after the previous ten-line print.\n\
3168With '-' as argument print ten packets before a previous ten-line print.\n\
3169One argument specifies the starting packet of a ten-line print.\n\
3170Two arguments with comma between specify starting and ending packets to \
3171print.\n\
3172Preceded with '+'/'-' the second argument specifies the distance from the \
3173first."),
3175
3176 add_cmd ("clear-packet-history", class_maintenance,
3178 _("Clears the branch tracing packet history.\n\
3179Discards the raw branch tracing data but not the execution history data."),
3181
3183 _("Clears the branch tracing data.\n\
3184Discards the raw branch tracing data and the execution history data.\n\
3185The next 'record' command will fetch the branch tracing data anew."),
3187
3188}
#define bits(obj, st, fn)
void xfree(void *)
struct gdbarch * target_gdbarch(void)
struct symbol * find_pc_function(CORE_ADDR pc)
Definition blockframe.c:150
CORE_ADDR get_pc_function_start(CORE_ADDR pc)
Definition blockframe.c:86
void btrace_enable(struct thread_info *tp, const struct btrace_config *conf)
Definition btrace.c:1606
static void show_maint_btrace_pt_skip_pad(struct ui_file *file, int from_tty, struct cmd_list_element *c, const char *value)
Definition btrace.c:3119
static int btrace_stitch_bts(struct btrace_data_bts *btrace, struct thread_info *tp)
Definition btrace.c:1703
static struct btrace_function * ftrace_update_function(struct btrace_thread_info *btinfo, CORE_ADDR pc)
Definition btrace.c:549
const struct btrace_function * btrace_call_get(const struct btrace_call_iterator *it)
Definition btrace.c:2324
unsigned int btrace_call_prev(struct btrace_call_iterator *it, unsigned int stride)
Definition btrace.c:2412
void _initialize_btrace()
Definition btrace.c:3131
static void no_chunk(const char *arg)
Definition btrace.c:2917
unsigned int btrace_call_next(struct btrace_call_iterator *it, unsigned int stride)
Definition btrace.c:2376
unsigned int btrace_call_number(const struct btrace_call_iterator *it)
Definition btrace.c:2335
static struct cmd_list_element * maint_btrace_show_cmdlist
Definition btrace.c:48
static enum btrace_insn_class ftrace_classify_insn(struct gdbarch *gdbarch, CORE_ADDR pc)
Definition btrace.c:676
static int ftrace_bridge_gap(struct btrace_thread_info *btinfo, struct btrace_function *lhs, struct btrace_function *rhs, int min_matches)
Definition btrace.c:913
void btrace_insn_end(struct btrace_insn_iterator *it, const struct btrace_thread_info *btinfo)
Definition btrace.c:2075
static struct btrace_function * ftrace_new_function(struct btrace_thread_info *btinfo, struct minimal_symbol *mfun, struct symbol *fun)
Definition btrace.c:236
static struct btrace_function * ftrace_new_switch(struct btrace_thread_info *btinfo, struct minimal_symbol *mfun, struct symbol *fun)
Definition btrace.c:497
int btrace_insn_cmp(const struct btrace_insn_iterator *lhs, const struct btrace_insn_iterator *rhs)
Definition btrace.c:2241
static bool btrace_ends_with_single_insn(const struct btrace_thread_info *btinfo)
Definition btrace.c:2307
static void btrace_compute_ftrace(struct thread_info *tp, struct btrace_data *btrace, const struct btrace_cpu *cpu)
Definition btrace.c:1564
static struct cmd_list_element * maint_btrace_cmdlist
Definition btrace.c:46
static void btrace_add_pc(struct thread_info *tp)
Definition btrace.c:1586
static unsigned int ftrace_call_num_insn(const struct btrace_function *bfun)
Definition btrace.c:146
static void ftrace_fixup_level(struct btrace_thread_info *btinfo, struct btrace_function *bfun, int adjustment)
Definition btrace.c:724
static struct btrace_function * ftrace_find_call(struct btrace_thread_info *btinfo, struct btrace_function *bfun)
Definition btrace.c:391
static void maint_btrace_clear_cmd(const char *args, int from_tty)
Definition btrace.c:3049
static void btrace_finalize_ftrace(struct thread_info *tp, std::vector< unsigned int > &gaps)
Definition btrace.c:1554
static const char * ftrace_print_insn_addr(const struct btrace_insn *insn)
Definition btrace.c:115
static void ftrace_debug(const struct btrace_function *bfun, const char *prefix)
Definition btrace.c:126
int btrace_find_insn_by_number(struct btrace_insn_iterator *it, const struct btrace_thread_info *btinfo, unsigned int number)
Definition btrace.c:2255
void btrace_free_objfile(struct objfile *objfile)
Definition btrace.c:2011
unsigned int btrace_insn_prev(struct btrace_insn_iterator *it, unsigned int stride)
Definition btrace.c:2183
static int btrace_stitch_trace(struct btrace_data *btrace, struct thread_info *tp)
Definition btrace.c:1791
void btrace_call_begin(struct btrace_call_iterator *it, const struct btrace_thread_info *btinfo)
Definition btrace.c:2350
static void btrace_compute_ftrace_pt(struct thread_info *tp, const struct btrace_data_pt *btrace, std::vector< unsigned int > &gaps)
Definition btrace.c:1510
const char * btrace_decode_error(enum btrace_format format, int errcode)
Definition btrace.c:1857
static void btrace_bridge_gaps(struct thread_info *tp, std::vector< unsigned int > &gaps)
Definition btrace.c:973
static void ftrace_update_insns(struct btrace_function *bfun, const btrace_insn &insn)
Definition btrace.c:665
#define DEBUG(msg, args...)
Definition btrace.c:60
static struct btrace_function * ftrace_new_tailcall(struct btrace_thread_info *btinfo, struct minimal_symbol *mfun, struct symbol *fun)
Definition btrace.c:332
static int ftrace_match_backtrace(struct btrace_thread_info *btinfo, struct btrace_function *lhs, struct btrace_function *rhs)
Definition btrace.c:702
static void btrace_compute_ftrace_1(struct thread_info *tp, struct btrace_data *btrace, const struct btrace_cpu *cpu, std::vector< unsigned int > &gaps)
Definition btrace.c:1525
static struct btrace_function * ftrace_get_caller(struct btrace_thread_info *btinfo, struct btrace_function *bfun)
Definition btrace.c:352
int btrace_find_call_by_number(struct btrace_call_iterator *it, const struct btrace_thread_info *btinfo, unsigned int number)
Definition btrace.c:2456
static void ftrace_update_caller(struct btrace_function *bfun, struct btrace_function *caller, btrace_function_flags flags)
Definition btrace.c:265
void btrace_set_insn_history(struct btrace_thread_info *btinfo, const struct btrace_insn_iterator *begin, const struct btrace_insn_iterator *end)
Definition btrace.c:2473
void btrace_disable(struct thread_info *tp)
Definition btrace.c:1664
static struct btrace_function * ftrace_new_call(struct btrace_thread_info *btinfo, struct minimal_symbol *mfun, struct symbol *fun)
Definition btrace.c:312
void btrace_fetch(struct thread_info *tp, const struct btrace_cpu *cpu)
Definition btrace.c:1906
static const char * ftrace_print_filename(const struct btrace_function *bfun)
Definition btrace.c:96
static struct cmd_list_element * maint_btrace_pt_set_cmdlist
Definition btrace.c:49
void btrace_set_call_history(struct btrace_thread_info *btinfo, const struct btrace_call_iterator *begin, const struct btrace_call_iterator *end)
Definition btrace.c:2487
static void btrace_clear_history(struct btrace_thread_info *btinfo)
Definition btrace.c:1816
const struct btrace_config * btrace_conf(const struct btrace_thread_info *btinfo)
Definition btrace.c:1653
static int get_context_size(const char **arg)
Definition btrace.c:2901
static void ftrace_connect_backtrace(struct btrace_thread_info *btinfo, struct btrace_function *lhs, struct btrace_function *rhs)
Definition btrace.c:885
unsigned int btrace_insn_next(struct btrace_insn_iterator *it, unsigned int stride)
Definition btrace.c:2101
static struct btrace_function * ftrace_new_gap(struct btrace_thread_info *btinfo, int errcode, std::vector< unsigned int > &gaps)
Definition btrace.c:521
static void btrace_maint_print_packets(struct btrace_thread_info *btinfo, unsigned int begin, unsigned int end)
Definition btrace.c:2816
static struct cmd_list_element * maint_btrace_pt_show_cmdlist
Definition btrace.c:50
static const char * ftrace_print_function_name(const struct btrace_function *bfun)
Definition btrace.c:75
static void maint_btrace_packet_history_cmd(const char *arg, int from_tty)
Definition btrace.c:2926
static void maint_info_btrace_cmd(const char *args, int from_tty)
Definition btrace.c:3064
static bool maint_btrace_pt_skip_pad
Definition btrace.c:53
#define DEBUG_FTRACE(msg, args...)
Definition btrace.c:69
static void ftrace_fixup_caller(struct btrace_thread_info *btinfo, struct btrace_function *bfun, struct btrace_function *caller, btrace_function_flags flags)
Definition btrace.c:282
const struct btrace_insn * btrace_insn_get(const struct btrace_insn_iterator *it)
Definition btrace.c:2022
static void maint_btrace_clear_packet_history_cmd(const char *args, int from_tty)
Definition btrace.c:3030
void btrace_teardown(struct thread_info *tp)
Definition btrace.c:1684
static struct btrace_function * ftrace_new_return(struct btrace_thread_info *btinfo, struct minimal_symbol *mfun, struct symbol *fun)
Definition btrace.c:415
static void ftrace_compute_global_level_offset(struct btrace_thread_info *btinfo)
Definition btrace.c:744
static struct cmd_list_element * maint_btrace_set_cmdlist
Definition btrace.c:47
static struct btrace_function * ftrace_find_call_by_number(struct btrace_thread_info *btinfo, unsigned int number)
Definition btrace.c:162
unsigned int btrace_insn_number(const struct btrace_insn_iterator *it)
Definition btrace.c:2053
static struct btrace_function * ftrace_find_caller(struct btrace_thread_info *btinfo, struct btrace_function *bfun, struct minimal_symbol *mfun, struct symbol *fun)
Definition btrace.c:367
int btrace_insn_get_error(const struct btrace_insn_iterator *it)
Definition btrace.c:2045
static int ftrace_function_switched(const struct btrace_function *bfun, const struct minimal_symbol *mfun, const struct symbol *fun)
Definition btrace.c:187
static void ftrace_connect_bfun(struct btrace_thread_info *btinfo, struct btrace_function *prev, struct btrace_function *next)
Definition btrace.c:774
static void btrace_compute_ftrace_bts(struct thread_info *tp, const struct btrace_data_bts *btrace, std::vector< unsigned int > &gaps)
Definition btrace.c:1050
static unsigned int get_uint(const char **arg)
Definition btrace.c:2877
int btrace_is_empty(struct thread_info *tp)
Definition btrace.c:2511
void btrace_insn_begin(struct btrace_insn_iterator *it, const struct btrace_thread_info *btinfo)
Definition btrace.c:2061
void btrace_clear(struct thread_info *tp)
Definition btrace.c:1986
static void btrace_maint_update_packets(struct btrace_thread_info *btinfo, unsigned int *begin, unsigned int *end, unsigned int *from, unsigned int *to)
Definition btrace.c:2774
void btrace_call_end(struct btrace_call_iterator *it, const struct btrace_thread_info *btinfo)
Definition btrace.c:2363
int btrace_is_replaying(struct thread_info *tp)
Definition btrace.c:2503
int btrace_call_cmp(const struct btrace_call_iterator *lhs, const struct btrace_call_iterator *rhs)
Definition btrace.c:2446
static void btrace_maint_clear(struct btrace_thread_info *btinfo)
Definition btrace.c:1830
@ BFUN_UP_LINKS_TO_RET
Definition btrace.h:90
@ BFUN_UP_LINKS_TO_TAILCALL
Definition btrace.h:94
btrace_insn_class
Definition btrace.h:44
@ BTRACE_INSN_RETURN
Definition btrace.h:52
@ BTRACE_INSN_JUMP
Definition btrace.h:55
@ BTRACE_INSN_OTHER
Definition btrace.h:46
@ BTRACE_INSN_CALL
Definition btrace.h:49
@ BDE_PT_OVERFLOW
Definition btrace.h:118
@ BDE_PT_USER_QUIT
Definition btrace.h:112
@ BDE_PT_DISABLED
Definition btrace.h:115
@ BTRACE_INSN_FLAG_SPECULATIVE
Definition btrace.h:62
@ BDE_BTS_OVERFLOW
Definition btrace.h:102
@ BDE_BTS_INSN_SIZE
Definition btrace.h:105
thread_info * find_thread(ptid_t ptid)
Definition inferior.c:238
friend class regcache
Definition regcache.h:269
ptid_t ptid
Definition gdbthread.h:259
struct cmd_list_element * maintenancelist
Definition cli-cmds.c:143
struct cmd_list_element * maintenanceinfolist
Definition cli-cmds.c:147
struct cmd_list_element * maintenance_show_cmdlist
Definition maint.c:752
struct cmd_list_element * maintenance_set_cmdlist
Definition maint.c:751
struct cmd_list_element * add_cmd(const char *name, enum command_class theclass, const char *doc, struct cmd_list_element **list)
Definition cli-decode.c:233
set_show_commands add_setshow_prefix_cmd(const char *name, command_class theclass, const char *set_doc, const char *show_doc, cmd_list_element **set_subcommands_list, cmd_list_element **show_subcommands_list, cmd_list_element **set_list, cmd_list_element **show_list)
Definition cli-decode.c:428
set_show_commands add_setshow_boolean_cmd(const char *name, enum command_class theclass, bool *var, const char *set_doc, const char *show_doc, const char *help_doc, cmd_func_ftype *set_func, show_value_ftype *show_func, struct cmd_list_element **set_list, struct cmd_list_element **show_list)
Definition cli-decode.c:809
struct cmd_list_element * add_basic_prefix_cmd(const char *name, enum command_class theclass, const char *doc, struct cmd_list_element **subcommands, int allow_unknown, struct cmd_list_element **list)
Definition cli-decode.c:391
void dont_repeat()
Definition top.c:696
@ class_maintenance
Definition command.h:65
int gdb_insn_length(struct gdbarch *gdbarch, CORE_ADDR addr)
Definition disasm.c:1230
void reinit_frame_cache(void)
Definition frame.c:2107
int gdbarch_insn_is_call(struct gdbarch *gdbarch, CORE_ADDR addr)
Definition gdbarch.c:5106
int gdbarch_insn_is_ret(struct gdbarch *gdbarch, CORE_ADDR addr)
Definition gdbarch.c:5123
int gdbarch_insn_is_jump(struct gdbarch *gdbarch, CORE_ADDR addr)
Definition gdbarch.c:5140
all_non_exited_threads_range all_non_exited_threads(process_stratum_target *proc_target=nullptr, ptid_t filter_ptid=minus_one_ptid)
Definition gdbthread.h:753
struct thread_info * inferior_thread(void)
Definition thread.c:85
void switch_to_thread(struct thread_info *thr)
Definition thread.c:1360
bool can_access_registers_thread(struct thread_info *thread)
Definition thread.c:982
const char * print_thread_id(struct thread_info *thr)
Definition thread.c:1470
mach_port_t kern_return_t mach_port_t mach_msg_type_name_t msgportsPoly mach_port_t kern_return_t pid_t pid mach_port_t kern_return_t mach_port_t task mach_port_t kern_return_t int flags
Definition gnu-nat.c:1861
mach_port_t mach_port_t name mach_port_t mach_port_t name kern_return_t int status
Definition gnu-nat.c:1790
size_t size
Definition go32-nat.c:239
ptid_t inferior_ptid
Definition infcmd.c:74
struct inferior * current_inferior(void)
Definition inferior.c:55
struct bound_minimal_symbol lookup_minimal_symbol_by_pc(CORE_ADDR pc)
Definition minsyms.c:996
#define prefix(a, b, R, do)
Definition ppc64-tdep.c:52
const struct btrace_cpu * record_btrace_get_cpu(void)
unsigned int record_debug
Definition record.c:34
CORE_ADDR regcache_read_pc(struct regcache *regcache)
Definition regcache.c:1333
struct regcache * get_thread_regcache(process_stratum_target *target, ptid_t ptid)
Definition regcache.c:400
const char * symtab_to_fullname(struct symtab *s)
Definition source.c:1234
const char * symtab_to_filename_for_display(struct symtab *symtab)
Definition source.c:1269
Definition block.h:109
CORE_ADDR end() const
Definition block.h:119
struct minimal_symbol * minsym
Definition minsyms.h:49
struct btrace_call_iterator begin
Definition btrace.h:226
struct btrace_call_iterator end
Definition btrace.h:227
unsigned int index
Definition btrace.h:209
const struct btrace_thread_info * btinfo
Definition btrace.h:206
btrace_function_flags flags
Definition btrace.h:186
unsigned int up
Definition btrace.h:155
struct minimal_symbol * msym
Definition btrace.h:142
unsigned int prev
Definition btrace.h:149
unsigned int number
Definition btrace.h:175
std::vector< btrace_insn > insn
Definition btrace.h:160
unsigned int next
Definition btrace.h:150
struct symbol * sym
Definition btrace.h:143
unsigned int insn_offset
Definition btrace.h:170
struct btrace_insn_iterator begin
Definition btrace.h:217
struct btrace_insn_iterator end
Definition btrace.h:218
unsigned int call_index
Definition btrace.h:196
unsigned int insn_index
Definition btrace.h:199
const struct btrace_thread_info * btinfo
Definition btrace.h:193
gdb_byte size
Definition btrace.h:75
btrace_insn_flags flags
Definition btrace.h:81
enum btrace_insn_class iclass
Definition btrace.h:78
CORE_ADDR pc
Definition btrace.h:72
union btrace_maint_info::@22 variant
struct btrace_maint_info::@22::@23 bts
struct btrace_maint_packet_history packet_history
Definition btrace.h:292
unsigned int ngaps
Definition btrace.h:339
struct btrace_target_info * target
Definition btrace.h:323
std::vector< btrace_function > functions
Definition btrace.h:331
struct btrace_insn_iterator * replay
Definition btrace.h:353
struct btrace_maint_info maint
Definition btrace.h:359
struct btrace_call_history * call_history
Definition btrace.h:348
struct btrace_insn_history * insn_history
Definition btrace.h:345
struct btrace_data data
Definition btrace.h:326
const char * print_name() const
Definition symtab.h:475
const char * linkage_name() const
Definition symtab.h:460
struct symtab * symtab
Definition symtab.h:1457
Definition value.h:130
int target_read_code(CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
Definition target.c:1844
void target_disable_btrace(struct btrace_target_info *btinfo)
Definition target.c:4056
struct btrace_target_info * target_enable_btrace(thread_info *tp, const struct btrace_config *conf)
Definition target.c:4048
void target_teardown_btrace(struct btrace_target_info *btinfo)
Definition target.c:4064
std::string target_pid_to_str(ptid_t ptid)
Definition target.c:2623
enum btrace_error target_read_btrace(struct btrace_data *btrace, struct btrace_target_info *btinfo, enum btrace_read_type type)
Definition target.c:4072
const struct btrace_config * target_btrace_conf(const struct btrace_target_info *btinfo)
Definition target.c:4084
void gdb_printf(struct ui_file *stream, const char *format,...)
Definition utils.c:1886
const char version[]
Definition version.c:2