linux-btrace.c
/* Linux-dependent part of branch trace support for GDB, and GDBserver.

   Copyright (C) 2013-2023 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "gdbsupport/common-defs.h"
#include "linux-btrace.h"
#include "gdbsupport/common-regcache.h"
#include "gdbsupport/gdb_wait.h"
#include "x86-cpuid.h"
#include "gdbsupport/filestuff.h"
#include "gdbsupport/scoped_fd.h"
#include "gdbsupport/scoped_mmap.h"

#include <inttypes.h>

#include <sys/syscall.h>

#if HAVE_LINUX_PERF_EVENT_H && defined(SYS_perf_event_open)
#include <unistd.h>
#include <sys/mman.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <sys/types.h>
#include <signal.h>

/* A branch trace record in perf_event.  */
struct perf_event_bts
{
  /* The linear address of the branch source.  */
  uint64_t from;

  /* The linear address of the branch destination.  */
  uint64_t to;
};

/* A perf_event branch trace sample.  */
struct perf_event_sample
{
  /* The perf_event sample header.  */
  struct perf_event_header header;

  /* The perf_event branch tracing payload.  */
  struct perf_event_bts bts;
};

/* Identify the cpu we're running on.  */
static struct btrace_cpu
btrace_this_cpu (void)
{
  struct btrace_cpu cpu;
  unsigned int eax, ebx, ecx, edx;
  int ok;

  memset (&cpu, 0, sizeof (cpu));

  ok = x86_cpuid (0, &eax, &ebx, &ecx, &edx);
  if (ok != 0)
    {
      if (ebx == signature_INTEL_ebx && ecx == signature_INTEL_ecx
          && edx == signature_INTEL_edx)
        {
          unsigned int cpuid, ignore;

          ok = x86_cpuid (1, &cpuid, &ignore, &ignore, &ignore);
          if (ok != 0)
            {
              cpu.vendor = CV_INTEL;

              cpu.family = (cpuid >> 8) & 0xf;
              if (cpu.family == 0xf)
                cpu.family += (cpuid >> 20) & 0xff;

              cpu.model = (cpuid >> 4) & 0xf;
              if ((cpu.family == 0x6) || ((cpu.family & 0xf) == 0xf))
                cpu.model += (cpuid >> 12) & 0xf0;
            }
        }
      else if (ebx == signature_AMD_ebx && ecx == signature_AMD_ecx
               && edx == signature_AMD_edx)
        cpu.vendor = CV_AMD;
    }

  return cpu;
}
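
/* A worked example of the family/model decoding above, with a CPUID value
   assumed for illustration: on an Ivy Bridge CPU, leaf 1 returns
   EAX = 0x000306a9.  Then family = (0x306a9 >> 8) & 0xf = 0x6, which is
   not 0xf, so no extended family bits are added; model = (0x306a9 >> 4)
   & 0xf = 0xa, and since family == 0x6 the extended model bits are added:
   model += (0x306a9 >> 12) & 0xf0 = 0x30, giving model 0x3a.  That value
   matches the Ivy Bridge entry in intel_supports_bts below.  */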

/* Return non-zero if there is new data in PEVENT; zero otherwise.  */

static int
perf_event_new_data (const struct perf_event_buffer *pev)
{
  return *pev->data_head != pev->last_head;
}

/* Copy the last SIZE bytes from PEV ending at DATA_HEAD and return a pointer
   to the memory holding the copy.
   The caller is responsible for freeing the memory.  */

static gdb_byte *
perf_event_read (const struct perf_event_buffer *pev, __u64 data_head,
                 size_t size)
{
  const gdb_byte *begin, *end, *start, *stop;
  gdb_byte *buffer;
  size_t buffer_size;
  __u64 data_tail;

  if (size == 0)
    return NULL;

  /* We should never ask for more data than the buffer can hold.  */
  buffer_size = pev->size;
  gdb_assert (size <= buffer_size);

  /* If we ask for more data than we seem to have, we wrap around and read
     data from the end of the buffer.  This is already handled by the %
     BUFFER_SIZE operation, below.  Here, we just need to make sure that we
     don't underflow.
     Note that this is perfectly OK for perf event buffers where data_head
     doesn't grow indefinitely and instead wraps around to remain within the
     buffer's boundaries.  */
  if (data_head < size)
    data_head += buffer_size;

  gdb_assert (size <= data_head);
  data_tail = data_head - size;

  begin = pev->mem;
  start = begin + data_tail % buffer_size;
  stop = begin + data_head % buffer_size;

  buffer = (gdb_byte *) xmalloc (size);

  if (start < stop)
    memcpy (buffer, start, stop - start);
  else
    {
      end = begin + buffer_size;

      memcpy (buffer, start, end - start);
      memcpy (buffer + (end - start), begin, stop - begin);
    }

  return buffer;
}
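
/* A worked example of the wrap-around copy above, with numbers assumed for
   illustration: for buffer_size = 4096, data_head = 4100 and size = 8, we
   get data_tail = 4092, start = begin + 4092 and stop = begin + 4.  Since
   start >= stop, the copy is split: 4 bytes from the buffer's end
   (begin + 4092 up to begin + 4096), then 4 bytes from its beginning
   (begin up to begin + 4).  */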

/* Copy the perf event buffer data from PEV.
   Store a pointer to the copy into DATA and its size in SIZE.  */

static void
perf_event_read_all (struct perf_event_buffer *pev, gdb_byte **data,
                     size_t *psize)
{
  size_t size;
  __u64 data_head;

  data_head = *pev->data_head;
  size = pev->size;

  *data = perf_event_read (pev, data_head, size);
  *psize = size;

  pev->last_head = data_head;
}

/* Try to determine the start address of the Linux kernel.  */

static uint64_t
linux_determine_kernel_start (void)
{
  static uint64_t kernel_start;
  static int cached;

  if (cached != 0)
    return kernel_start;

  cached = 1;

  gdb_file_up file = gdb_fopen_cloexec ("/proc/kallsyms", "r");
  if (file == NULL)
    return kernel_start;

  while (!feof (file.get ()))
    {
      char buffer[1024], symbol[8], *line;
      uint64_t addr;
      int match;

      line = fgets (buffer, sizeof (buffer), file.get ());
      if (line == NULL)
        break;

      match = sscanf (line, "%" SCNx64 " %*[tT] %7s", &addr, symbol);
      if (match != 2)
        continue;

      if (strcmp (symbol, "_text") == 0)
        {
          kernel_start = addr;
          break;
        }
    }

  return kernel_start;
}
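
/* For illustration, a typical /proc/kallsyms line matched by the sscanf
   above looks like this (the address varies from boot to boot with KASLR):

     ffffffff81000000 T _text

   The "%*[tT]" conversion accepts the type column for global (T) and local
   (t) text symbols without storing it.  */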

/* Check whether an address is in the kernel.  */

static inline int
perf_event_is_kernel_addr (uint64_t addr)
{
  uint64_t kernel_start;

  kernel_start = linux_determine_kernel_start ();
  if (kernel_start != 0ull)
    return (addr >= kernel_start);

  /* If we don't know the kernel's start address, let's check the most
     significant bit.  This will work at least for 64-bit kernels.  */
  return ((addr & (1ull << 63)) != 0);
}
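
/* For example (address assumed for illustration): 0xffffffff81000000 has
   bit 63 set and is thus treated as a kernel address by the fallback
   above, while 64-bit Linux user-space addresses have bit 63 clear.  */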

/* Check whether a perf event record should be skipped.  */

static inline int
perf_event_skip_bts_record (const struct perf_event_bts *bts)
{
  /* The hardware may report branches from kernel into user space.  Branches
     from user into kernel space will be suppressed.  We filter the former to
     provide a consistent branch trace excluding kernel.  */
  return perf_event_is_kernel_addr (bts->from);
}

/* Perform a few consistency checks on a perf event sample record.  This is
   meant to catch cases when we get out of sync with the perf event stream.  */

static inline int
perf_event_sample_ok (const struct perf_event_sample *sample)
{
  if (sample->header.type != PERF_RECORD_SAMPLE)
    return 0;

  if (sample->header.size != sizeof (*sample))
    return 0;

  return 1;
}

/* Branch trace is collected in a circular buffer [begin; end) as pairs of from
   and to addresses (plus a header).

   Start points into that buffer at the next sample position.
   We read the collected samples backwards from start.

   While reading the samples, we convert the information into a list of blocks.
   For two adjacent samples s1 and s2, we form a block b such that b.begin =
   s1.to and b.end = s2.from.

   In case the buffer overflows during sampling, one sample may have its lower
   part at the end and its upper part at the beginning of the buffer.  */

static std::vector<btrace_block> *
perf_event_read_bts (btrace_target_info *tinfo, const uint8_t *begin,
                     const uint8_t *end, const uint8_t *start, size_t size)
{
  std::vector<btrace_block> *btrace = new std::vector<btrace_block>;
  struct perf_event_sample sample;
  size_t read = 0;
  struct btrace_block block = { 0, 0 };
  struct regcache *regcache;

  gdb_assert (begin <= start);
  gdb_assert (start <= end);

  /* The first block ends at the current pc.  */
  regcache = get_thread_regcache_for_ptid (tinfo->ptid);
  block.end = regcache_read_pc (regcache);

  /* The buffer may contain a partial record as its last entry (i.e. when the
     buffer size is not a multiple of the sample size).  */
  read = sizeof (sample) - 1;

  for (; read < size; read += sizeof (sample))
    {
      const struct perf_event_sample *psample;

      /* Find the next perf_event sample in a backwards traversal.  */
      start -= sizeof (sample);

      /* If we're still inside the buffer, we're done.  */
      if (begin <= start)
        psample = (const struct perf_event_sample *) start;
      else
        {
          int missing;

          /* We're to the left of the ring buffer, we will wrap around and
             reappear at the very right of the ring buffer.  */

          missing = (begin - start);
          start = (end - missing);

          /* If the entire sample is missing, we're done.  */
          if (missing == sizeof (sample))
            psample = (const struct perf_event_sample *) start;
          else
            {
              uint8_t *stack;

              /* The sample wrapped around.  The lower part is at the end and
                 the upper part is at the beginning of the buffer.  */
              stack = (uint8_t *) &sample;

              /* Copy the two parts so we have a contiguous sample.  */
              memcpy (stack, start, missing);
              memcpy (stack + missing, begin, sizeof (sample) - missing);

              psample = &sample;
            }
        }

      if (!perf_event_sample_ok (psample))
        {
          warning (_("Branch trace may be incomplete."));
          break;
        }

      if (perf_event_skip_bts_record (&psample->bts))
        continue;

      /* We found a valid sample, so we can complete the current block.  */
      block.begin = psample->bts.to;

      btrace->push_back (block);

      /* Start the next block.  */
      block.end = psample->bts.from;
    }

  /* Push the last block (i.e. the first one of inferior execution), as well.
     We don't know where it starts, but we know where it ends.  If we're
     reading delta trace, we can fill in the start address later on.
     Otherwise we will prune it.  */
  block.begin = 0;
  btrace->push_back (block);

  return btrace;
}
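
/* A small worked example of the block formation above, with addresses
   assumed for illustration.  Suppose the two most recent samples, read
   backwards from START, are

     newest: { .from = 0x4070, .to = 0x4100 }
     older:  { .from = 0x4000, .to = 0x4050 }

   and the current pc is 0x4123.  The loop then yields the blocks

     [begin = 0x4100, end = 0x4123]   newest.to up to the current pc
     [begin = 0x4050, end = 0x4070]   older.to up to newest.from

   followed by the trailing block [0, 0x4000], whose begin is unknown and
   is either filled in by a later delta read or pruned.  */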

/* Check whether an Intel cpu supports BTS.  */

static int
intel_supports_bts (const struct btrace_cpu *cpu)
{
  switch (cpu->family)
    {
    case 0x6:
      switch (cpu->model)
        {
        case 0x1a: /* Nehalem */
        case 0x1f:
        case 0x1e:
        case 0x2e:
        case 0x25: /* Westmere */
        case 0x2c:
        case 0x2f:
        case 0x2a: /* Sandy Bridge */
        case 0x2d:
        case 0x3a: /* Ivy Bridge */

          /* AAJ122: LBR, BTM, or BTS records may have incorrect branch
             "from" information after an EIST transition, T-states, C1E, or
             Adaptive Thermal Throttling.  */
          return 0;
        }
    }

  return 1;
}

/* Check whether the cpu supports BTS.  */

static int
cpu_supports_bts (void)
{
  struct btrace_cpu cpu;

  cpu = btrace_this_cpu ();
  switch (cpu.vendor)
    {
    default:
      /* Don't know about others.  Let's assume they do.  */
      return 1;

    case CV_INTEL:
      return intel_supports_bts (&cpu);

    case CV_AMD:
      return 0;
    }
}

/* The perf_event_open syscall failed.  Try to print a helpful error
   message.  */

static void
diagnose_perf_event_open_fail ()
{
  switch (errno)
    {
    case EPERM:
    case EACCES:
      {
        static const char filename[] = "/proc/sys/kernel/perf_event_paranoid";
        errno = 0;
        gdb_file_up file = gdb_fopen_cloexec (filename, "r");
        if (file.get () == nullptr)
          error (_("Failed to open %s (%s).  Your system does not support "
                   "process recording."), filename, safe_strerror (errno));

        int level, found = fscanf (file.get (), "%d", &level);
        if (found == 1 && level > 2)
          error (_("You do not have permission to record the process.  "
                   "Try setting %s to 2 or less."), filename);
      }

      break;
    }

  error (_("Failed to start recording: %s"), safe_strerror (errno));
}
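
/* Usage note (not from the original source): when the check above fires,
   the paranoia level can typically be lowered as root, e.g.

     echo 2 | sudo tee /proc/sys/kernel/perf_event_paranoid

   Level 2 still permits tracing one's own processes; higher levels
   restrict perf_event_open for unprivileged users.  */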

/* Get the linux version of a btrace_target_info.  */

static linux_btrace_target_info *
get_linux_btrace_target_info (btrace_target_info *gtinfo)
{
  return gdb::checked_static_cast<linux_btrace_target_info *> (gtinfo);
}

/* Enable branch tracing in BTS format.  */

static struct btrace_target_info *
linux_enable_bts (ptid_t ptid, const struct btrace_config_bts *conf)
{
  size_t size, pages;
  __u64 data_offset;
  int pid, pg;

  if (!cpu_supports_bts ())
    error (_("BTS support has been disabled for the target cpu."));

  std::unique_ptr<linux_btrace_target_info> tinfo
    { gdb::make_unique<linux_btrace_target_info> (ptid) };

  tinfo->conf.format = BTRACE_FORMAT_BTS;

  tinfo->attr.size = sizeof (tinfo->attr);
  tinfo->attr.type = PERF_TYPE_HARDWARE;
  tinfo->attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
  tinfo->attr.sample_period = 1;

  /* We sample from and to address.  */
  tinfo->attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;

  tinfo->attr.exclude_kernel = 1;
  tinfo->attr.exclude_hv = 1;
  tinfo->attr.exclude_idle = 1;

  pid = ptid.lwp ();
  if (pid == 0)
    pid = ptid.pid ();

  errno = 0;
  scoped_fd fd (syscall (SYS_perf_event_open, &tinfo->attr, pid, -1, -1, 0));
  if (fd.get () < 0)
    diagnose_perf_event_open_fail ();

  /* Convert the requested size in bytes to pages (rounding up).  */
  pages = ((size_t) conf->size / PAGE_SIZE
           + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
  /* We need at least one page.  */
  if (pages == 0)
    pages = 1;

  /* The buffer size can be requested in powers of two pages.  Adjust PAGES
     to the next power of two.  */
  for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
    if ((pages & ((size_t) 1 << pg)) != 0)
      pages += ((size_t) 1 << pg);
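
  /* A worked example, with a request assumed for illustration: pages = 3
     (binary 11) is adjusted as follows.  At pg = 0, bit 0 is set, so pages
     becomes 4; at pg = 1, bit 1 of 4 is clear; at pg = 2, pages equals
     1 << 2 and the loop exits with pages = 4, the next power of two.  */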

  /* We try to allocate the requested size.
     If that fails, try to get as much as we can.  */
  scoped_mmap data;
  for (; pages > 0; pages >>= 1)
    {
      size_t length;
      __u64 data_size;

      data_size = (__u64) pages * PAGE_SIZE;

      /* Don't ask for more than we can represent in the configuration.  */
      if ((__u64) UINT_MAX < data_size)
        continue;

      size = (size_t) data_size;
      length = size + PAGE_SIZE;

      /* Check for overflows.  */
      if ((__u64) length != data_size + PAGE_SIZE)
        continue;

      errno = 0;
      /* The number of pages we request needs to be a power of two.  */
      data.reset (nullptr, length, PROT_READ, MAP_SHARED, fd.get (), 0);
      if (data.get () != MAP_FAILED)
        break;
    }

  if (pages == 0)
    error (_("Failed to map trace buffer: %s."), safe_strerror (errno));

  struct perf_event_mmap_page *header = (struct perf_event_mmap_page *)
    data.get ();
  data_offset = PAGE_SIZE;

#if defined (PERF_ATTR_SIZE_VER5)
  if (offsetof (struct perf_event_mmap_page, data_size) <= header->size)
    {
      __u64 data_size;

      data_offset = header->data_offset;
      data_size = header->data_size;

      size = (unsigned int) data_size;

      /* Check for overflows.  */
      if ((__u64) size != data_size)
        error (_("Failed to determine trace buffer size."));
    }
#endif /* defined (PERF_ATTR_SIZE_VER5) */

  tinfo->pev.size = size;
  tinfo->pev.data_head = &header->data_head;
  tinfo->pev.mem = (const uint8_t *) data.release () + data_offset;
  tinfo->pev.last_head = 0ull;
  tinfo->header = header;
  tinfo->file = fd.release ();

  tinfo->conf.bts.size = (unsigned int) size;
  return tinfo.release ();
}

#if defined (PERF_ATTR_SIZE_VER5)

/* Determine the event type.  */

static int
perf_event_pt_event_type ()
{
  static const char filename[] = "/sys/bus/event_source/devices/intel_pt/type";

  errno = 0;
  gdb_file_up file = gdb_fopen_cloexec (filename, "r");
  if (file.get () == nullptr)
    switch (errno)
      {
      case EACCES:
      case EFAULT:
      case EPERM:
        error (_("Failed to open %s (%s).  You do not have permission "
                 "to use Intel PT."), filename, safe_strerror (errno));

      case ENOTDIR:
      case ENOENT:
        error (_("Failed to open %s (%s).  Your system does not support "
                 "Intel PT."), filename, safe_strerror (errno));

      default:
        error (_("Failed to open %s: %s."), filename, safe_strerror (errno));
      }

  int type, found = fscanf (file.get (), "%d", &type);
  if (found != 1)
    error (_("Failed to read the PT event type from %s."), filename);

  return type;
}
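
/* For illustration (value assumed): the sysfs file read above holds a
   single decimal integer, the dynamic PMU type id the kernel assigned to
   intel_pt, e.g. "8".  It is used as perf_event_attr.type in
   linux_enable_pt below.  */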

/* Enable branch tracing in Intel Processor Trace format.  */

static struct btrace_target_info *
linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
{
  size_t pages;
  int pid, pg;

  pid = ptid.lwp ();
  if (pid == 0)
    pid = ptid.pid ();

  std::unique_ptr<linux_btrace_target_info> tinfo
    { gdb::make_unique<linux_btrace_target_info> (ptid) };

  tinfo->conf.format = BTRACE_FORMAT_PT;

  tinfo->attr.size = sizeof (tinfo->attr);
  tinfo->attr.type = perf_event_pt_event_type ();

  tinfo->attr.exclude_kernel = 1;
  tinfo->attr.exclude_hv = 1;
  tinfo->attr.exclude_idle = 1;

  errno = 0;
  scoped_fd fd (syscall (SYS_perf_event_open, &tinfo->attr, pid, -1, -1, 0));
  if (fd.get () < 0)
    diagnose_perf_event_open_fail ();

  /* Allocate the configuration page.  */
  scoped_mmap data (nullptr, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
                    fd.get (), 0);
  if (data.get () == MAP_FAILED)
    error (_("Failed to map trace user page: %s."), safe_strerror (errno));

  struct perf_event_mmap_page *header = (struct perf_event_mmap_page *)
    data.get ();

  header->aux_offset = header->data_offset + header->data_size;

  /* Convert the requested size in bytes to pages (rounding up).  */
  pages = ((size_t) conf->size / PAGE_SIZE
           + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
  /* We need at least one page.  */
  if (pages == 0)
    pages = 1;

  /* The buffer size can be requested in powers of two pages.  Adjust PAGES
     to the next power of two.  */
  for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
    if ((pages & ((size_t) 1 << pg)) != 0)
      pages += ((size_t) 1 << pg);

  /* We try to allocate the requested size.
     If that fails, try to get as much as we can.  */
  scoped_mmap aux;
  for (; pages > 0; pages >>= 1)
    {
      size_t length;
      __u64 data_size;

      data_size = (__u64) pages * PAGE_SIZE;

      /* Don't ask for more than we can represent in the configuration.  */
      if ((__u64) UINT_MAX < data_size)
        continue;

      length = (size_t) data_size;

      /* Check for overflows.  */
      if ((__u64) length != data_size)
        continue;

      header->aux_size = data_size;

      errno = 0;
      aux.reset (nullptr, length, PROT_READ, MAP_SHARED, fd.get (),
                 header->aux_offset);
      if (aux.get () != MAP_FAILED)
        break;
    }

  if (pages == 0)
    error (_("Failed to map trace buffer: %s."), safe_strerror (errno));

  tinfo->pev.size = aux.size ();
  tinfo->pev.mem = (const uint8_t *) aux.release ();
  tinfo->pev.data_head = &header->aux_head;
  tinfo->header = (struct perf_event_mmap_page *) data.release ();
  gdb_assert (tinfo->header == header);
  tinfo->file = fd.release ();

  tinfo->conf.pt.size = (unsigned int) tinfo->pev.size;
  return tinfo.release ();
}

#else /* !defined (PERF_ATTR_SIZE_VER5) */

static struct btrace_target_info *
linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
{
  error (_("Intel Processor Trace support was disabled at compile time."));
}

#endif /* !defined (PERF_ATTR_SIZE_VER5) */

/* See linux-btrace.h.  */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      error (_("Bad branch trace format."));

    default:
      error (_("Unknown branch trace format."));

    case BTRACE_FORMAT_BTS:
      return linux_enable_bts (ptid, &conf->bts);

    case BTRACE_FORMAT_PT:
      return linux_enable_pt (ptid, &conf->pt);
    }
}

/* Disable BTS tracing.  */

static void
linux_disable_bts (struct linux_btrace_target_info *tinfo)
{
  munmap ((void *) tinfo->header, tinfo->pev.size + PAGE_SIZE);
  close (tinfo->file);
}

/* Disable Intel Processor Trace tracing.  */

static void
linux_disable_pt (struct linux_btrace_target_info *tinfo)
{
  munmap ((void *) tinfo->pev.mem, tinfo->pev.size);
  munmap ((void *) tinfo->header, PAGE_SIZE);
  close (tinfo->file);
}

/* See linux-btrace.h.  */

enum btrace_error
linux_disable_btrace (struct btrace_target_info *gtinfo)
{
  linux_btrace_target_info *tinfo
    = get_linux_btrace_target_info (gtinfo);

  switch (tinfo->conf.format)
    {
    case BTRACE_FORMAT_NONE:
      return BTRACE_ERR_NOT_SUPPORTED;

    case BTRACE_FORMAT_BTS:
      linux_disable_bts (tinfo);
      delete tinfo;
      return BTRACE_ERR_NONE;

    case BTRACE_FORMAT_PT:
      linux_disable_pt (tinfo);
      delete tinfo;
      return BTRACE_ERR_NONE;
    }

  return BTRACE_ERR_NOT_SUPPORTED;
}

/* Read branch trace data in BTS format for the thread given by TINFO into
   BTRACE using the TYPE reading method.  */

static enum btrace_error
linux_read_bts (btrace_data_bts *btrace, linux_btrace_target_info *tinfo,
                enum btrace_read_type type)
{
  const uint8_t *begin, *end, *start;
  size_t buffer_size, size;
  __u64 data_head = 0, data_tail;
  unsigned int retries = 5;

  /* For delta reads, we return at least the partial last block containing
     the current PC.  */
  if (type == BTRACE_READ_NEW && !perf_event_new_data (&tinfo->pev))
    return BTRACE_ERR_NONE;

  buffer_size = tinfo->pev.size;
  data_tail = tinfo->pev.last_head;

  /* We may need to retry reading the trace.  See below.  */
  while (retries--)
    {
      data_head = *tinfo->pev.data_head;

      /* Delete any leftover trace from the previous iteration.  */
      delete btrace->blocks;
      btrace->blocks = nullptr;

      if (type == BTRACE_READ_DELTA)
        {
          __u64 data_size;

          /* Determine the number of bytes to read and check for buffer
             overflows.  */

          /* Check for data head overflows.  We might be able to recover from
             those but they are very unlikely and it's not really worth the
             effort, I think.  */
          if (data_head < data_tail)
            return BTRACE_ERR_OVERFLOW;

          /* If the buffer is smaller than the trace delta, we overflowed.  */
          data_size = data_head - data_tail;
          if (buffer_size < data_size)
            return BTRACE_ERR_OVERFLOW;

          /* DATA_SIZE <= BUFFER_SIZE and therefore fits into a size_t.  */
          size = (size_t) data_size;
        }
      else
        {
          /* Read the entire buffer.  */
          size = buffer_size;

          /* Adjust the size if the buffer has not overflowed, yet.  */
          if (data_head < size)
            size = (size_t) data_head;
        }

      /* Data_head keeps growing; the buffer itself is circular.  */
      begin = tinfo->pev.mem;
      start = begin + data_head % buffer_size;

      if (data_head <= buffer_size)
        end = start;
      else
        end = begin + tinfo->pev.size;
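
      /* A worked example, with numbers assumed for illustration: with a
         4096-byte buffer and data_head = 5000, the buffer has wrapped, so
         START is begin + 904 and END is begin + 4096; a backwards read that
         runs past BEGIN reappears just below END.  With data_head = 1000,
         nothing has wrapped yet and both START and END are begin + 1000.  */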

      btrace->blocks = perf_event_read_bts (tinfo, begin, end, start, size);

      /* The stopping thread notifies its ptracer before it is scheduled out.
         On multi-core systems, the debugger might therefore run while the
         kernel might be writing the last branch trace records.

         Let's check whether the data head moved while we read the trace.  */
      if (data_head == *tinfo->pev.data_head)
        break;
    }

  tinfo->pev.last_head = data_head;

  /* Prune the incomplete last block (i.e. the first one of inferior execution)
     if we're not doing a delta read.  There is no way of filling in its zeroed
     BEGIN element.  */
  if (!btrace->blocks->empty () && type != BTRACE_READ_DELTA)
    btrace->blocks->pop_back ();

  return BTRACE_ERR_NONE;
}

/* Fill in the Intel Processor Trace configuration information.  */

static void
linux_fill_btrace_pt_config (struct btrace_data_pt_config *conf)
{
  conf->cpu = btrace_this_cpu ();
}

/* Read branch trace data in Intel Processor Trace format for the thread
   given by TINFO into BTRACE using the TYPE reading method.  */

static enum btrace_error
linux_read_pt (btrace_data_pt *btrace, linux_btrace_target_info *tinfo,
               enum btrace_read_type type)
{
  linux_fill_btrace_pt_config (&btrace->config);

  switch (type)
    {
    case BTRACE_READ_DELTA:
      /* We don't support delta reads.  The data head (i.e. aux_head) wraps
         around to stay inside the aux buffer.  */
      return BTRACE_ERR_NOT_SUPPORTED;

    case BTRACE_READ_NEW:
      if (!perf_event_new_data (&tinfo->pev))
        return BTRACE_ERR_NONE;

      /* Fall through.  */
    case BTRACE_READ_ALL:
      perf_event_read_all (&tinfo->pev, &btrace->data, &btrace->size);
      return BTRACE_ERR_NONE;
    }

  internal_error (_("Unknown btrace read type."));
}

/* See linux-btrace.h.  */

enum btrace_error
linux_read_btrace (struct btrace_data *btrace,
                   struct btrace_target_info *gtinfo,
                   enum btrace_read_type type)
{
  linux_btrace_target_info *tinfo
    = get_linux_btrace_target_info (gtinfo);

  switch (tinfo->conf.format)
    {
    case BTRACE_FORMAT_NONE:
      return BTRACE_ERR_NOT_SUPPORTED;

    case BTRACE_FORMAT_BTS:
      /* We read btrace in BTS format.  */
      btrace->format = BTRACE_FORMAT_BTS;
      btrace->variant.bts.blocks = NULL;

      return linux_read_bts (&btrace->variant.bts, tinfo, type);

    case BTRACE_FORMAT_PT:
      /* We read btrace in Intel Processor Trace format.  */
      btrace->format = BTRACE_FORMAT_PT;
      btrace->variant.pt.data = NULL;
      btrace->variant.pt.size = 0;

      return linux_read_pt (&btrace->variant.pt, tinfo, type);
    }
  internal_error (_("Unknown branch trace format."));
}

/* See linux-btrace.h.  */

const struct btrace_config *
linux_btrace_conf (const struct btrace_target_info *tinfo)
{
  return &tinfo->conf;
}

#else /* !HAVE_LINUX_PERF_EVENT_H */

/* See linux-btrace.h.  */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
  return NULL;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_disable_btrace (struct btrace_target_info *tinfo)
{
  return BTRACE_ERR_NOT_SUPPORTED;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_read_btrace (struct btrace_data *btrace,
                   struct btrace_target_info *tinfo,
                   enum btrace_read_type type)
{
  return BTRACE_ERR_NOT_SUPPORTED;
}

/* See linux-btrace.h.  */

const struct btrace_config *
linux_btrace_conf (const struct btrace_target_info *tinfo)
{
  return NULL;
}

#endif /* !HAVE_LINUX_PERF_EVENT_H */