infrun.c
1/* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986-2023 Free Software Foundation, Inc.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21#include "defs.h"
22#include "displaced-stepping.h"
23#include "infrun.h"
24#include <ctype.h>
25#include "symtab.h"
26#include "frame.h"
27#include "inferior.h"
28#include "breakpoint.h"
29#include "gdbcore.h"
30#include "gdbcmd.h"
31#include "target.h"
32#include "target-connection.h"
33#include "gdbthread.h"
34#include "annotate.h"
35#include "symfile.h"
36#include "top.h"
37#include "ui.h"
38#include "inf-loop.h"
39#include "regcache.h"
40#include "value.h"
41#include "observable.h"
42#include "language.h"
43#include "solib.h"
44#include "main.h"
45#include "block.h"
46#include "mi/mi-common.h"
47#include "event-top.h"
48#include "record.h"
49#include "record-full.h"
50#include "inline-frame.h"
51#include "jit.h"
52#include "tracepoint.h"
53#include "skip.h"
54#include "probe.h"
55#include "objfiles.h"
56#include "completer.h"
57#include "target-descriptions.h"
58#include "target-dcache.h"
59#include "terminal.h"
60#include "solist.h"
61#include "gdbsupport/event-loop.h"
62#include "thread-fsm.h"
63#include "gdbsupport/enum-flags.h"
65#include "gdbsupport/gdb_optional.h"
66#include "arch-utils.h"
67#include "gdbsupport/scope-exit.h"
68#include "gdbsupport/forward-scope-exit.h"
69#include "gdbsupport/gdb_select.h"
70#include <unordered_map>
71#include "async-event.h"
72#include "gdbsupport/selftest.h"
73#include "scoped-mock-context.h"
74#include "test-target.h"
75#include "gdbsupport/common-debug.h"
76#include "gdbsupport/buildargv.h"
77#include "extension.h"
78#include "disasm.h"
79#include "interps.h"
80
81/* Prototypes for local functions */
82
83static void sig_print_info (enum gdb_signal);
84
85static void sig_print_header (void);
86
87static void follow_inferior_reset_breakpoints (void);
88
89static bool currently_stepping (struct thread_info *tp);
90
92
94
95static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
96
97static bool maybe_software_singlestep (struct gdbarch *gdbarch);
98
99static void resume (gdb_signal sig);
100
101static void wait_for_inferior (inferior *inf);
102
103static void restart_threads (struct thread_info *event_thread,
104 inferior *inf = nullptr);
105
106static bool start_step_over (void);
107
108static bool step_over_info_valid_p (void);
109
110/* Asynchronous signal handler registered as event loop source for
111 when we have pending events ready to be passed to the core. */
113
114/* Stores whether infrun_async was previously enabled or disabled.
115 Starts off as -1, indicating "never enabled/disabled". */
116static int infrun_is_async = -1;
117
118/* See infrun.h. */
119
120void
135
136/* See infrun.h. */
137
138void
143
144/* When set, stop the 'step' command if we enter a function which has
145 no line number information. The normal behavior is that we step
146 over such a function. */
148static void
149show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
150 struct cmd_list_element *c, const char *value)
151{
152 gdb_printf (file, _("Mode of the step operation is %s.\n"), value);
153}
154
155/* proceed and normal_stop use this to notify the user when the
156 inferior stopped in a different thread than it had been running in.
157 It can also be used to find for which thread normal_stop last
158 reported a stop. */
160
161/* See infrun.h. */
162
163void
165{
166 if (inferior_ptid == null_ptid)
167 previous_thread = nullptr;
168 else
169 previous_thread = thread_info_ref::new_reference (inferior_thread ());
170}
171
172/* See infrun.h. */
173
176{
177 return previous_thread.get ();
178}
179
180/* If set (default for legacy reasons), when following a fork, GDB
181 will detach from one of the fork branches, child or parent.
182 Exactly which branch is detached depends on 'set follow-fork-mode'
183 setting. */
184
185static bool detach_fork = true;
186
187bool debug_infrun = false;
188static void
189show_debug_infrun (struct ui_file *file, int from_tty,
190 struct cmd_list_element *c, const char *value)
191{
192 gdb_printf (file, _("Inferior debugging is %s.\n"), value);
193}
194
195/* Support for disabling address space randomization. */
196
198
199static void
200show_disable_randomization (struct ui_file *file, int from_tty,
201 struct cmd_list_element *c, const char *value)
202{
204 gdb_printf (file,
205 _("Disabling randomization of debuggee's "
206 "virtual address space is %s.\n"),
207 value);
208 else
209 gdb_puts (_("Disabling randomization of debuggee's "
210 "virtual address space is unsupported on\n"
211 "this platform.\n"), file);
212}
213
214static void
215set_disable_randomization (const char *args, int from_tty,
216 struct cmd_list_element *c)
217{
219 error (_("Disabling randomization of debuggee's "
220 "virtual address space is unsupported on\n"
221 "this platform."));
222}
223
224/* User interface for non-stop mode. */
225
226bool non_stop = false;
227static bool non_stop_1 = false;
228
229static void
230set_non_stop (const char *args, int from_tty,
231 struct cmd_list_element *c)
232{
234 {
236 error (_("Cannot change this setting while the inferior is running."));
237 }
238
240}
241
242static void
243show_non_stop (struct ui_file *file, int from_tty,
244 struct cmd_list_element *c, const char *value)
245{
246 gdb_printf (file,
247 _("Controlling the inferior in non-stop mode is %s.\n"),
248 value);
249}
250
251/* "Observer mode" is somewhat like a more extreme version of
252 non-stop, in which all GDB operations that might affect the
253 target's execution have been disabled. */
254
255static bool observer_mode = false;
256static bool observer_mode_1 = false;
257
258static void
259set_observer_mode (const char *args, int from_tty,
260 struct cmd_list_element *c)
261{
263 {
265 error (_("Cannot change this setting while the inferior is running."));
266 }
267
269
274 /* We can insert fast tracepoints in or out of observer mode,
275 but enable them if we're going into this mode. */
276 if (observer_mode)
280
281 /* Going *into* observer mode we must force non-stop, then
282 going out we leave it that way. */
283 if (observer_mode)
284 {
285 pagination_enabled = false;
286 non_stop = non_stop_1 = true;
287 }
288
289 if (from_tty)
290 gdb_printf (_("Observer mode is now %s.\n"),
291 (observer_mode ? "on" : "off"));
292}
293
294static void
295show_observer_mode (struct ui_file *file, int from_tty,
296 struct cmd_list_element *c, const char *value)
297{
298 gdb_printf (file, _("Observer mode is %s.\n"), value);
299}
300
301/* This updates the value of observer mode based on changes in
302 permissions. Note that we are deliberately ignoring the values of
303 may-write-registers and may-write-memory, since the user may have
304 reason to enable these during a session, for instance to turn on a
305 debugging-related global. */
306
307void
309{
310 bool newval = (!may_insert_breakpoints
313 && !may_stop
314 && non_stop);
315
316 /* Let the user know if things change. */
317 if (newval != observer_mode)
318 gdb_printf (_("Observer mode is now %s.\n"),
319 (newval ? "on" : "off"));
320
322}
323
324/* Tables of how to react to signals; the user sets them. */
325
326static unsigned char signal_stop[GDB_SIGNAL_LAST];
327static unsigned char signal_print[GDB_SIGNAL_LAST];
328static unsigned char signal_program[GDB_SIGNAL_LAST];
329
330/* Table of signals that are registered with "catch signal". A
331 non-zero entry indicates that the signal is caught by some "catch
332 signal" command. */
333static unsigned char signal_catch[GDB_SIGNAL_LAST];
334
335/* Table of signals that the target may silently handle.
336 This is automatically determined from the flags above,
337 and simply cached here. */
338static unsigned char signal_pass[GDB_SIGNAL_LAST];
339
340#define SET_SIGS(nsigs,sigs,flags) \
341 do { \
342 int signum = (nsigs); \
343 while (signum-- > 0) \
344 if ((sigs)[signum]) \
345 (flags)[signum] = 1; \
346 } while (0)
347
348#define UNSET_SIGS(nsigs,sigs,flags) \
349 do { \
350 int signum = (nsigs); \
351 while (signum-- > 0) \
352 if ((sigs)[signum]) \
353 (flags)[signum] = 0; \
354 } while (0)
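
/* [Editor's illustration, not part of infrun.c] A minimal standalone
   sketch of what the SET_SIGS/UNSET_SIGS helpers above do: every signal
   number marked in SIGS gets its corresponding FLAGS entry turned on
   (or off).  The array size and contents below are invented for the
   example, and the two macro definitions above are assumed to be in
   scope.  */

#include <stdio.h>

#define EXAMPLE_NSIGS 4

int
main (void)
{
  unsigned char sigs[EXAMPLE_NSIGS]  = { 0, 1, 0, 1 }; /* signals selected by the user */
  unsigned char flags[EXAMPLE_NSIGS] = { 0, 0, 0, 0 }; /* e.g., a signal_stop-style table */

  SET_SIGS (EXAMPLE_NSIGS, sigs, flags);   /* flags becomes { 0, 1, 0, 1 } */
  UNSET_SIGS (EXAMPLE_NSIGS, sigs, flags); /* flags is back to all zeroes */

  for (int i = 0; i < EXAMPLE_NSIGS; i++)
    printf ("%d ", flags[i]);
  printf ("\n");
  return 0;
}
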
355
356/* Update the target's copy of SIGNAL_PROGRAM. The sole purpose of
357 this function is to avoid exporting `signal_program'. */
358
359void
364
365/* Value to pass to target_resume() to cause all threads to resume. */
366
367#define RESUME_ALL minus_one_ptid
368
369/* Command list pointer for the "stop" placeholder. */
370
372
373/* Nonzero if we want to give control to the user when we're notified
374 of shared library events by the dynamic linker. */
376
377/* Enable or disable optional shared library event breakpoints
378 as appropriate when the above flag is changed. */
379
380static void
381set_stop_on_solib_events (const char *args,
382 int from_tty, struct cmd_list_element *c)
383{
385}
386
387static void
388show_stop_on_solib_events (struct ui_file *file, int from_tty,
389 struct cmd_list_element *c, const char *value)
390{
391 gdb_printf (file, _("Stopping for shared library events is %s.\n"),
392 value);
393}
394
395/* True after stop if current stack frame should be printed. */
396
398
399/* This is a cached copy of the target/ptid/waitstatus of the last
400 event returned by target_wait().
401 This information is returned by get_last_target_status(). */
405
406void init_thread_stepping_state (struct thread_info *tss);
407
408static const char follow_fork_mode_child[] = "child";
409static const char follow_fork_mode_parent[] = "parent";
410
411static const char *const follow_fork_mode_kind_names[] = {
414 nullptr
415};
416
418static void
419show_follow_fork_mode_string (struct ui_file *file, int from_tty,
420 struct cmd_list_element *c, const char *value)
421{
422 gdb_printf (file,
423 _("Debugger response to a program "
424 "call of fork or vfork is \"%s\".\n"),
425 value);
426}
427
428
429/* Handle changes to the inferior list based on the type of fork,
430 which process is being followed, and whether the other process
431 should be detached. On entry inferior_ptid must be the ptid of
432 the fork parent. At return inferior_ptid is the ptid of the
433 followed inferior. */
434
435static bool
436follow_fork_inferior (bool follow_child, bool detach_fork)
437{
439
440 infrun_debug_printf ("follow_child = %d, detach_fork = %d",
441 follow_child, detach_fork);
442
444 gdb_assert (fork_kind == TARGET_WAITKIND_FORKED
445 || fork_kind == TARGET_WAITKIND_VFORKED);
446 bool has_vforked = fork_kind == TARGET_WAITKIND_VFORKED;
447 ptid_t parent_ptid = inferior_ptid;
449
450 if (has_vforked
451 && !non_stop /* Non-stop always resumes both branches. */
453 && !(follow_child || detach_fork || sched_multi))
454 {
455 /* The parent stays blocked inside the vfork syscall until the
456 child execs or exits. If we don't let the child run, then
457 the parent stays blocked. If we're telling the parent to run
458 in the foreground, the user will not be able to ctrl-c to get
459 back the terminal, effectively hanging the debug session. */
461Can not resume the parent process over vfork in the foreground while\n\
462holding the child stopped. Try \"set detach-on-fork\" or \
463\"set schedule-multiple\".\n"));
464 return true;
465 }
466
467 inferior *parent_inf = current_inferior ();
468 inferior *child_inf = nullptr;
469
470 gdb_assert (parent_inf->thread_waiting_for_vfork_done == nullptr);
471
472 if (!follow_child)
473 {
474 /* Detach new forked process? */
475 if (detach_fork)
476 {
477 /* Before detaching from the child, remove all breakpoints
478 from it. If we forked, then this has already been taken
479 care of by infrun.c. If we vforked however, any
480 breakpoint inserted in the parent is visible in the
481 child, even those added while stopped in a vfork
482 catchpoint. This will remove the breakpoints from the
483 parent also, but they'll be reinserted below. */
484 if (has_vforked)
485 {
486 /* Keep breakpoints list in sync. */
488 }
489
491 {
492 /* Ensure that we have a process ptid. */
493 ptid_t process_ptid = ptid_t (child_ptid.pid ());
494
496 gdb_printf (_("[Detaching after %s from child %s]\n"),
497 has_vforked ? "vfork" : "fork",
498 target_pid_to_str (process_ptid).c_str ());
499 }
500 }
501 else
502 {
503 /* Add process to GDB's tables. */
504 child_inf = add_inferior (child_ptid.pid ());
505
506 child_inf->attach_flag = parent_inf->attach_flag;
507 copy_terminal_info (child_inf, parent_inf);
508 child_inf->gdbarch = parent_inf->gdbarch;
509 child_inf->tdesc_info = parent_inf->tdesc_info;
510
511 child_inf->symfile_flags = SYMFILE_NO_READ;
512
513 /* If this is a vfork child, then the address-space is
514 shared with the parent. */
515 if (has_vforked)
516 {
517 child_inf->pspace = parent_inf->pspace;
518 child_inf->aspace = parent_inf->aspace;
519
520 exec_on_vfork (child_inf);
521
522 /* The parent will be frozen until the child is done
523 with the shared region. Keep track of the
524 parent. */
525 child_inf->vfork_parent = parent_inf;
526 child_inf->pending_detach = false;
527 parent_inf->vfork_child = child_inf;
528 parent_inf->pending_detach = false;
529 }
530 else
531 {
532 child_inf->aspace = new address_space ();
533 child_inf->pspace = new program_space (child_inf->aspace);
534 child_inf->removable = true;
535 clone_program_space (child_inf->pspace, parent_inf->pspace);
536 }
537 }
538
539 if (has_vforked)
540 {
541 /* If we detached from the child, then we have to be careful
542 to not insert breakpoints in the parent until the child
543 is done with the shared memory region. However, if we're
544 staying attached to the child, then we can and should
545 insert breakpoints, so that we can debug it. A
546 subsequent child exec or exit is enough to know when
547 the child stops using the parent's address space. */
549 = detach_fork ? inferior_thread () : nullptr;
551
553 ("parent_inf->thread_waiting_for_vfork_done == %s",
554 (parent_inf->thread_waiting_for_vfork_done == nullptr
555 ? "nullptr"
556 : (parent_inf->thread_waiting_for_vfork_done
557 ->ptid.to_string ().c_str ())));
558 }
559 }
560 else
561 {
562 /* Follow the child. */
563
565 {
566 std::string parent_pid = target_pid_to_str (parent_ptid);
567 std::string child_pid = target_pid_to_str (child_ptid);
568
570 gdb_printf (_("[Attaching after %s %s to child %s]\n"),
571 parent_pid.c_str (),
572 has_vforked ? "vfork" : "fork",
573 child_pid.c_str ());
574 }
575
576 /* Add the new inferior first, so that the target_detach below
577 doesn't unpush the target. */
578
579 child_inf = add_inferior (child_ptid.pid ());
580
581 child_inf->attach_flag = parent_inf->attach_flag;
582 copy_terminal_info (child_inf, parent_inf);
583 child_inf->gdbarch = parent_inf->gdbarch;
584 child_inf->tdesc_info = parent_inf->tdesc_info;
585
586 if (has_vforked)
587 {
588 /* If this is a vfork child, then the address-space is shared
589 with the parent. */
590 child_inf->aspace = parent_inf->aspace;
591 child_inf->pspace = parent_inf->pspace;
592
593 exec_on_vfork (child_inf);
594 }
595 else if (detach_fork)
596 {
597 /* We follow the child and detach from the parent: move the parent's
598 program space to the child. This simplifies some things, like
599 doing "next" over fork() and landing on the expected line in the
600 child (note, that is broken with "set detach-on-fork off").
601
602 Before assigning brand new spaces for the parent, remove
603 breakpoints from it: because the new pspace won't match
604 currently inserted locations, the normal detach procedure
605 wouldn't remove them, and we would leave them inserted when
606 detaching. */
607 remove_breakpoints_inf (parent_inf);
608
609 child_inf->aspace = parent_inf->aspace;
610 child_inf->pspace = parent_inf->pspace;
611 parent_inf->aspace = new address_space ();
612 parent_inf->pspace = new program_space (parent_inf->aspace);
613 clone_program_space (parent_inf->pspace, child_inf->pspace);
614
615 /* The parent inferior is still the current one, so keep things
616 in sync. */
617 set_current_program_space (parent_inf->pspace);
618 }
619 else
620 {
621 child_inf->aspace = new address_space ();
622 child_inf->pspace = new program_space (child_inf->aspace);
623 child_inf->removable = true;
624 child_inf->symfile_flags = SYMFILE_NO_READ;
625 clone_program_space (child_inf->pspace, parent_inf->pspace);
626 }
627 }
628
629 gdb_assert (current_inferior () == parent_inf);
630
631 /* If we are setting up an inferior for the child, target_follow_fork is
632 responsible for pushing the appropriate targets on the new inferior's
633 target stack and adding the initial thread (with ptid CHILD_PTID).
634
635 If we are not setting up an inferior for the child (because following
636 the parent and detach_fork is true), it is responsible for detaching
637 from CHILD_PTID. */
638 target_follow_fork (child_inf, child_ptid, fork_kind, follow_child,
640
641 gdb::observers::inferior_forked.notify (parent_inf, child_inf, fork_kind);
642
643 /* target_follow_fork must leave the parent as the current inferior. If we
644 want to follow the child, we make it the current one below. */
645 gdb_assert (current_inferior () == parent_inf);
646
647 /* If there is a child inferior, target_follow_fork must have created a thread
648 for it. */
649 if (child_inf != nullptr)
650 gdb_assert (!child_inf->thread_list.empty ());
651
652 /* Clear the parent thread's pending follow field. Do this before calling
653 target_detach, so that the target can differentiate the two following
654 cases:
655
656 - We continue past a fork with "follow-fork-mode == child" &&
657 "detach-on-fork on", and therefore detach the parent. In that
658 case the target should not detach the fork child.
659 - We run to a fork catchpoint and the user types "detach". In that
660 case, the target should detach the fork child in addition to the
661 parent.
662
663 The former case will have pending_follow cleared, the later will have
664 pending_follow set. */
665 thread_info *parent_thread = parent_inf->find_thread (parent_ptid);
666 gdb_assert (parent_thread != nullptr);
667 parent_thread->pending_follow.set_spurious ();
668
669 /* Detach the parent if needed. */
670 if (follow_child)
671 {
672 /* If we're vforking, we want to hold on to the parent until
673 the child exits or execs. At child exec or exit time we
674 can remove the old breakpoints from the parent and detach
675 or resume debugging it. Otherwise, detach the parent now;
676 we'll want to reuse its program/address spaces, but we
677 can't set them to the child before removing breakpoints
678 from the parent, otherwise, the breakpoints module could
679 decide to remove breakpoints from the wrong process (since
680 they'd be assigned to the same address space). */
681
682 if (has_vforked)
683 {
684 gdb_assert (child_inf->vfork_parent == nullptr);
685 gdb_assert (parent_inf->vfork_child == nullptr);
686 child_inf->vfork_parent = parent_inf;
687 child_inf->pending_detach = false;
688 parent_inf->vfork_child = child_inf;
689 parent_inf->pending_detach = detach_fork;
690 }
691 else if (detach_fork)
692 {
694 {
695 /* Ensure that we have a process ptid. */
696 ptid_t process_ptid = ptid_t (parent_ptid.pid ());
697
699 gdb_printf (_("[Detaching after fork from "
700 "parent %s]\n"),
701 target_pid_to_str (process_ptid).c_str ());
702 }
703
704 target_detach (parent_inf, 0);
705 }
706 }
707
708 /* If we ended up creating a new inferior, call post_create_inferior to inform
709 the various subcomponents. */
710 if (child_inf != nullptr)
711 {
712 /* If FOLLOW_CHILD, we leave CHILD_INF as the current inferior
713 (do not restore the parent as the current inferior). */
714 gdb::optional<scoped_restore_current_thread> maybe_restore;
715
716 if (!follow_child && !sched_multi)
717 maybe_restore.emplace ();
718
719 switch_to_thread (*child_inf->threads ().begin ());
721 }
722
723 return false;
724}
725
726/* Set the last target status as TP having stopped. */
727
728static void
734
735/* Tell the target to follow the fork we're stopped at. Returns true
736 if the inferior should be resumed; false, if the target for some
737 reason decided it's best not to resume. */
738
739static bool
741{
743
744 bool follow_child = (follow_fork_mode_string == follow_fork_mode_child);
745 bool should_resume = true;
746
747 /* Copy user stepping state to the new inferior thread. FIXME: the
748 followed fork child thread should have a copy of most of the
749 parent thread structure's run control related fields, not just these.
750 Initialized to avoid "may be used uninitialized" warnings from gcc. */
751 struct breakpoint *step_resume_breakpoint = nullptr;
752 struct breakpoint *exception_resume_breakpoint = nullptr;
753 CORE_ADDR step_range_start = 0;
754 CORE_ADDR step_range_end = 0;
755 int current_line = 0;
756 symtab *current_symtab = nullptr;
757 struct frame_id step_frame_id = { 0 };
758
759 if (!non_stop)
760 {
761 thread_info *cur_thr = inferior_thread ();
762
763 ptid_t resume_ptid
765 process_stratum_target *resume_target
766 = user_visible_resume_target (resume_ptid);
767
768 /* Check if there's a thread that we're about to resume, other
769 than the current, with an unfollowed fork/vfork. If so,
770 switch back to it, to tell the target to follow it (in either
771 direction). We'll afterwards refuse to resume, and inform
772 the user what happened. */
773 for (thread_info *tp : all_non_exited_threads (resume_target,
774 resume_ptid))
775 {
776 if (tp == cur_thr)
777 continue;
778
779 /* follow_fork_inferior clears tp->pending_follow, and below
780 we'll need the value after the follow_fork_inferior
781 call. */
782 target_waitkind kind = tp->pending_follow.kind ();
783
784 if (kind != TARGET_WAITKIND_SPURIOUS)
785 {
786 infrun_debug_printf ("need to follow-fork [%s] first",
787 tp->ptid.to_string ().c_str ());
788
789 switch_to_thread (tp);
790
791 /* Set up inferior(s) as specified by the caller, and
792 tell the target to do whatever is necessary to follow
793 either parent or child. */
794 if (follow_child)
795 {
796 /* The thread that started the execution command
797 won't exist in the child. Abort the command and
798 immediately stop in this thread, in the child,
799 inside fork. */
800 should_resume = false;
801 }
802 else
803 {
804 /* Following the parent, so let the thread fork its
805 child freely, it won't influence the current
806 execution command. */
807 if (follow_fork_inferior (follow_child, detach_fork))
808 {
809 /* Target refused to follow, or there's some
810 other reason we shouldn't resume. */
811 switch_to_thread (cur_thr);
813 return false;
814 }
815
816 /* If we're following a vfork, we need to leave
817 the just-forked thread as selected, as we need to
818 solo-resume it to collect the VFORK_DONE event.
819 If we're following a fork, however, switch back
820 to the original thread so that we continue stepping
821 it, etc. */
822 if (kind != TARGET_WAITKIND_VFORKED)
823 {
824 gdb_assert (kind == TARGET_WAITKIND_FORKED);
825 switch_to_thread (cur_thr);
826 }
827 }
828
829 break;
830 }
831 }
832 }
833
835
836 /* If there were any forks/vforks that were caught and are now to be
837 followed, then do so now. */
838 switch (tp->pending_follow.kind ())
839 {
842 {
843 ptid_t parent, child;
844 std::unique_ptr<struct thread_fsm> thread_fsm;
845
846 /* If the user did a next/step, etc, over a fork call,
847 preserve the stepping state in the fork child. */
848 if (follow_child && should_resume)
849 {
850 step_resume_breakpoint = clone_momentary_breakpoint
852 step_range_start = tp->control.step_range_start;
853 step_range_end = tp->control.step_range_end;
854 current_line = tp->current_line;
855 current_symtab = tp->current_symtab;
856 step_frame_id = tp->control.step_frame_id;
857 exception_resume_breakpoint
860
861 /* For now, delete the parent's sr breakpoint, otherwise,
862 parent/child sr breakpoints are considered duplicates,
863 and the child version will not be installed. Remove
864 this when the breakpoints module becomes aware of
865 inferiors and address spaces. */
868 tp->control.step_range_end = 0;
871 }
872
873 parent = inferior_ptid;
874 child = tp->pending_follow.child_ptid ();
875
876 /* If handling a vfork, stop all the inferior's threads, they will be
877 restarted when the vfork shared region is complete. */
880 stop_all_threads ("handling vfork", tp->inf);
881
882 process_stratum_target *parent_targ = tp->inf->process_target ();
883 /* Set up inferior(s) as specified by the caller, and tell the
884 target to do whatever is necessary to follow either parent
885 or child. */
886 if (follow_fork_inferior (follow_child, detach_fork))
887 {
888 /* Target refused to follow, or there's some other reason
889 we shouldn't resume. */
890 should_resume = 0;
891 }
892 else
893 {
894 /* If we followed the child, switch to it... */
895 if (follow_child)
896 {
897 tp = parent_targ->find_thread (child);
898 switch_to_thread (tp);
899
900 /* ... and preserve the stepping state, in case the
901 user was stepping over the fork call. */
902 if (should_resume)
903 {
905 = step_resume_breakpoint;
906 tp->control.step_range_start = step_range_start;
907 tp->control.step_range_end = step_range_end;
908 tp->current_line = current_line;
909 tp->current_symtab = current_symtab;
910 tp->control.step_frame_id = step_frame_id;
912 = exception_resume_breakpoint;
913 tp->set_thread_fsm (std::move (thread_fsm));
914 }
915 else
916 {
917 /* If we get here, it was because we're trying to
918 resume from a fork catchpoint, but, the user
919 has switched threads away from the thread that
920 forked. In that case, the resume command
921 issued is most likely not applicable to the
922 child, so just warn, and refuse to resume. */
923 warning (_("Not resuming: switched threads "
924 "before following fork child."));
925 }
926
927 /* Reset breakpoints in the child as appropriate. */
929 }
930 }
931 }
932 break;
934 /* Nothing to follow. */
935 break;
936 default:
937 internal_error ("Unexpected pending_follow.kind %d\n",
938 tp->pending_follow.kind ());
939 break;
940 }
941
942 if (!should_resume)
944 return should_resume;
945}
946
947static void
949{
950 struct thread_info *tp = inferior_thread ();
951
952 /* Was there a step_resume breakpoint? (There was if the user
953 did a "next" at the fork() call.) If so, explicitly reset its
954 thread number. Cloned step_resume breakpoints are disabled on
955 creation, so enable it here now that it is associated with the
956 correct thread.
957
958 step_resumes are a form of bp that are made to be per-thread.
959 Since we created the step_resume bp when the parent process
960 was being debugged, and now are switching to the child process,
961 from the breakpoint package's viewpoint, that's a switch of
962 "threads". We must update the bp's notion of which thread
963 it is for, or it'll be ignored when it triggers. */
964
966 {
969 }
970
971 /* Treat exception_resume breakpoints like step_resume breakpoints. */
973 {
976 }
977
978 /* Reinsert all breakpoints in the child. The user may have set
979 breakpoints after catching the fork, in which case those
980 were never set in the child, but only in the parent. This makes
981 sure the inserted breakpoints match the breakpoint list. */
982
985}
986
987/* The child has exited or execed: resume THREAD, a thread of the parent,
988 if it was meant to be executing. */
989
990static void
992{
993 if (thread->state == THREAD_RUNNING
994 && !thread->executing ()
995 && !thread->stop_requested
996 && thread->stop_signal () == GDB_SIGNAL_0)
997 {
998 infrun_debug_printf ("resuming vfork parent thread %s",
999 thread->ptid.to_string ().c_str ());
1000
1001 switch_to_thread (thread);
1003 proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT);
1004 }
1005}
1006
1007/* Called whenever we notice an exec or exit event, to handle
1008 detaching or resuming a vfork parent. */
1009
1010static void
1012{
1014
1015 struct inferior *inf = current_inferior ();
1016
1017 if (inf->vfork_parent)
1018 {
1019 inferior *resume_parent = nullptr;
1020
1021 /* This exec or exit marks the end of the shared memory region
1022 between the parent and the child. Break the bonds. */
1023 inferior *vfork_parent = inf->vfork_parent;
1024 inf->vfork_parent->vfork_child = nullptr;
1025 inf->vfork_parent = nullptr;
1026
1027 /* If the user wanted to detach from the parent, now is the
1028 time. */
1030 {
1031 struct program_space *pspace;
1032 struct address_space *aspace;
1033
1034 /* follow-fork child, detach-on-fork on. */
1035
1036 vfork_parent->pending_detach = false;
1037
1039
1040 /* We're letting loose of the parent. */
1041 thread_info *tp = any_live_thread_of_inferior (vfork_parent);
1042 switch_to_thread (tp);
1043
1044 /* We're about to detach from the parent, which implicitly
1045 removes breakpoints from its address space. There's a
1046 catch here: we want to reuse the spaces for the child,
1047 but, parent/child are still sharing the pspace at this
1048 point, although the exec in reality makes the kernel give
1049 the child a fresh set of new pages. The problem here is
1050 that the breakpoints module, being unaware of this, would
1051 likely choose the child process to write to the parent
1052 address space. Swapping the child temporarily away from
1053 the spaces has the desired effect. Yes, this is "sort
1054 of" a hack. */
1055
1056 pspace = inf->pspace;
1057 aspace = inf->aspace;
1058 inf->aspace = nullptr;
1059 inf->pspace = nullptr;
1060
1062 {
1063 std::string pidstr
1064 = target_pid_to_str (ptid_t (vfork_parent->pid));
1065
1067
1068 if (exec)
1069 {
1070 gdb_printf (_("[Detaching vfork parent %s "
1071 "after child exec]\n"), pidstr.c_str ());
1072 }
1073 else
1074 {
1075 gdb_printf (_("[Detaching vfork parent %s "
1076 "after child exit]\n"), pidstr.c_str ());
1077 }
1078 }
1079
1080 target_detach (vfork_parent, 0);
1081
1082 /* Put it back. */
1083 inf->pspace = pspace;
1084 inf->aspace = aspace;
1085 }
1086 else if (exec)
1087 {
1088 /* We're staying attached to the parent, so, really give the
1089 child a new address space. */
1090 inf->pspace = new program_space (maybe_new_address_space ());
1091 inf->aspace = inf->pspace->aspace;
1092 inf->removable = true;
1094
1095 resume_parent = vfork_parent;
1096 }
1097 else
1098 {
1099 /* If this is a vfork child exiting, then the pspace and
1100 aspaces were shared with the parent. Since we're
1101 reporting the process exit, we'll be mourning all that is
1102 found in the address space, and switching to null_ptid,
1103 preparing to start a new inferior. But, since we don't
1104 want to clobber the parent's address/program spaces, we
1105 go ahead and create a new one for this exiting
1106 inferior. */
1107
1108 /* Switch to no-thread while running clone_program_space, so
1109 that clone_program_space doesn't want to read the
1110 selected frame of a dead process. */
1111 scoped_restore_current_thread restore_thread;
1113
1114 inf->pspace = new program_space (maybe_new_address_space ());
1115 inf->aspace = inf->pspace->aspace;
1117 inf->removable = true;
1118 inf->symfile_flags = SYMFILE_NO_READ;
1119 clone_program_space (inf->pspace, vfork_parent->pspace);
1120
1121 resume_parent = vfork_parent;
1122 }
1123
1124 gdb_assert (current_program_space == inf->pspace);
1125
1126 if (non_stop && resume_parent != nullptr)
1127 {
1128 /* If the user wanted the parent to be running, let it go
1129 free now. */
1130 scoped_restore_current_thread restore_thread;
1131
1132 infrun_debug_printf ("resuming vfork parent process %d",
1133 resume_parent->pid);
1134
1135 for (thread_info *thread : resume_parent->threads ())
1136 proceed_after_vfork_done (thread);
1137 }
1138 }
1139}
1140
1141/* Handle TARGET_WAITKIND_VFORK_DONE. */
1142
1143static void
1145{
1147
1148 /* We only care about this event if inferior::thread_waiting_for_vfork_done is
1149 set, that is if we are waiting for a vfork child not under our control
1150 (because we detached it) to exec or exit.
1151
1152 If an inferior has vforked and we are debugging the child, we don't use
1153 the vfork-done event to get notified about the end of the shared address
1154 space window. We rely instead on the child's exec or exit event, and the
1155 inferior::vfork_{parent,child} fields are used instead. See
1156 handle_vfork_child_exec_or_exit for that. */
1157 if (event_thread->inf->thread_waiting_for_vfork_done == nullptr)
1158 {
1159 infrun_debug_printf ("not waiting for a vfork-done event");
1160 return;
1161 }
1162
1163 /* We stopped all threads (other than the vforking thread) of the inferior in
1164 follow_fork and kept them stopped until now. It should therefore not be
1165 possible for another thread to have reported a vfork during that window.
1166 If THREAD_WAITING_FOR_VFORK_DONE is set, it has to be the same thread whose
1167 vfork-done we are handling right now. */
1168 gdb_assert (event_thread->inf->thread_waiting_for_vfork_done == event_thread);
1169
1170 event_thread->inf->thread_waiting_for_vfork_done = nullptr;
1171 event_thread->inf->pspace->breakpoints_not_allowed = 0;
1172
1173 /* On non-stop targets, we stopped all the inferior's threads in follow_fork,
1174 resume them now. On all-stop targets, everything that needs to be resumed
1175 will be when we resume the event thread. */
1176 if (target_is_non_stop_p ())
1177 {
1178 /* restart_threads and start_step_over may change the current thread, make
1179 sure we leave the event thread as the current thread. */
1180 scoped_restore_current_thread restore_thread;
1181
1183 start_step_over ();
1184
1185 if (!step_over_info_valid_p ())
1186 restart_threads (event_thread, event_thread->inf);
1187 }
1188}
1189
1190/* Enum strings for "set|show follow-exec-mode". */
1191
1192static const char follow_exec_mode_new[] = "new";
1193static const char follow_exec_mode_same[] = "same";
1194static const char *const follow_exec_mode_names[] =
1195{
1198 nullptr,
1199};
1200
1202static void
1203show_follow_exec_mode_string (struct ui_file *file, int from_tty,
1204 struct cmd_list_element *c, const char *value)
1205{
1206 gdb_printf (file, _("Follow exec mode is \"%s\".\n"), value);
1207}
1208
1209/* EXEC_FILE_TARGET is assumed to be non-NULL. */
1210
1211static void
1212follow_exec (ptid_t ptid, const char *exec_file_target)
1213{
1214 int pid = ptid.pid ();
1215 ptid_t process_ptid;
1216
1217 /* Switch terminal for any messages produced e.g. by
1218 breakpoint_re_set. */
1220
1221 /* This is an exec event that we actually wish to pay attention to.
1222 Refresh our symbol table to the newly exec'd program, remove any
1223 momentary bp's, etc.
1224
1225 If there are breakpoints, they aren't really inserted now,
1226 since the exec() transformed our inferior into a fresh set
1227 of instructions.
1228
1229 We want to preserve symbolic breakpoints on the list, since
1230 we have hopes that they can be reset after the new a.out's
1231 symbol table is read.
1232
1233 However, any "raw" breakpoints must be removed from the list
1234 (e.g., the solib bp's), since their address is probably invalid
1235 now.
1236
1237 And, we DON'T want to call delete_breakpoints() here, since
1238 that may write the bp's "shadow contents" (the instruction
1239 value that was overwritten with a TRAP instruction). Since
1240 we now have a new a.out, those shadow contents aren't valid. */
1241
1243
1244 /* The target reports the exec event to the main thread, even if
1245 some other thread does the exec, and even if the main thread was
1246 stopped or already gone. We may still have non-leader threads of
1247 the process on our list. E.g., on targets that don't have thread
1248 exit events (like remote); or on native Linux in non-stop mode if
1249 there were only two threads in the inferior and the non-leader
1250 one is the one that execs (and nothing forces an update of the
1251 thread list up to here). When debugging remotely, it's best to
1252 avoid extra traffic, when possible, so avoid syncing the thread
1253 list with the target, and instead go ahead and delete all threads
1254 of the process but one that reported the event. Note this must
1255 be done before calling update_breakpoints_after_exec, as
1256 otherwise clearing the threads' resources would reference stale
1257 thread breakpoints -- it may have been one of these threads that
1258 stepped across the exec. We could just clear their stepping
1259 states, but as long as we're iterating, might as well delete
1260 them. Deleting them now rather than at the next user-visible
1261 stop provides a nicer sequence of events for user and MI
1262 notifications. */
1263 for (thread_info *th : all_threads_safe ())
1264 if (th->ptid.pid () == pid && th->ptid != ptid)
1265 delete_thread (th);
1266
1267 /* We also need to clear any left over stale state for the
1268 leader/event thread. E.g., if there was any step-resume
1269 breakpoint or similar, it's gone now. We cannot truly
1270 step-to-next statement through an exec(). */
1272 th->control.step_resume_breakpoint = nullptr;
1274 th->control.single_step_breakpoints = nullptr;
1275 th->control.step_range_start = 0;
1276 th->control.step_range_end = 0;
1277
1278 /* The user may have had the main thread held stopped in the
1279 previous image (e.g., schedlock on, or non-stop). Release
1280 it now. */
1281 th->stop_requested = 0;
1282
1284
1285 /* What is this a.out's name? */
1286 process_ptid = ptid_t (pid);
1287 gdb_printf (_("%s is executing new program: %s\n"),
1288 target_pid_to_str (process_ptid).c_str (),
1289 exec_file_target);
1290
1291 /* We've followed the inferior through an exec. Therefore, the
1292 inferior has essentially been killed & reborn. */
1293
1295
1296 gdb::unique_xmalloc_ptr<char> exec_file_host
1297 = exec_file_find (exec_file_target, nullptr);
1298
1299 /* If we were unable to map the executable target pathname onto a host
1300 pathname, tell the user that. Otherwise GDB's subsequent behavior
1301 is confusing. Maybe it would even be better to stop at this point
1302 so that the user can specify a file manually before continuing. */
1303 if (exec_file_host == nullptr)
1304 warning (_("Could not load symbols for executable %s.\n"
1305 "Do you need \"set sysroot\"?"),
1306 exec_file_target);
1307
1308 /* Reset the shared library package. This ensures that we get a
1309 shlib event when the child reaches "_start", at which point the
1310 dld will have had a chance to initialize the child. */
1311 /* Also, loading a symbol file below may trigger symbol lookups, and
1312 we don't want those to be satisfied by the libraries of the
1313 previous incarnation of this process. */
1314 no_shared_libraries (nullptr, 0);
1315
1316 inferior *execing_inferior = current_inferior ();
1317 inferior *following_inferior;
1318
1320 {
1321 /* The user wants to keep the old inferior and program spaces
1322 around. Create a new fresh one, and switch to it. */
1323
1324 /* Do exit processing for the original inferior before setting the new
1325 inferior's pid. Having two inferiors with the same pid would confuse
1326 find_inferior_p(t)id. Transfer the terminal state and info from the
1327 old to the new inferior. */
1328 following_inferior = add_inferior_with_spaces ();
1329
1330 swap_terminal_info (following_inferior, execing_inferior);
1331 exit_inferior (execing_inferior);
1332
1333 following_inferior->pid = pid;
1334 }
1335 else
1336 {
1337 /* follow-exec-mode is "same", we continue execution in the execing
1338 inferior. */
1339 following_inferior = execing_inferior;
1340
1341 /* The old description may no longer be fit for the new image.
1342 E.g, a 64-bit process exec'ed a 32-bit process. Clear the
1343 old description; we'll read a new one below. No need to do
1344 this on "follow-exec-mode new", as the old inferior stays
1345 around (its description is later cleared/refetched on
1346 restart). */
1348 }
1349
1350 target_follow_exec (following_inferior, ptid, exec_file_target);
1351
1352 gdb_assert (current_inferior () == following_inferior);
1353 gdb_assert (current_program_space == following_inferior->pspace);
1354
1355 /* Attempt to open the exec file. SYMFILE_DEFER_BP_RESET is used
1356 because the proper displacement for a PIE (Position Independent
1357 Executable) main symbol file will only be computed by
1358 solib_create_inferior_hook below. breakpoint_re_set would fail
1359 to insert the breakpoints with the zero displacement. */
1360 try_open_exec_file (exec_file_host.get (), following_inferior,
1362
1363 /* If the target can specify a description, read it. Must do this
1364 after flipping to the new executable (because the target supplied
1365 description must be compatible with the executable's
1366 architecture, and the old executable may e.g., be 32-bit, while
1367 the new one 64-bit), and before anything involving memory or
1368 registers. */
1370
1371 gdb::observers::inferior_execd.notify (execing_inferior, following_inferior);
1372
1374
1375 /* Reinsert all breakpoints. (Those which were symbolic have
1376 been reset to the proper address in the new a.out, thanks
1377 to symbol_file_command...). */
1379
1380 /* The next resume of this inferior should bring it to the shlib
1381 startup breakpoints. (If the user had also set bp's on
1382 "main" from the old (parent) process, then they'll auto-
1383 matically get reset there in the new process.). */
1384}
1385
1386/* The chain of threads that need to do a step-over operation to get
1387 past e.g., a breakpoint. What technique is used to step over the
1388 breakpoint/watchpoint does not matter -- all threads end up in the
1389 same queue, to maintain rough temporal order of execution, in order
1390 to avoid starvation, otherwise, we could e.g., find ourselves
1391 constantly stepping the same couple threads past their breakpoints
1392 over and over, if the single-step finishes fast enough. */
1394
1395/* Bit flags indicating what the thread needs to step over. */
1396
1398 {
1399 /* Step over a breakpoint. */
1401
1402 /* Step past a non-continuable watchpoint, in order to let the
1403 instruction execute so we can evaluate the watchpoint
1404 expression. */
1408
1409/* Info about an instruction that is being stepped over. */
1410
1412{
1413 /* If we're stepping past a breakpoint, this is the address space
1414 and address of the instruction the breakpoint is set at. We'll
1415 skip inserting all breakpoints here. Valid iff ASPACE is
1416 non-NULL. */
1417 const address_space *aspace = nullptr;
1418 CORE_ADDR address = 0;
1419
1420 /* The instruction being stepped over triggers a nonsteppable
1421 watchpoint. If true, we'll skip inserting watchpoints. */
1423
1424 /* The thread's global number. */
1425 int thread = -1;
1426};
1427
1428/* The step-over info of the location that is being stepped over.
1429
1430 Note that with async/breakpoint always-inserted mode, a user might
1431 set a new breakpoint/watchpoint/etc. exactly while a breakpoint is
1432 being stepped over. As setting a new breakpoint inserts all
1433 breakpoints, we need to make sure the breakpoint being stepped over
1434 isn't inserted then. We do that by only clearing the step-over
1435 info when the step-over is actually finished (or aborted).
1436
1437 Presently GDB can only step over one breakpoint at any given time.
1438 Given that threads which can't run code in the same address space as the
1439 breakpoint can't really miss the breakpoint, GDB could be taught
1440 to step-over at most one breakpoint per address space (so this info
1441 could move to the address space object if/when GDB is extended).
1442 The set of breakpoints being stepped over will normally be much
1443 smaller than the set of all breakpoints, so a flag in the
1444 breakpoint location structure would be wasteful. A separate list
1445 also saves complexity and run-time, as otherwise we'd have to go
1446 through all breakpoint locations clearing their flag whenever we
1447 start a new sequence. Similar considerations weigh against storing
1448 this info in the thread object. Plus, not all step overs actually
1449 have breakpoint locations -- e.g., stepping past a single-step
1450 breakpoint, or stepping to complete a non-continuable
1451 watchpoint. */
1453
1454/* Record the address of the breakpoint/instruction we're currently
1455 stepping over.
1456 N.B. We record the aspace and address now, instead of say just the thread,
1457 because when we need the info later the thread may be running. */
1458
1459static void
1469
1470/* Called when we're no longer stepping over a breakpoint / an
1471 instruction, so all breakpoints are free to be (re)inserted. */
1472
1473static void
1475{
1476 infrun_debug_printf ("clearing step over info");
1477 step_over_info.aspace = nullptr;
1481}
1482
1483/* See infrun.h. */
1484
1485int
1494
1495/* See infrun.h. */
1496
1497int
1503
1504/* See infrun.h. */
1505
1506int
1511
1512/* Returns true if step-over info is valid. */
1513
1514static bool
1516{
1517 return (step_over_info.aspace != nullptr
1519}
1520
1521
1522/* Displaced stepping. */
1523
1524/* In non-stop debugging mode, we must take special care to manage
1525 breakpoints properly; in particular, the traditional strategy for
1526 stepping a thread past a breakpoint it has hit is unsuitable.
1527 'Displaced stepping' is a tactic for stepping one thread past a
1528 breakpoint it has hit while ensuring that other threads running
1529 concurrently will hit the breakpoint as they should.
1530
1531 The traditional way to step a thread T off a breakpoint in a
1532 multi-threaded program in all-stop mode is as follows:
1533
1534 a0) Initially, all threads are stopped, and breakpoints are not
1535 inserted.
1536 a1) We single-step T, leaving breakpoints uninserted.
1537 a2) We insert breakpoints, and resume all threads.
1538
1539 In non-stop debugging, however, this strategy is unsuitable: we
1540 don't want to have to stop all threads in the system in order to
1541 continue or step T past a breakpoint. Instead, we use displaced
1542 stepping:
1543
1544 n0) Initially, T is stopped, other threads are running, and
1545 breakpoints are inserted.
1546 n1) We copy the instruction "under" the breakpoint to a separate
1547 location, outside the main code stream, making any adjustments
1548 to the instruction, register, and memory state as directed by
1549 T's architecture.
1550 n2) We single-step T over the instruction at its new location.
1551 n3) We adjust the resulting register and memory state as directed
1552 by T's architecture. This includes resetting T's PC to point
1553 back into the main instruction stream.
1554 n4) We resume T.
1555
1556 This approach depends on the following gdbarch methods:
1557
1558 - gdbarch_max_insn_length and gdbarch_displaced_step_location
1559 indicate where to copy the instruction, and how much space must
1560 be reserved there. We use these in step n1.
1561
1562 - gdbarch_displaced_step_copy_insn copies an instruction to a new
1563 address, and makes any necessary adjustments to the instruction,
1564 register contents, and memory. We use this in step n1.
1565
1566 - gdbarch_displaced_step_fixup adjusts registers and memory after
1567 we have successfully single-stepped the instruction, to yield the
1568 same effect the instruction would have had if we had executed it
1569 at its original address. We use this in step n3.
1570
1571 The gdbarch_displaced_step_copy_insn and
1572 gdbarch_displaced_step_fixup functions must be written so that
1573 copying an instruction with gdbarch_displaced_step_copy_insn,
1574 single-stepping across the copied instruction, and then applying
1575 gdbarch_displaced_step_fixup should have the same effects on the
1576 thread's memory and registers as stepping the instruction in place
1577 would have. Exactly which responsibilities fall to the copy and
1578 which fall to the fixup is up to the author of those functions.
1579
1580 See the comments in gdbarch.sh for details.
1581
1582 Note that displaced stepping and software single-step cannot
1583 currently be used in combination, although with some care I think
1584 they could be made to. Software single-step works by placing
1585 breakpoints on all possible subsequent instructions; if the
1586 displaced instruction is a PC-relative jump, those breakpoints
1587 could fall in very strange places --- on pages that aren't
1588 executable, or at addresses that are not proper instruction
1589 boundaries. (We do generally let other threads run while we wait
1590 to hit the software single-step breakpoint, and they might
1591 encounter such a corrupted instruction.) One way to work around
1592 this would be to have gdbarch_displaced_step_copy_insn fully
1593 simulate the effect of PC-relative instructions (and return NULL)
1594 on architectures that use software single-stepping.
1595
1596 In non-stop mode, we can have independent and simultaneous step
1597 requests, so more than one thread may need to simultaneously step
1598 over a breakpoint. The current implementation assumes there is
1599 only one scratch space per process. In this case, we have to
1600 serialize access to the scratch space. If thread A wants to step
1601 over a breakpoint, but we are currently waiting for some other
1602 thread to complete a displaced step, we leave thread A stopped and
1603 place it in the displaced_step_request_queue. Whenever a displaced
1604 step finishes, we pick the next thread in the queue and start a new
1605 displaced step operation on it. See displaced_step_prepare and
1606 displaced_step_finish for details. */
1607
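
/* [Editor's illustration, not part of infrun.c] A toy, self-contained
   simulation of the n1-n4 sequence described above.  All names here
   (toy_thread, INSN_LEN, the buffers) are invented for the example;
   real displaced stepping delegates each step to the gdbarch hooks
   named in the comment.  The point is only to make the copy / step /
   fix-up / resume order concrete.  */

#include <stdio.h>
#include <string.h>

#define INSN_LEN 4

struct toy_thread
{
  unsigned long pc;                /* program counter */
  unsigned char code[64];          /* main instruction stream */
  unsigned char scratch[INSN_LEN]; /* scratch pad outside the code stream */
};

int
main (void)
{
  struct toy_thread t = { .pc = 8 };
  memset (t.code, 0x90, sizeof (t.code)); /* stand-in for real instructions */

  /* n1: copy the instruction "under" the breakpoint (at PC) into the
     scratch area instead of executing it in place.  */
  memcpy (t.scratch, &t.code[t.pc], INSN_LEN);

  /* n2: single-step the copy.  Here we merely pretend, by advancing an
     imaginary scratch-area PC past the copied instruction.  */
  unsigned long stepped = INSN_LEN;

  /* n3: fix up state so the net effect matches in-place execution: the
     real PC advances past the original instruction, not into the
     scratch buffer.  */
  t.pc += stepped;

  /* n4: the thread would now be resumed normally.  */
  printf ("pc after displaced step: %lu\n", t.pc);
  return 0;
}
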
1608/* Return true if THREAD is doing a displaced step. */
1609
1610static bool
1612{
1613 gdb_assert (thread != nullptr);
1614
1615 return thread->displaced_step_state.in_progress ();
1616}
1617
1618/* Return true if INF has a thread doing a displaced step. */
1619
1620static bool
1622{
1623 return inf->displaced_step_state.in_progress_count > 0;
1624}
1625
1626/* Return true if any thread is doing a displaced step. */
1627
1628static bool
1630{
1632 {
1634 return true;
1635 }
1636
1637 return false;
1638}
1639
1640static void
1642{
1643 inf->displaced_step_state.reset ();
1644 inf->thread_waiting_for_vfork_done = nullptr;
1645}
1646
1647static void
1649{
1650 /* If some threads were doing a displaced step in this inferior at the
1651 moment of the exec, they no longer exist. Even if the exec'ing thread
1652 was doing a displaced step, we don't want to do any fixup nor restore displaced
1653 stepping buffer bytes. */
1654 follow_inf->displaced_step_state.reset ();
1655
1656 for (thread_info *thread : follow_inf->threads ())
1657 thread->displaced_step_state.reset ();
1658
1659 /* Since an in-line step is done with everything else stopped, if there was
1660 one in progress at the time of the exec, it must have been the exec'ing
1661 thread. */
1663
1664 follow_inf->thread_waiting_for_vfork_done = nullptr;
1665}
1666
1667/* If ON, and the architecture supports it, GDB will use displaced
1668 stepping to step over breakpoints. If OFF, or if the architecture
1669 doesn't support it, GDB will instead use the traditional
1670 hold-and-step approach. If AUTO (which is the default), GDB will
1671 decide which technique to use to step over breakpoints depending on
1672 whether the target works in a non-stop way (see use_displaced_stepping). */
1673
1675
1676static void
1677show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1678 struct cmd_list_element *c,
1679 const char *value)
1680{
1682 gdb_printf (file,
1683 _("Debugger's willingness to use displaced stepping "
1684 "to step over breakpoints is %s (currently %s).\n"),
1685 value, target_is_non_stop_p () ? "on" : "off");
1686 else
1687 gdb_printf (file,
1688 _("Debugger's willingness to use displaced stepping "
1689 "to step over breakpoints is %s.\n"), value);
1690}
1691
1692/* Return true if the gdbarch implements the required methods to use
1693 displaced stepping. */
1694
1695static bool
1697{
1698 /* Only check for the presence of `prepare`. The gdbarch verification ensures
1699 that if `prepare` is provided, so is `finish`. */
1701}
1702
1703/* Return non-zero if displaced stepping can/should be used to step
1704 over breakpoints of thread TP. */
1705
1706static bool
1708{
1709 /* If the user disabled it explicitly, don't use displaced stepping. */
1711 return false;
1712
1713 /* If "auto", only use displaced stepping if the target operates in a non-stop
1714 way. */
1716 && !target_is_non_stop_p ())
1717 return false;
1718
1720
1721 /* If the architecture doesn't implement displaced stepping, don't use
1722 it. */
1724 return false;
1725
1726 /* If recording, don't use displaced stepping. */
1727 if (find_record_target () != nullptr)
1728 return false;
1729
1730 /* If displaced stepping failed before for this inferior, don't bother trying
1731 again. */
1733 return false;
1734
1735 return true;
1736}
1737
1738/* Simple function wrapper around displaced_step_thread_state::reset. */
1739
1740static void
1742{
1743 displaced->reset ();
1744}
1745
1746/* A cleanup that wraps displaced_step_reset. We use this instead of, say,
1747 SCOPE_EXIT, because it needs to be discardable with "cleanup.release ()". */
1748
1750
1751/* Prepare to single-step, using displaced stepping.
1752
1753 Note that we cannot use displaced stepping when we have a signal to
1754 deliver. If we have a signal to deliver and an instruction to step
1755 over, then after the step, there will be no indication from the
1756 target whether the thread entered a signal handler or ignored the
1757 signal and stepped over the instruction successfully --- both cases
1758 result in a simple SIGTRAP. In the first case we mustn't do a
1759 fixup, and in the second case we must --- but we can't tell which.
1760 Comments in the code for 'random signals' in handle_inferior_event
1761 explain how we handle this case instead.
1762
1763 Returns DISPLACED_STEP_PREPARE_STATUS_OK if preparing was successful -- this
1764 thread is going to be stepped now; DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE
1765 if displaced stepping this thread got queued; or
1766 DISPLACED_STEP_PREPARE_STATUS_CANT if this instruction can't be displaced
1767 stepped. */
1768
1771{
1773 struct gdbarch *gdbarch = regcache->arch ();
1774 displaced_step_thread_state &disp_step_thread_state
1776
1777 /* We should never reach this function if the architecture does not
1778 support displaced stepping. */
1780
1781 /* Nor if the thread isn't meant to step over a breakpoint. */
1782 gdb_assert (tp->control.trap_expected);
1783
1784 /* Disable range stepping while executing in the scratch pad. We
1785 want a single-step even if executing the displaced instruction in
1786 the scratch buffer lands within the stepping range (e.g., a
1787 jump/branch). */
1788 tp->control.may_range_step = 0;
1789
1790 /* We are about to start a displaced step for this thread. If one is already
1791 in progress, something's wrong. */
1792 gdb_assert (!disp_step_thread_state.in_progress ());
1793
1795 {
1796 /* The gdbarch tells us it's not worth asking to try a prepare because
1797 it is likely that it will return unavailable, so don't bother asking. */
1798
1799 displaced_debug_printf ("deferring step of %s",
1800 tp->ptid.to_string ().c_str ());
1801
1804 }
1805
1806 displaced_debug_printf ("displaced-stepping %s now",
1807 tp->ptid.to_string ().c_str ());
1808
1809 scoped_restore_current_thread restore_thread;
1810
1811 switch_to_thread (tp);
1812
1813 CORE_ADDR original_pc = regcache_read_pc (regcache);
1814 CORE_ADDR displaced_pc;
1815
1816 /* Display the instruction we are going to displaced step. */
1817 if (debug_displaced)
1818 {
1819 string_file tmp_stream;
1820 int dislen = gdb_print_insn (gdbarch, original_pc, &tmp_stream,
1821 nullptr);
1822
1823 if (dislen > 0)
1824 {
1825 gdb::byte_vector insn_buf (dislen);
1826 read_memory (original_pc, insn_buf.data (), insn_buf.size ());
1827
1828 std::string insn_bytes = bytes_to_string (insn_buf);
1829
1830 displaced_debug_printf ("original insn %s: %s \t %s",
1831 paddress (gdbarch, original_pc),
1832 insn_bytes.c_str (),
1833 tmp_stream.string ().c_str ());
1834 }
1835 else
1836 displaced_debug_printf ("original insn %s: invalid length: %d",
1837 paddress (gdbarch, original_pc), dislen);
1838 }
1839
1840 displaced_step_prepare_status status
1841 = gdbarch_displaced_step_prepare (gdbarch, tp, displaced_pc);
1842
1843 if (status == DISPLACED_STEP_PREPARE_STATUS_CANT)
1844 {
1845 displaced_debug_printf ("failed to prepare (%s)",
1846 tp->ptid.to_string ().c_str ());
1847
1848 return DISPLACED_STEP_PREPARE_STATUS_CANT;
1849 }
1850 else if (status == DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE)
1851 {
1852 /* Not enough displaced stepping resources available, defer this
1853 request by placing it in the queue. */
1854
1855 displaced_debug_printf ("not enough resources available, "
1856 "deferring step of %s",
1857 tp->ptid.to_string ().c_str ());
1858
1859 global_thread_step_over_chain_enqueue (tp);
1860
1861 return DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE;
1862 }
1863
1864 gdb_assert (status == DISPLACED_STEP_PREPARE_STATUS_OK);
1865
1866 /* Save the information we need to fix things up if the step
1867 succeeds. */
1868 disp_step_thread_state.set (gdbarch);
1869
1870 tp->inf->displaced_step_state.in_progress_count++;
1871
1872 displaced_debug_printf ("prepared successfully thread=%s, "
1873 "original_pc=%s, displaced_pc=%s",
1874 tp->ptid.to_string ().c_str (),
1875 paddress (gdbarch, original_pc),
1876 paddress (gdbarch, displaced_pc));
1877
1878 /* Display the new displaced instruction(s). */
1879 if (debug_displaced)
1880 {
1881 string_file tmp_stream;
1882 CORE_ADDR addr = displaced_pc;
1883
1884 /* If displaced stepping is going to use h/w single step then we know
1885 that the replacement instruction can only be a single instruction,
1886 in that case set the end address at the next byte.
1887
1888 Otherwise the displaced stepping copy instruction routine could
1889 have generated multiple instructions, and all we know is that they
1890 must fit within the LEN bytes of the buffer. */
1891 CORE_ADDR end
1892 = (gdbarch_displaced_step_hw_singlestep (gdbarch)
1893 ? addr + 1 : addr + gdbarch_displaced_step_buffer_length (gdbarch));
1894
1895 while (addr < end)
1896 {
1897 int dislen = gdb_print_insn (gdbarch, addr, &tmp_stream, nullptr);
1898 if (dislen <= 0)
1899 {
1900 displaced_debug_printf
1901 ("replacement insn %s: invalid length: %d",
1902 paddress (gdbarch, addr), dislen);
1903 break;
1904 }
1905
1906 gdb::byte_vector insn_buf (dislen);
1907 read_memory (addr, insn_buf.data (), insn_buf.size ());
1908
1909 std::string insn_bytes = bytes_to_string (insn_buf);
1910 std::string insn_str = tmp_stream.release ();
1911 displaced_debug_printf ("replacement insn %s: %s \t %s",
1912 paddress (gdbarch, addr),
1913 insn_bytes.c_str (),
1914 insn_str.c_str ());
1915 addr += dislen;
1916 }
1917 }
1918
1919 return DISPLACED_STEP_PREPARE_STATUS_OK;
1920}
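/* For reference, resume_1 below consumes the three prepare statuses roughly
   like this (a condensed sketch of the existing caller, not new logic):
   UNAVAILABLE means the request was queued, so the caller clears
   trap_expected and returns; CANT means fall back to an in-line step-over;
   OK means the PC now points into the scratch pad.

     displaced_step_prepare_status prepare_status
       = displaced_step_prepare (tp);

     if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE)
       {
         tp->control.trap_expected = 0;
         return;
       }
     else if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_CANT)
       {
         ...
       }
     else if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_OK)
       {
         ...
       }  */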
1921
1922/* Wrapper for displaced_step_prepare_throw that disables further
1923 attempts at displaced stepping if we get a memory error. */
1924
1925static displaced_step_prepare_status
1926displaced_step_prepare (thread_info *thread)
1927{
1928 displaced_step_prepare_status status
1929 = DISPLACED_STEP_PREPARE_STATUS_CANT;
1930
1931 try
1932 {
1933 status = displaced_step_prepare_throw (thread);
1934 }
1935 catch (const gdb_exception_error &ex)
1936 {
1937 if (ex.error != MEMORY_ERROR
1938 && ex.error != NOT_SUPPORTED_ERROR)
1939 throw;
1940
1941 infrun_debug_printf ("caught exception, disabling displaced stepping: %s",
1942 ex.what ());
1943
1944 /* Be verbose if "set displaced-stepping" is "on", silent if
1945 "auto". */
1946 if (can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
1947 {
1948 warning (_("disabling displaced stepping: %s"),
1949 ex.what ());
1950 }
1951
1952 /* Disable further displaced stepping attempts. */
1953 thread->inf->displaced_step_state.failed_before = 1;
1954 }
1955
1956 return status;
1957}
1958
1959/* If we displaced stepped an instruction successfully, adjust registers and
1960 memory to yield the same effect the instruction would have had if we had
1961 executed it at its original address, and return
1962 DISPLACED_STEP_FINISH_STATUS_OK. If the instruction didn't complete,
1963 relocate the PC and return DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED.
1964
1965 If the thread wasn't displaced stepping, return
1966 DISPLACED_STEP_FINISH_STATUS_OK as well. */
1967
1968static displaced_step_finish_status
1969displaced_step_finish (thread_info *event_thread,
1970 const target_waitstatus &event_status)
1971{
1972 displaced_step_thread_state *displaced = &event_thread->displaced_step_state;
1973
1974 /* Was this thread performing a displaced step? */
1975 if (!displaced->in_progress ())
1976 return DISPLACED_STEP_FINISH_STATUS_OK;
1977
1978 gdb_assert (event_thread->inf->displaced_step_state.in_progress_count > 0);
1979 event_thread->inf->displaced_step_state.in_progress_count--;
1980
1981 /* Fixup may need to read memory/registers. Switch to the thread
1982 that we're fixing up. Also, target_stopped_by_watchpoint checks
1983 the current thread, and displaced_step_restore performs ptid-dependent
1984 memory accesses using current_inferior(). */
1985 switch_to_thread (event_thread);
1986
1987 displaced_step_reset_cleanup cleanup (displaced);
1988
1989 /* Do the fixup, and release the resources acquired to do the displaced
1990 step. */
1991 return gdbarch_displaced_step_finish (displaced->get_original_gdbarch (),
1992 event_thread, event_status);
1993}
1994
1995/* Data to be passed around while handling an event. This data is
1996 discarded between events. */
1997struct execution_control_state
1998{
1999 explicit execution_control_state (thread_info *thr = nullptr)
2000 : ptid (thr == nullptr ? null_ptid : thr->ptid),
2001 event_thread (thr)
2002 {
2003 }
2004
2006 ptid_t ptid;
2007 /* The thread that got the event, if this was a thread event; NULL
2008 otherwise. */
2009 thread_info *event_thread;
2010
2013 CORE_ADDR stop_func_alt_start = 0;
2014 CORE_ADDR stop_func_start = 0;
2015 CORE_ADDR stop_func_end = 0;
2016 const char *stop_func_name = nullptr;
2017 int wait_some_more = 0;
2018
2019 /* True if the event thread hit the single-step breakpoint of
2020 another thread. Thus the event doesn't cause a stop, the thread
2021 needs to be single-stepped past the single-step breakpoint before
2022 we can switch back to the original stepping thread. */
2023 int hit_singlestep_breakpoint = 0;
2024};
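/* Typical use of this state, as in start_step_over and the proceed helpers
   below (a sketch of the existing pattern, not a separate interface):

     execution_control_state ecs (tp);
     switch_to_thread (tp);
     keep_going_pass_signal (&ecs);
     if (!ecs.wait_some_more)
       error (_("Command aborted."));  */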
2025
2026static void keep_going_pass_signal (struct execution_control_state *ecs);
2027static void prepare_to_wait (struct execution_control_state *ecs);
2028static bool keep_going_stepped_thread (struct thread_info *tp);
2029static step_over_what thread_still_needs_step_over (struct thread_info *tp);
2030
2031/* Are there any pending step-over requests? If so, run all we can
2032 now and return true. Otherwise, return false. */
2033
2034static bool
2035start_step_over (void)
2036{
2038
2039 /* Don't start a new step-over if we already have an in-line
2040 step-over operation ongoing. */
2041 if (step_over_info_valid_p ())
2042 return false;
2043
2044 /* Steal the global thread step over chain. As we try to initiate displaced
2045 steps, threads will be enqueued in the global chain if no buffers are
2046 available. If we iterated on the global chain directly, we might iterate
2047 indefinitely. */
2048 thread_step_over_list threads_to_step
2049 = std::move (global_thread_step_over_list);
2050
2051 infrun_debug_printf ("stealing global queue of threads to step, length = %d",
2052 thread_step_over_chain_length (threads_to_step));
2053
2054 bool started = false;
2055
2056 /* On scope exit (whatever the reason, return or exception), if there are
2057 threads left in the THREADS_TO_STEP chain, put back these threads in the
2058 global list. */
2059 SCOPE_EXIT
2060 {
2061 if (threads_to_step.empty ())
2062 infrun_debug_printf ("step-over queue now empty");
2063 else
2064 {
2065 infrun_debug_printf ("putting back %d threads to step in global queue",
2066 thread_step_over_chain_length (threads_to_step));
2067
2068 global_thread_step_over_chain_enqueue_chain
2069 (std::move (threads_to_step));
2070 }
2071 };
2072
2073 thread_step_over_list_safe_range range
2074 = make_thread_step_over_list_safe_range (threads_to_step);
2075
2076 for (thread_info *tp : range)
2077 {
2078 step_over_what step_what;
2079 int must_be_in_line;
2080
2081 gdb_assert (!tp->stop_requested);
2082
2083 if (tp->inf->displaced_step_state.unavailable)
2084 {
2085 /* The arch told us to not even try preparing another displaced step
2086 for this inferior. Just leave the thread in THREADS_TO_STEP, it
2087 will get moved to the global chain on scope exit. */
2088 continue;
2089 }
2090
2091 if (tp->inf->thread_waiting_for_vfork_done != nullptr)
2092 {
2093 /* When we stop all threads, handling a vfork, any thread in the step
2094 over chain remains there. A user could also try to continue a
2095 thread stopped at a breakpoint while another thread is waiting for
2096 a vfork-done event. In any case, we don't want to start a step
2097 over right now. */
2098 continue;
2099 }
2100
2101 /* Remove thread from the THREADS_TO_STEP chain. If anything goes wrong
2102 while we try to prepare the displaced step, we don't add it back to
2103 the global step over chain. This is to avoid a thread staying in the
2104 step over chain indefinitely if something goes wrong when resuming it.
2105 If the error is intermittent and it still needs a step over, it will
2106 get enqueued again when we try to resume it normally. */
2107 threads_to_step.erase (threads_to_step.iterator_to (*tp));
2108
2109 step_what = thread_still_needs_step_over (tp);
2110 must_be_in_line = ((step_what & STEP_OVER_WATCHPOINT)
2111 || ((step_what & STEP_OVER_BREAKPOINT)
2112 && !use_displaced_stepping (tp)));
2113
2114 /* We currently stop all threads of all processes to step-over
2115 in-line. If we need to start a new in-line step-over, let
2116 any pending displaced steps finish first. */
2117 if (must_be_in_line && displaced_step_in_progress_any_thread ())
2118 {
2119 global_thread_step_over_chain_enqueue (tp);
2120 continue;
2121 }
2122
2123 if (tp->control.trap_expected
2124 || tp->resumed ()
2125 || tp->executing ())
2126 {
2127 internal_error ("[%s] has inconsistent state: "
2128 "trap_expected=%d, resumed=%d, executing=%d\n",
2129 tp->ptid.to_string ().c_str (),
2130 tp->control.trap_expected,
2131 tp->resumed (),
2132 tp->executing ());
2133 }
2134
2135 infrun_debug_printf ("resuming [%s] for step-over",
2136 tp->ptid.to_string ().c_str ());
2137
2138 /* keep_going_pass_signal skips the step-over if the breakpoint
2139 is no longer inserted. In all-stop, we want to keep looking
2140 for a thread that needs a step-over instead of resuming TP,
2141 because we wouldn't be able to resume anything else until the
2142 target stops again. In non-stop, the resume always resumes
2143 only TP, so it's OK to let the thread resume freely. */
2144 if (!target_is_non_stop_p () && !step_what)
2145 continue;
2146
2147 switch_to_thread (tp);
2148 execution_control_state ecs (tp);
2149 keep_going_pass_signal (&ecs);
2150
2151 if (!ecs.wait_some_more)
2152 error (_("Command aborted."));
2153
2154 /* If the thread's step over could not be initiated because no buffers
2155 were available, it was re-added to the global step over chain. */
2156 if (tp->resumed ())
2157 {
2158 infrun_debug_printf ("[%s] was resumed.",
2159 tp->ptid.to_string ().c_str ());
2160 gdb_assert (!thread_is_in_step_over_chain (tp));
2161 }
2162 else
2163 {
2164 infrun_debug_printf ("[%s] was NOT resumed.",
2165 tp->ptid.to_string ().c_str ());
2166 gdb_assert (thread_is_in_step_over_chain (tp));
2167 }
2168
2169 /* If we started a new in-line step-over, we're done. */
2170 if (step_over_info_valid_p ())
2171 {
2172 gdb_assert (tp->control.trap_expected);
2173 started = true;
2174 break;
2175 }
2176
2177 if (!target_is_non_stop_p ())
2178 {
2179 /* On all-stop, shouldn't have resumed unless we needed a
2180 step over. */
2181 gdb_assert (tp->control.trap_expected
2182 || tp->step_after_step_resume_breakpoint);
2183
2184 /* With remote targets (at least), in all-stop, we can't
2185 issue any further remote commands until the program stops
2186 again. */
2187 started = true;
2188 break;
2189 }
2190
2191 /* Either the thread no longer needed a step-over, or a new
2192 displaced stepping sequence started. Even in the latter
2193 case, continue looking. Maybe we can also start another
2194 displaced step on a thread of another process. */
2195 }
2196
2197 return started;
2198}
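/* How proceed below consumes the return value (a condensed restatement of
   that code, not additional behaviour):

     bool step_over_started = start_step_over ();

     if (step_over_info_valid_p ())
       {
         ...   new or ongoing in-line step-over: resume nothing else yet
       }
     else if (step_over_started && !target_is_non_stop_p ())
       {
         ...   displaced step started in all-stop: wait for the next stop
       }
     else
       {
         ...   resume the remaining threads normally
       }  */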
2199
2200/* Update global variables holding ptids to hold NEW_PTID if they were
2201 holding OLD_PTID. */
2202static void
2204 ptid_t old_ptid, ptid_t new_ptid)
2205{
2206 if (inferior_ptid == old_ptid
2207 && current_inferior ()->process_target () == target)
2208 inferior_ptid = new_ptid;
2209}
2210
2211
2212
2213static const char schedlock_off[] = "off";
2214static const char schedlock_on[] = "on";
2215static const char schedlock_step[] = "step";
2216static const char schedlock_replay[] = "replay";
2217static const char *const scheduler_enums[] = {
2218 schedlock_off,
2219 schedlock_on,
2220 schedlock_step,
2221 schedlock_replay,
2222 nullptr
2223};
2224static const char *scheduler_mode = schedlock_replay;
2225static void
2226show_scheduler_mode (struct ui_file *file, int from_tty,
2227 struct cmd_list_element *c, const char *value)
2228{
2229 gdb_printf (file,
2230 _("Mode for locking scheduler "
2231 "during execution is \"%s\".\n"),
2232 value);
2233}
2234
2235static void
2236set_schedlock_func (const char *args, int from_tty, struct cmd_list_element *c)
2237{
2239 {
2241 error (_("Target '%s' cannot support this command."),
2242 target_shortname ());
2243 }
2244}
2245
2246/* True if execution commands resume all threads of all processes by
2247 default; otherwise, resume only threads of the current inferior
2248 process. */
2249bool sched_multi = false;
2250
2251/* Try to setup for software single stepping. Return true if target_resume()
2252 should use hardware single step.
2253
2254 GDBARCH is the current gdbarch. */
2255
2256static bool
2257maybe_software_singlestep (struct gdbarch *gdbarch)
2258{
2259 bool hw_step = true;
2260
2264
2265 return hw_step;
2266}
2267
2268/* See infrun.h. */
2269
2270ptid_t
2271user_visible_resume_ptid (int step)
2272{
2273 ptid_t resume_ptid;
2274
2275 if (non_stop)
2276 {
2277 /* With non-stop mode on, threads are always handled
2278 individually. */
2279 resume_ptid = inferior_ptid;
2280 }
2281 else if ((scheduler_mode == schedlock_on)
2282 || (scheduler_mode == schedlock_step && step))
2283 {
2284 /* User-settable 'scheduler' mode requires solo thread
2285 resume. */
2286 resume_ptid = inferior_ptid;
2287 }
2288 else if ((scheduler_mode == schedlock_replay)
2290 {
2291 /* User-settable 'scheduler' mode requires solo thread resume in replay
2292 mode. */
2293 resume_ptid = inferior_ptid;
2294 }
2295 else if (!sched_multi && target_supports_multi_process ())
2296 {
2297 /* Resume all threads of the current process (and none of other
2298 processes). */
2299 resume_ptid = ptid_t (inferior_ptid.pid ());
2300 }
2301 else
2302 {
2303 /* Resume all threads of all processes. */
2304 resume_ptid = RESUME_ALL;
2305 }
2306
2307 return resume_ptid;
2308}
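/* Summarizing the cases above (a restatement for quick reference, not new
   behaviour):

     non-stop on                                   -> inferior_ptid
     scheduler-locking "on", or "step" while stepping -> inferior_ptid
     scheduler-locking "replay" while replaying    -> inferior_ptid
     schedule-multiple off                         -> ptid_t (inferior_ptid.pid ())
     otherwise                                     -> RESUME_ALL  */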
2309
2310/* See infrun.h. */
2311
2313user_visible_resume_target (ptid_t resume_ptid)
2314{
2315 return (resume_ptid == minus_one_ptid && sched_multi
2316 ? nullptr
2317 : current_inferior ()->process_target ());
2318}
2319
2320/* Find a thread from the inferiors that we'll resume that is waiting
2321 for a vfork-done event. */
2322
2323static thread_info *
2324find_thread_waiting_for_vfork_done ()
2325{
2326 gdb_assert (!target_is_non_stop_p ());
2327
2328 if (sched_multi)
2329 {
2330 for (inferior *inf : all_non_exited_inferiors ())
2331 if (inf->thread_waiting_for_vfork_done != nullptr)
2332 return inf->thread_waiting_for_vfork_done;
2333 }
2334 else
2335 {
2336 inferior *cur_inf = current_inferior ();
2337 if (cur_inf->thread_waiting_for_vfork_done != nullptr)
2338 return cur_inf->thread_waiting_for_vfork_done;
2339 }
2340 return nullptr;
2341}
2342
2343/* Return a ptid representing the set of threads that we will resume,
2344 in the perspective of the target, assuming run control handling
2345 does not require leaving some threads stopped (e.g., stepping past
2346 breakpoint). USER_STEP indicates whether we're about to start the
2347 target for a stepping command. */
2348
2349static ptid_t
2350internal_resume_ptid (int user_step)
2351{
2352 /* In non-stop, we always control threads individually. Note that
2353 the target may always work in non-stop mode even with "set
2354 non-stop off", in which case user_visible_resume_ptid could
2355 return a wildcard ptid. */
2356 if (target_is_non_stop_p ())
2357 return inferior_ptid;
2358
2359 /* The rest of the function assumes non-stop==off and
2360 target-non-stop==off.
2361
2362 If a thread is waiting for a vfork-done event, it means breakpoints are out
2363 for this inferior (well, program space in fact). We don't want to resume
2364 any thread other than the one waiting for vfork done, otherwise these other
2365 threads could miss breakpoints. So if a thread in the resumption set is
2366 waiting for a vfork-done event, resume only that thread.
2367
2368 The resumption set width depends on whether schedule-multiple is on or off.
2369
2370 Note that if the target_resume interface was more flexible, we could be
2371 smarter here when schedule-multiple is on. For example, imagine 3
2372 inferiors with 2 threads each (1.1, 1.2, 2.1, 2.2, 3.1 and 3.2). Threads
2373 2.1 and 3.2 are both waiting for a vfork-done event. Then we could ask the
2374 target(s) to resume:
2375
2376 - All threads of inferior 1
2377 - Thread 2.1
2378 - Thread 3.2
2379
2380 Since we don't have that flexibility (we can only pass one ptid), just
2381 resume the first thread waiting for a vfork-done event we find (e.g. thread
2382 2.1). */
2383 thread_info *thr = find_thread_waiting_for_vfork_done ();
2384 if (thr != nullptr)
2385 {
2386 /* If we have a thread that is waiting for a vfork-done event,
2387 then we should have switched to it earlier. Calling
2388 target_resume with thread scope is only possible when the
2389 current thread matches the thread scope. */
2390 gdb_assert (thr->ptid == inferior_ptid);
2391 gdb_assert (thr->inf->process_target ()
2392 == inferior_thread ()->inf->process_target ());
2393 return thr->ptid;
2394 }
2395
2396 return user_visible_resume_ptid (user_step);
2397}
2398
2399/* Wrapper for target_resume, that handles infrun-specific
2400 bookkeeping. */
2401
2402static void
2403do_target_resume (ptid_t resume_ptid, bool step, enum gdb_signal sig)
2404{
2405 struct thread_info *tp = inferior_thread ();
2406
2407 gdb_assert (!tp->stop_requested);
2408
2409 /* Install inferior's terminal modes. */
2410 target_terminal::inferior ();
2411
2412 /* Avoid confusing the next resume, if the next stop/resume
2413 happens to apply to another thread. */
2414 tp->set_stop_signal (GDB_SIGNAL_0);
2415
2416 /* Advise target which signals may be handled silently.
2417
2418 If we have removed breakpoints because we are stepping over one
2419 in-line (in any thread), we need to receive all signals to avoid
2420 accidentally skipping a breakpoint during execution of a signal
2421 handler.
2422
2423 Likewise if we're displaced stepping, otherwise a trap for a
2424 breakpoint in a signal handler might be confused with the
2425 displaced step finishing. We don't make displaced_step_finish
2426 distinguish the two cases instead, because:
2427
2428 - a backtrace while stopped in the signal handler would show the
2429 scratch pad as frame older than the signal handler, instead of
2430 the real mainline code.
2431
2432 - when the thread is later resumed, the signal handler would
2433 return to the scratch pad area, which would no longer be
2434 valid. */
2438 else
2440
2441 infrun_debug_printf ("resume_ptid=%s, step=%d, sig=%s",
2442 resume_ptid.to_string ().c_str (),
2443 step, gdb_signal_to_symbol_string (sig));
2444
2445 target_resume (resume_ptid, step, sig);
2446}
2447
2448/* Resume the inferior. SIG is the signal to give the inferior
2449 (GDB_SIGNAL_0 for none). Note: don't call this directly; instead
2450 call 'resume', which handles exceptions. */
2451
2452static void
2453resume_1 (enum gdb_signal sig)
2454{
2455 struct regcache *regcache = get_current_regcache ();
2456 struct gdbarch *gdbarch = regcache->arch ();
2457 struct thread_info *tp = inferior_thread ();
2458 const address_space *aspace = regcache->aspace ();
2459 ptid_t resume_ptid;
2460 /* This represents the user's step vs continue request. When
2461 deciding whether "set scheduler-locking step" applies, it's the
2462 user's intention that counts. */
2463 const int user_step = tp->control.stepping_command;
2464 /* This represents what we'll actually request the target to do.
2465 This can decay from a step to a continue, if e.g., we need to
2466 implement single-stepping with breakpoints (software
2467 single-step). */
2468 bool step;
2469
2470 gdb_assert (!tp->stop_requested);
2471 gdb_assert (!thread_is_in_step_over_chain (tp));
2472
2473 if (tp->has_pending_waitstatus ())
2474 {
2475 infrun_debug_printf
2476 ("thread %s has pending wait "
2477 "status %s (currently_stepping=%d).",
2478 tp->ptid.to_string ().c_str (),
2479 tp->pending_waitstatus ().to_string ().c_str (),
2480 currently_stepping (tp));
2481
2482 tp->inf->process_target ()->threads_executing = true;
2483 tp->set_resumed (true);
2484
2485 /* FIXME: What should we do if we are supposed to resume this
2486 thread with a signal? Maybe we should maintain a queue of
2487 pending signals to deliver. */
2488 if (sig != GDB_SIGNAL_0)
2489 {
2490 warning (_("Couldn't deliver signal %s to %s."),
2491 gdb_signal_to_name (sig),
2492 tp->ptid.to_string ().c_str ());
2493 }
2494
2495 tp->set_stop_signal (GDB_SIGNAL_0);
2496
2497 if (target_can_async_p ())
2498 {
2499 target_async (true);
2500 /* Tell the event loop we have an event to process. */
2501 mark_infrun_async_event_handler ();
2502 }
2503 return;
2504 }
2505
2506 tp->stepped_breakpoint = 0;
2507
2508 /* Depends on stepped_breakpoint. */
2509 step = currently_stepping (tp);
2510
2511 if (current_inferior ()->thread_waiting_for_vfork_done != nullptr)
2512 {
2513 /* Don't try to single-step a vfork parent that is waiting for
2514 the child to get out of the shared memory region (by exec'ing
2515 or exiting). This is particularly important on software
2516 single-step archs, as the child process would trip on the
2517 software single step breakpoint inserted for the parent
2518 process. Since the parent will not actually execute any
2519 instruction until the child is out of the shared region (such
2520 are vfork's semantics), it is safe to simply continue it.
2521 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
2522 the parent, and tell it to `keep_going', which automatically
2523 re-sets it stepping. */
2524 infrun_debug_printf ("resume : clear step");
2525 step = false;
2526 }
2527
2528 CORE_ADDR pc = regcache_read_pc (regcache);
2529
2530 infrun_debug_printf ("step=%d, signal=%s, trap_expected=%d, "
2531 "current thread [%s] at %s",
2532 step, gdb_signal_to_symbol_string (sig),
2534 inferior_ptid.to_string ().c_str (),
2535 paddress (gdbarch, pc));
2536
2537 /* Normally, by the time we reach `resume', the breakpoints are either
2538 removed or inserted, as appropriate. The exception is if we're sitting
2539 at a permanent breakpoint; we need to step over it, but permanent
2540 breakpoints can't be removed. So we have to test for it here. */
2541 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
2542 {
2543 if (sig != GDB_SIGNAL_0)
2544 {
2545 /* We have a signal to pass to the inferior. The resume
2546 may, or may not take us to the signal handler. If this
2547 is a step, we'll need to stop in the signal handler, if
2548 there's one, (if the target supports stepping into
2549 handlers), or in the next mainline instruction, if
2550 there's no handler. If this is a continue, we need to be
2551 sure to run the handler with all breakpoints inserted.
2552 In all cases, set a breakpoint at the current address
2553 (where the handler returns to), and once that breakpoint
2554 is hit, resume skipping the permanent breakpoint. If
2555 that breakpoint isn't hit, then we've stepped into the
2556 signal handler (or hit some other event). We'll delete
2557 the step-resume breakpoint then. */
2558
2559 infrun_debug_printf ("resume: skipping permanent breakpoint, "
2560 "deliver signal first");
2561
2563 tp->control.trap_expected = 0;
2564
2565 if (tp->control.step_resume_breakpoint == nullptr)
2566 {
2567 /* Set a "high-priority" step-resume, as we don't want
2568 user breakpoints at PC to trigger (again) when this
2569 hits. */
2571 gdb_assert (tp->control.step_resume_breakpoint->first_loc ()
2572 .permanent);
2573
2575 }
2576
2578 }
2579 else
2580 {
2581 /* There's no signal to pass, we can go ahead and skip the
2582 permanent breakpoint manually. */
2583 infrun_debug_printf ("skipping permanent breakpoint");
2585 /* Update pc to reflect the new address from which we will
2586 execute instructions. */
2588
2589 if (step)
2590 {
2591 /* We've already advanced the PC, so the stepping part
2592 is done. Now we need to arrange for a trap to be
2593 reported to handle_inferior_event. Set a breakpoint
2594 at the current PC, and run to it. Don't update
2595 prev_pc, because if we end in
2596 switch_back_to_stepped_thread, we want the "expected
2597 thread advanced also" branch to be taken. IOW, we
2598 don't want this thread to step further from PC
2599 (overstep). */
2600 gdb_assert (!step_over_info_valid_p ());
2603
2604 resume_ptid = internal_resume_ptid (user_step);
2605 do_target_resume (resume_ptid, false, GDB_SIGNAL_0);
2606 tp->set_resumed (true);
2607 return;
2608 }
2609 }
2610 }
2611
2612 /* If we have a breakpoint to step over, make sure to do a single
2613 step only. Same if we have software watchpoints. */
2615 tp->control.may_range_step = 0;
2616
2617 /* If displaced stepping is enabled, step over breakpoints by executing a
2618 copy of the instruction at a different address.
2619
2620 We can't use displaced stepping when we have a signal to deliver;
2621 the comments for displaced_step_prepare explain why. The
2622 comments in the handle_inferior event for dealing with 'random
2623 signals' explain what we do instead.
2624
2625 We can't use displaced stepping while we are waiting for a vfork_done
2626 event; displaced stepping would break the vfork child in the same way
2627 that a software single-step breakpoint does. */
2628 if (tp->control.trap_expected
2631 && sig == GDB_SIGNAL_0
2632 && current_inferior ()->thread_waiting_for_vfork_done == nullptr)
2633 {
2634 displaced_step_prepare_status prepare_status
2636
2637 if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE)
2638 {
2639 infrun_debug_printf ("Got placed in step-over queue");
2640
2641 tp->control.trap_expected = 0;
2642 return;
2643 }
2644 else if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_CANT)
2645 {
2646 /* Fallback to stepping over the breakpoint in-line. */
2647
2648 if (target_is_non_stop_p ())
2649 stop_all_threads ("displaced stepping falling back on inline stepping");
2650
2653
2655
2657 }
2658 else if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_OK)
2659 {
2660 /* Update pc to reflect the new address from which we will
2661 execute instructions due to displaced stepping. */
2663
2665 }
2666 else
2667 gdb_assert_not_reached ("Invalid displaced_step_prepare_status "
2668 "value.");
2669 }
2670
2671 /* Do we need to do it the hard way, w/temp breakpoints? */
2672 else if (step)
2674
2675 /* Currently, our software single-step implementation leads to different
2676 results than hardware single-stepping in one situation: when stepping
2677 into delivering a signal which has an associated signal handler,
2678 hardware single-step will stop at the first instruction of the handler,
2679 while software single-step will simply skip execution of the handler.
2680
2681 For now, this difference in behavior is accepted since there is no
2682 easy way to actually implement single-stepping into a signal handler
2683 without kernel support.
2684
2685 However, there is one scenario where this difference leads to follow-on
2686 problems: if we're stepping off a breakpoint by removing all breakpoints
2687 and then single-stepping. In this case, the software single-step
2688 behavior means that even if there is a *breakpoint* in the signal
2689 handler, GDB still would not stop.
2690
2691 Fortunately, we can at least fix this particular issue. We detect
2692 here the case where we are about to deliver a signal while software
2693 single-stepping with breakpoints removed. In this situation, we
2694 revert the decisions to remove all breakpoints and insert single-
2695 step breakpoints, and instead we install a step-resume breakpoint
2696 at the current address, deliver the signal without stepping, and
2697 once we arrive back at the step-resume breakpoint, actually step
2698 over the breakpoint we originally wanted to step over. */
2700 && sig != GDB_SIGNAL_0
2702 {
2703 /* If we have nested signals or a pending signal is delivered
2704 immediately after a handler returns, we might already have
2705 a step-resume breakpoint set on the earlier handler. We cannot
2706 set another step-resume breakpoint; just continue on until the
2707 original breakpoint is hit. */
2708 if (tp->control.step_resume_breakpoint == nullptr)
2709 {
2712 }
2713
2715
2717 tp->control.trap_expected = 0;
2718
2720 }
2721
2722 /* If STEP is set, it's a request to use hardware stepping
2723 facilities. But in that case, we should never
2724 use singlestep breakpoint. */
2725 gdb_assert (!(thread_has_single_step_breakpoints_set (tp) && step));
2726
2727 /* Decide the set of threads to ask the target to resume. */
2728 if (tp->control.trap_expected)
2729 {
2730 /* We're allowing a thread to run past a breakpoint it has
2731 hit, either by single-stepping the thread with the breakpoint
2732 removed, or by displaced stepping, with the breakpoint inserted.
2733 In the former case, we need to single-step only this thread,
2734 and keep others stopped, as they can miss this breakpoint if
2735 allowed to run. That's not really a problem for displaced
2736 stepping, but, we still keep other threads stopped, in case
2737 another thread is also stopped for a breakpoint waiting for
2738 its turn in the displaced stepping queue. */
2739 resume_ptid = inferior_ptid;
2740 }
2741 else
2742 resume_ptid = internal_resume_ptid (user_step);
2743
2745 && step && breakpoint_inserted_here_p (aspace, pc))
2746 {
2747 /* There are two cases where we currently need to step a
2748 breakpoint instruction when we have a signal to deliver:
2749
2750 - See handle_signal_stop where we handle random signals that
2751 could take us out of the stepping range. Normally, in
2752 that case we end up continuing (instead of stepping) over the
2753 signal handler with a breakpoint at PC, but there are cases
2754 where we should _always_ single-step, even if we have a
2755 step-resume breakpoint, like when a software watchpoint is
2756 set. Assuming single-stepping and delivering a signal at the
2757 same time would take us to the signal handler, then we could
2758 have removed the breakpoint at PC to step over it. However,
2759 some hardware step targets (e.g., Mac OS) can't step
2760 into signal handlers, and for those, we need to leave the
2761 breakpoint at PC inserted, as otherwise if the handler
2762 recurses and executes PC again, it'll miss the breakpoint.
2763 So we leave the breakpoint inserted anyway, but we need to
2764 record that we tried to step a breakpoint instruction, so
2765 that adjust_pc_after_break doesn't end up confused.
2766
2767 - In non-stop if we insert a breakpoint (e.g., a step-resume)
2768 in one thread after another thread that was stepping had been
2769 momentarily paused for a step-over. When we re-resume the
2770 stepping thread, it may be resumed from that address with a
2771 breakpoint that hasn't trapped yet. Seen with
2772 gdb.threads/non-stop-fair-events.exp, on targets that don't
2773 do displaced stepping. */
2774
2775 infrun_debug_printf ("resume: [%s] stepped breakpoint",
2776 tp->ptid.to_string ().c_str ());
2777
2778 tp->stepped_breakpoint = 1;
2779
2780 /* Most targets can step a breakpoint instruction, thus
2781 executing it normally. But if this one cannot, just
2782 continue and we will hit it anyway. */
2784 step = false;
2785 }
2786
2787 if (tp->control.may_range_step)
2788 {
2789 /* If we're resuming a thread with the PC out of the step
2790 range, then we're doing some nested/finer run control
2791 operation, like stepping the thread out of the dynamic
2792 linker or the displaced stepping scratch pad. We
2793 shouldn't have allowed a range step then. */
2794 gdb_assert (pc_in_thread_step_range (pc, tp));
2795 }
2796
2797 do_target_resume (resume_ptid, step, sig);
2798 tp->set_resumed (true);
2799}
2800
2801/* Resume the inferior. SIG is the signal to give the inferior
2802 (GDB_SIGNAL_0 for none). This is a wrapper around 'resume_1' that
2803 rolls back state on error. */
2804
2805static void
2806resume (gdb_signal sig)
2807{
2808 try
2809 {
2810 resume_1 (sig);
2811 }
2812 catch (const gdb_exception &ex)
2813 {
2814 /* If resuming is being aborted for any reason, delete any
2815 single-step breakpoint resume_1 may have created, to avoid
2816 confusing the following resumption, and to avoid leaving
2817 single-step breakpoints perturbing other threads, in case
2818 we're running in non-stop mode. */
2819 if (inferior_ptid != null_ptid)
2821 throw;
2822 }
2823}
2824
2825
2826/* Proceeding. */
2827
2828/* See infrun.h. */
2829
2830/* Counter that tracks number of user visible stops. This can be used
2831 to tell whether a command has proceeded the inferior past the
2832 current location. This allows e.g., inferior function calls in
2833 breakpoint commands to not interrupt the command list. When the
2834 call finishes successfully, the inferior is standing at the same
2835 breakpoint as if nothing happened (and so we don't call
2836 normal_stop). */
2837static ULONGEST current_stop_id;
2838
2839/* See infrun.h. */
2840
2841ULONGEST
2843{
2844 return current_stop_id;
2845}
2846
2847/* Called when we report a user visible stop. */
2848
2849static void
2851{
2853}
2854
2855/* Clear out all variables saying what to do when inferior is continued.
2856 First do this, then set the ones you want, then call `proceed'. */
2857
2858static void
2859clear_proceed_status_thread (struct thread_info *tp)
2860{
2861 infrun_debug_printf ("%s", tp->ptid.to_string ().c_str ());
2862
2863 /* If we're starting a new sequence, then the previous finished
2864 single-step is no longer relevant. */
2865 if (tp->has_pending_waitstatus ())
2866 {
2868 {
2869 infrun_debug_printf ("pending event of %s was a finished step. "
2870 "Discarding.",
2871 tp->ptid.to_string ().c_str ());
2872
2875 }
2876 else
2877 {
2879 ("thread %s has pending wait status %s (currently_stepping=%d).",
2880 tp->ptid.to_string ().c_str (),
2881 tp->pending_waitstatus ().to_string ().c_str (),
2882 currently_stepping (tp));
2883 }
2884 }
2885
2886 /* If this signal should not be seen by program, give it zero.
2887 Used for debugging signals. */
2888 if (!signal_pass_state (tp->stop_signal ()))
2889 tp->set_stop_signal (GDB_SIGNAL_0);
2890
2891 tp->release_thread_fsm ();
2892
2893 tp->control.trap_expected = 0;
2894 tp->control.step_range_start = 0;
2895 tp->control.step_range_end = 0;
2896 tp->control.may_range_step = 0;
2900 tp->control.step_start_function = nullptr;
2901 tp->stop_requested = 0;
2902
2903 tp->control.stop_step = 0;
2904
2905 tp->control.proceed_to_finish = 0;
2906
2907 tp->control.stepping_command = 0;
2908
2909 /* Discard any remaining commands or status from previous stop. */
2911}
2912
2913/* Notify the current interpreter and observers that the target is about to
2914 proceed. */
2915
2916static void
2922
2923void
2924clear_proceed_status (int step)
2925{
2926 /* With scheduler-locking replay, stop replaying other threads if we're
2927 not replaying the user-visible resume ptid.
2928
2929 This is a convenience feature to not require the user to explicitly
2930 stop replaying the other threads. We're assuming that the user's
2931 intent is to resume tracing the recorded process. */
2933 && target_record_is_replaying (minus_one_ptid)
2937
2938 if (!non_stop && inferior_ptid != null_ptid)
2939 {
2940 ptid_t resume_ptid = user_visible_resume_ptid (step);
2941 process_stratum_target *resume_target
2942 = user_visible_resume_target (resume_ptid);
2943
2944 /* In all-stop mode, delete the per-thread status of all threads
2945 we're about to resume, implicitly and explicitly. */
2946 for (thread_info *tp : all_non_exited_threads (resume_target, resume_ptid))
2947 clear_proceed_status_thread (tp);
2948 }
2949
2950 if (inferior_ptid != null_ptid)
2951 {
2952 struct inferior *inferior;
2953
2954 if (non_stop)
2955 {
2956 /* If in non-stop mode, only delete the per-thread status of
2957 the current thread. */
2958 clear_proceed_status_thread (inferior_thread ());
2959 }
2960
2961 inferior = current_inferior ();
2962 inferior->control.stop_soon = NO_STOP_QUIETLY;
2963 }
2964
2966}
2967
2968/* Returns true if TP is still stopped at a breakpoint that needs
2969 stepping-over in order to make progress. If the breakpoint is gone
2970 meanwhile, we can skip the whole step-over dance. */
2971
2972static bool
2974{
2976 {
2977 struct regcache *regcache = get_thread_regcache (tp);
2978
2982 return true;
2983
2985 }
2986
2987 return false;
2988}
2989
2990/* Check whether thread TP still needs to start a step-over in order
2991 to make progress when resumed. Returns a bitwise or of enum
2992 step_over_what bits, indicating what needs to be stepped over. */
2993
2994static step_over_what
2996{
2997 step_over_what what = 0;
2998
3000 what |= STEP_OVER_BREAKPOINT;
3001
3004 what |= STEP_OVER_WATCHPOINT;
3005
3006 return what;
3007}
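/* For example, start_step_over above derives the stepping strategy from
   this mask roughly like so (a restatement of that code, not new logic):

     step_over_what step_what = thread_still_needs_step_over (tp);
     bool must_be_in_line
       = ((step_what & STEP_OVER_WATCHPOINT)
          || ((step_what & STEP_OVER_BREAKPOINT)
              && !use_displaced_stepping (tp)));  */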
3008
3009/* Returns true if scheduler locking applies. STEP indicates whether
3010 we're about to do a step/next-like command to a thread. */
3011
3012static bool
3014{
3015 return (scheduler_mode == schedlock_on
3019 && target_record_will_replay (minus_one_ptid,
3021}
3022
3023/* Set process_stratum_target::COMMIT_RESUMED_STATE in all target
3024 stacks that have threads executing and don't have threads with
3025 pending events. */
3026
3027static void
3029{
3030 scoped_restore_current_thread restore_thread;
3031
3033 {
3034 process_stratum_target *proc_target = inf->process_target ();
3035
3036 if (proc_target->commit_resumed_state)
3037 {
3038 /* We already set this in a previous iteration, via another
3039 inferior sharing the process_stratum target. */
3040 continue;
3041 }
3042
3043 /* If the target has no resumed threads, it would be useless to
3044 ask it to commit the resumed threads. */
3045 if (!proc_target->threads_executing)
3046 {
3047 infrun_debug_printf ("not requesting commit-resumed for target "
3048 "%s, no resumed threads",
3049 proc_target->shortname ());
3050 continue;
3051 }
3052
3053 /* As an optimization, if a thread from this target has some
3054 status to report, handle it before requiring the target to
3055 commit its resumed threads: handling the status might lead to
3056 resuming more threads. */
3057 if (proc_target->has_resumed_with_pending_wait_status ())
3058 {
3059 infrun_debug_printf ("not requesting commit-resumed for target %s, a"
3060 " thread has a pending waitstatus",
3061 proc_target->shortname ());
3062 continue;
3063 }
3064
3066
3068 {
3069 infrun_debug_printf ("not requesting commit-resumed for target %s, "
3070 "target has pending events",
3071 proc_target->shortname ());
3072 continue;
3073 }
3074
3075 infrun_debug_printf ("enabling commit-resumed for target %s",
3076 proc_target->shortname ());
3077
3078 proc_target->commit_resumed_state = true;
3079 }
3080}
3081
3082/* See infrun.h. */
3083
3084void
3086{
3087 scoped_restore_current_thread restore_thread;
3088
3090 {
3091 process_stratum_target *proc_target = inf->process_target ();
3092
3093 if (!proc_target->commit_resumed_state)
3094 continue;
3095
3097
3098 infrun_debug_printf ("calling commit_resumed for target %s",
3099 proc_target->shortname());
3100
3102 }
3103}
3104
3105/* To track nesting of scoped_disable_commit_resumed objects, ensuring
3106 that only the outermost one attempts to re-enable
3107 commit-resumed. */
3108static bool enable_commit_resumed = true;
3109
3110/* See infrun.h. */
3111
3113 (const char *reason)
3114 : m_reason (reason),
3115 m_prev_enable_commit_resumed (enable_commit_resumed)
3116{
3117 infrun_debug_printf ("reason=%s", m_reason);
3118
3119 enable_commit_resumed = false;
3120
3122 {
3123 process_stratum_target *proc_target = inf->process_target ();
3124
3126 {
3127 /* This is the outermost instance: force all
3128 COMMIT_RESUMED_STATE to false. */
3129 proc_target->commit_resumed_state = false;
3130 }
3131 else
3132 {
3133 /* This is not the outermost instance, we expect
3134 COMMIT_RESUMED_STATE to have been cleared by the
3135 outermost instance. */
3136 gdb_assert (!proc_target->commit_resumed_state);
3137 }
3138 }
3139}
3140
3141/* See infrun.h. */
3142
3143void
3145{
3146 if (m_reset)
3147 return;
3148 m_reset = true;
3149
3150 infrun_debug_printf ("reason=%s", m_reason);
3151
3152 gdb_assert (!enable_commit_resumed);
3153
3155
3157 {
3158 /* This is the outermost instance, re-enable
3159 COMMIT_RESUMED_STATE on the targets where it's possible. */
3161 }
3162 else
3163 {
3164 /* This is not the outermost instance, we expect
3165 COMMIT_RESUMED_STATE to still be false. */
3167 {
3168 process_stratum_target *proc_target = inf->process_target ();
3169 gdb_assert (!proc_target->commit_resumed_state);
3170 }
3171 }
3172}
3173
3174/* See infrun.h. */
3175
3180
3181/* See infrun.h. */
3182
3183void
3189
3190/* See infrun.h. */
3191
3193 (const char *reason)
3194 : m_reason (reason),
3195 m_prev_enable_commit_resumed (enable_commit_resumed)
3196{
3197 infrun_debug_printf ("reason=%s", m_reason);
3198
3200 {
3201 enable_commit_resumed = true;
3202
3203 /* Re-enable COMMIT_RESUMED_STATE on the targets where it's
3204 possible. */
3206
3208 }
3209}
3210
3211/* See infrun.h. */
3212
3214{
3215 infrun_debug_printf ("reason=%s", m_reason);
3216
3217 gdb_assert (enable_commit_resumed);
3218
3220
3222 {
3223 /* Force all COMMIT_RESUMED_STATE back to false. */
3225 {
3226 process_stratum_target *proc_target = inf->process_target ();
3227 proc_target->commit_resumed_state = false;
3228 }
3229 }
3230}
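/* Typical usage of the disable/enable pair, as in proceed below (a sketch
   of the existing pattern, not a new interface):

     {
       scoped_disable_commit_resumed disable_commit_resumed ("proceeding");

       ... decide what to resume, possibly queueing step-overs ...

       disable_commit_resumed.reset_and_commit ();
     }

   so that the target sees a single commit-resumed request once all
   resumption decisions have been made.  */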
3231
3232/* Check that all the targets we're about to resume are in non-stop
3233 mode. Ideally, we'd only care whether all targets support
3234 target-async, but we're not there yet. E.g., stop_all_threads
3235 doesn't know how to handle all-stop targets. Also, the remote
3236 protocol in all-stop mode is synchronous, irrespective of
3237 target-async, which means that things like a breakpoint re-set
3238 triggered by one target would try to read memory from all targets
3239 and fail. */
3240
3241static void
3243{
3244 if (!non_stop && resume_target == nullptr)
3245 {
3246 scoped_restore_current_thread restore_thread;
3247
3248 /* This is used to track whether we're resuming more than one
3249 target. */
3250 process_stratum_target *first_connection = nullptr;
3251
3252 /* The first inferior we see with a target that does not work in
3253 always-non-stop mode. */
3254 inferior *first_not_non_stop = nullptr;
3255
3257 {
3259
3260 if (!target_has_execution ())
3261 continue;
3262
3263 process_stratum_target *proc_target
3265
3266 if (!target_is_non_stop_p ())
3267 first_not_non_stop = inf;
3268
3269 if (first_connection == nullptr)
3270 first_connection = proc_target;
3271 else if (first_connection != proc_target
3272 && first_not_non_stop != nullptr)
3273 {
3274 switch_to_inferior_no_thread (first_not_non_stop);
3275
3276 proc_target = current_inferior ()->process_target();
3277
3278 error (_("Connection %d (%s) does not support "
3279 "multi-target resumption."),
3280 proc_target->connection_number,
3281 make_target_connection_string (proc_target).c_str ());
3282 }
3283 }
3284 }
3285}
3286
3287/* Helper function for `proceed`. Check if thread TP is suitable for
3288 resuming, and, if it is, switch to the thread and call
3289 `keep_going_pass_signal`. If TP is not suitable for resuming then this
3290 function will just return without switching threads. */
3291
3292static void
3294{
3295 if (!tp->inf->has_execution ())
3296 {
3297 infrun_debug_printf ("[%s] target has no execution",
3298 tp->ptid.to_string ().c_str ());
3299 return;
3300 }
3301
3302 if (tp->resumed ())
3303 {
3304 infrun_debug_printf ("[%s] resumed",
3305 tp->ptid.to_string ().c_str ());
3306 gdb_assert (tp->executing () || tp->has_pending_waitstatus ());
3307 return;
3308 }
3309
3311 {
3312 infrun_debug_printf ("[%s] needs step-over",
3313 tp->ptid.to_string ().c_str ());
3314 return;
3315 }
3316
3317 /* When handling a vfork GDB removes all breakpoints from the program
3318 space in which the vfork is being handled. If we are following the
3319 parent then GDB will set the thread_waiting_for_vfork_done member of
3320 the parent inferior. In this case we should take care to only resume
3321 the vfork parent thread, the kernel will hold this thread suspended
3322 until the vfork child has exited or execd, at which point the parent
3323 will be resumed and a VFORK_DONE event sent to GDB. */
3324 if (tp->inf->thread_waiting_for_vfork_done != nullptr)
3325 {
3326 if (target_is_non_stop_p ())
3327 {
3328 /* For non-stop targets, regardless of whether GDB is using
3329 all-stop or non-stop mode, threads are controlled
3330 individually.
3331
3332 When a thread is handling a vfork, breakpoints are removed
3333 from the inferior (well, program space in fact), so it is
3334 critical that we don't try to resume any thread other than the
3335 vfork parent. */
3336 if (tp != tp->inf->thread_waiting_for_vfork_done)
3337 {
3338 infrun_debug_printf ("[%s] thread %s of this inferior is "
3339 "waiting for vfork-done",
3340 tp->ptid.to_string ().c_str (),
3342 ->ptid.to_string ().c_str ());
3343 return;
3344 }
3345 }
3346 else
3347 {
3348 /* For all-stop targets, when we attempt to resume the inferior,
3349 we will only resume the vfork parent thread, this is handled
3350 in internal_resume_ptid.
3351
3352 Additionally, we will always be called with the vfork parent
3353 thread as the current thread (TP) thanks to follow_fork, as
3354 such the following assertion should hold.
3355
3356 Beyond this there is nothing more that needs to be done
3357 here. */
3358 gdb_assert (tp == tp->inf->thread_waiting_for_vfork_done);
3359 }
3360 }
3361
3362 /* When handling a vfork GDB removes all breakpoints from the program
3363 space in which the vfork is being handled. If we are following the
3364 child then GDB will set vfork_child member of the vfork parent
3365 inferior. Once the child has either exited or execd then GDB will
3366 detach from the parent process. Until that point GDB should not
3367 resume any thread in the parent process. */
3368 if (tp->inf->vfork_child != nullptr)
3369 {
3370 infrun_debug_printf ("[%s] thread is part of a vfork parent, child is %d",
3371 tp->ptid.to_string ().c_str (),
3372 tp->inf->vfork_child->pid);
3373 return;
3374 }
3375
3376 infrun_debug_printf ("resuming %s",
3377 tp->ptid.to_string ().c_str ());
3378
3379 execution_control_state ecs (tp);
3380 switch_to_thread (tp);
3381 keep_going_pass_signal (&ecs);
3382 if (!ecs.wait_some_more)
3383 error (_("Command aborted."));
3384}
3385
3386/* Basic routine for continuing the program in various fashions.
3387
3388 ADDR is the address to resume at, or -1 for resume where stopped.
3389 SIGGNAL is the signal to give it, or GDB_SIGNAL_0 for none,
3390 or GDB_SIGNAL_DEFAULT for act according to how it stopped.
3391
3392 You should call clear_proceed_status before calling proceed. */
3393
3394void
3395proceed (CORE_ADDR addr, enum gdb_signal siggnal)
3396{
3398
3399 struct regcache *regcache;
3400 struct gdbarch *gdbarch;
3401 CORE_ADDR pc;
3402
3403 /* If we're stopped at a fork/vfork, switch to either the parent or child
3404 thread as defined by the "set follow-fork-mode" command, or, if both
3405 the parent and child are controlled by GDB, and schedule-multiple is
3406 on, follow the child. If none of the above apply then we just proceed
3407 resuming the current thread. */
3408 if (!follow_fork ())
3409 {
3410 /* The target for some reason decided not to resume. */
3411 normal_stop ();
3412 if (target_can_async_p ())
3414 return;
3415 }
3416
3417 /* We'll update this if & when we switch to a new thread. */
3419
3421 gdbarch = regcache->arch ();
3422 const address_space *aspace = regcache->aspace ();
3423
3425
3426 thread_info *cur_thr = inferior_thread ();
3427
3428 infrun_debug_printf ("cur_thr = %s", cur_thr->ptid.to_string ().c_str ());
3429
3430 /* Fill in with reasonable starting values. */
3432
3433 gdb_assert (!thread_is_in_step_over_chain (cur_thr));
3434
3435 ptid_t resume_ptid
3437 process_stratum_target *resume_target
3438 = user_visible_resume_target (resume_ptid);
3439
3440 check_multi_target_resumption (resume_target);
3441
3442 if (addr == (CORE_ADDR) -1)
3443 {
3444 if (cur_thr->stop_pc_p ()
3445 && pc == cur_thr->stop_pc ()
3448 /* There is a breakpoint at the address we will resume at,
3449 step one instruction before inserting breakpoints so that
3450 we do not stop right away (and report a second hit at this
3451 breakpoint).
3452
3453 Note, we don't do this in reverse, because we won't
3454 actually be executing the breakpoint insn anyway.
3455 We'll be (un-)executing the previous instruction. */
3456 cur_thr->stepping_over_breakpoint = 1;
3460 /* We stepped onto an instruction that needs to be stepped
3461 again before re-inserting the breakpoint, do so. */
3462 cur_thr->stepping_over_breakpoint = 1;
3463 }
3464 else
3465 {
3467 }
3468
3469 if (siggnal != GDB_SIGNAL_DEFAULT)
3470 cur_thr->set_stop_signal (siggnal);
3471
3472 /* If an exception is thrown from this point on, make sure to
3473 propagate GDB's knowledge of the executing state to the
3474 frontend/user running state. */
3475 scoped_finish_thread_state finish_state (resume_target, resume_ptid);
3476
3477 /* Even if RESUME_PTID is a wildcard, and we end up resuming fewer
3478 threads (e.g., we might need to set threads stepping over
3479 breakpoints first), from the user/frontend's point of view, all
3480 threads in RESUME_PTID are now running. Unless we're calling an
3481 inferior function, as in that case we pretend the inferior
3482 doesn't run at all. */
3483 if (!cur_thr->control.in_infcall)
3484 set_running (resume_target, resume_ptid, true);
3485
3486 infrun_debug_printf ("addr=%s, signal=%s, resume_ptid=%s",
3487 paddress (gdbarch, addr),
3488 gdb_signal_to_symbol_string (siggnal),
3489 resume_ptid.to_string ().c_str ());
3490
3492
3493 /* Make sure that output from GDB appears before output from the
3494 inferior. */
3496
3497 /* Since we've marked the inferior running, give it the terminal. A
3498 QUIT/Ctrl-C from here on is forwarded to the target (which can
3499 still detect attempts to unblock a stuck connection with repeated
3500 Ctrl-C from within target_pass_ctrlc). */
3502
3503 /* In a multi-threaded task we may select another thread and
3504 then continue or step.
3505
3506 But if a thread that we're resuming had stopped at a breakpoint,
3507 it will immediately cause another breakpoint stop without any
3508 execution (i.e. it will report a breakpoint hit incorrectly). So
3509 we must step over it first.
3510
3511 Look for threads other than the current (TP) that reported a
3512 breakpoint hit and haven't been resumed yet since. */
3513
3514 /* If scheduler locking applies, we can avoid iterating over all
3515 threads. */
3516 if (!non_stop && !schedlock_applies (cur_thr))
3517 {
3518 for (thread_info *tp : all_non_exited_threads (resume_target,
3519 resume_ptid))
3520 {
3522
3523 /* Ignore the current thread here. It's handled
3524 afterwards. */
3525 if (tp == cur_thr)
3526 continue;
3527
3529 continue;
3530
3531 gdb_assert (!thread_is_in_step_over_chain (tp));
3532
3533 infrun_debug_printf ("need to step-over [%s] first",
3534 tp->ptid.to_string ().c_str ());
3535
3537 }
3538
3539 switch_to_thread (cur_thr);
3540 }
3541
3542 /* Enqueue the current thread last, so that we move all other
3543 threads over their breakpoints first. */
3544 if (cur_thr->stepping_over_breakpoint)
3546
3547 /* If the thread isn't started, we'll still need to set its prev_pc,
3548 so that switch_back_to_stepped_thread knows the thread hasn't
3549 advanced. Must do this before resuming any thread, as in
3550 all-stop/remote, once we resume we can't send any other packet
3551 until the target stops again. */
3553
3554 {
3555 scoped_disable_commit_resumed disable_commit_resumed ("proceeding");
3556 bool step_over_started = start_step_over ();
3557
3559 {
3560 /* Either this thread started a new in-line step over, or some
3561 other thread was already doing one. In either case, don't
3562 resume anything else until the step-over is finished. */
3563 }
3564 else if (step_over_started && !target_is_non_stop_p ())
3565 {
3566 /* A new displaced stepping sequence was started. In all-stop,
3567 we can't talk to the target anymore until it next stops. */
3568 }
3569 else if (!non_stop && target_is_non_stop_p ())
3570 {
3572 ("resuming threads, all-stop-on-top-of-non-stop");
3573
3574 /* In all-stop, but the target is always in non-stop mode.
3575 Start all other threads that are implicitly resumed too. */
3576 for (thread_info *tp : all_non_exited_threads (resume_target,
3577 resume_ptid))
3578 {
3581 }
3582 }
3583 else
3585
3586 disable_commit_resumed.reset_and_commit ();
3587 }
3588
3589 finish_state.release ();
3590
3591 /* If we've switched threads above, switch back to the previously
3592 current thread. We don't want the user to see a different
3593 selected thread. */
3594 switch_to_thread (cur_thr);
3595
3596 /* Tell the event loop to wait for it to stop. If the target
3597 supports asynchronous execution, it'll do this from within
3598 target_resume. */
3599 if (!target_can_async_p ())
3601}
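/* A typical caller therefore does (sketch):

     clear_proceed_status (0);
     proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT);

   i.e. resume where stopped, letting the signal disposition follow how the
   inferior last stopped, as described in the comment above proceed.  */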
3602
3603
3604/* Start remote-debugging of a machine over a serial link. */
3605
3606void
3607start_remote (int from_tty)
3608{
3610 inf->control.stop_soon = STOP_QUIETLY_REMOTE;
3611
3612 /* Always go on waiting for the target, regardless of the mode. */
3613 /* FIXME: cagney/1999-09-23: At present it isn't possible to
3614 indicate to wait_for_inferior that a target should timeout if
3615 nothing is returned (instead of just blocking). Because of this,
3616 targets expecting an immediate response need to, internally, set
3617 things up so that the target_wait() is forced to eventually
3618 timeout. */
3619 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
3620 differentiate to its caller what the state of the target is after
3621 the initial open has been performed. Here we're assuming that
3622 the target has stopped. It should be possible to eventually have
3623 target_open() return to the caller an indication that the target
3624 is currently running and GDB state should be set to the same as
3625 for an async run. */
3627
3628 /* Now that the inferior has stopped, do any bookkeeping like
3629 loading shared libraries. We want to do this before normal_stop,
3630 so that the displayed frame is up to date. */
3631 post_create_inferior (from_tty);
3632
3633 normal_stop ();
3634}
3635
3636/* Initialize static vars when a new inferior begins. */
3637
3638void
3640{
3641 /* These are meaningless until the first time through wait_for_inferior. */
3642
3644
3646
3648
3650}
3651
3652
3653
3654static void handle_inferior_event (struct execution_control_state *ecs);
3655
3656static void handle_step_into_function (struct gdbarch *gdbarch,
3657 struct execution_control_state *ecs);
3659 struct execution_control_state *ecs);
3660static void handle_signal_stop (struct execution_control_state *ecs);
3663
3664static void end_stepping_range (struct execution_control_state *ecs);
3665static void stop_waiting (struct execution_control_state *ecs);
3666static void keep_going (struct execution_control_state *ecs);
3667static void process_event_stop_test (struct execution_control_state *ecs);
3669
3670/* This function is attached as a "thread_stop_requested" observer.
3671 Cleanup local state that assumed the PTID was to be resumed, and
3672 report the stop to the frontend. */
3673
3674static void
3676{
3678
3679 /* PTID was requested to stop. If the thread was already stopped,
3680 but the user/frontend doesn't know about that yet (e.g., the
3681 thread had been temporarily paused for some step-over), set up
3682 for reporting the stop now. */
3683 for (thread_info *tp : all_threads (curr_target, ptid))
3684 {
3685 if (tp->state != THREAD_RUNNING)
3686 continue;
3687 if (tp->executing ())
3688 continue;
3689
3690 /* Remove matching threads from the step-over queue, so
3691 start_step_over doesn't try to resume them
3692 automatically. */
3695
3696 /* If the thread is stopped, but the user/frontend doesn't
3697 know about that yet, queue a pending event, as if the
3698 thread had just stopped now. Unless the thread already had
3699 a pending event. */
3700 if (!tp->has_pending_waitstatus ())
3701 {
3703 ws.set_stopped (GDB_SIGNAL_0);
3704 tp->set_pending_waitstatus (ws);
3705 }
3706
3707 /* Clear the inline-frame state, since we're re-processing the
3708 stop. */
3710
3711 /* If this thread was paused because some other thread was
3712 doing an inline-step over, let that finish first. Once
3713 that happens, we'll restart all threads and consume pending
3714 stop events then. */
3716 continue;
3717
3718 /* Otherwise we can process the (new) pending event now. Set
3719 it so this pending event is considered by
3720 do_target_wait. */
3721 tp->set_resumed (true);
3722 }
3723}
3724
3725/* Delete the step resume, single-step and longjmp/exception resume
3726 breakpoints of TP. */
3727
3728static void
3735
3736/* If the target still has execution, call FUNC for each thread that
3737 just stopped. In all-stop, that's all the non-exited threads; in
3738 non-stop, that's the current thread, only. */
3739
3741 (struct thread_info *tp);
3742
3743static void
3745{
3746 if (!target_has_execution () || inferior_ptid == null_ptid)
3747 return;
3748
3749 if (target_is_non_stop_p ())
3750 {
3751 /* If in non-stop mode, only the current thread stopped. */
3752 func (inferior_thread ());
3753 }
3754 else
3755 {
3756 /* In all-stop mode, all threads have stopped. */
3757 for (thread_info *tp : all_non_exited_threads ())
3758 func (tp);
3759 }
3760}
3761
3762/* Delete the step resume and longjmp/exception resume breakpoints of
3763 the threads that just stopped. */
3764
3765static void
3770
3771/* Delete the single-step breakpoints of the threads that just
3772 stopped. */
3773
3774static void
3779
3780/* See infrun.h. */
3781
3782void
3783print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
3784 const struct target_waitstatus &ws)
3785{
3786 infrun_debug_printf ("target_wait (%s [%s], status) =",
3787 waiton_ptid.to_string ().c_str (),
3788 target_pid_to_str (waiton_ptid).c_str ());
3789 infrun_debug_printf (" %s [%s],",
3790 result_ptid.to_string ().c_str (),
3791 target_pid_to_str (result_ptid).c_str ());
3792 infrun_debug_printf (" %s", ws.to_string ().c_str ());
3793}
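/* Typical usage, as in the wait wrappers below (sketch):

     if (debug_infrun)
       print_target_wait_results (minus_one_ptid, ecs.ptid, ecs.ws);

   i.e. callers pass the ptid they waited on, the ptid the event came
   from, and the resulting waitstatus.  */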
3794
3795/* Select a thread at random, out of those which are resumed and have
3796 had events. */
3797
3798static struct thread_info *
3799random_pending_event_thread (inferior *inf, ptid_t waiton_ptid)
3800{
3801 process_stratum_target *proc_target = inf->process_target ();
3802 thread_info *thread
3803 = proc_target->random_resumed_with_pending_wait_status (inf, waiton_ptid);
3804
3805 if (thread == nullptr)
3806 {
3807 infrun_debug_printf ("None found.");
3808 return nullptr;
3809 }
3810
3811 infrun_debug_printf ("Found %s.", thread->ptid.to_string ().c_str ());
3812 gdb_assert (thread->resumed ());
3813 gdb_assert (thread->has_pending_waitstatus ());
3814
3815 return thread;
3816}
3817
3818/* Wrapper for target_wait that first checks whether threads have
3819 pending statuses to report before actually asking the target for
3820 more events. INF is the inferior we're using to call target_wait
3821 on. */
3822
3823static ptid_t
3824do_target_wait_1 (inferior *inf, ptid_t ptid,
3825 target_waitstatus *status, target_wait_flags options)
3826{
3827 struct thread_info *tp;
3828
3829 /* We know that we are looking for an event in the target of inferior
3830 INF, but we don't know which thread the event might come from. As
3831 such we want to make sure that INFERIOR_PTID is reset so that none of
3832 the wait code relies on it - doing so is always a mistake. */
3834
3835 /* First check if there is a resumed thread with a wait status
3836 pending. */
3837 if (ptid == minus_one_ptid || ptid.is_pid ())
3838 {
3839 tp = random_pending_event_thread (inf, ptid);
3840 }
3841 else
3842 {
3843 infrun_debug_printf ("Waiting for specific thread %s.",
3844 ptid.to_string ().c_str ());
3845
3846 /* We have a specific thread to check. */
3847 tp = inf->find_thread (ptid);
3848 gdb_assert (tp != nullptr);
3849 if (!tp->has_pending_waitstatus ())
3850 tp = nullptr;
3851 }
3852
3853 if (tp != nullptr
3856 {
3857 struct regcache *regcache = get_thread_regcache (tp);
3858 struct gdbarch *gdbarch = regcache->arch ();
3859 CORE_ADDR pc;
3860 int discard = 0;
3861
3862 pc = regcache_read_pc (regcache);
3863
3864 if (pc != tp->stop_pc ())
3865 {
3866 infrun_debug_printf ("PC of %s changed. was=%s, now=%s",
3867 tp->ptid.to_string ().c_str (),
3868 paddress (gdbarch, tp->stop_pc ()),
3869 paddress (gdbarch, pc));
3870 discard = 1;
3871 }
3872 else if (!breakpoint_inserted_here_p (regcache->aspace (), pc))
3873 {
3874 infrun_debug_printf ("previous breakpoint of %s, at %s gone",
3875 tp->ptid.to_string ().c_str (),
3876 paddress (gdbarch, pc));
3877
3878 discard = 1;
3879 }
3880
3881 if (discard)
3882 {
3883 infrun_debug_printf ("pending event of %s cancelled.",
3884 tp->ptid.to_string ().c_str ());
3885
3888 ws.set_spurious ();
3889 tp->set_pending_waitstatus (ws);
3891 }
3892 }
3893
3894 if (tp != nullptr)
3895 {
3896 infrun_debug_printf ("Using pending wait status %s for %s.",
3897 tp->pending_waitstatus ().to_string ().c_str (),
3898 tp->ptid.to_string ().c_str ());
3899
3900 /* Now that we've selected our final event LWP, un-adjust its PC
3901 if it was a software breakpoint (and the target doesn't
3902 always adjust the PC itself). */
3905 {
3906 struct regcache *regcache;
3907 struct gdbarch *gdbarch;
3908 int decr_pc;
3909
3911 gdbarch = regcache->arch ();
3912
3914 if (decr_pc != 0)
3915 {
3916 CORE_ADDR pc;
3917
3919 regcache_write_pc (regcache, pc + decr_pc);
3920 }
3921 }
3922
3924 *status = tp->pending_waitstatus ();
3926
3927 /* Wake up the event loop again, until all pending events are
3928 processed. */
3929 if (target_is_async_p ())
3931 return tp->ptid;
3932 }
3933
3934 /* But if we don't find one, we'll have to wait. */
3935
3936 /* We can't ask a non-async target to do a non-blocking wait, so this will be
3937 a blocking wait. */
3938 if (!target_can_async_p ())
3939 options &= ~TARGET_WNOHANG;
3940
3941 return target_wait (ptid, status, options);
3942}
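/* Example call, as in wait_for_inferior below (sketch):

     ecs.ptid = do_target_wait_1 (inf, minus_one_ptid, &ecs.ws, 0);
     ecs.target = inf->process_target ();

   With OPTIONS == 0 this is a blocking wait: the pending-status check
   runs first, and only then is the target itself asked for events.  */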
3943
3944/* Wrapper for target_wait that first checks whether threads have
3945 pending statuses to report before actually asking the target for
3946 more events. Polls for events from all inferiors/targets. */
3947
3948static bool
3949do_target_wait (execution_control_state *ecs, target_wait_flags options)
3950{
3951 int num_inferiors = 0;
3952 int random_selector;
3953
3954 /* For fairness, we pick the first inferior/target to poll at random
3955 out of all inferiors that may report events, and then continue
3956 polling the rest of the inferior list starting from that one in a
3957 circular fashion until the whole list is polled once. */
3958
3959 auto inferior_matches = [] (inferior *inf)
3960 {
3961 return inf->process_target () != nullptr;
3962 };
3963
3964 /* First see how many matching inferiors we have. */
3965 for (inferior *inf : all_inferiors ())
3966 if (inferior_matches (inf))
3967 num_inferiors++;
3968
3969 if (num_inferiors == 0)
3970 {
3971 ecs->ws.set_ignore ();
3972 return false;
3973 }
3974
3975 /* Now randomly pick an inferior out of those that matched. */
3976 random_selector = (int)
3977 ((num_inferiors * (double) rand ()) / (RAND_MAX + 1.0));
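  /* For example, with num_inferiors == 3 the expression above maps
     rand () uniformly onto {0, 1, 2}: rand () == 0 yields 0 and
     rand () == RAND_MAX yields 2 (never 3), because the product is
     divided by RAND_MAX + 1.  */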
3978
3979 if (num_inferiors > 1)
3980 infrun_debug_printf ("Found %d inferiors, starting at #%d",
3981 num_inferiors, random_selector);
3982
3983 /* Select the Nth inferior that matched. */
3984
3985 inferior *selected = nullptr;
3986
3987 for (inferior *inf : all_inferiors ())
3988 if (inferior_matches (inf))
3989 if (random_selector-- == 0)
3990 {
3991 selected = inf;
3992 break;
3993 }
3994
3995 /* Now poll for events out of each of the matching inferior's
3996 targets, starting from the selected one. */
3997
3998 auto do_wait = [&] (inferior *inf)
3999 {
4000 ecs->ptid = do_target_wait_1 (inf, minus_one_ptid, &ecs->ws, options);
4001 ecs->target = inf->process_target ();
4002 return (ecs->ws.kind () != TARGET_WAITKIND_IGNORE);
4003 };
4004
4005 /* Needed in 'all-stop + target-non-stop' mode, because we end up
4006 here spuriously after the target is all stopped and we've already
4007 reported the stop to the user, polling for events. */
4008 scoped_restore_current_thread restore_thread;
4009
4010 intrusive_list_iterator<inferior> start
4011 = inferior_list.iterator_to (*selected);
4012
4013 for (intrusive_list_iterator<inferior> it = start;
4014 it != inferior_list.end ();
4015 ++it)
4016 {
4017 inferior *inf = &*it;
4018
4019 if (inferior_matches (inf) && do_wait (inf))
4020 return true;
4021 }
4022
4023 for (intrusive_list_iterator<inferior> it = inferior_list.begin ();
4024 it != start;
4025 ++it)
4026 {
4027 inferior *inf = &*it;
4028
4029 if (inferior_matches (inf) && do_wait (inf))
4030 return true;
4031 }
4032
4033 ecs->ws.set_ignore ();
4034 return false;
4035}
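/* A sketch of the caller's side (see fetch_inferior_event below):

     execution_control_state ecs;
     if (!do_target_wait (&ecs, TARGET_WNOHANG))
       return;   // no event from any inferior on this pass

   A false return with ecs.ws set to "ignore" simply means there was
   nothing to process on this pass of the event loop.  */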
4036
4037/* An event reported by wait_one. */
4038
4040{
4041 /* The target the event came out of. */
4043
4044 /* The PTID the event was for. */
4045 ptid_t ptid;
4046
4047 /* The waitstatus. */
4049};
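/* Sketch of how a wait_one_event is filled in and consumed (see
   prepare_for_detach and stop_all_threads below):

     wait_one_event event;
     event.target = inf->process_target ();
     event.ptid = do_target_wait_1 (inf, pid_ptid, &event.ws, 0);
     handle_one (event);
*/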
4050
4051static bool handle_one (const wait_one_event &event);
4052
4053/* Prepare and stabilize the inferior for detaching it. E.g.,
4054 detaching while a thread is displaced stepping is a recipe for
4055 crashing it, as nothing would readjust the PC out of the scratch
4056 pad. */
4057
4058void
4060{
4061 struct inferior *inf = current_inferior ();
4062 ptid_t pid_ptid = ptid_t (inf->pid);
4063 scoped_restore_current_thread restore_thread;
4064
4065 scoped_restore restore_detaching = make_scoped_restore (&inf->detaching, true);
4066
4067 /* Remove all threads of INF from the global step-over chain. We
4068 want to stop any ongoing step-over, not start any new one. */
4071
4072 for (thread_info *tp : range)
4073 if (tp->inf == inf)
4074 {
4075 infrun_debug_printf ("removing thread %s from global step over chain",
4076 tp->ptid.to_string ().c_str ());
4078 }
4079
4080 /* If we were already in the middle of an inline step-over, and the
4081 thread stepping belongs to the inferior we're detaching, we need
4082 to restart the threads of other inferiors. */
4083 if (step_over_info.thread != -1)
4084 {
4085 infrun_debug_printf ("inline step-over in-process while detaching");
4086
4088 if (thr->inf == inf)
4089 {
4090 /* Since we removed threads of INF from the step-over chain,
4091 we know this won't start a step-over for INF. */
4093
4094 if (target_is_non_stop_p ())
4095 {
4096 /* Start a new step-over in another thread if there's
4097 one that needs it. */
4098 start_step_over ();
4099
4100 /* Restart all other threads (except the
4101 previously-stepping thread, since that one is still
4102 running). */
4103 if (!step_over_info_valid_p ())
4104 restart_threads (thr);
4105 }
4106 }
4107 }
4108
4110 {
4111 infrun_debug_printf ("displaced-stepping in-process while detaching");
4112
4113 /* Stop threads currently displaced stepping, aborting it. */
4114
4115 for (thread_info *thr : inf->non_exited_threads ())
4116 {
4117 if (thr->displaced_step_state.in_progress ())
4118 {
4119 if (thr->executing ())
4120 {
4121 if (!thr->stop_requested)
4122 {
4123 target_stop (thr->ptid);
4124 thr->stop_requested = true;
4125 }
4126 }
4127 else
4128 thr->set_resumed (false);
4129 }
4130 }
4131
4133 {
4134 wait_one_event event;
4135
4136 event.target = inf->process_target ();
4137 event.ptid = do_target_wait_1 (inf, pid_ptid, &event.ws, 0);
4138
4139 if (debug_infrun)
4140 print_target_wait_results (pid_ptid, event.ptid, event.ws);
4141
4142 handle_one (event);
4143 }
4144
4145 /* It's OK to leave some of the threads of INF stopped, since
4146 they'll be detached shortly. */
4147 }
4148}
4149
4150/* If all-stop, but there exists a non-stop target, stop all threads
4151 now that we're presenting the stop to the user. */
4152
4153static void
4155{
4157 stop_all_threads ("presenting stop to user in all-stop");
4158}
4159
4160/* Wait for control to return from inferior to debugger.
4161
4162 If inferior gets a signal, we may decide to start it up again
4163 instead of returning. That is why there is a loop in this function.
4164 When this function actually returns it means the inferior
4165 should be left stopped and GDB should read more commands. */
4166
4167static void
4169{
4170 infrun_debug_printf ("wait_for_inferior ()");
4171
4173
4174 /* If an error happens while handling the event, propagate GDB's
4175 knowledge of the executing state to the frontend/user running
4176 state. */
4177 scoped_finish_thread_state finish_state
4178 (inf->process_target (), minus_one_ptid);
4179
4180 while (1)
4181 {
4183
4185
4186 /* Flush target cache before starting to handle each event.
4187 Target was running and cache could be stale. This is just a
4188 heuristic. Running threads may modify target memory, but we
4189 don't get any event. */
4191
4192 ecs.ptid = do_target_wait_1 (inf, minus_one_ptid, &ecs.ws, 0);
4193 ecs.target = inf->process_target ();
4194
4195 if (debug_infrun)
4196 print_target_wait_results (minus_one_ptid, ecs.ptid, ecs.ws);
4197
4198 /* Now figure out what to do with the result. */
4199 handle_inferior_event (&ecs);
4200
4201 if (!ecs.wait_some_more)
4202 break;
4203 }
4204
4206
4207 /* No error, don't finish the state yet. */
4208 finish_state.release ();
4209}
4210
4211/* Cleanup that reinstalls the readline callback handler, if the
4212 target is running in the background. If while handling the target
4213 event something triggered a secondary prompt, like e.g., a
4214 pagination prompt, we'll have removed the callback handler (see
4215 gdb_readline_wrapper_line). Need to do this as we go back to the
4216 event loop, ready to process further input. Note the reinstall
4217 has no effect if the handler hasn't actually been removed, because
4218 calling rl_callback_handler_install would reset the line buffer,
4219 thus losing input. */
4220
4221static void
4223{
4224 struct ui *ui = current_ui;
4225
4226 if (!ui->async)
4227 {
4228 /* We're not going back to the top level event loop yet. Don't
4229 install the readline callback, as it'd prep the terminal,
4230 readline-style (raw, noecho) (e.g., --batch). We'll install
4231 it the next time the prompt is displayed, when we're ready
4232 for input. */
4233 return;
4234 }
4235
4238}
4239
4240/* Clean up the FSMs of threads that are now stopped. In non-stop,
4241 that's just the event thread. In all-stop, that's all threads. */
4242
4243static void
4245{
4246 /* The first clean_up call below assumes the event thread is the current
4247 one. */
4248 if (ecs->event_thread != nullptr)
4249 gdb_assert (ecs->event_thread == inferior_thread ());
4250
4251 if (ecs->event_thread != nullptr
4252 && ecs->event_thread->thread_fsm () != nullptr)
4254
4255 if (!non_stop)
4256 {
4257 scoped_restore_current_thread restore_thread;
4258
4259 for (thread_info *thr : all_non_exited_threads ())
4260 {
4261 if (thr->thread_fsm () == nullptr)
4262 continue;
4263 if (thr == ecs->event_thread)
4264 continue;
4265
4266 switch_to_thread (thr);
4267 thr->thread_fsm ()->clean_up (thr);
4268 }
4269 }
4270}
4271
4272/* Helper for all_uis_check_sync_execution_done that works on the
4273 current UI. */
4274
4275static void
4277{
4278 struct ui *ui = current_ui;
4279
4281 && ui->async
4283 {
4287 }
4288}
4289
4290/* See infrun.h. */
4291
4292void
4300
4301/* See infrun.h. */
4302
4303void
4312
4313/* A quit_handler callback installed while we're handling inferior
4314 events. */
4315
4316static void
4318{
4320 {
4321 /* Do nothing.
4322
4323 default_quit_handler would throw a quit in this case, but if
4324 we're handling an event while we have the terminal, it means
4325 the target is running a background execution command, and
4326 thus when users press Ctrl-C, they're wanting to interrupt
4327 whatever command they were executing in the command line.
4328 E.g.:
4329
4330 (gdb) c&
4331 (gdb) foo bar whatever<ctrl-c>
4332
4333 That Ctrl-C should clear the input line, not interrupt event
4334 handling if it happens that the user types Ctrl-C at just the
4335 "wrong" time!
4336
4337 It's as if background event handling were handled by a
4338 separate background thread.
4339
4340 To be clear, the Ctrl-C is not lost -- it will be processed
4341 by the next QUIT call once we're out of fetch_inferior_event
4342 again. */
4343 }
4344 else
4345 {
4346 if (check_quit_flag ())
4348 }
4349}
4350
4351/* Asynchronous version of wait_for_inferior. It is called by the
4352 event loop whenever a change of state is detected on the file
4353 descriptor corresponding to the target. It can be called more than
4354 once to complete a single execution command. In such cases we need
4355 to keep the state in a global variable ECSS. If it is the last time
4356 that this function is called for a single execution command, then
4357 report to the user that the inferior has stopped, and do the
4358 necessary cleanups. */
4359
4360void
4362{
4364
4366 int cmd_done = 0;
4367
4368 /* Events are always processed with the main UI as current UI. This
4369 way, warnings, debug output, etc. are always consistently sent to
4370 the main console. */
4371 scoped_restore save_ui = make_scoped_restore (&current_ui, main_ui);
4372
4373 /* Temporarily disable pagination. Otherwise, the user would be
4374 given an option to press 'q' to quit, which would cause an early
4375 exit and could leave GDB in a half-baked state. */
4376 scoped_restore save_pagination
4377 = make_scoped_restore (&pagination_enabled, false);
4378
4379 /* Install a quit handler that does nothing if we have the terminal
4380 (meaning the target is running a background execution command),
4381 so that Ctrl-C never interrupts GDB before the event is fully
4382 handled. */
4383 scoped_restore restore_quit_handler
4384 = make_scoped_restore (&quit_handler, infrun_quit_handler);
4385
4386 /* Make sure a SIGINT does not interrupt an extension language while
4387 we're handling an event. That could interrupt a Python unwinder
4388 or a Python observer or some such. A Ctrl-C should either be
4389 forwarded to the inferior if the inferior has the terminal, or,
4390 if GDB has the terminal, should interrupt the command the user is
4391 typing in the CLI. */
4393
4394 /* End up with readline processing input, if necessary. */
4395 {
4397
4398 /* We're handling a live event, so make sure we're doing live
4399 debugging. If we're looking at traceframes while the target is
4400 running, we're going to need to get back to that mode after
4401 handling the event. */
4402 gdb::optional<scoped_restore_current_traceframe> maybe_restore_traceframe;
4403 if (non_stop)
4404 {
4405 maybe_restore_traceframe.emplace ();
4407 }
4408
4409 /* The user/frontend should not notice a thread switch due to
4410 internal events. Make sure we revert to the user selected
4411 thread and frame after handling the event and running any
4412 breakpoint commands. */
4413 scoped_restore_current_thread restore_thread;
4414
4416 /* Flush target cache before starting to handle each event. Target
4417 was running and cache could be stale. This is just a heuristic.
4418 Running threads may modify target memory, but we don't get any
4419 event. */
4421
4422 scoped_restore save_exec_dir
4423 = make_scoped_restore (&execution_direction,
4425
4426 /* Allow targets to pause their resumed threads while we handle
4427 the event. */
4428 scoped_disable_commit_resumed disable_commit_resumed ("handling event");
4429
4430 if (!do_target_wait (&ecs, TARGET_WNOHANG))
4431 {
4432 infrun_debug_printf ("do_target_wait returned no event");
4433 disable_commit_resumed.reset_and_commit ();
4434 return;
4435 }
4436
4437 gdb_assert (ecs.ws.kind () != TARGET_WAITKIND_IGNORE);
4438
4439 /* Switch to the inferior that generated the event, so we can do
4440 target calls. If the event was not associated with a ptid, switch based on the event's target alone. */
4441 if (ecs.ptid != null_ptid
4442 && ecs.ptid != minus_one_ptid)
4444 else
4446
4447 if (debug_infrun)
4448 print_target_wait_results (minus_one_ptid, ecs.ptid, ecs.ws);
4449
4450 /* If an error happens while handling the event, propagate GDB's
4451 knowledge of the executing state to the frontend/user running
4452 state. */
4453 ptid_t finish_ptid = !target_is_non_stop_p () ? minus_one_ptid : ecs.ptid;
4454 scoped_finish_thread_state finish_state (ecs.target, finish_ptid);
4455
4456 /* These run before scoped_restore_current_thread above, so they still
4457 apply to the thread which has thrown the exception. */
4458 auto defer_bpstat_clear
4459 = make_scope_exit (bpstat_clear_actions);
4460 auto defer_delete_threads
4462
4463 int stop_id = get_stop_id ();
4464
4465 /* Now figure out what to do with the result. */
4466 handle_inferior_event (&ecs);
4467
4468 if (!ecs.wait_some_more)
4469 {
4470 struct inferior *inf = find_inferior_ptid (ecs.target, ecs.ptid);
4471 bool should_stop = true;
4472 struct thread_info *thr = ecs.event_thread;
4473
4475
4476 if (thr != nullptr && thr->thread_fsm () != nullptr)
4477 should_stop = thr->thread_fsm ()->should_stop (thr);
4478
4479 if (!should_stop)
4480 {
4481 keep_going (&ecs);
4482 }
4483 else
4484 {
4485 bool should_notify_stop = true;
4486 bool proceeded = false;
4487
4489
4491
4492 if (stop_id != get_stop_id ())
4493 {
4494 /* If the stop-id has changed then a stop has already been
4495 presented to the user in handle_inferior_event; this is
4496 likely a failed inferior call. As the stop has already
4497 been announced then we should not notify again.
4498
4499 Also, if the prompt state is not PROMPT_NEEDED then GDB
4500 will not be ready for user input after this function. */
4501 should_notify_stop = false;
4502 gdb_assert (current_ui->prompt_state == PROMPT_NEEDED);
4503 }
4504 else if (thr != nullptr && thr->thread_fsm () != nullptr)
4505 should_notify_stop
4506 = thr->thread_fsm ()->should_notify_stop ();
4507
4508 if (should_notify_stop)
4509 {
4510 /* We may not find an inferior if this was a process exit. */
4511 if (inf == nullptr || inf->control.stop_soon == NO_STOP_QUIETLY)
4512 proceeded = normal_stop ();
4513 }
4514
4515 if (!proceeded)
4516 {
4518 cmd_done = 1;
4519 }
4520
4521 /* If we got a TARGET_WAITKIND_NO_RESUMED event, then the
4522 previously selected thread is gone. We have two
4523 choices - switch to no thread selected, or restore the
4524 previously selected thread (now exited). We chose the
4525 latter, just because that's what GDB used to do. After
4526 this, "info threads" says "The current thread <Thread
4527 ID 2> has terminated." instead of "No thread
4528 selected.". */
4529 if (!non_stop
4530 && cmd_done
4531 && ecs.ws.kind () != TARGET_WAITKIND_NO_RESUMED)
4532 restore_thread.dont_restore ();
4533 }
4534 }
4535
4536 defer_delete_threads.release ();
4537 defer_bpstat_clear.release ();
4538
4539 /* No error, don't finish the thread states yet. */
4540 finish_state.release ();
4541
4542 disable_commit_resumed.reset_and_commit ();
4543
4544 /* This scope is used to ensure that readline callbacks are
4545 reinstalled here. */
4546 }
4547
4548 /* Handling this event might have caused some inferiors to become prunable.
4549 For example, the exit of an inferior that was automatically added. Try
4550 to get rid of them. Keeping those around slows down things linearly.
4551
4552 Note that this never removes the current inferior. Therefore, call this
4553 after RESTORE_THREAD went out of scope, in case the event inferior (which was
4554 temporarily made the current inferior) is meant to be deleted.
4555
4556 Call this before all_uis_check_sync_execution_done, so that notifications about
4557 removed inferiors appear before the prompt. */
4558 prune_inferiors ();
4559
4560 /* If a UI was in sync execution mode, and now isn't, restore its
4561 prompt (a synchronous execution command has finished, and we're
4562 ready for input). */
4564
4565 if (cmd_done
4567 && (inferior_ptid == null_ptid
4568 || inferior_thread ()->state != THREAD_RUNNING))
4569 gdb_printf (_("completed.\n"));
4570}
4571
4572/* See infrun.h. */
4573
4574void
4576 struct symtab_and_line sal)
4577{
4578 /* This can be removed once this function no longer implicitly relies on the
4579 inferior_ptid value. */
4580 gdb_assert (inferior_ptid == tp->ptid);
4581
4582 tp->control.step_frame_id = get_frame_id (frame);
4584
4585 tp->current_symtab = sal.symtab;
4586 tp->current_line = sal.line;
4587
4589 ("symtab = %s, line = %d, step_frame_id = %s, step_stack_frame_id = %s",
4590 tp->current_symtab != nullptr ? tp->current_symtab->filename : "<null>",
4591 tp->current_line,
4592 tp->control.step_frame_id.to_string ().c_str (),
4593 tp->control.step_stack_frame_id.to_string ().c_str ());
4594}
4595
4596/* Clear context switchable stepping state. */
4597
4598void
4606
4607/* See infrun.h. */
4608
4609void
4617
4618/* See infrun.h. */
4619
4620void
4623{
4624 if (target != nullptr)
4625 *target = target_last_proc_target;
4626 if (ptid != nullptr)
4627 *ptid = target_last_wait_ptid;
4628 if (status != nullptr)
4630}
4631
4632/* See infrun.h. */
4633
4634void
4636{
4637 target_last_proc_target = nullptr;
4638 target_last_wait_ptid = minus_one_ptid;
4640}
4641
4642/* Switch thread contexts. */
4643
4644static void
4646{
4647 if (ecs->ptid != inferior_ptid
4648 && (inferior_ptid == null_ptid
4649 || ecs->event_thread != inferior_thread ()))
4650 {
4651 infrun_debug_printf ("Switching context from %s to %s",
4652 inferior_ptid.to_string ().c_str (),
4653 ecs->ptid.to_string ().c_str ());
4654 }
4655
4657}
4658
4659/* If the target can't tell whether we've hit breakpoints
4660 (target_supports_stopped_by_sw_breakpoint), and we got a SIGTRAP,
4661 check whether that could have been caused by a breakpoint. If so,
4662 adjust the PC, per gdbarch_decr_pc_after_break. */
4663
4664static void
4666 const target_waitstatus &ws)
4667{
4668 struct regcache *regcache;
4669 struct gdbarch *gdbarch;
4670 CORE_ADDR breakpoint_pc, decr_pc;
4671
4672 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
4673 we aren't, just return.
4674
4675 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
4676 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
4677 implemented by software breakpoints should be handled through the normal
4678 breakpoint layer.
4679
4680 NOTE drow/2004-01-31: On some targets, breakpoints may generate
4681 different signals (SIGILL or SIGEMT for instance), but it is less
4682 clear where the PC is pointing afterwards. It may not match
4683 gdbarch_decr_pc_after_break. I don't know any specific target that
4684 generates these signals at breakpoints (the code has been in GDB since at
4685 least 1992) so I can not guess how to handle them here.
4686
4687 In earlier versions of GDB, a target with
4688 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
4689 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
4690 target with both of these set in GDB history, and it seems unlikely to be
4691 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
4692
4693 if (ws.kind () != TARGET_WAITKIND_STOPPED)
4694 return;
4695
4696 if (ws.sig () != GDB_SIGNAL_TRAP)
4697 return;
4698
4699 /* In reverse execution, when a breakpoint is hit, the instruction
4700 under it has already been de-executed. The reported PC always
4701 points at the breakpoint address, so adjusting it further would
4702 be wrong. E.g., consider this case on a decr_pc_after_break == 1
4703 architecture:
4704
4705 B1 0x08000000 : INSN1
4706 B2 0x08000001 : INSN2
4707 0x08000002 : INSN3
4708 PC -> 0x08000003 : INSN4
4709
4710 Say you're stopped at 0x08000003 as above. Reverse continuing
4711 from that point should hit B2 as below. Reading the PC when the
4712 SIGTRAP is reported should read 0x08000001 and INSN2 should have
4713 been de-executed already.
4714
4715 B1 0x08000000 : INSN1
4716 B2 PC -> 0x08000001 : INSN2
4717 0x08000002 : INSN3
4718 0x08000003 : INSN4
4719
4720 We can't apply the same logic as for forward execution, because
4721 we would wrongly adjust the PC to 0x08000000, since there's a
4722 breakpoint at PC - 1. We'd then report a hit on B1, although
4723 INSN1 hadn't been de-executed yet. Doing nothing is the correct
4724 behaviour. */
4726 return;
4727
4728 /* If the target can tell whether the thread hit a SW breakpoint,
4729 trust it. Targets that can tell also adjust the PC
4730 themselves. */
4732 return;
4733
4734 /* Note that relying on whether a breakpoint is planted in memory to
4735 determine this can fail. E.g., the breakpoint could have been
4736 removed since. Or the thread could have been told to step an
4737 instruction the size of a breakpoint instruction, and only
4738 _after_ was a breakpoint inserted at its address. */
4739
4740 /* If this target does not decrement the PC after breakpoints, then
4741 we have nothing to do. */
4742 regcache = get_thread_regcache (thread);
4743 gdbarch = regcache->arch ();
4744
4746 if (decr_pc == 0)
4747 return;
4748
4749 const address_space *aspace = regcache->aspace ();
4750
4751 /* Find the location where (if we've hit a breakpoint) the
4752 breakpoint would be. */
4753 breakpoint_pc = regcache_read_pc (regcache) - decr_pc;
4754
4755 /* If the target can't tell whether a software breakpoint triggered,
4756 fallback to figuring it out based on breakpoints we think were
4757 inserted in the target, and on whether the thread was stepped or
4758 continued. */
4759
4760 /* Check whether there actually is a software breakpoint inserted at
4761 that location.
4762
4763 If in non-stop mode, a race condition is possible where we've
4764 removed a breakpoint, but stop events for that breakpoint were
4765 already queued and arrive later. To suppress those spurious
4766 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
4767 and retire them after a number of stop events are reported. Note
4768 this is a heuristic and can thus get confused. The real fix is
4769 to get the "stopped by SW BP and needs adjustment" info out of
4770 the target/kernel (and thus never reach here; see above). */
4771 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
4773 && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
4774 {
4775 gdb::optional<scoped_restore_tmpl<int>> restore_operation_disable;
4776
4777 if (record_full_is_used ())
4778 restore_operation_disable.emplace
4780
4781 /* When using hardware single-step, a SIGTRAP is reported for both
4782 a completed single-step and a software breakpoint. Need to
4783 differentiate between the two, as the latter needs adjusting
4784 but the former does not.
4785
4786 The SIGTRAP can be due to a completed hardware single-step only if
4787 - we didn't insert software single-step breakpoints
4788 - this thread is currently being stepped
4789
4790 If any of these events did not occur, we must have stopped due
4791 to hitting a software breakpoint, and have to back up to the
4792 breakpoint address.
4793
4794 As a special case, we could have hardware single-stepped a
4795 software breakpoint. In this case (prev_pc == breakpoint_pc),
4796 we also need to back up to the breakpoint address. */
4797
4799 || !currently_stepping (thread)
4800 || (thread->stepped_breakpoint
4801 && thread->prev_pc == breakpoint_pc))
4802 regcache_write_pc (regcache, breakpoint_pc);
4803 }
4804}
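/* Worked example (assuming an architecture where
   gdbarch_decr_pc_after_break is 1, such as x86): a software
   breakpoint planted at 0x4000 traps with the reported PC at 0x4001.
   If the target can't report "stopped by SW breakpoint" itself, the
   code above notices a breakpoint inserted at 0x4001 - 1 and rewinds
   the PC to 0x4000, unless the SIGTRAP came from a completed
   hardware single-step of a non-breakpoint instruction.  */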
4805
4806static bool
4807stepped_in_from (frame_info_ptr frame, struct frame_id step_frame_id)
4808{
4809 for (frame = get_prev_frame (frame);
4810 frame != nullptr;
4811 frame = get_prev_frame (frame))
4812 {
4813 if (get_frame_id (frame) == step_frame_id)
4814 return true;
4815
4816 if (get_frame_type (frame) != INLINE_FRAME)
4817 break;
4818 }
4819
4820 return false;
4821}
4822
4823/* Look for an inline frame that is marked for skip.
4824 If PREV_FRAME is TRUE start at the previous frame,
4825 otherwise start at the current frame. Stop at the
4826 first non-inline frame, or at the frame where the
4827 step started. */
4828
4829static bool
4831{
4833
4834 if (prev_frame)
4835 frame = get_prev_frame (frame);
4836
4837 for (; frame != nullptr; frame = get_prev_frame (frame))
4838 {
4839 const char *fn = nullptr;
4840 symtab_and_line sal;
4841 struct symbol *sym;
4842
4843 if (get_frame_id (frame) == tp->control.step_frame_id)
4844 break;
4845 if (get_frame_type (frame) != INLINE_FRAME)
4846 break;
4847
4848 sal = find_frame_sal (frame);
4849 sym = get_frame_function (frame);
4850
4851 if (sym != nullptr)
4852 fn = sym->print_name ();
4853
4854 if (sal.line != 0
4856 return true;
4857 }
4858
4859 return false;
4860}
4861
4862/* If the event thread has the stop requested flag set, pretend it
4863 stopped for a GDB_SIGNAL_0 (i.e., as if it stopped due to
4864 target_stop). */
4865
4866static bool
4868{
4869 if (ecs->event_thread->stop_requested)
4870 {
4871 ecs->ws.set_stopped (GDB_SIGNAL_0);
4872 handle_signal_stop (ecs);
4873 return true;
4874 }
4875 return false;
4876}
4877
4878/* Auxiliary function that handles syscall entry/return events.
4879 It returns true if the inferior should keep going (and GDB
4880 should ignore the event), or false if the event deserves to be
4881 processed. */
4882
4883static bool
4885{
4886 struct regcache *regcache;
4887 int syscall_number;
4888
4889 context_switch (ecs);
4890
4892 syscall_number = ecs->ws.syscall_number ();
4894
4895 if (catch_syscall_enabled () > 0
4896 && catching_syscall_number (syscall_number))
4897 {
4898 infrun_debug_printf ("syscall number=%d", syscall_number);
4899
4902 ecs->event_thread->stop_pc (),
4903 ecs->event_thread, ecs->ws);
4904
4905 if (handle_stop_requested (ecs))
4906 return false;
4907
4909 {
4910 /* Catchpoint hit. */
4911 return false;
4912 }
4913 }
4914
4915 if (handle_stop_requested (ecs))
4916 return false;
4917
4918 /* If no catchpoint triggered for this, then keep going. */
4919 keep_going (ecs);
4920
4921 return true;
4922}
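/* For instance, with "catch syscall write" in effect,
   catching_syscall_number () only matches that syscall's number, so
   entries and returns for any other syscall fall through to
   keep_going above and the inferior is silently resumed.  */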
4923
4924/* Lazily fill in the execution_control_state's stop_func_* fields. */
4925
4926static void
4928 struct execution_control_state *ecs)
4929{
4930 if (!ecs->stop_func_filled_in)
4931 {
4932 const block *block;
4933 const general_symbol_info *gsi;
4934
4935 /* Don't care about return value; stop_func_start and stop_func_name
4936 will both be 0 if it doesn't work. */
4938 &gsi,
4939 &ecs->stop_func_start,
4940 &ecs->stop_func_end,
4941 &block);
4942 ecs->stop_func_name = gsi == nullptr ? nullptr : gsi->print_name ();
4943
4944 /* The call to find_pc_partial_function, above, will set
4945 stop_func_start and stop_func_end to the start and end
4946 of the range containing the stop pc. If this range
4947 contains the entry pc for the block (which is always the
4948 case for contiguous blocks), advance stop_func_start past
4949 the function's start offset and entrypoint. Note that
4950 stop_func_start is NOT advanced when in a range of a
4951 non-contiguous block that does not contain the entry pc. */
4952 if (block != nullptr
4953 && ecs->stop_func_start <= block->entry_pc ()
4954 && block->entry_pc () < ecs->stop_func_end)
4955 {
4956 ecs->stop_func_start
4958
4959 /* PowerPC functions have a Local Entry Point (LEP) and a Global
4960 Entry Point (GEP). There is only one Entry Point (GEP = LEP) for
4961 other architectures. */
4963
4965 ecs->stop_func_start
4967 }
4968
4969 ecs->stop_func_filled_in = 1;
4970 }
4971}
4972
4973
4974/* Return the STOP_SOON field of the inferior pointed at by ECS. */
4975
4976static enum stop_kind
4978{
4979 struct inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
4980
4981 gdb_assert (inf != nullptr);
4982 return inf->control.stop_soon;
4983}
4984
4985/* Poll for one event out of the current target. Store the resulting
4986 waitstatus in WS, and return the event ptid. Does not block. */
4987
4988static ptid_t
4990{
4991 ptid_t event_ptid;
4992
4994
4995 /* Flush target cache before starting to handle each event.
4996 Target was running and cache could be stale. This is just a
4997 heuristic. Running threads may modify target memory, but we
4998 don't get any event. */
5000
5001 event_ptid = target_wait (minus_one_ptid, ws, TARGET_WNOHANG);
5002
5003 if (debug_infrun)
5004 print_target_wait_results (minus_one_ptid, event_ptid, *ws);
5005
5006 return event_ptid;
5007}
5008
5009/* Wait for one event out of any target. */
5010
5011static wait_one_event
5013{
5014 while (1)
5015 {
5016 for (inferior *inf : all_inferiors ())
5017 {
5018 process_stratum_target *target = inf->process_target ();
5019 if (target == nullptr
5020 || !target->is_async_p ()
5021 || !target->threads_executing)
5022 continue;
5023
5025
5026 wait_one_event event;
5027 event.target = target;
5028 event.ptid = poll_one_curr_target (&event.ws);
5029
5030 if (event.ws.kind () == TARGET_WAITKIND_NO_RESUMED)
5031 {
5032 /* If nothing is resumed, remove the target from the
5033 event loop. */
5034 target_async (false);
5035 }
5036 else if (event.ws.kind () != TARGET_WAITKIND_IGNORE)
5037 return event;
5038 }
5039
5040 /* Block waiting for some event. */
5041
5042 fd_set readfds;
5043 int nfds = 0;
5044
5045 FD_ZERO (&readfds);
5046
5047 for (inferior *inf : all_inferiors ())
5048 {
5049 process_stratum_target *target = inf->process_target ();
5050 if (target == nullptr
5051 || !target->is_async_p ()
5052 || !target->threads_executing)
5053 continue;
5054
5055 int fd = target->async_wait_fd ();
5056 FD_SET (fd, &readfds);
5057 if (nfds <= fd)
5058 nfds = fd + 1;
5059 }
5060
5061 if (nfds == 0)
5062 {
5063 /* No waitable targets left. All must be stopped. */
5065 ws.set_no_resumed ();
5066 return {nullptr, minus_one_ptid, std::move (ws)};
5067 }
5068
5069 QUIT;
5070
5071 int numfds = interruptible_select (nfds, &readfds, 0, nullptr, 0);
5072 if (numfds < 0)
5073 {
5074 if (errno == EINTR)
5075 continue;
5076 else
5077 perror_with_name ("interruptible_select");
5078 }
5079 }
5080}
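/* Typical consumer loop (sketch, as in stop_all_threads below):

     for (int i = 0; i < waits_needed; i++)
       {
	 wait_one_event event = wait_one ();
	 if (handle_one (event))
	   break;
       }
*/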
5081
5082/* Save the thread's event and stop reason to process it later. */
5083
5084static void
5086{
5087 infrun_debug_printf ("saving status %s for %s",
5088 ws.to_string ().c_str (),
5089 tp->ptid.to_string ().c_str ());
5090
5091 /* Record for later. */
5092 tp->set_pending_waitstatus (ws);
5093
5094 if (ws.kind () == TARGET_WAITKIND_STOPPED
5095 && ws.sig () == GDB_SIGNAL_TRAP)
5096 {
5097 struct regcache *regcache = get_thread_regcache (tp);
5098 const address_space *aspace = regcache->aspace ();
5099 CORE_ADDR pc = regcache_read_pc (regcache);
5100
5102
5103 scoped_restore_current_thread restore_thread;
5104 switch_to_thread (tp);
5105
5121 && currently_stepping (tp))
5123 }
5124}
5125
5126/* Mark the non-executing threads accordingly. In all-stop, all
5127 threads of all processes are stopped when we get any event
5128 reported. In non-stop mode, only the event thread stops. */
5129
5130static void
5132 ptid_t event_ptid,
5133 const target_waitstatus &ws)
5134{
5135 ptid_t mark_ptid;
5136
5137 if (!target_is_non_stop_p ())
5138 mark_ptid = minus_one_ptid;
5139 else if (ws.kind () == TARGET_WAITKIND_SIGNALLED
5140 || ws.kind () == TARGET_WAITKIND_EXITED)
5141 {
5142 /* If we're handling a process exit in non-stop mode, even
5143 though threads haven't been deleted yet, one would think
5144 that there is nothing to do, as threads of the dead process
5145 will be soon deleted, and threads of any other process were
5146 left running. However, on some targets, threads survive a
5147 process exit event. E.g., for the "checkpoint" command,
5148 when the current checkpoint/fork exits, linux-fork.c
5149 automatically switches to another fork from within
5150 target_mourn_inferior, by associating the same
5151 inferior/thread to another fork. We haven't mourned yet at
5152 this point, but we must mark any threads left in the
5153 process as not-executing so that finish_thread_state marks
5154 them stopped (in the user's perspective) if/when we present
5155 the stop to the user. */
5156 mark_ptid = ptid_t (event_ptid.pid ());
5157 }
5158 else
5159 mark_ptid = event_ptid;
5160
5161 set_executing (target, mark_ptid, false);
5162
5163 /* Likewise the resumed flag. */
5164 set_resumed (target, mark_ptid, false);
5165}
5166
5167/* Handle one event after stopping threads. If the eventing thread
5168 reports back any interesting event, we leave it pending. If the
5169 eventing thread was in the middle of a displaced step, we
5170 cancel/finish it, and unless the thread's inferior is being
5171 detached, put the thread back in the step-over chain. Returns true
5172 if there are no resumed threads left in the target (thus there's no
5173 point in waiting further), false otherwise. */
5174
5175static bool
5177{
5179 ("%s %s", event.ws.to_string ().c_str (),
5180 event.ptid.to_string ().c_str ());
5181
5182 if (event.ws.kind () == TARGET_WAITKIND_NO_RESUMED)
5183 {
5184 /* All resumed threads exited. */
5185 return true;
5186 }
5187 else if (event.ws.kind () == TARGET_WAITKIND_THREAD_EXITED
5188 || event.ws.kind () == TARGET_WAITKIND_EXITED
5189 || event.ws.kind () == TARGET_WAITKIND_SIGNALLED)
5190 {
5191 /* One thread/process exited/signalled. */
5192
5193 thread_info *t = nullptr;
5194
5195 /* The target may have reported just a pid. If so, try
5196 the first non-exited thread. */
5197 if (event.ptid.is_pid ())
5198 {
5199 int pid = event.ptid.pid ();
5201 for (thread_info *tp : inf->non_exited_threads ())
5202 {
5203 t = tp;
5204 break;
5205 }
5206
5207 /* If there is no available thread, the event would
5208 have to be appended to a per-inferior event list,
5209 which does not exist (and if it did, we'd have
5210 to adjust run control command to be able to
5211 resume such an inferior). We assert here instead
5212 of going into an infinite loop. */
5213 gdb_assert (t != nullptr);
5214
5216 ("using %s", t->ptid.to_string ().c_str ());
5217 }
5218 else
5219 {
5220 t = event.target->find_thread (event.ptid);
5221 /* Check if this is the first time we see this thread.
5222 Don't bother adding if it individually exited. */
5223 if (t == nullptr
5224 && event.ws.kind () != TARGET_WAITKIND_THREAD_EXITED)
5225 t = add_thread (event.target, event.ptid);
5226 }
5227
5228 if (t != nullptr)
5229 {
5230 /* Set the threads as non-executing to avoid
5231 another stop attempt on them. */
5234 event.ws);
5235 save_waitstatus (t, event.ws);
5236 t->stop_requested = false;
5237 }
5238 }
5239 else
5240 {
5241 thread_info *t = event.target->find_thread (event.ptid);
5242 if (t == nullptr)
5243 t = add_thread (event.target, event.ptid);
5244
5245 t->stop_requested = 0;
5246 t->set_executing (false);
5247 t->set_resumed (false);
5248 t->control.may_range_step = 0;
5249
5250 /* This may be the first time we see the inferior report
5251 a stop. */
5252 if (t->inf->needs_setup)
5253 {
5255 setup_inferior (0);
5256 }
5257
5258 if (event.ws.kind () == TARGET_WAITKIND_STOPPED
5259 && event.ws.sig () == GDB_SIGNAL_0)
5260 {
5261 /* We caught the event that we intended to catch, so
5262 there's no event to save as pending. */
5263
5264 if (displaced_step_finish (t, event.ws)
5266 {
5267 /* Add it back to the step-over queue. */
5269 ("displaced-step of %s canceled",
5270 t->ptid.to_string ().c_str ());
5271
5272 t->control.trap_expected = 0;
5273 if (!t->inf->detaching)
5275 }
5276 }
5277 else
5278 {
5279 struct regcache *regcache;
5280
5282 ("target_wait %s, saving status for %s",
5283 event.ws.to_string ().c_str (),
5284 t->ptid.to_string ().c_str ());
5285
5286 /* Record for later. */
5287 save_waitstatus (t, event.ws);
5288
5289 if (displaced_step_finish (t, event.ws)
5291 {
5292 /* Add it back to the step-over queue. */
5293 t->control.trap_expected = 0;
5294 if (!t->inf->detaching)
5296 }
5297
5300
5301 infrun_debug_printf ("saved stop_pc=%s for %s "
5302 "(currently_stepping=%d)",
5303 paddress (target_gdbarch (), t->stop_pc ()),
5304 t->ptid.to_string ().c_str (),
5305 currently_stepping (t));
5306 }
5307 }
5308
5309 return false;
5310}
5311
5312/* See infrun.h. */
5313
5314void
5315stop_all_threads (const char *reason, inferior *inf)
5316{
5317 /* We may need multiple passes to discover all threads. */
5318 int pass;
5319 int iterations = 0;
5320
5321 gdb_assert (exists_non_stop_target ());
5322
5323 INFRUN_SCOPED_DEBUG_START_END ("reason=%s, inf=%d", reason,
5324 inf != nullptr ? inf->num : -1);
5325
5326 infrun_debug_show_threads ("non-exited threads",
5328
5329 scoped_restore_current_thread restore_thread;
5330
5331 /* Enable thread events on relevant targets. */
5332 for (auto *target : all_non_exited_process_targets ())
5333 {
5334 if (inf != nullptr && inf->process_target () != target)
5335 continue;
5336
5338 target_thread_events (true);
5339 }
5340
5341 SCOPE_EXIT
5342 {
5343 /* Disable thread events on relevant targets. */
5344 for (auto *target : all_non_exited_process_targets ())
5345 {
5346 if (inf != nullptr && inf->process_target () != target)
5347 continue;
5348
5350 target_thread_events (false);
5351 }
5352
5353 /* Use debug_prefixed_printf directly to get a meaningful function
5354 name. */
5355 if (debug_infrun)
5356 debug_prefixed_printf ("infrun", "stop_all_threads", "done");
5357 };
5358
5359 /* Request threads to stop, and then wait for the stops. Because
5360 threads we already know about can spawn more threads while we're
5361 trying to stop them, and we only learn about new threads when we
5362 update the thread list, do this in a loop, and keep iterating
5363 until two passes find no threads that need to be stopped. */
5364 for (pass = 0; pass < 2; pass++, iterations++)
5365 {
5366 infrun_debug_printf ("pass=%d, iterations=%d", pass, iterations);
5367 while (1)
5368 {
5369 int waits_needed = 0;
5370
5371 for (auto *target : all_non_exited_process_targets ())
5372 {
5373 if (inf != nullptr && inf->process_target () != target)
5374 continue;
5375
5378 }
5379
5380 /* Go through all threads looking for threads that we need
5381 to tell the target to stop. */
5383 {
5384 if (inf != nullptr && t->inf != inf)
5385 continue;
5386
5387 /* For a single-target setting with an all-stop target,
5388 we would not even arrive here. For a multi-target
5389 setting, until GDB is able to handle a mixture of
5390 all-stop and non-stop targets, simply skip all-stop
5391 targets' threads. This should be fine due to the
5392 protection of 'check_multi_target_resumption'. */
5393
5395 if (!target_is_non_stop_p ())
5396 continue;
5397
5398 if (t->executing ())
5399 {
5400 /* If already stopping, don't request a stop again.
5401 We just haven't seen the notification yet. */
5402 if (!t->stop_requested)
5403 {
5404 infrun_debug_printf (" %s executing, need stop",
5405 t->ptid.to_string ().c_str ());
5406 target_stop (t->ptid);
5407 t->stop_requested = 1;
5408 }
5409 else
5410 {
5411 infrun_debug_printf (" %s executing, already stopping",
5412 t->ptid.to_string ().c_str ());
5413 }
5414
5415 if (t->stop_requested)
5416 waits_needed++;
5417 }
5418 else
5419 {
5420 infrun_debug_printf (" %s not executing",
5421 t->ptid.to_string ().c_str ());
5422
5423 /* The thread may be not executing, but still be
5424 resumed with a pending status to process. */
5425 t->set_resumed (false);
5426 }
5427 }
5428
5429 if (waits_needed == 0)
5430 break;
5431
5432 /* If we find new threads on the second iteration, restart
5433 over. We want to see two iterations in a row with all
5434 threads stopped. */
5435 if (pass > 0)
5436 pass = -1;
5437
5438 for (int i = 0; i < waits_needed; i++)
5439 {
5440 wait_one_event event = wait_one ();
5441 if (handle_one (event))
5442 break;
5443 }
5444 }
5445 }
5446}
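/* Example call site (sketch): when presenting a stop to the user in
   all-stop mode while a non-stop target exists, e.g.:

     if (exists_non_stop_target ())
       stop_all_threads ("presenting stop to user in all-stop");

   The REASON string only feeds the debug output above.  */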
5447
5448/* Handle a TARGET_WAITKIND_NO_RESUMED event. */
5449
5450static bool
5452{
5453 if (target_can_async_p ())
5454 {
5455 bool any_sync = false;
5456
5457 for (ui *ui : all_uis ())
5458 {
5460 {
5461 any_sync = true;
5462 break;
5463 }
5464 }
5465 if (!any_sync)
5466 {
5467 /* There were no unwaited-for children left in the target, but,
5468 we're not synchronously waiting for events either. Just
5469 ignore. */
5470
5471 infrun_debug_printf ("TARGET_WAITKIND_NO_RESUMED (ignoring: bg)");
5472 prepare_to_wait (ecs);
5473 return true;
5474 }
5475 }
5476
5477 /* Otherwise, if we were running a synchronous execution command, we
5478 may need to cancel it and give the user back the terminal.
5479
5480 In non-stop mode, the target can't tell whether we've already
5481 consumed previous stop events, so it can end up sending us a
5482 no-resumed event like so:
5483
5484 #0 - thread 1 is left stopped
5485
5486 #1 - thread 2 is resumed and hits breakpoint
5487 -> TARGET_WAITKIND_STOPPED
5488
5489 #2 - thread 3 is resumed and exits
5490 this is the last resumed thread, so
5491 -> TARGET_WAITKIND_NO_RESUMED
5492
5493 #3 - gdb processes stop for thread 2 and decides to re-resume
5494 it.
5495
5496 #4 - gdb processes the TARGET_WAITKIND_NO_RESUMED event.
5497 thread 2 is now resumed, so the event should be ignored.
5498
5499 IOW, if the stop for thread 2 doesn't end a foreground command,
5500 then we need to ignore the following TARGET_WAITKIND_NO_RESUMED
5501 event. But it could be that the event meant that thread 2 itself
5502 (or whatever other thread was the last resumed thread) exited.
5503
5504 To address this we refresh the thread list and check whether we
5505 have resumed threads _now_. In the example above, this removes
5506 thread 3 from the thread list. If thread 2 was re-resumed, we
5507 ignore this event. If we find no thread resumed, then we cancel
5508 the synchronous command and show "no unwaited-for " to the
5509 user. */
5510
5511 inferior *curr_inf = current_inferior ();
5512
5513 scoped_restore_current_thread restore_thread;
5515
5516 /* If:
5517
5518 - the current target has no thread executing, and
5519 - the current inferior is native, and
5520 - the current inferior is the one which has the terminal, and
5521 - we did nothing,
5522
5523 then a Ctrl-C from this point on would remain stuck in the
5524 kernel, until a thread resumes and dequeues it. That would
5525 result in the GDB CLI not reacting to Ctrl-C, not able to
5526 interrupt the program. To address this, if the current inferior
5527 no longer has any thread executing, we give the terminal to some
5528 other inferior that has at least one thread executing. */
5529 bool swap_terminal = true;
5530
5531 /* Whether to ignore this TARGET_WAITKIND_NO_RESUMED event, or
5532 whether to report it to the user. */
5533 bool ignore_event = false;
5534
5535 for (thread_info *thread : all_non_exited_threads ())
5536 {
5537 if (swap_terminal && thread->executing ())
5538 {
5539 if (thread->inf != curr_inf)
5540 {
5542
5543 switch_to_thread (thread);
5545 }
5546 swap_terminal = false;
5547 }
5548
5549 if (!ignore_event && thread->resumed ())
5550 {
5551 /* Either there were no unwaited-for children left in the
5552 target at some point, but there are now, or some target
5553 other than the eventing one has unwaited-for children
5554 left. Just ignore. */
5555 infrun_debug_printf ("TARGET_WAITKIND_NO_RESUMED "
5556 "(ignoring: found resumed)");
5557
5558 ignore_event = true;
5559 }
5560
5561 if (ignore_event && !swap_terminal)
5562 break;
5563 }
5564
5565 if (ignore_event)
5566 {
5568 prepare_to_wait (ecs);
5569 return true;
5570 }
5571
5572 /* Go ahead and report the event. */
5573 return false;
5574}
5575
5576/* Given an execution control state that has been freshly filled in by
5577 an event from the inferior, figure out what it means and take
5578 appropriate action.
5579
5580 The alternatives are:
5581
5582 1) stop_waiting and return; to really stop and return to the
5583 debugger.
5584
5585 2) keep_going and return; to wait for the next event (set
5586 ecs->event_thread->stepping_over_breakpoint to 1 to single step
5587 once). */
5588
5589static void
5591{
5592 /* Make sure that all temporary struct value objects that were
5593 created during the handling of the event get deleted at the
5594 end. */
5595 scoped_value_mark free_values;
5596
5597 infrun_debug_printf ("%s", ecs->ws.to_string ().c_str ());
5598
5599 if (ecs->ws.kind () == TARGET_WAITKIND_IGNORE)
5600 {
5601 /* We had an event in the inferior, but we are not interested in
5602 handling it at this level. The lower layers have already
5603 done what needs to be done, if anything.
5604
5605 One of the possible circumstances for this is when the
5606 inferior produces output for the console. The inferior has
5607 not stopped, and we are ignoring the event. Another possible
5608 circumstance is any event which the lower level knows will be
5609 reported multiple times without an intervening resume. */
5610 prepare_to_wait (ecs);
5611 return;
5612 }
5613
5614 if (ecs->ws.kind () == TARGET_WAITKIND_THREAD_EXITED)
5615 {
5616 prepare_to_wait (ecs);
5617 return;
5618 }
5619
5620 if (ecs->ws.kind () == TARGET_WAITKIND_NO_RESUMED
5621 && handle_no_resumed (ecs))
5622 return;
5623
5624 /* Cache the last target/ptid/waitstatus. */
5625 set_last_target_status (ecs->target, ecs->ptid, ecs->ws);
5626
5627 /* Always clear state belonging to the previous time we stopped. */
5629
5630 if (ecs->ws.kind () == TARGET_WAITKIND_NO_RESUMED)
5631 {
5632 /* No unwaited-for children left. IOW, all resumed children
5633 have exited. */
5634 stop_print_frame = false;
5635 stop_waiting (ecs);
5636 return;
5637 }
5638
5639 if (ecs->ws.kind () != TARGET_WAITKIND_EXITED
5640 && ecs->ws.kind () != TARGET_WAITKIND_SIGNALLED)
5641 {
5642 ecs->event_thread = ecs->target->find_thread (ecs->ptid);
5643 /* If it's a new thread, add it to the thread database. */
5644 if (ecs->event_thread == nullptr)
5645 ecs->event_thread = add_thread (ecs->target, ecs->ptid);
5646
5647 /* Disable range stepping. If the next step request could use a
5648 range, this will end up being re-enabled then. */
5650 }
5651
5652 /* Dependent on valid ECS->EVENT_THREAD. */
5654
5655 /* Dependent on the current PC value modified by adjust_pc_after_break. */
5657
5659
5660 /* First, distinguish signals caused by the debugger from signals
5661 that have to do with the program's own actions. Note that
5662 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
5663 on the operating system version. Here we detect when a SIGILL or
5664 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
5665 something similar for SIGSEGV, since a SIGSEGV will be generated
5666 when we're trying to execute a breakpoint instruction on a
5667 non-executable stack. This happens for call dummy breakpoints
5668 for architectures like SPARC that place call dummies on the
5669 stack. */
5670 if (ecs->ws.kind () == TARGET_WAITKIND_STOPPED
5671 && (ecs->ws.sig () == GDB_SIGNAL_ILL
5672 || ecs->ws.sig () == GDB_SIGNAL_SEGV
5673 || ecs->ws.sig () == GDB_SIGNAL_EMT))
5674 {
5676
5679 {
5680 infrun_debug_printf ("Treating signal as SIGTRAP");
5681 ecs->ws.set_stopped (GDB_SIGNAL_TRAP);
5682 }
5683 }
5684
5685 mark_non_executing_threads (ecs->target, ecs->ptid, ecs->ws);
5686
5687 switch (ecs->ws.kind ())
5688 {
5690 {
5691 context_switch (ecs);
5692 /* Ignore gracefully during startup of the inferior, as it might
5693 be the shell which has just loaded some objects; otherwise
5694 add the symbols for the newly loaded objects. Also ignore at
5695 the beginning of an attach or remote session; we will query
5696 the full list of libraries once the connection is
5697 established. */
5698
5699 stop_kind stop_soon = get_inferior_stop_soon (ecs);
5700 if (stop_soon == NO_STOP_QUIETLY)
5701 {
5702 struct regcache *regcache;
5703
5705
5707
5711 ecs->event_thread->stop_pc (),
5712 ecs->event_thread, ecs->ws);
5713
5714 if (handle_stop_requested (ecs))
5715 return;
5716
5718 {
5719 /* A catchpoint triggered. */
5721 return;
5722 }
5723
5724 /* If requested, stop when the dynamic linker notifies
5725 gdb of events. This allows the user to get control
5726 and place breakpoints in initializer routines for
5727 dynamically loaded objects (among other things). */
5728 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
5730 {
5731 /* Make sure we print "Stopped due to solib-event" in
5732 normal_stop. */
5733 stop_print_frame = true;
5734
5735 stop_waiting (ecs);
5736 return;
5737 }
5738 }
5739
5740 /* If we are skipping through a shell, or through shared library
5741 loading that we aren't interested in, resume the program. If
5742 we're running the program normally, also resume. */
5743 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
5744 {
5745 /* Loading of shared libraries might have changed breakpoint
5746 addresses. Make sure new breakpoints are inserted. */
5747 if (stop_soon == NO_STOP_QUIETLY)
5749 resume (GDB_SIGNAL_0);
5750 prepare_to_wait (ecs);
5751 return;
5752 }
5753
5754 /* But stop if we're attaching or setting up a remote
5755 connection. */
5756 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
5757 || stop_soon == STOP_QUIETLY_REMOTE)
5758 {
5759 infrun_debug_printf ("quietly stopped");
5760 stop_waiting (ecs);
5761 return;
5762 }
5763
5764 internal_error (_("unhandled stop_soon: %d"), (int) stop_soon);
5765 }
5766
5768 if (handle_stop_requested (ecs))
5769 return;
5770 context_switch (ecs);
5771 resume (GDB_SIGNAL_0);
5772 prepare_to_wait (ecs);
5773 return;
5774
5776 if (handle_stop_requested (ecs))
5777 return;
5778 context_switch (ecs);
5780 keep_going (ecs);
5781 return;
5782
5785 {
5786 /* Depending on the system, ecs->ptid may point to a thread or
5787 to a process. On some targets, target_mourn_inferior may
5788 need to have access to the just-exited thread. That is the
5789 case of GNU/Linux's "checkpoint" support, for example.
5790 Call the switch_to_xxx routine as appropriate. */
5791 thread_info *thr = ecs->target->find_thread (ecs->ptid);
5792 if (thr != nullptr)
5793 switch_to_thread (thr);
5794 else
5795 {
5796 inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
5798 }
5799 }
5801 target_terminal::ours (); /* Must do this before mourn anyway. */
5802
5803 /* Clear any previous state of convenience variables. */
5805
5806 if (ecs->ws.kind () == TARGET_WAITKIND_EXITED)
5807 {
5808 /* Record the exit code in the convenience variable $_exitcode, so
5809 that the user can inspect this again later. */
5811 (LONGEST) ecs->ws.exit_status ());
5812
5813 /* Also record this in the inferior itself. */
5815 current_inferior ()->exit_code = (LONGEST) ecs->ws.exit_status ();
5816
5817 /* Support the --return-child-result option. */
5819
5821 }
5822 else
5823 {
5825
5827 {
5828 /* Set the value of the internal variable $_exitsignal,
5829 which holds the signal uncaught by the inferior. */
5832 ecs->ws.sig ()));
5833 }
5834 else
5835 {
5836 /* We don't have access to the target's method used for
5837 converting between signal numbers (GDB's internal
5838 representation <-> target's representation).
5839 Therefore, we cannot do a good job at displaying this
5840 information to the user. It's better to just warn
5841 her about it (if infrun debugging is enabled), and
5842 give up. */
5843 infrun_debug_printf ("Cannot fill $_exitsignal with the correct "
5844 "signal number.");
5845 }
5846
5848 }
5849
5852 stop_print_frame = false;
5853 stop_waiting (ecs);
5854 return;
5855
5858 /* Check whether the inferior is displaced stepping. */
5859 {
5861 struct gdbarch *gdbarch = regcache->arch ();
5862 inferior *parent_inf = find_inferior_ptid (ecs->target, ecs->ptid);
5863
5864 /* If this is a fork (child gets its own address space copy)
5865 and some displaced step buffers were in use at the time of
5866 the fork, restore the displaced step buffer bytes in the
5867 child process.
5868
5869 Architectures which support displaced stepping and fork
5870 events must supply an implementation of
5871 gdbarch_displaced_step_restore_all_in_ptid. This is not
5872 enforced during gdbarch validation to support architectures
5873 which support displaced stepping but not forks. */
5874 if (ecs->ws.kind () == TARGET_WAITKIND_FORKED
5877 (gdbarch, parent_inf, ecs->ws.child_ptid ());
5878
5879 /* If displaced stepping is supported, and thread ecs->ptid is
5880 currently displaced stepping, clean up after it. */
5882 {
5883 struct regcache *child_regcache;
5884 CORE_ADDR parent_pc;
5885
5886 /* GDB has got TARGET_WAITKIND_FORKED or TARGET_WAITKIND_VFORKED,
5887 indicating that the displaced stepping of the syscall instruction
5888 has been done. Perform cleanup for the parent process here. Note
5889 that this operation also cleans up the child process for vfork,
5890 because their pages are shared. */
5892 /* Start a new step-over in another thread if there's one
5893 that needs it. */
5894 start_step_over ();
5895
5896 /* Since the vfork/fork syscall instruction was executed in the scratchpad,
5897 the child's PC is also within the scratchpad. Set the child's PC
5898 to the parent's PC value, which has already been fixed up.
5899 FIXME: we use the parent's aspace here, although we're touching
5900 the child, because the child hasn't been added to the inferior
5901 list yet at this point. */
5902
5903 child_regcache
5904 = get_thread_arch_aspace_regcache (parent_inf,
5905 ecs->ws.child_ptid (),
5906 gdbarch,
5907 parent_inf->aspace);
5908 /* Read PC value of parent process. */
5909 parent_pc = regcache_read_pc (regcache);
5910
5911 displaced_debug_printf ("write child pc from %s to %s",
5913 regcache_read_pc (child_regcache)),
5914 paddress (gdbarch, parent_pc));
5915
5916 regcache_write_pc (child_regcache, parent_pc);
5917 }
5918 }
5919
5920 context_switch (ecs);
5921
5922 /* Immediately detach breakpoints from the child before there's
5923 any chance of letting the user delete breakpoints from the
5924 breakpoint lists. If we don't do this early, it's easy to
5925 leave left-over traps in the child, viz.: "break foo; catch
5926 fork; c; <fork>; del; c; <child calls foo>". We only follow
5927 the fork on the last `continue', and by that time the
5928 breakpoint at "foo" is long gone from the breakpoint table.
5929 If we vforked, then we don't need to unpatch here, since both
5930 parent and child are sharing the same memory pages; we'll
5931 need to unpatch at follow/detach time instead to be certain
5932 that new breakpoints added between catchpoint hit time and
5933 vfork follow are detached. */
5934 if (ecs->ws.kind () != TARGET_WAITKIND_VFORKED)
5935 {
5936 /* This won't actually modify the breakpoint list, but will
5937 physically remove the breakpoints from the child. */
5939 }
5940
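/* Illustrative aside, not part of infrun.c: a minimal inferior for the
   scenario the comment above describes ("break foo; catch fork; c; <fork>;
   del; c; <child calls foo>").  The names below are invented for the
   example.  Detaching breakpoints from the child early is what keeps the
   child from hitting a stale trap when it later calls foo.  */
#include <stdio.h>
#include <unistd.h>

static void
foo (void)                      /* "break foo" plants a trap here.  */
{
  printf ("in foo, pid %d\n", (int) getpid ());
}

int
main (void)
{
  foo ();                       /* Parent hits the breakpoint first.  */
  if (fork () == 0)             /* "catch fork" stops GDB here.  */
    foo ();                     /* Child calls foo after "del; c".  */
  return 0;
}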
5942
5943 /* In case the event is caught by a catchpoint, remember that
5944 the event is to be followed at the next resume of the thread,
5945 and not immediately. */
5946 ecs->event_thread->pending_follow = ecs->ws;
5947
5950
5953 ecs->event_thread->stop_pc (),
5954 ecs->event_thread, ecs->ws);
5955
5956 if (handle_stop_requested (ecs))
5957 return;
5958
5959 /* If no catchpoint triggered for this, then keep going. Note
5960 that we're interested in knowing the bpstat actually causes a
5961 stop, not just if it may explain the signal. Software
5962 watchpoints, for example, always appear in the bpstat. */
5964 {
5965 bool follow_child
5967
5968 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
5969
5971 = ecs->event_thread->inf->process_target ();
5972
5973 bool should_resume = follow_fork ();
5974
5975 /* Note that one of these may be an invalid pointer,
5976 depending on detach_fork. */
5977 thread_info *parent = ecs->event_thread;
5978 thread_info *child = targ->find_thread (ecs->ws.child_ptid ());
5979
5980 /* At this point, the parent is marked running, and the
5981 child is marked stopped. */
5982
5983 /* If not resuming the parent, mark it stopped. */
5984 if (follow_child && !detach_fork && !non_stop && !sched_multi)
5985 parent->set_running (false);
5986
5987 /* If resuming the child, mark it running. */
5988 if (follow_child || (!detach_fork && (non_stop || sched_multi)))
5989 child->set_running (true);
5990
5991 /* In non-stop mode, also resume the other branch. */
5992 if (!detach_fork && (non_stop
5994 {
5995 if (follow_child)
5996 switch_to_thread (parent);
5997 else
5998 switch_to_thread (child);
5999
6000 ecs->event_thread = inferior_thread ();
6001 ecs->ptid = inferior_ptid;
6002 keep_going (ecs);
6003 }
6004
6005 if (follow_child)
6006 switch_to_thread (child);
6007 else
6008 switch_to_thread (parent);
6009
6010 ecs->event_thread = inferior_thread ();
6011 ecs->ptid = inferior_ptid;
6012
6013 if (should_resume)
6014 {
6015 /* Never call switch_back_to_stepped_thread if we are waiting for
6016 vfork-done (waiting for an external vfork child to exec or
6017 exit). We will resume only the vforking thread for the purpose
6018 of collecting the vfork-done event, and we will restart any
6019 step once the critical shared address space window is done. */
6020 if ((!follow_child
6021 && detach_fork
6022 && parent->inf->thread_waiting_for_vfork_done != nullptr)
6024 keep_going (ecs);
6025 }
6026 else
6027 stop_waiting (ecs);
6028 return;
6029 }
6031 return;
6032
6034 /* Done with the shared memory region. Re-insert breakpoints in
6035 the parent, and keep going. */
6036
6037 context_switch (ecs);
6038
6040 gdb_assert (inferior_thread () == ecs->event_thread);
6041
6042 if (handle_stop_requested (ecs))
6043 return;
6044
6046 {
6047 gdb_assert (inferior_thread () == ecs->event_thread);
6048 /* This also takes care of reinserting breakpoints in the
6049 previously locked inferior. */
6050 keep_going (ecs);
6051 }
6052 return;
6053
6055
6056 /* Note we can't read registers yet (the stop_pc), because we
6057 don't yet know the inferior's post-exec architecture.
6058 'stop_pc' is explicitly read below instead. */
6060
6061 /* Do whatever is necessary to the parent branch of the vfork. */
6063
6064 /* This causes the eventpoints and symbol table to be reset.
6065 Must do this now, before trying to determine whether to
6066 stop. */
6068
6069 /* In follow_exec we may have deleted the original thread and
6070 created a new one. Make sure that the event thread is the
6071 execd thread for that case (this is a nop otherwise). */
6072 ecs->event_thread = inferior_thread ();
6073
6076
6079 ecs->event_thread->stop_pc (),
6080 ecs->event_thread, ecs->ws);
6081
6082 if (handle_stop_requested (ecs))
6083 return;
6084
6085 /* If no catchpoint triggered for this, then keep going. */
6087 {
6088 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
6089 keep_going (ecs);
6090 return;
6091 }
6093 return;
6094
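/* Illustrative aside, not part of infrun.c: an inferior that execs.  The
   TARGET_WAITKIND_EXECD handling above has to reset the symbol table and
   re-fetch the event thread because, after the execve, the process is
   running a completely different program.  The program and path below are
   invented for the example.  */
#include <unistd.h>

int
main (void)
{
  char *const args[] = { (char *) "/bin/echo", (char *) "exec'd", (char *) 0 };
  execv ("/bin/echo", args);    /* "catch exec" stops at this event.  */
  return 1;                     /* Reached only if the exec failed.  */
}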
6095 /* Be careful not to try to gather much state about a thread
6096 that's in a syscall. It's frequently a losing proposition. */
6098 /* Getting the current syscall number. */
6099 if (handle_syscall_event (ecs) == 0)
6101 return;
6102
6103 /* Before examining the threads further, step this thread to
6104 get it entirely out of the syscall. (We get notice of the
6105 event when the thread is just on the verge of exiting a
6106 syscall. Stepping one instruction seems to get it back
6107 into user code.) */
6109 if (handle_syscall_event (ecs) == 0)
6111 return;
6112
6114 handle_signal_stop (ecs);
6115 return;
6116
6118 /* Reverse execution: target ran out of history info. */
6119
6120 /* Switch to the stopped thread. */
6121 context_switch (ecs);
6122 infrun_debug_printf ("stopped");
6123
6127
6128 if (handle_stop_requested (ecs))
6129 return;
6130
6132 stop_waiting (ecs);
6133 return;
6134 }
6135}
6136
6137/* Restart threads back to what they were trying to do when we
6138 paused them (because of an in-line step-over or vfork, for example).
6139 The EVENT_THREAD thread is ignored (not restarted).
6140
6141 If INF is non-nullptr, only resume threads from INF. */
6142
6143static void
6144restart_threads (struct thread_info *event_thread, inferior *inf)
6145{
6146 INFRUN_SCOPED_DEBUG_START_END ("event_thread=%s, inf=%d",
6147 event_thread->ptid.to_string ().c_str (),
6148 inf != nullptr ? inf->num : -1);
6149
6150 gdb_assert (!step_over_info_valid_p ());
6151
6152 /* In case the instruction just stepped spawned a new thread. */
6154
6155 for (thread_info *tp : all_non_exited_threads ())
6156 {
6157 if (inf != nullptr && tp->inf != inf)
6158 continue;
6159
6160 if (tp->inf->detaching)
6161 {
6162 infrun_debug_printf ("restart threads: [%s] inferior detaching",
6163 tp->ptid.to_string ().c_str ());
6164 continue;
6165 }
6166
6168
6169 if (tp == event_thread)
6170 {
6171 infrun_debug_printf ("restart threads: [%s] is event thread",
6172 tp->ptid.to_string ().c_str ());
6173 continue;
6174 }
6175
6176 if (!(tp->state == THREAD_RUNNING || tp->control.in_infcall))
6177 {
6178 infrun_debug_printf ("restart threads: [%s] not meant to be running",
6179 tp->ptid.to_string ().c_str ());
6180 continue;
6181 }
6182
6183 if (tp->resumed ())
6184 {
6185 infrun_debug_printf ("restart threads: [%s] resumed",
6186 tp->ptid.to_string ().c_str ());
6187 gdb_assert (tp->executing () || tp->has_pending_waitstatus ());
6188 continue;
6189 }
6190
6192 {
6193 infrun_debug_printf ("restart threads: [%s] needs step-over",
6194 tp->ptid.to_string ().c_str ());
6195 gdb_assert (!tp->resumed ());
6196 continue;
6197 }
6198
6199
6200 if (tp->has_pending_waitstatus ())
6201 {
6202 infrun_debug_printf ("restart threads: [%s] has pending status",
6203 tp->ptid.to_string ().c_str ());
6204 tp->set_resumed (true);
6205 continue;
6206 }
6207
6208 gdb_assert (!tp->stop_requested);
6209
6210 /* If some thread needs to start a step-over at this point, it
6211 should still be in the step-over queue, and thus skipped
6212 above. */
6214 {
6215 internal_error ("thread [%s] needs a step-over, but not in "
6216 "step-over queue\n",
6217 tp->ptid.to_string ().c_str ());
6218 }
6219
6220 if (currently_stepping (tp))
6221 {
6222 infrun_debug_printf ("restart threads: [%s] was stepping",
6223 tp->ptid.to_string ().c_str ());
6225 }
6226 else
6227 {
6228 infrun_debug_printf ("restart threads: [%s] continuing",
6229 tp->ptid.to_string ().c_str ());
6230 execution_control_state ecs (tp);
6231 switch_to_thread (tp);
6233 }
6234 }
6235}
6236
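/* Illustrative sketch, not part of infrun.c: the calling convention of
   restart_threads above, in hedged, simplified form.  A caller whose
   in-line step-over for EVENT_THREAD has just finished, and which no
   longer needs the other threads held, would let them go again while
   leaving the event thread to its own event handling; a null inferior
   means "consider threads of every inferior".  */
static void
example_unpause_other_threads (struct thread_info *event_thread)
{
  /* Everything except EVENT_THREAD goes back to whatever it was doing
     before the pause (stepping threads resume their step, etc.).  */
  restart_threads (event_thread, nullptr);
}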
6237/* Callback for iterate_over_threads. Find a resumed thread that has
6238 a pending waitstatus. */
6239
6240static int
6242 void *arg)
6243{
6244 return tp->resumed () && tp->has_pending_waitstatus ();
6245}
6246
6247/* Called when we get an event that may finish an in-line or
6248 out-of-line (displaced stepping) step-over started previously.
6249 Return true if the event is processed and we should go back to the
6250 event loop; false if the caller should continue processing the
6251 event. */
6252
6253static int
6255{
6257
6258 bool had_step_over_info = step_over_info_valid_p ();
6259
6260 if (had_step_over_info)
6261 {
6262 /* If we're stepping over a breakpoint with all threads locked,
6263 then only the thread that was stepped should be reporting
6264 back an event. */
6265 gdb_assert (ecs->event_thread->control.trap_expected);
6266
6268 }
6269
6270 if (!target_is_non_stop_p ())
6271 return 0;
6272
6273 /* Start a new step-over in another thread if there's one that
6274 needs it. */
6275 start_step_over ();
6276
6277 /* If we were stepping over a breakpoint before, and haven't started
6278 a new in-line step-over sequence, then restart all other threads
6279 (except the event thread). We can't do this in all-stop, as then
6280 e.g., we wouldn't be able to issue any other remote packet until
6281 these other threads stop. */
6282 if (had_step_over_info && !step_over_info_valid_p ())
6283 {
6284 struct thread_info *pending;
6285
6286 /* If we only have threads with pending statuses, the restart
6287 below won't restart any thread and so nothing re-inserts the
6288 breakpoint we just stepped over. But we need it inserted
6289 when we later process the pending events, otherwise if
6290 another thread has a pending event for this breakpoint too,
6291 we'd discard its event (because the breakpoint that
6292 originally caused the event was no longer inserted). */
6293 context_switch (ecs);
6295
6297
6298 /* If we have events pending, go through handle_inferior_event
6299 again, picking up a pending event at random. This avoids
6300 thread starvation. */
6301
6302 /* But not if we just stepped over a watchpoint in order to let
6303 the instruction execute so we can evaluate its expression.
6304 The set of watchpoints that triggered is recorded in the
6305 breakpoint objects themselves (see bp->watchpoint_triggered).
6306 If we processed another event first, that other event could
6307 clobber this info. */
6309 return 0;
6310
6312 nullptr);
6313 if (pending != nullptr)
6314 {
6315 struct thread_info *tp = ecs->event_thread;
6316 struct regcache *regcache;
6317
6318 infrun_debug_printf ("found resumed threads with "
6319 "pending events, saving status");
6320
6321 gdb_assert (pending != tp);
6322
6323 /* Record the event thread's event for later. */
6324 save_waitstatus (tp, ecs->ws);
6325 /* This was cleared early, by handle_inferior_event. Set it
6326 so this pending event is considered by
6327 do_target_wait. */
6328 tp->set_resumed (true);
6329
6330 gdb_assert (!tp->executing ());
6331
6334
6335 infrun_debug_printf ("saved stop_pc=%s for %s "
6336 "(currently_stepping=%d)",
6337 paddress (target_gdbarch (), tp->stop_pc ()),
6338 tp->ptid.to_string ().c_str (),
6339 currently_stepping (tp));
6340
6341 /* This in-line step-over finished; clear this so we won't
6342 start a new one. This is what handle_signal_stop would
6343 do, if we returned false. */
6345
6346 /* Wake up the event loop again. */
6348
6349 prepare_to_wait (ecs);
6350 return 1;
6351 }
6352 }
6353
6354 return 0;
6355}
6356
6357/* See infrun.h. */
6358
6359void
6361{
6364}
6365
6366/* See infrun.h. */
6367
6368void
6374
6375/* See infrun.h. */
6376
6377void notify_user_selected_context_changed (user_selected_what selection)
6378{
6381}
6382
6383/* Come here when the program has stopped with a signal. */
6384
6385static void
6387{
6388 frame_info_ptr frame;
6389 struct gdbarch *gdbarch;
6390 int stopped_by_watchpoint;
6391 enum stop_kind stop_soon;
6392 int random_signal;
6393
6394 gdb_assert (ecs->ws.kind () == TARGET_WAITKIND_STOPPED);
6395
6396 ecs->event_thread->set_stop_signal (ecs->ws.sig ());
6397
6398 /* Do we need to clean up the state of a thread that has
6399 completed a displaced single-step? (Doing so usually affects
6400 the PC, so do it here, before we set stop_pc.) */
6401 if (finish_step_over (ecs))
6402 return;
6403
6404 /* If we either finished a single-step or hit a breakpoint, but
6405 the user wanted this thread to be stopped, pretend we got a
6406 SIG0 (generic unsignaled stop). */
6408 && ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP)
6409 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
6410
6413
6414 context_switch (ecs);
6415
6418
6419 if (debug_infrun)
6420 {
6422 struct gdbarch *reg_gdbarch = regcache->arch ();
6423
6425 ("stop_pc=%s", paddress (reg_gdbarch, ecs->event_thread->stop_pc ()));
6427 {
6428 CORE_ADDR addr;
6429
6430 infrun_debug_printf ("stopped by watchpoint");
6431
6432 if (target_stopped_data_address (current_inferior ()->top_target (),
6433 &addr))
6434 infrun_debug_printf ("stopped data address=%s",
6435 paddress (reg_gdbarch, addr));
6436 else
6437 infrun_debug_printf ("(no data address available)");
6438 }
6439 }
6440
6441 /* This originates from start_remote(), start_inferior() and
6442 shared library hook functions. */
6443 stop_soon = get_inferior_stop_soon (ecs);
6444 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
6445 {
6446 infrun_debug_printf ("quietly stopped");
6447 stop_print_frame = true;
6448 stop_waiting (ecs);
6449 return;
6450 }
6451
6452 /* This originates from attach_command(). We need to overwrite
6453 the stop_signal here, because some kernels don't ignore a
6454 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
6455 See more comments in inferior.h. On the other hand, if we
6456 get a non-SIGSTOP, report it to the user - assume the backend
6457 will handle the SIGSTOP if it should show up later.
6458
6459 Also consider that the attach is complete when we see a
6460 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
6461 target extended-remote report it instead of a SIGSTOP
6462 (e.g. gdbserver). We already rely on SIGTRAP being our
6463 signal, so this is no exception.
6464
6465 Also consider that the attach is complete when we see a
6466 GDB_SIGNAL_0. In non-stop mode, GDB will explicitly tell
6467 the target to stop all threads of the inferior, in case the
6468 low level attach operation doesn't stop them implicitly. If
6469 they weren't stopped implicitly, then the stub will report a
6470 GDB_SIGNAL_0, meaning: stopped for no particular reason
6471 other than GDB's request. */
6472 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
6473 && (ecs->event_thread->stop_signal () == GDB_SIGNAL_STOP
6474 || ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
6475 || ecs->event_thread->stop_signal () == GDB_SIGNAL_0))
6476 {
6477 stop_print_frame = true;
6478 stop_waiting (ecs);
6479 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
6480 return;
6481 }
6482
6483 /* At this point, get hold of the now-current thread's frame. */
6484 frame = get_current_frame ();
6485 gdbarch = get_frame_arch (frame);
6486
6487 /* Pull the single step breakpoints out of the target. */
6488 if (ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP)
6489 {
6490 struct regcache *regcache;
6491 CORE_ADDR pc;
6492
6494 const address_space *aspace = regcache->aspace ();
6495
6497
6498 /* However, before doing so, if this single-step breakpoint was
6499 actually for another thread, set this thread up for moving
6500 past it. */
6502 aspace, pc))
6503 {
6505 {
6506 infrun_debug_printf ("[%s] hit another thread's single-step "
6507 "breakpoint",
6508 ecs->ptid.to_string ().c_str ());
6510 }
6511 }
6512 else
6513 {
6514 infrun_debug_printf ("[%s] hit its single-step breakpoint",
6515 ecs->ptid.to_string ().c_str ());
6516 }
6517 }
6519
6520 if (ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
6523 stopped_by_watchpoint = 0;
6524 else
6525 stopped_by_watchpoint = watchpoints_triggered (ecs->ws);
6526
6527 /* If necessary, step over this watchpoint. We'll be back to display
6528 it in a moment. */
6529 if (stopped_by_watchpoint
6532 {
6533 /* At this point, we are stopped at an instruction which has
6534 attempted to write to a piece of memory under control of
6535 a watchpoint. The instruction hasn't actually executed
6536 yet. If we were to evaluate the watchpoint expression
6537 now, we would get the old value, and therefore no change
6538 would seem to have occurred.
6539
6540 In order to make watchpoints work `right', we really need
6541 to complete the memory write, and then evaluate the
6542 watchpoint expression. We do this by single-stepping the
6543 target.
6544
6545 It may not be necessary to disable the watchpoint to step over
6546 it. For example, the PA can (with some kernel cooperation)
6547 single step over a watchpoint without disabling the watchpoint.
6548
6549 It is far more common to need to disable a watchpoint to step
6550 the inferior over it. If we have non-steppable watchpoints,
6551 we must disable the current watchpoint; it's simplest to
6552 disable all watchpoints.
6553
6554 Any breakpoint at PC must also be stepped over -- if there's
6555 one, it will have already triggered before the watchpoint
6556 triggered, and we either already reported it to the user, or
6557 it didn't cause a stop and we called keep_going. In either
6558 case, if there was a breakpoint at PC, we must be trying to
6559 step past it. */
6561 keep_going (ecs);
6562 return;
6563 }
6564
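/* Illustrative aside, not part of infrun.c: the kind of inferior code the
   watchpoint logic above deals with.  With "watch counter" set on the
   program below, the store in bump () raises the watchpoint; GDB must let
   the store complete (single-stepping over it, disabling non-steppable
   watchpoints if needed) before re-evaluating the expression, or it would
   still read the old value.  All names are invented for the example.  */
#include <stdio.h>

static volatile int counter;    /* The watched location.  */

static void
bump (void)
{
  counter++;                    /* The store the watchpoint traps on.  */
}

int
main (void)
{
  bump ();
  printf ("counter = %d\n", counter);
  return 0;
}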
6568 ecs->event_thread->control.stop_step = 0;
6569 stop_print_frame = true;
6571 bpstat *stop_chain = nullptr;
6572
6573 /* Hide inlined functions starting here, unless we just performed stepi or
6574 nexti. After stepi and nexti, always show the innermost frame (not any
6575 inline function call sites). */
6576 if (ecs->event_thread->control.step_range_end != 1)
6577 {
6578 const address_space *aspace
6580
6581 /* skip_inline_frames is expensive, so we avoid it if we can
6582 determine that the address is one where functions cannot have
6583 been inlined. This improves performance with inferiors that
6584 load a lot of shared libraries, because the solib event
6585 breakpoint is defined as the address of a function (i.e. not
6586 inline). Note that we have to check the previous PC as well
6587 as the current one to catch cases when we have just
6588 single-stepped off a breakpoint prior to reinstating it.
6589 Note that we're assuming that the code we single-step to is
6590 not inline, but that's not definitive: there's nothing
6591 preventing the event breakpoint function from containing
6592 inlined code, and the single-step ending up there. If the
6593 user had set a breakpoint on that inlined code, the missing
6594 skip_inline_frames call would break things. Fortunately
6595 that's an extremely unlikely scenario. */
6597 ecs->event_thread->stop_pc (),
6598 ecs->ws)
6599 && !(ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
6602 ecs->event_thread->prev_pc,
6603 ecs->ws)))
6604 {
6605 stop_chain = build_bpstat_chain (aspace,
6606 ecs->event_thread->stop_pc (),
6607 ecs->ws);
6608 skip_inline_frames (ecs->event_thread, stop_chain);
6609
6610 /* Re-fetch current thread's frame in case that invalidated
6611 the frame cache. */
6612 frame = get_current_frame ();
6613 gdbarch = get_frame_arch (frame);
6614 }
6615 }
6616
6617 if (ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
6621 {
6622 /* We're trying to step off a breakpoint. Turns out that we're
6623 also on an instruction that needs to be stepped multiple
6624 times before it has fully executed. E.g., architectures
6625 with a delay slot. It needs to be stepped twice, once for
6626 the instruction and once for the delay slot. */
6627 int step_through_delay
6629
6630 if (step_through_delay)
6631 infrun_debug_printf ("step through delay");
6632
6633 if (ecs->event_thread->control.step_range_end == 0
6634 && step_through_delay)
6635 {
6636 /* The user issued a continue when stopped at a breakpoint.
6637 Set up for another trap and get out of here. */
6639 keep_going (ecs);
6640 return;
6641 }
6642 else if (step_through_delay)
6643 {
6644 /* The user issued a step when stopped at a breakpoint.
6645 Maybe we should stop, maybe we should not - the delay
6646 slot *might* correspond to a line of source. In any
6647 case, don't decide that here, just set
6648 ecs->stepping_over_breakpoint, making sure we
6649 single-step again before breakpoints are re-inserted. */
6651 }
6652 }
6653
6654 /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
6655 handles this event. */
6658 ecs->event_thread->stop_pc (),
6659 ecs->event_thread, ecs->ws, stop_chain);
6660
6661 /* Following in case break condition called a
6662 function. */
6663 stop_print_frame = true;
6664
6665 /* This is where we handle "moribund" watchpoints. Unlike
6666 software breakpoint traps, hardware watchpoint traps are
6667 always distinguishable from random traps. If no high-level
6668 watchpoint is associated with the reported stop data address
6669 anymore, then the bpstat does not explain the signal ---
6670 simply make sure to ignore it if `stopped_by_watchpoint' is
6671 set. */
6672
6673 if (ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
6675 GDB_SIGNAL_TRAP)
6676 && stopped_by_watchpoint)
6677 {
6678 infrun_debug_printf ("no user watchpoint explains watchpoint SIGTRAP, "
6679 "ignoring");
6680 }
6681
6682 /* NOTE: cagney/2003-03-29: These checks for a random signal
6683 at one stage in the past included checks for an inferior
6684 function call's call dummy's return breakpoint. The original
6685 comment, that went with the test, read:
6686
6687 ``End of a stack dummy. Some systems (e.g. Sony news) give
6688 another signal besides SIGTRAP, so check here as well as
6689 above.''
6690
6691 If someone ever tries to get call dummies on a
6692 non-executable stack to work (where the target would stop
6693 with something like a SIGSEGV), then those tests might need
6694 to be re-instated. Given, however, that the tests were only
6695 enabled when momentary breakpoints were not being used, I
6696 suspect that it won't be the case.
6697
6698 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
6699 be necessary for call dummies on a non-executable stack on
6700 SPARC. */
6701
6702 /* See if the breakpoints module can explain the signal. */
6703 random_signal
6705 ecs->event_thread->stop_signal ());
6706
6707 /* Maybe this was a trap for a software breakpoint that has since
6708 been removed. */
6709 if (random_signal && target_stopped_by_sw_breakpoint ())
6710 {
6712 ecs->event_thread->stop_pc ()))
6713 {
6714 struct regcache *regcache;
6715 int decr_pc;
6716
6717 /* Re-adjust PC to what the program would see if GDB was not
6718 debugging it. */
6721 if (decr_pc != 0)
6722 {
6723 gdb::optional<scoped_restore_tmpl<int>>
6724 restore_operation_disable;
6725
6726 if (record_full_is_used ())
6727 restore_operation_disable.emplace
6729
6731 ecs->event_thread->stop_pc () + decr_pc);
6732 }
6733 }
6734 else
6735 {
6736 /* A delayed software breakpoint event. Ignore the trap. */
6737 infrun_debug_printf ("delayed software breakpoint trap, ignoring");
6738 random_signal = 0;
6739 }
6740 }
6741
6742 /* Maybe this was a trap for a hardware breakpoint/watchpoint that
6743 has since been removed. */
6744 if (random_signal && target_stopped_by_hw_breakpoint ())
6745 {
6746 /* A delayed hardware breakpoint event. Ignore the trap. */
6747 infrun_debug_printf ("delayed hardware breakpoint/watchpoint "
6748 "trap, ignoring");
6749 random_signal = 0;
6750 }
6751
6752 /* If not, perhaps stepping/nexting can. */
6753 if (random_signal)
6754 random_signal = !(ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
6756
6757 /* Perhaps the thread hit a single-step breakpoint of _another_
6758 thread. Single-step breakpoints are transparent to the
6759 breakpoints module. */
6760 if (random_signal)
6761 random_signal = !ecs->hit_singlestep_breakpoint;
6762
6763 /* No? Perhaps we got a moribund watchpoint. */
6764 if (random_signal)
6765 random_signal = !stopped_by_watchpoint;
6766
6767 /* Always stop if the user explicitly requested this thread to
6768 remain stopped. */
6769 if (ecs->event_thread->stop_requested)
6770 {
6771 random_signal = 1;
6772 infrun_debug_printf ("user-requested stop");
6773 }
6774
6775 /* For the program's own signals, act according to
6776 the signal handling tables. */
6777
6778 if (random_signal)
6779 {
6780 /* Signal not for debugging purposes. */
6781 enum gdb_signal stop_signal = ecs->event_thread->stop_signal ();
6782
6783 infrun_debug_printf ("random signal (%s)",
6784 gdb_signal_to_symbol_string (stop_signal));
6785
6787
6788 /* Always stop on signals if we're either just gaining control
6789 of the program, or the user explicitly requested this thread
6790 to remain stopped. */
6791 if (stop_soon != NO_STOP_QUIETLY
6794 {
6795 stop_waiting (ecs);
6796 return;
6797 }
6798
6799 /* Notify observers the signal has "handle print" set. Note we
6800 returned early above if stopping; normal_stop handles the
6801 printing in that case. */
6803 {
6804 /* The signal table tells us to print about this signal. */
6808 }
6809
6810 /* Clear the signal if it should not be passed. */
6811 if (signal_program[ecs->event_thread->stop_signal ()] == 0)
6812 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
6813
6814 if (ecs->event_thread->prev_pc == ecs->event_thread->stop_pc ()
6816 && ecs->event_thread->control.step_resume_breakpoint == nullptr)
6817 {
6818 /* We were just starting a new sequence, attempting to
6819 single-step off of a breakpoint and expecting a SIGTRAP.
6820 Instead this signal arrives. This signal will take us out
6821 of the stepping range so GDB needs to remember to, when
6822 the signal handler returns, resume stepping off that
6823 breakpoint. */
6824 /* To simplify things, "continue" is forced to use the same
6825 code paths as single-step - set a breakpoint at the
6826 signal return address and then, once hit, step off that
6827 breakpoint. */
6828 infrun_debug_printf ("signal arrived while stepping over breakpoint");
6829
6832 /* Reset trap_expected to ensure breakpoints are re-inserted. */
6834
6835 /* If we were nexting/stepping some other thread, switch to
6836 it, so that we don't continue it, losing control. */
6838 keep_going (ecs);
6839 return;
6840 }
6841
6842 if (ecs->event_thread->stop_signal () != GDB_SIGNAL_0
6844 ecs->event_thread)
6845 || ecs->event_thread->control.step_range_end == 1)
6846 && (get_stack_frame_id (frame)
6848 && ecs->event_thread->control.step_resume_breakpoint == nullptr)
6849 {
6850 /* The inferior is about to take a signal that will take it
6851 out of the single step range. Set a breakpoint at the
6852 current PC (which is presumably where the signal handler
6853 will eventually return) and then allow the inferior to
6854 run free.
6855
6856 Note that this is only needed for a signal delivered
6857 while in the single-step range. Nested signals aren't a
6858 problem as they eventually all return. */
6859 infrun_debug_printf ("signal may take us out of single-step range");
6860
6864 /* Reset trap_expected to ensure breakpoints are re-inserted. */
6866 keep_going (ecs);
6867 return;
6868 }
6869
6870 /* Note: step_resume_breakpoint may be non-NULL. This occurs
6871 when either there's a nested signal, or when there's a
6872 pending signal enabled just as the signal handler returns
6873 (leaving the inferior at the step-resume-breakpoint without
6874 actually executing it). Either way continue until the
6875 breakpoint is really hit. */
6876
6878 {
6879 infrun_debug_printf ("random signal, keep going");
6880
6881 keep_going (ecs);
6882 }
6883 return;
6884 }
6885
6887}
6888
6889/* Come here when we've got some debug event / signal we can explain
6890 (IOW, not a random signal), and test whether it should cause a
6891 stop, or whether we should resume the inferior (transparently).
6892 E.g., could be a breakpoint whose condition evaluates false; we
6893 could be still stepping within the line; etc. */
6894
6895static void
6897{
6898 struct symtab_and_line stop_pc_sal;
6899 frame_info_ptr frame;
6900 struct gdbarch *gdbarch;
6901 CORE_ADDR jmp_buf_pc;
6902 struct bpstat_what what;
6903
6904 /* Handle cases caused by hitting a breakpoint. */
6905
6906 frame = get_current_frame ();
6907 gdbarch = get_frame_arch (frame);
6908
6910
6911 if (what.call_dummy)
6912 {
6914 }
6915
6916 /* A few breakpoint types have callbacks associated (e.g.,
6917 bp_jit_event). Run them now. */
6919
6920 /* If we hit an internal event that triggers symbol changes, the
6921 current frame will be invalidated within bpstat_what (e.g., if we
6922 hit an internal solib event). Re-fetch it. */
6923 frame = get_current_frame ();
6924 gdbarch = get_frame_arch (frame);
6925
6926 switch (what.main_action)
6927 {
6929 /* If we hit the breakpoint at longjmp while stepping, we
6930 install a momentary breakpoint at the target of the
6931 jmp_buf. */
6932
6933 infrun_debug_printf ("BPSTAT_WHAT_SET_LONGJMP_RESUME");
6934
6936
6937 if (what.is_longjmp)
6938 {
6939 struct value *arg_value;
6940
6941 /* If we set the longjmp breakpoint via a SystemTap probe,
6942 then use it to extract the arguments. The destination PC
6943 is the third argument to the probe. */
6944 arg_value = probe_safe_evaluate_at_pc (frame, 2);
6945 if (arg_value)
6946 {
6947 jmp_buf_pc = value_as_address (arg_value);
6948 jmp_buf_pc = gdbarch_addr_bits_remove (gdbarch, jmp_buf_pc);
6949 }
6952 frame, &jmp_buf_pc))
6953 {
6954 infrun_debug_printf ("BPSTAT_WHAT_SET_LONGJMP_RESUME "
6955 "(!gdbarch_get_longjmp_target)");
6956 keep_going (ecs);
6957 return;
6958 }
6959
6960 /* Insert a breakpoint at resume address. */
6962 }
6963 else
6964 check_exception_resume (ecs, frame);
6965 keep_going (ecs);
6966 return;
6967
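/* Illustrative aside, not part of infrun.c: a minimal longjmp inferior.
   Stepping over do_jump () below trips the internal longjmp breakpoint,
   and the BPSTAT_WHAT_SET_LONGJMP_RESUME handling above plants a momentary
   breakpoint at the jmp_buf target (the setjmp return point), so the step
   is not lost when control flies back.  Names are invented.  */
#include <setjmp.h>
#include <stdio.h>

static jmp_buf env;

static void
do_jump (void)
{
  longjmp (env, 1);             /* Control returns to the setjmp below.  */
}

int
main (void)
{
  if (setjmp (env) == 0)        /* The resume breakpoint lands here.  */
    do_jump ();
  else
    printf ("back from longjmp\n");
  return 0;
}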
6969 {
6970 frame_info_ptr init_frame;
6971
6972 /* There are several cases to consider.
6973
6974 1. The initiating frame no longer exists. In this case we
6975 must stop, because the exception or longjmp has gone too
6976 far.
6977
6978 2. The initiating frame exists, and is the same as the
6979 current frame. We stop, because the exception or longjmp
6980 has been caught.
6981
6982 3. The initiating frame exists and is different from the
6983 current frame. This means the exception or longjmp has
6984 been caught beneath the initiating frame, so keep going.
6985
6986 4. longjmp breakpoint has been placed just to protect
6987 against stale dummy frames and user is not interested in
6988 stopping around longjmps. */
6989
6990 infrun_debug_printf ("BPSTAT_WHAT_CLEAR_LONGJMP_RESUME");
6991
6993 != nullptr);
6995
6996 if (what.is_longjmp)
6997 {
6999
7001 {
7002 /* Case 4. */
7003 keep_going (ecs);
7004 return;
7005 }
7006 }
7007
7008 init_frame = frame_find_by_id (ecs->event_thread->initiating_frame);
7009
7010 if (init_frame)
7011 {
7012 struct frame_id current_id
7014 if (current_id == ecs->event_thread->initiating_frame)
7015 {
7016 /* Case 2. Fall through. */
7017 }
7018 else
7019 {
7020 /* Case 3. */
7021 keep_going (ecs);
7022 return;
7023 }
7024 }
7025
7026 /* For Cases 1 and 2, remove the step-resume breakpoint, if it
7027 exists. */
7029
7030 end_stepping_range (ecs);
7031 }
7032 return;
7033
7034 case BPSTAT_WHAT_SINGLE:
7035 infrun_debug_printf ("BPSTAT_WHAT_SINGLE");
7037 /* Still need to check other stuff, at least the case where we
7038 are stepping and step out of the right range. */
7039 break;
7040
7042 infrun_debug_printf ("BPSTAT_WHAT_STEP_RESUME");
7043
7047 {
7048 struct thread_info *tp = ecs->event_thread;
7049
7050 /* We are finishing a function in reverse, and just hit the
7051 step-resume breakpoint at the start address of the
7052 function, and we're almost there -- just need to back up
7053 by one more single-step, which should take us back to the
7054 function call. */
7056 keep_going (ecs);
7057 return;
7058 }
7060 if (ecs->event_thread->stop_pc () == ecs->stop_func_start
7062 {
7063 /* We are stepping over a function call in reverse, and just
7064 hit the step-resume breakpoint at the start address of
7065 the function. Go back to single-stepping, which should
7066 take us back to the function call. */
7068 keep_going (ecs);
7069 return;
7070 }
7071 break;
7072
7074 infrun_debug_printf ("BPSTAT_WHAT_STOP_NOISY");
7075 stop_print_frame = true;
7076
7077 /* Assume the thread stopped for a breakpoint. We'll still check
7078 whether a/the breakpoint is there when the thread is next
7079 resumed. */
7081
7082 stop_waiting (ecs);
7083 return;
7084
7086 infrun_debug_printf ("BPSTAT_WHAT_STOP_SILENT");
7087 stop_print_frame = false;
7088
7089 /* Assume the thread stopped for a breakpoint. We'll still check
7090 whether a/the breakpoint is there when the thread is next
7091 resumed. */
7093 stop_waiting (ecs);
7094 return;
7095
7097 infrun_debug_printf ("BPSTAT_WHAT_HP_STEP_RESUME");
7098
7101 {
7102 /* Back when the step-resume breakpoint was inserted, we
7103 were trying to single-step off a breakpoint. Go back to
7104 doing that. */
7107 keep_going (ecs);
7108 return;
7109 }
7110 break;
7111
7113 break;
7114 }
7115
7116 /* If we stepped a permanent breakpoint and we had a high priority
7117 step-resume breakpoint for the address we stepped, but we didn't
7118 hit it, then we must have stepped into the signal handler. The
7119 step-resume was only necessary to catch the case of _not_
7120 stepping into the handler, so delete it, and fall through to
7121 checking whether the step finished. */
7123 {
7124 struct breakpoint *sr_bp
7126
7127 if (sr_bp != nullptr
7128 && sr_bp->first_loc ().permanent
7129 && sr_bp->type == bp_hp_step_resume
7130 && sr_bp->first_loc ().address == ecs->event_thread->prev_pc)
7131 {
7132 infrun_debug_printf ("stepped permanent breakpoint, stopped in handler");
7135 }
7136 }
7137
7138 /* We come here if we hit a breakpoint but should not stop for it.
7139 Possibly we also were stepping and should stop for that. So fall
7140 through and test for stepping. But, if not stepping, do not
7141 stop. */
7142
7143 /* In all-stop mode, if we're currently stepping but have stopped in
7144 some other thread, we need to switch back to the stepped thread. */
7146 return;
7147
7149 {
7150 infrun_debug_printf ("step-resume breakpoint is inserted");
7151
7152 /* Having a step-resume breakpoint overrides anything
7153 else having to do with stepping commands until
7154 that breakpoint is reached. */
7155 keep_going (ecs);
7156 return;
7157 }
7158
7159 if (ecs->event_thread->control.step_range_end == 0)
7160 {
7161 infrun_debug_printf ("no stepping, continue");
7162 /* Likewise if we aren't even stepping. */
7163 keep_going (ecs);
7164 return;
7165 }
7166
7167 /* Re-fetch current thread's frame in case the code above caused
7168 the frame cache to be re-initialized, making our FRAME variable
7169 a dangling pointer. */
7170 frame = get_current_frame ();
7171 gdbarch = get_frame_arch (frame);
7173
7174 /* If stepping through a line, keep going if still within it.
7175
7176 Note that step_range_end is the address of the first instruction
7177 beyond the step range, and NOT the address of the last instruction
7178 within it!
7179
7180 Note also that during reverse execution, we may be stepping
7181 through a function epilogue and therefore must detect when
7182 the current-frame changes in the middle of a line. */
7183
7185 ecs->event_thread)
7187 || get_frame_id (frame) == ecs->event_thread->control.step_frame_id))
7188 {
7190 ("stepping inside range [%s-%s]",
7193
7194 /* Tentatively re-enable range stepping; `resume' disables it if
7195 necessary (e.g., if we're stepping over a breakpoint or we
7196 have software watchpoints). */
7198
7199 /* When stepping backward, stop at beginning of line range
7200 (unless it's the function entry point, in which case
7201 keep going back to the call point). */
7202 CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
7203 if (stop_pc == ecs->event_thread->control.step_range_start
7204 && stop_pc != ecs->stop_func_start
7206 end_stepping_range (ecs);
7207 else
7208 keep_going (ecs);
7209
7210 return;
7211 }
7212
7213 /* We stepped out of the stepping range. */
7214
7215 /* If we are stepping at the source level and entered the runtime
7216 loader dynamic symbol resolution code...
7217
7218 EXEC_FORWARD: we keep on single stepping until we exit the run
7219 time loader code and reach the callee's address.
7220
7221 EXEC_REVERSE: we've already executed the callee (backward), and
7222 the runtime loader code is handled just like any other
7223 undebuggable function call. Now we need only keep stepping
7224 backward through the trampoline code, and that's handled further
7225 down, so there is nothing for us to do here. */
7226
7230 && (ecs->event_thread->control.step_start_function == nullptr
7233 ->entry_pc ())))
7234 {
7235 CORE_ADDR pc_after_resolver =
7237
7238 infrun_debug_printf ("stepped into dynsym resolve code");
7239
7240 if (pc_after_resolver)
7241 {
7242 /* Set up a step-resume breakpoint at the address
7243 indicated by SKIP_SOLIB_RESOLVER. */
7244 symtab_and_line sr_sal;
7245 sr_sal.pc = pc_after_resolver;
7246 sr_sal.pspace = get_frame_program_space (frame);
7247
7249 sr_sal, null_frame_id);
7250 }
7251
7252 keep_going (ecs);
7253 return;
7254 }
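/* Illustrative aside, not part of infrun.c: with lazy PLT binding, the
   first call into a shared library (puts below) detours through the
   dynamic linker's resolver stub.  A source-level "step" into that call is
   what the check above catches: it plants a step-resume breakpoint at the
   address the SKIP_SOLIB_RESOLVER hook reports (the real puts) and keeps
   going.  Whether binding is actually lazy depends on how the program was
   linked.  */
#include <stdio.h>

int
main (void)
{
  puts ("first call resolves the PLT entry");   /* "step" into this call.  */
  puts ("second call goes straight to puts");
  return 0;
}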
7255
7256 /* Step through an indirect branch thunk. */
7259 ecs->event_thread->stop_pc ()))
7260 {
7261 infrun_debug_printf ("stepped into indirect branch thunk");
7262 keep_going (ecs);
7263 return;
7264 }
7265
7266 if (ecs->event_thread->control.step_range_end != 1
7269 && get_frame_type (frame) == SIGTRAMP_FRAME)
7270 {
7271 infrun_debug_printf ("stepped into signal trampoline");
7272 /* The inferior, while doing a "step" or "next", has ended up in
7273 a signal trampoline (either by a signal being delivered or by
7274 the signal handler returning). Just single-step until the
7275 inferior leaves the trampoline (either by calling the handler
7276 or returning). */
7277 keep_going (ecs);
7278 return;
7279 }
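/* Illustrative aside, not part of infrun.c: an inferior in which a "step"
   or "next" can land in a signal trampoline, as handled above.  If SIGALRM
   arrives while stepping through main, GDB keeps single-stepping through
   the trampoline until the inferior either enters the handler or returns
   from it.  Names are invented for the example.  */
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t got_signal;

static void
handler (int sig)
{
  got_signal = sig;             /* A step may end up here, in the handler.  */
}

int
main (void)
{
  signal (SIGALRM, handler);
  alarm (1);                    /* Deliver SIGALRM while we keep stepping.  */
  while (!got_signal)
    pause ();
  printf ("handled signal %d\n", (int) got_signal);
  return 0;
}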
7280
7281 /* If we're in the return path from a shared library trampoline,
7282 we want to proceed through the trampoline when stepping. */
7283 /* macro/2012-04-25: This needs to come before the subroutine
7284 call check below as on some targets return trampolines look
7285 like subroutine calls (MIPS16 return thunks). */
7287 ecs->event_thread->stop_pc (),
7288 ecs->stop_func_name)
7290 {
7291 /* Determine where this trampoline returns. */
7292 CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
7293 CORE_ADDR real_stop_pc
7294 = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
7295
7296 infrun_debug_printf ("stepped into solib return tramp");
7297
7298 /* Only proceed through if we know where it's going. */
7299 if (real_stop_pc)
7300 {
7301 /* And put the step-breakpoint there and go until there. */
7302 symtab_and_line sr_sal;
7303 sr_sal.pc = real_stop_pc;
7304 sr_sal.section = find_pc_overlay (sr_sal.pc);
7305 sr_sal.pspace = get_frame_program_space (frame);
7306
7307 /* Do not specify what the fp should be when we stop since
7308 on some machines the prologue is where the new fp value
7309 is established. */
7311 sr_sal, null_frame_id);
7312
7313 /* Restart without fiddling with the step ranges or
7314 other state. */
7315 keep_going (ecs);
7316 return;
7317 }
7318 }
7319
7320 /* Check for subroutine calls. The check for the current frame
7321 equalling the step ID is not necessary - the check of the
7322 previous frame's ID is sufficient - but it is a common case and
7323 cheaper than checking the previous frame's ID.
7324
7325 NOTE: frame_id::operator== will never report two invalid frame IDs as
7326 being equal, so to get into this block, both the current and
7327 previous frame must have valid frame IDs. */
7328 /* The outer_frame_id check is a heuristic to detect stepping
7329 through startup code. If we step over an instruction which
7330 sets the stack pointer from an invalid value to a valid value,
7331 we may detect that as a subroutine call from the mythical
7332 "outermost" function. This could be fixed by marking
7333 outermost frames as !stack_p,code_p,special_p. Then the
7334 initial outermost frame, before sp was valid, would
7335 have code_addr == &_start. See the comment in frame_id::operator==
7336 for more. */
7337 if ((get_stack_frame_id (frame)
7342 != outer_frame_id)
7344 != find_pc_function (ecs->event_thread->stop_pc ())))))
7345 {
7346 CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
7347 CORE_ADDR real_stop_pc;
7348
7349 infrun_debug_printf ("stepped into subroutine");
7350
7352 {
7353 /* I presume that step_over_calls is only 0 when we're
7354 supposed to be stepping at the assembly language level
7355 ("stepi"). Just stop. */
7356 /* And this works the same backward as frontward. MVS */
7357 end_stepping_range (ecs);
7358 return;
7359 }
7360
7361 /* Reverse stepping through solib trampolines. */
7362
7365 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
7366 || (ecs->stop_func_start == 0
7367 && in_solib_dynsym_resolve_code (stop_pc))))
7368 {
7369 /* Any solib trampoline code can be handled in reverse
7370 by simply continuing to single-step. We have already
7371 executed the solib function (backwards), and a few
7372 steps will take us back through the trampoline to the
7373 caller. */
7374 keep_going (ecs);
7375 return;
7376 }
7377
7379 {
7380 /* We're doing a "next".
7381
7382 Normal (forward) execution: set a breakpoint at the
7383 callee's return address (the address at which the caller
7384 will resume).
7385
7386 Reverse (backward) execution: set the step-resume
7387 breakpoint at the start of the function that we just
7388 stepped into (backwards), and continue to there. When we
7389 get there, we'll need to single-step back to the caller. */
7390
7392 {
7393 /* If we're already at the start of the function, we've either
7394 just stepped backward into a single instruction function,
7395 or stepped back out of a signal handler to the first instruction
7396 of the function. Just keep going, which will single-step back
7397 to the caller. */
7398 if (ecs->stop_func_start != stop_pc && ecs->stop_func_start != 0)
7399 {
7400 /* Normal function call return (static or dynamic). */
7401 symtab_and_line sr_sal;
7402 sr_sal.pc = ecs->stop_func_start;
7403 sr_sal.pspace = get_frame_program_space (frame);
7405 sr_sal, get_stack_frame_id (frame));
7406 }
7407 }
7408 else
7410
7411 keep_going (ecs);
7412 return;
7413 }
7414
7415 /* If we are in a function call trampoline (a stub between the
7416 calling routine and the real function), locate the real
7417 function. That's what tells us (a) whether we want to step
7418 into it at all, and (b) what prologue we want to run to the
7419 end of, if we do step into it. */
7420 real_stop_pc = skip_language_trampoline (frame, stop_pc);
7421 if (real_stop_pc == 0)
7422 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
7423 if (real_stop_pc != 0)
7424 ecs->stop_func_start = real_stop_pc;
7425
7426 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
7427 {
7428 symtab_and_line sr_sal;
7429 sr_sal.pc = ecs->stop_func_start;
7430 sr_sal.pspace = get_frame_program_space (frame);
7431
7433 sr_sal, null_frame_id);
7434 keep_going (ecs);
7435 return;
7436 }
7437
7438 /* If we have line number information for the function we are
7439 thinking of stepping into and the function isn't on the skip
7440 list, step into it.
7441
7442 If there are several symtabs at that PC (e.g. with include
7443 files), we just want to know whether *any* of them have line
7444 numbers. find_pc_line handles this. */
7445 {
7446 struct symtab_and_line tmp_sal;
7447
7448 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
7449 if (tmp_sal.line != 0
7451 tmp_sal)
7453 {
7456 else
7458 return;
7459 }
7460 }
7461
7462 /* If we have no line number and the step-stop-if-no-debug is
7463 set, we stop the step so that the user has a chance to switch
7464 to assembly mode. */
7467 {
7468 end_stepping_range (ecs);
7469 return;
7470 }
7471
7473 {
7474 /* If we're already at the start of the function, we've either just
7475 stepped backward into a single instruction function without line
7476 number info, or stepped back out of a signal handler to the first
7477 instruction of the function without line number info. Just keep
7478 going, which will single-step back to the caller. */
7479 if (ecs->stop_func_start != stop_pc)
7480 {
7481 /* Set a breakpoint at callee's start address.
7482 From there we can step once and be back in the caller. */
7483 symtab_and_line sr_sal;
7484 sr_sal.pc = ecs->stop_func_start;
7485 sr_sal.pspace = get_frame_program_space (frame);
7487 sr_sal, null_frame_id);
7488 }
7489 }
7490 else
7491 /* Set a breakpoint at callee's return address (the address
7492 at which the caller will resume). */
7494
7495 keep_going (ecs);
7496 return;
7497 }
7498
7499 /* Reverse stepping through solib trampolines. */
7500
7503 {
7504 CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
7505
7506 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
7507 || (ecs->stop_func_start == 0
7508 && in_solib_dynsym_resolve_code (stop_pc)))
7509 {
7510 /* Any solib trampoline code can be handled in reverse
7511 by simply continuing to single-step. We have already
7512 executed the solib function (backwards), and a few
7513 steps will take us back through the trampoline to the
7514 caller. */
7515 keep_going (ecs);
7516 return;
7517 }
7518 else if (in_solib_dynsym_resolve_code (stop_pc))
7519 {
7520 /* Stepped backward into the solib dynsym resolver.
7521 Set a breakpoint at its start and continue, then
7522 one more step will take us out. */
7523 symtab_and_line sr_sal;
7524 sr_sal.pc = ecs->stop_func_start;
7525 sr_sal.pspace = get_frame_program_space (frame);
7527 sr_sal, null_frame_id);
7528 keep_going (ecs);
7529 return;
7530 }
7531 }
7532
7533 /* This always returns the sal for the inner-most frame when we are in a
7534 stack of inlined frames, even if GDB actually believes that it is in a
7535 more outer frame. This is checked for below by calls to
7536 inline_skipped_frames. */
7537 stop_pc_sal = find_pc_line (ecs->event_thread->stop_pc (), 0);
7538
7539 /* NOTE: tausq/2004-05-24: This if block used to be done before all
7540 the trampoline processing logic; however, there are some trampolines
7541 that have no names, so we should do trampoline handling first. */
7543 && ecs->stop_func_name == nullptr
7544 && stop_pc_sal.line == 0)
7545 {
7546 infrun_debug_printf ("stepped into undebuggable function");
7547
7548 /* The inferior just stepped into, or returned to, an
7549 undebuggable function (where there is no debugging information
7550 and no line number corresponding to the address where the
7551 inferior stopped). Since we want to skip this kind of code,
7552 we keep going until the inferior returns from this
7553 function - unless the user has asked us not to (via
7554 set step-mode) or we no longer know how to get back
7555 to the call site. */
7557 || !frame_id_p (frame_unwind_caller_id (frame)))
7558 {
7559 /* If we have no line number and the step-stop-if-no-debug
7560 is set, we stop the step so that the user has a chance to
7561 switch to assembly mode. */
7562 end_stepping_range (ecs);
7563 return;
7564 }
7565 else
7566 {
7567 /* Set a breakpoint at callee's return address (the address
7568 at which the caller will resume). */
7570 keep_going (ecs);
7571 return;
7572 }
7573 }
7574
7577 && ecs->event_thread->stop_pc () >= ecs->stop_func_alt_start
7578 && ecs->event_thread->stop_pc () < ecs->stop_func_start)
7579 {
7580 /* We are executing the reverse-finish command.
7581 If the system supports multiple entry points and we are finishing
7582 a function in reverse: if we are between the entry points, single-step
7583 back to the alternate entry point. If we are at the alternate entry
7584 point -- just need to back up by one more single-step, which
7585 should take us back to the function call. */
7588 keep_going (ecs);
7589 return;
7590
7591 }
7592
7593 if (ecs->event_thread->control.step_range_end == 1)
7594 {
7595 /* It is stepi or nexti. We always want to stop stepping after
7596 one instruction. */
7597 infrun_debug_printf ("stepi/nexti");
7598 end_stepping_range (ecs);
7599 return;
7600 }
7601
7602 if (stop_pc_sal.line == 0)
7603 {
7604 /* We have no line number information. That means to stop
7605 stepping (does this always happen right after one instruction,
7606 when we do "s" in a function with no line numbers,
7607 or can this happen as a result of a return or longjmp?). */
7608 infrun_debug_printf ("line number info");
7609 end_stepping_range (ecs);
7610 return;
7611 }
7612
7613 /* Look for "calls" to inlined functions, part one. If the inline
7614 frame machinery detected some skipped call sites, we have entered
7615 a new inline function. */
7616
7620 {
7621 infrun_debug_printf ("stepped into inlined function");
7622
7624
7626 {
7627 /* For "step", we're going to stop. But if the call site
7628 for this inlined function is on the same source line as
7629 we were previously stepping, go down into the function
7630 first. Otherwise stop at the call site. */
7631
7632 if (call_sal.line == ecs->event_thread->current_line
7633 && call_sal.symtab == ecs->event_thread->current_symtab)
7634 {
7637 {
7638 keep_going (ecs);
7639 return;
7640 }
7641 }
7642
7643 end_stepping_range (ecs);
7644 return;
7645 }
7646 else
7647 {
7648 /* For "next", we should stop at the call site if it is on a
7649 different source line. Otherwise continue through the
7650 inlined function. */
7651 if (call_sal.line == ecs->event_thread->current_line
7652 && call_sal.symtab == ecs->event_thread->current_symtab)
7653 keep_going (ecs);
7654 else
7655 end_stepping_range (ecs);
7656 return;
7657 }
7658 }
7659
7660 /* Look for "calls" to inlined functions, part two. If we are still
7661 in the same real function we were stepping through, but we have
7662 to go further up to find the exact frame ID, we are stepping
7663 through a more inlined call beyond its call site. */
7664
7670 {
7671 infrun_debug_printf ("stepping through inlined function");
7672
7675 keep_going (ecs);
7676 else
7677 end_stepping_range (ecs);
7678 return;
7679 }
7680
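/* Illustrative aside, not part of infrun.c: the shape of inferior code the
   two inlined-function checks above are about.  With optimization, calls
   to add () may be inlined into main, so there is no call instruction to
   step through; the inline-frame machinery decides whether "step" stops at
   the call site or descends into the inlined body, and whether "next"
   skips it entirely.  Names are invented for the example.  */
#include <stdio.h>

static inline int
add (int a, int b)
{
  return a + b;                 /* Inlined body a "step" may stop inside.  */
}

int
main (void)
{
  int sum = add (1, 2);         /* Call site; may share the caller's line.  */
  printf ("%d\n", sum);
  return 0;
}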
7681 bool refresh_step_info = true;
7682 if ((ecs->event_thread->stop_pc () == stop_pc_sal.pc)
7683 && (ecs->event_thread->current_line != stop_pc_sal.line
7684 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
7685 {
7686 /* We are at a different line. */
7687
7688 if (stop_pc_sal.is_stmt)
7689 {
7690 /* We are at the start of a statement.
7691
7692 So stop. Note that we don't stop if we step into the middle of a
7693 statement. That is said to make things like for (;;) statements
7694 work better. */
7695 infrun_debug_printf ("stepped to a different line");
7696 end_stepping_range (ecs);
7697 return;
7698 }
7699 else if (get_frame_id (get_current_frame ())
7701 {
7702 /* We are not at the start of a statement, and we have not changed
7703 frame.
7704
7705 We ignore this line table entry, and continue stepping forward,
7706 looking for a better place to stop. */
7707 refresh_step_info = false;
7708 infrun_debug_printf ("stepped to a different line, but "
7709 "it's not the start of a statement");
7710 }
7711 else
7712 {
7713 /* We are not at the start of a statement, and we have changed frame.
7714
7715 We ignore this line table entry, and continue stepping forward,
7716 looking for a better place to stop. Keep refresh_step_info at
7717 true to note that the frame has changed, but ignore the line
7718 number to make sure we don't ignore a subsequent entry with the
7719 same line number. */
7720 stop_pc_sal.line = 0;
7721 infrun_debug_printf ("stepped to a different frame, but "
7722 "it's not the start of a statement");
7723 }
7724 }
7725
7726 /* We aren't done stepping.
7727
7728 Optimize by setting the stepping range to the line.
7729 (We might not be in the original line, but if we entered a
7730 new line in mid-statement, we continue stepping. This makes
7731 things like for(;;) statements work better.)
7732
7733 If we entered a SAL that indicates a non-statement line table entry,
7734 then we update the stepping range, but we don't update the step info,
7735 which includes things like the line number we are stepping away from.
7736 This means we will stop when we find a line table entry that is marked
7737 as is-statement, even if it matches the non-statement one we just
7738 stepped into. */
7739
7740 ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
7741 ecs->event_thread->control.step_range_end = stop_pc_sal.end;
7744 ("updated step range, start = %s, end = %s, may_range_step = %d",
7748 if (refresh_step_info)
7749 set_step_info (ecs->event_thread, frame, stop_pc_sal);
7750
7751 infrun_debug_printf ("keep going");
7752 keep_going (ecs);
7753}
7754
7755static bool restart_stepped_thread (process_stratum_target *resume_target,
7756 ptid_t resume_ptid);
7757
7758/* In all-stop mode, if we're currently stepping but have stopped in
7759 some other thread, we may need to switch back to the stepped
7760 thread. Returns true if we set the inferior running, false if we left
7761 it stopped (and the event needs further processing). */
7762
7763static bool
7765{
7766 if (!target_is_non_stop_p ())
7767 {
7768 /* If any thread is blocked on some internal breakpoint, and we
7769 simply need to step over that breakpoint to get it going
7770 again, do that first. */
7771
7772 /* However, if we see an event for the stepping thread, then we
7773 know all other threads have been moved past their breakpoints
7774 already. Let the caller check whether the step is finished,
7775 etc., before deciding to move it past a breakpoint. */
7776 if (ecs->event_thread->control.step_range_end != 0)
7777 return false;
7778
7779 /* Check if the current thread is blocked on an incomplete
7780 step-over, interrupted by a random signal. */
7782 && ecs->event_thread->stop_signal () != GDB_SIGNAL_TRAP)
7783 {
7785 ("need to finish step-over of [%s]",
7786 ecs->event_thread->ptid.to_string ().c_str ());
7787 keep_going (ecs);
7788 return true;
7789 }
7790
7791 /* Check if the current thread is blocked by a single-step
7792 breakpoint of another thread. */
7794 {
7795 infrun_debug_printf ("need to step [%s] over single-step breakpoint",
7796 ecs->ptid.to_string ().c_str ());
7797 keep_going (ecs);
7798 return true;
7799 }
7800
7801 /* If this thread needs yet another step-over (e.g., stepping
7802 through a delay slot), do it first before moving on to
7803 another thread. */
7805 {
7807 ("thread [%s] still needs step-over",
7808 ecs->event_thread->ptid.to_string ().c_str ());
7809 keep_going (ecs);
7810 return true;
7811 }
7812
7813 /* If scheduler locking applies even if not stepping, there's no
7814 need to walk over threads. Above we've checked whether the
7815	 current thread is stepping.  If some other thread (not the
7816	 event thread) is stepping, then it must be that scheduler
7817 locking is not in effect. */
7819 return false;
7820
7821 /* Otherwise, we no longer expect a trap in the current thread.
7822 Clear the trap_expected flag before switching back -- this is
7823 what keep_going does as well, if we call it. */
7825
7826 /* Likewise, clear the signal if it should not be passed. */
7828 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
7829
7830 if (restart_stepped_thread (ecs->target, ecs->ptid))
7831 {
7832 prepare_to_wait (ecs);
7833 return true;
7834 }
7835
7837 }
7838
7839 return false;
7840}
7841
7842/* Look for the thread that was stepping, and resume it.
7843 RESUME_TARGET / RESUME_PTID indicate the set of threads the caller
7844 is resuming. Return true if a thread was started, false
7845 otherwise. */
7846
7847static bool
7848restart_stepped_thread (process_stratum_target *resume_target,
7849			ptid_t resume_ptid)
7850{
7851 /* Do all pending step-overs before actually proceeding with
7852 step/next/etc. */
7853 if (start_step_over ())
7854 return true;
7855
7856 for (thread_info *tp : all_threads_safe ())
7857 {
7858 if (tp->state == THREAD_EXITED)
7859 continue;
7860
7861 if (tp->has_pending_waitstatus ())
7862 continue;
7863
7864 /* Ignore threads of processes the caller is not
7865 resuming. */
7866 if (!sched_multi
7867 && (tp->inf->process_target () != resume_target
7868 || tp->inf->pid != resume_ptid.pid ()))
7869 continue;
7870
7871 if (tp->control.trap_expected)
7872 {
7873 infrun_debug_printf ("switching back to stepped thread (step-over)");
7874
7876 return true;
7877 }
7878 }
7879
7880 for (thread_info *tp : all_threads_safe ())
7881 {
7882 if (tp->state == THREAD_EXITED)
7883 continue;
7884
7885 if (tp->has_pending_waitstatus ())
7886 continue;
7887
7888 /* Ignore threads of processes the caller is not
7889 resuming. */
7890 if (!sched_multi
7891 && (tp->inf->process_target () != resume_target
7892 || tp->inf->pid != resume_ptid.pid ()))
7893 continue;
7894
7895 /* Did we find the stepping thread? */
7896 if (tp->control.step_range_end)
7897 {
7898 infrun_debug_printf ("switching back to stepped thread (stepping)");
7899
7901 return true;
7902 }
7903 }
7904
7905 return false;
7906}
7907
7908/* See infrun.h. */
7909
7910void
7911restart_after_all_stop_detach (process_stratum_target *proc_target)
7912{
7913 /* Note we don't check target_is_non_stop_p() here, because the
7914 current inferior may no longer have a process_stratum target
7915 pushed, as we just detached. */
7916
7917  /* See if we have a THREAD_RUNNING thread that needs to be
7918     re-resumed.  If we have any thread that is already executing,
7919     then we don't need to resume the target -- it has already been
7920 resumed. With the remote target (in all-stop), it's even
7921 impossible to issue another resumption if the target is already
7922 resumed, until the target reports a stop. */
7923 for (thread_info *thr : all_threads (proc_target))
7924 {
7925 if (thr->state != THREAD_RUNNING)
7926 continue;
7927
7928 /* If we have any thread that is already executing, then we
7929	 don't need to resume the target -- it has already been
7930 resumed. */
7931 if (thr->executing ())
7932 return;
7933
7934 /* If we have a pending event to process, skip resuming the
7935 target and go straight to processing it. */
7936 if (thr->resumed () && thr->has_pending_waitstatus ())
7937 return;
7938 }
7939
7940 /* Alright, we need to re-resume the target. If a thread was
7941 stepping, we need to restart it stepping. */
7942 if (restart_stepped_thread (proc_target, minus_one_ptid))
7943 return;
7944
7945 /* Otherwise, find the first THREAD_RUNNING thread and resume
7946 it. */
7947 for (thread_info *thr : all_threads (proc_target))
7948 {
7949 if (thr->state != THREAD_RUNNING)
7950 continue;
7951
7952 execution_control_state ecs (thr);
7953 switch_to_thread (thr);
7954 keep_going (&ecs);
7955 return;
7956 }
7957}
7958
7959/* Set a previously stepped thread back to stepping. Returns true on
7960 success, false if the resume is not possible (e.g., the thread
7961 vanished). */
7962
7963static bool
7964keep_going_stepped_thread (struct thread_info *tp)
7965{
7966 frame_info_ptr frame;
7967
7968 /* If the stepping thread exited, then don't try to switch back and
7969 resume it, which could fail in several different ways depending
7970 on the target. Instead, just keep going.
7971
7972 We can find a stepping dead thread in the thread list in two
7973 cases:
7974
7975 - The target supports thread exit events, and when the target
7976 tries to delete the thread from the thread list, inferior_ptid
7977 pointed at the exiting thread. In such case, calling
7978 delete_thread does not really remove the thread from the list;
7979 instead, the thread is left listed, with 'exited' state.
7980
7981 - The target's debug interface does not support thread exit
7982 events, and so we have no idea whatsoever if the previously
7983 stepping thread is still alive. For that reason, we need to
7984 synchronously query the target now. */
7985
7986 if (tp->state == THREAD_EXITED || !target_thread_alive (tp->ptid))
7987 {
7988 infrun_debug_printf ("not resuming previously stepped thread, it has "
7989 "vanished");
7990
7991 delete_thread (tp);
7992 return false;
7993 }
7994
7995 infrun_debug_printf ("resuming previously stepped thread");
7996
7997 execution_control_state ecs (tp);
7998 switch_to_thread (tp);
7999
8001 frame = get_current_frame ();
8002
8003 /* If the PC of the thread we were trying to single-step has
8004 changed, then that thread has trapped or been signaled, but the
8005 event has not been reported to GDB yet. Re-poll the target
8006 looking for this particular thread's event (i.e. temporarily
8007 enable schedlock) by:
8008
8009 - setting a break at the current PC
8010 - resuming that particular thread, only (by setting trap
8011 expected)
8012
8013     This prevents us from continuously moving the single-step breakpoint
8014 forward, one instruction at a time, overstepping. */
8015
8016 if (tp->stop_pc () != tp->prev_pc)
8017 {
8018 ptid_t resume_ptid;
8019
8020 infrun_debug_printf ("expected thread advanced also (%s -> %s)",
8022 paddress (target_gdbarch (), tp->stop_pc ()));
8023
8024 /* Clear the info of the previous step-over, as it's no longer
8025 valid (if the thread was trying to step over a breakpoint, it
8026 has already succeeded). It's what keep_going would do too,
8027 if we called it. Do this before trying to insert the sss
8028 breakpoint, otherwise if we were previously trying to step
8029 over this exact address in another thread, the breakpoint is
8030 skipped. */
8032 tp->control.trap_expected = 0;
8033
8036 tp->stop_pc ());
8037
8038 tp->set_resumed (true);
8039 resume_ptid = internal_resume_ptid (tp->control.stepping_command);
8040 do_target_resume (resume_ptid, false, GDB_SIGNAL_0);
8041 }
8042 else
8043 {
8044 infrun_debug_printf ("expected thread still hasn't advanced");
8045
8047 }
8048
8049 return true;
8050}
8051
8052/* Is thread TP in the middle of (software or hardware)
8053 single-stepping? (Note the result of this function must never be
8054 passed directly as target_resume's STEP parameter.) */
8055
8056static bool
8057currently_stepping (struct thread_info *tp)
8058{
8059 return ((tp->control.step_range_end
8060 && tp->control.step_resume_breakpoint == nullptr)
8061 || tp->control.trap_expected
8062 || tp->stepped_breakpoint
8063 || bpstat_should_step ());
8064}
8065
8066/* Inferior has stepped into a subroutine call with source code that
8067 we should not step over. Do step to the first line of code in
8068 it. */
8069
8070static void
8072 struct execution_control_state *ecs)
8073{
8075
8076 compunit_symtab *cust
8078 if (cust != nullptr && cust->language () != language_asm)
8079 ecs->stop_func_start
8081
8082 symtab_and_line stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
8083 /* Use the step_resume_break to step until the end of the prologue,
8084 even if that involves jumps (as it seems to on the vax under
8085 4.2). */
8086 /* If the prologue ends in the middle of a source line, continue to
8087 the end of that source line (if it is still within the function).
8088 Otherwise, just go to end of prologue. */
8089 if (stop_func_sal.end
8090 && stop_func_sal.pc != ecs->stop_func_start
8091 && stop_func_sal.end < ecs->stop_func_end)
8092 ecs->stop_func_start = stop_func_sal.end;
8093
8094 /* Architectures which require breakpoint adjustment might not be able
8095 to place a breakpoint at the computed address. If so, the test
8096 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
8097 ecs->stop_func_start to an address at which a breakpoint may be
8098 legitimately placed.
8099
8100 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
8101 made, GDB will enter an infinite loop when stepping through
8102 optimized code consisting of VLIW instructions which contain
8103 subinstructions corresponding to different source lines. On
8104 FR-V, it's not permitted to place a breakpoint on any but the
8105 first subinstruction of a VLIW instruction. When a breakpoint is
8106 set, GDB will adjust the breakpoint address to the beginning of
8107 the VLIW instruction. Thus, we need to make the corresponding
8108 adjustment here when computing the stop address. */
8109
8111 {
8112 ecs->stop_func_start
8114 ecs->stop_func_start);
8115 }
8116
8117 if (ecs->stop_func_start == ecs->event_thread->stop_pc ())
8118 {
8119 /* We are already there: stop now. */
8120 end_stepping_range (ecs);
8121 return;
8122 }
8123 else
8124 {
8125 /* Put the step-breakpoint there and go until there. */
8126 symtab_and_line sr_sal;
8127 sr_sal.pc = ecs->stop_func_start;
8128 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
8130
8131 /* Do not specify what the fp should be when we stop since on
8132 some machines the prologue is where the new fp value is
8133 established. */
8135
8136 /* And make sure stepping stops right away then. */
8139 }
8140 keep_going (ecs);
8141}
8142
8143/* Inferior has stepped backward into a subroutine call with source
8144 code that we should not step over. Do step to the beginning of the
8145 last line of code in it. */
8146
8147static void
8149 struct execution_control_state *ecs)
8150{
8151 struct compunit_symtab *cust;
8152 struct symtab_and_line stop_func_sal;
8153
8155
8157 if (cust != nullptr && cust->language () != language_asm)
8158 ecs->stop_func_start
8160
8161 stop_func_sal = find_pc_line (ecs->event_thread->stop_pc (), 0);
8162
8163 /* OK, we're just going to keep stepping here. */
8164 if (stop_func_sal.pc == ecs->event_thread->stop_pc ())
8165 {
8166 /* We're there already. Just stop stepping now. */
8167 end_stepping_range (ecs);
8168 }
8169 else
8170 {
8171 /* Else just reset the step range and keep going.
8172 No step-resume breakpoint, they don't work for
8173 epilogues, which can have multiple entry paths. */
8174 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
8175 ecs->event_thread->control.step_range_end = stop_func_sal.end;
8176 keep_going (ecs);
8177 }
8178 return;
8179}
8180
8181/* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
8182   This is used both to skip over functions and to skip over other code.  */
8183
8184static void
8186 struct symtab_and_line sr_sal,
8187 struct frame_id sr_id,
8188 enum bptype sr_type)
8189{
8190 /* There should never be more than one step-resume or longjmp-resume
8191 breakpoint per thread, so we should never be setting a new
8192 step_resume_breakpoint when one is already active. */
8193 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == nullptr);
8194 gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);
8195
8196 infrun_debug_printf ("inserting step-resume breakpoint at %s",
8197 paddress (gdbarch, sr_sal.pc));
8198
8200 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type).release ();
8201}
8202
8203void
8205 struct symtab_and_line sr_sal,
8206 struct frame_id sr_id)
8207{
8209 sr_sal, sr_id,
8211}
8212
8213/* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
8214 This is used to skip a potential signal handler.
8215
8216 This is called with the interrupted function's frame. The signal
8217 handler, when it returns, will resume the interrupted function at
8218 RETURN_FRAME.pc. */
8219
8220static void
8222{
8223 gdb_assert (return_frame != nullptr);
8224
8225 struct gdbarch *gdbarch = get_frame_arch (return_frame);
8226
8227 symtab_and_line sr_sal;
8228 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
8229 sr_sal.section = find_pc_overlay (sr_sal.pc);
8230 sr_sal.pspace = get_frame_program_space (return_frame);
8231
8233 get_stack_frame_id (return_frame),
8235}
8236
8237/* Insert a "step-resume breakpoint" at the previous frame's PC. This
8238 is used to skip a function after stepping into it (for "next" or if
8239 the called function has no debugging information).
8240
8241 The current function has almost always been reached by single
8242 stepping a call or return instruction. NEXT_FRAME belongs to the
8243 current function, and the breakpoint will be set at the caller's
8244 resume address.
8245
8246 This is a separate function rather than reusing
8247 insert_hp_step_resume_breakpoint_at_frame in order to avoid
8248 get_prev_frame, which may stop prematurely (see the implementation
8249 of frame_unwind_caller_id for an example). */
8250
8251static void
8253{
8254 /* We shouldn't have gotten here if we don't know where the call site
8255 is. */
8256 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
8257
8258 struct gdbarch *gdbarch = frame_unwind_caller_arch (next_frame);
8259
8260 symtab_and_line sr_sal;
8262 frame_unwind_caller_pc (next_frame));
8263 sr_sal.section = find_pc_overlay (sr_sal.pc);
8264 sr_sal.pspace = frame_unwind_program_space (next_frame);
8265
8267 frame_unwind_caller_id (next_frame));
8268}
8269
8270/* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
8271 new breakpoint at the target of a jmp_buf. The handling of
8272 longjmp-resume uses the same mechanisms used for handling
8273 "step-resume" breakpoints. */
8274
8275static void
8277{
8278 /* There should never be more than one longjmp-resume breakpoint per
8279 thread, so we should never be setting a new
8280 longjmp_resume_breakpoint when one is already active. */
8281 gdb_assert (inferior_thread ()->control.exception_resume_breakpoint == nullptr);
8282
8283 infrun_debug_printf ("inserting longjmp-resume breakpoint at %s",
8284 paddress (gdbarch, pc));
8285
8288}
8289
8290/* Insert an exception resume breakpoint. TP is the thread throwing
8291 the exception. The block B is the block of the unwinder debug hook
8292 function. FRAME is the frame corresponding to the call to this
8293 function. SYM is the symbol of the function argument holding the
8294 target PC of the exception. */
8295
8296static void
8298 const struct block *b,
8299 frame_info_ptr frame,
8300 struct symbol *sym)
8301{
8302 try
8303 {
8304 struct block_symbol vsym;
8305 struct value *value;
8306 CORE_ADDR handler;
8307 struct breakpoint *bp;
8308
8309 vsym = lookup_symbol_search_name (sym->search_name (),
8310 b, VAR_DOMAIN);
8311 value = read_var_value (vsym.symbol, vsym.block, frame);
8312 /* If the value was optimized out, revert to the old behavior. */
8313 if (! value->optimized_out ())
8314 {
8315 handler = value_as_address (value);
8316
8317 infrun_debug_printf ("exception resume at %lx",
8318 (unsigned long) handler);
8319
8320 /* set_momentary_breakpoint_at_pc creates a thread-specific
8321 breakpoint for the current inferior thread. */
8322 gdb_assert (tp == inferior_thread ());
8324 handler,
8325 bp_exception_resume).release ();
8326
8327 /* set_momentary_breakpoint_at_pc invalidates FRAME. */
8328 frame = nullptr;
8329
8331 }
8332 }
8333 catch (const gdb_exception_error &e)
8334 {
8335 /* We want to ignore errors here. */
8336 }
8337}
8338
8339/* A helper for check_exception_resume that sets an
8340 exception-breakpoint based on a SystemTap probe. */
8341
8342static void
8344 const struct bound_probe *probe,
8345 frame_info_ptr frame)
8346{
8347 struct value *arg_value;
8348 CORE_ADDR handler;
8349 struct breakpoint *bp;
8350
8351 arg_value = probe_safe_evaluate_at_pc (frame, 1);
8352 if (!arg_value)
8353 return;
8354
8355 handler = value_as_address (arg_value);
8356
8357 infrun_debug_printf ("exception resume at %s",
8358 paddress (probe->objfile->arch (), handler));
8359
8360 /* set_momentary_breakpoint_at_pc creates a thread-specific breakpoint
8361 for the current inferior thread. */
8362 gdb_assert (tp == inferior_thread ());
8364 handler, bp_exception_resume).release ();
8366}
8367
8368/* This is called when an exception has been intercepted. Check to
8369 see whether the exception's destination is of interest, and if so,
8370 set an exception resume breakpoint there. */
8371
8372static void
8374 frame_info_ptr frame)
8375{
8376 struct bound_probe probe;
8377 struct symbol *func;
8378
8379 /* First see if this exception unwinding breakpoint was set via a
8380 SystemTap probe point. If so, the probe has two arguments: the
8381 CFA and the HANDLER. We ignore the CFA, extract the handler, and
8382 set a breakpoint there. */
8384 if (probe.prob)
8385 {
8387 return;
8388 }
8389
8390 func = get_frame_function (frame);
8391 if (!func)
8392 return;
8393
8394 try
8395 {
8396 const struct block *b;
8397 int argno = 0;
8398
8399 /* The exception breakpoint is a thread-specific breakpoint on
8400 the unwinder's debug hook, declared as:
8401
8402 void _Unwind_DebugHook (void *cfa, void *handler);
8403
8404 The CFA argument indicates the frame to which control is
8405 about to be transferred. HANDLER is the destination PC.
8406
8407 We ignore the CFA and set a temporary breakpoint at HANDLER.
8408 This is not extremely efficient but it avoids issues in gdb
8409 with computing the DWARF CFA, and it also works even in weird
8410 cases such as throwing an exception from inside a signal
8411 handler. */
8412
8413 b = func->value_block ();
8414 for (struct symbol *sym : block_iterator_range (b))
8415 {
8416 if (!sym->is_argument ())
8417 continue;
8418
8419 if (argno == 0)
8420 ++argno;
8421 else
8422 {
8424 b, frame, sym);
8425 break;
8426 }
8427 }
8428 }
8429 catch (const gdb_exception_error &e)
8430 {
8431 }
8432}
8433
8434static void
8436{
8437 infrun_debug_printf ("stop_waiting");
8438
8439 /* Let callers know we don't want to wait for the inferior anymore. */
8440 ecs->wait_some_more = 0;
8441}
8442
8443/* Like keep_going, but passes the signal to the inferior, even if the
8444 signal is set to nopass. */
8445
8446static void
8448{
8449 gdb_assert (ecs->event_thread->ptid == inferior_ptid);
8450 gdb_assert (!ecs->event_thread->resumed ());
8451
8452 /* Save the pc before execution, to compare with pc after stop. */
8453 ecs->event_thread->prev_pc
8455
8457 {
8458 struct thread_info *tp = ecs->event_thread;
8459
8460 infrun_debug_printf ("%s has trap_expected set, "
8461 "resuming to collect trap",
8462 tp->ptid.to_string ().c_str ());
8463
8464 /* We haven't yet gotten our trap, and either: intercepted a
8465 non-signal event (e.g., a fork); or took a signal which we
8466 are supposed to pass through to the inferior. Simply
8467 continue. */
8468 resume (ecs->event_thread->stop_signal ());
8469 }
8470 else if (step_over_info_valid_p ())
8471 {
8472 /* Another thread is stepping over a breakpoint in-line. If
8473 this thread needs a step-over too, queue the request. In
8474 either case, this resume must be deferred for later. */
8475 struct thread_info *tp = ecs->event_thread;
8476
8479 {
8480 infrun_debug_printf ("step-over already in progress: "
8481 "step-over for %s deferred",
8482 tp->ptid.to_string ().c_str ());
8484 }
8485 else
8486 infrun_debug_printf ("step-over in progress: resume of %s deferred",
8487 tp->ptid.to_string ().c_str ());
8488 }
8489 else
8490 {
8492 int remove_bp;
8493 int remove_wps;
8494 step_over_what step_what;
8495
8496 /* Either the trap was not expected, but we are continuing
8497 anyway (if we got a signal, the user asked it be passed to
8498 the child)
8499 -- or --
8500 We got our expected trap, but decided we should resume from
8501 it.
8502
8503 We're going to run this baby now!
8504
8505 Note that insert_breakpoints won't try to re-insert
8506 already inserted breakpoints. Therefore, we don't
8507 care if breakpoints were already inserted, or not. */
8508
8509 /* If we need to step over a breakpoint, and we're not using
8510 displaced stepping to do so, insert all breakpoints
8511 (watchpoints, etc.) but the one we're stepping over, step one
8512 instruction, and then re-insert the breakpoint when that step
8513 is finished. */
8514
8515 step_what = thread_still_needs_step_over (ecs->event_thread);
8516
8517 remove_bp = (ecs->hit_singlestep_breakpoint
8518 || (step_what & STEP_OVER_BREAKPOINT));
8519 remove_wps = (step_what & STEP_OVER_WATCHPOINT);
8520
8521 /* We can't use displaced stepping if we need to step past a
8522 watchpoint. The instruction copied to the scratch pad would
8523 still trigger the watchpoint. */
8524 if (remove_bp
8525 && (remove_wps || !use_displaced_stepping (ecs->event_thread)))
8526 {
8528 regcache_read_pc (regcache), remove_wps,
8529 ecs->event_thread->global_num);
8530 }
8531 else if (remove_wps)
8532 set_step_over_info (nullptr, 0, remove_wps, -1);
8533
8534 /* If we now need to do an in-line step-over, we need to stop
8535 all other threads. Note this must be done before
8536 insert_breakpoints below, because that removes the breakpoint
8537 we're about to step over, otherwise other threads could miss
8538 it. */
8540 stop_all_threads ("starting in-line step-over");
8541
8542 /* Stop stepping if inserting breakpoints fails. */
8543 try
8544 {
8546 }
8547 catch (const gdb_exception_error &e)
8548 {
8550 stop_waiting (ecs);
8552 return;
8553 }
8554
8555 ecs->event_thread->control.trap_expected = (remove_bp || remove_wps);
8556
8557 resume (ecs->event_thread->stop_signal ());
8558 }
8559
8560 prepare_to_wait (ecs);
8561}
8562
8563/* Called when we should continue running the inferior, because the
8564 current event doesn't cause a user visible stop. This does the
8565 resuming part; waiting for the next event is done elsewhere. */
8566
8567static void
8569{
8571 && ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP)
8573
8575 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
8577}
8578
8579/* This function normally comes after a resume, before
8580 handle_inferior_event exits. It takes care of any last bits of
8581 housekeeping, and sets the all-important wait_some_more flag. */
8582
8583static void
8585{
8586 infrun_debug_printf ("prepare_to_wait");
8587
8588 ecs->wait_some_more = 1;
8589
8590 /* If the target can't async, emulate it by marking the infrun event
8591 handler such that as soon as we get back to the event-loop, we
8592 immediately end up in fetch_inferior_event again calling
8593 target_wait. */
8594 if (!target_can_async_p ())
8596}
8597
8598/* We are done with the step range of a step/next/si/ni command.
8599 Called once for each n of a "step n" operation. */
8600
8601static void
8603{
8604 ecs->event_thread->control.stop_step = 1;
8605 stop_waiting (ecs);
8606}
8607
8608/* Several print_*_reason functions to print why the inferior has stopped.
8609 We always print something when the inferior exits, or receives a signal.
8610 The rest of the cases are dealt with later on in normal_stop and
8611 print_it_typical. Ideally there should be a call to one of these
8612   print_*_reason functions from handle_inferior_event each time
8613 stop_waiting is called.
8614
8615 Note that we don't call these directly, instead we delegate that to
8616 the interpreters, through observers. Interpreters then call these
8617 with whatever uiout is right. */
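/* For instance, for a SIGSEGV exit print_signal_exited_reason below
   produces CLI output along the lines of:

     Program terminated with signal SIGSEGV, Segmentation fault.
     The program no longer exists.

   while print_exited_reason produces something like:

     [Inferior 1 (process 1234) exited normally]

   the inferior number and pid text depending on the session; MI
   consumers see the "signal-name", "signal-meaning" and "exit-code"
   fields instead of the plain text.  */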
8618
8619void
8620print_signal_exited_reason (struct ui_out *uiout, enum gdb_signal siggnal)
8621{
8623 if (uiout->is_mi_like_p ())
8624 uiout->field_string
8626 uiout->text ("\nProgram terminated with signal ");
8628 uiout->field_string ("signal-name",
8629 gdb_signal_to_name (siggnal));
8631 uiout->text (", ");
8633 uiout->field_string ("signal-meaning",
8634 gdb_signal_to_string (siggnal));
8636 uiout->text (".\n");
8637 uiout->text ("The program no longer exists.\n");
8638}
8639
8640void
8641print_exited_reason (struct ui_out *uiout, int exitstatus)
8642{
8643 struct inferior *inf = current_inferior ();
8644 std::string pidstr = target_pid_to_str (ptid_t (inf->pid));
8645
8646 annotate_exited (exitstatus);
8647 if (exitstatus)
8648 {
8649 if (uiout->is_mi_like_p ())
8651 std::string exit_code_str
8652 = string_printf ("0%o", (unsigned int) exitstatus);
8653 uiout->message ("[Inferior %s (%s) exited with code %pF]\n",
8654 plongest (inf->num), pidstr.c_str (),
8655 string_field ("exit-code", exit_code_str.c_str ()));
8656 }
8657 else
8658 {
8659 if (uiout->is_mi_like_p ())
8660 uiout->field_string
8662 uiout->message ("[Inferior %s (%s) exited normally]\n",
8663 plongest (inf->num), pidstr.c_str ());
8664 }
8665}
8666
8667void
8668print_signal_received_reason (struct ui_out *uiout, enum gdb_signal siggnal)
8669{
8670 struct thread_info *thr = inferior_thread ();
8671
8672 infrun_debug_printf ("signal = %s", gdb_signal_to_string (siggnal));
8673
8674 annotate_signal ();
8675
8676 if (uiout->is_mi_like_p ())
8677 ;
8678 else if (show_thread_that_caused_stop ())
8679 {
8680 uiout->text ("\nThread ");
8681 uiout->field_string ("thread-id", print_thread_id (thr));
8682
8683 const char *name = thread_name (thr);
8684 if (name != nullptr)
8685 {
8686 uiout->text (" \"");
8687 uiout->field_string ("name", name);
8688 uiout->text ("\"");
8689 }
8690 }
8691 else
8692 uiout->text ("\nProgram");
8693
8694 if (siggnal == GDB_SIGNAL_0 && !uiout->is_mi_like_p ())
8695 uiout->text (" stopped");
8696 else
8697 {
8698 uiout->text (" received signal ");
8700 if (uiout->is_mi_like_p ())
8701 uiout->field_string
8703 uiout->field_string ("signal-name", gdb_signal_to_name (siggnal));
8705 uiout->text (", ");
8707 uiout->field_string ("signal-meaning", gdb_signal_to_string (siggnal));
8708
8710 struct gdbarch *gdbarch = regcache->arch ();
8712 gdbarch_report_signal_info (gdbarch, uiout, siggnal);
8713
8715 }
8716 uiout->text (".\n");
8717}
8718
8719void
8721{
8722 if (uiout->is_mi_like_p ())
8724 else
8725 uiout->text ("\nNo more reverse-execution history.\n");
8726}
8727
8728/* Print current location without a level number, if we have changed
8729 functions or hit a breakpoint. Print source line if we have one.
8730 bpstat_print contains the logic deciding in detail what to print,
8731 based on the event(s) that just occurred. */
8732
8733static void
8735{
8736 int bpstat_ret;
8737 enum print_what source_flag;
8738 int do_frame_printing = 1;
8739 struct thread_info *tp = inferior_thread ();
8740
8741 bpstat_ret = bpstat_print (tp->control.stop_bpstat, ws.kind ());
8742 switch (bpstat_ret)
8743 {
8744 case PRINT_UNKNOWN:
8745 /* FIXME: cagney/2002-12-01: Given that a frame ID does (or
8746 should) carry around the function and does (or should) use
8747 that when doing a frame comparison. */
8748 if (tp->control.stop_step
8749 && (tp->control.step_frame_id
8752 == find_pc_function (tp->stop_pc ())))
8753 {
8754 /* Finished step, just print source line. */
8755 source_flag = SRC_LINE;
8756 }
8757 else
8758 {
8759 /* Print location and source line. */
8760 source_flag = SRC_AND_LOC;
8761 }
8762 break;
8763 case PRINT_SRC_AND_LOC:
8764 /* Print location and source line. */
8765 source_flag = SRC_AND_LOC;
8766 break;
8767 case PRINT_SRC_ONLY:
8768 source_flag = SRC_LINE;
8769 break;
8770 case PRINT_NOTHING:
8771 /* Something bogus. */
8772 source_flag = SRC_LINE;
8773 do_frame_printing = 0;
8774 break;
8775 default:
8776 internal_error (_("Unknown value."));
8777 }
8778
8779 /* The behavior of this routine with respect to the source
8780 flag is:
8781 SRC_LINE: Print only source line
8782 LOCATION: Print only location
8783 SRC_AND_LOC: Print location and source line. */
8784 if (do_frame_printing)
8785 print_stack_frame (get_selected_frame (nullptr), 0, source_flag, 1);
8786}
8787
8788/* See infrun.h. */
8789
8790void
8791print_stop_event (struct ui_out *uiout, bool displays)
8792{
8793 struct target_waitstatus last;
8794 struct thread_info *tp;
8795
8796 get_last_target_status (nullptr, nullptr, &last);
8797
8798 {
8799 scoped_restore save_uiout = make_scoped_restore (&current_uiout, uiout);
8800
8801 print_stop_location (last);
8802
8803 /* Display the auto-display expressions. */
8804 if (displays)
8805 do_displays ();
8806 }
8807
8808 tp = inferior_thread ();
8809 if (tp->thread_fsm () != nullptr
8810 && tp->thread_fsm ()->finished_p ())
8811 {
8812 struct return_value_info *rv;
8813
8814 rv = tp->thread_fsm ()->return_value ();
8815 if (rv != nullptr)
8816 print_return_value (uiout, rv);
8817 }
8818}
8819
8820/* See infrun.h. */
8821
8822void
8824{
8826 {
8827 if (remove_breakpoints ())
8828 {
8830 gdb_printf (_("Cannot remove breakpoints because "
8831 "program is no longer writable.\nFurther "
8832 "execution is probably impossible.\n"));
8833 }
8834 }
8835}
8836
8837/* The execution context that just caused a normal stop. */
8838
8839struct stop_context
8840{
8841 stop_context ();
8842
8844
8845 bool changed () const;
8846
8847 /* The stop ID. */
8848 ULONGEST stop_id;
8849
8850 /* The event PTID. */
8851
8852 ptid_t ptid;
8853
8854  /* If stopped for a thread event, this is the thread that caused the
8855     stop.  */
8856  thread_info_ref thread;
8857
8858  /* The inferior that caused the stop.  */
8859  int inf_num;
8860};
8861
8862/* Initializes a new stop context. If stopped for a thread event, this
8863 takes a strong reference to the thread. */
8864
8866{
8867 stop_id = get_stop_id ();
8870
8871 if (inferior_ptid != null_ptid)
8872 {
8873 /* Take a strong reference so that the thread can't be deleted
8874 yet. */
8875 thread = thread_info_ref::new_reference (inferior_thread ());
8876 }
8877}
8878
8879/* Return true if the current context no longer matches the saved stop
8880 context. */
8881
8882bool
8884{
8885 if (ptid != inferior_ptid)
8886 return true;
8887 if (inf_num != current_inferior ()->num)
8888 return true;
8889 if (thread != nullptr && thread->state != THREAD_STOPPED)
8890 return true;
8891 if (get_stop_id () != stop_id)
8892 return true;
8893 return false;
8894}
8895
8896/* See infrun.h. */
8897
8898bool
8900{
8901 struct target_waitstatus last;
8902
8903 get_last_target_status (nullptr, nullptr, &last);
8904
8905 new_stop_id ();
8906
8907 /* If an exception is thrown from this point on, make sure to
8908 propagate GDB's knowledge of the executing state to the
8909 frontend/user running state. A QUIT is an easy exception to see
8910 here, so do this before any filtered output. */
8911
8912 ptid_t finish_ptid = null_ptid;
8913
8914 if (!non_stop)
8915 finish_ptid = minus_one_ptid;
8916 else if (last.kind () == TARGET_WAITKIND_SIGNALLED
8917 || last.kind () == TARGET_WAITKIND_EXITED)
8918 {
8919 /* On some targets, we may still have live threads in the
8920 inferior when we get a process exit event. E.g., for
8921 "checkpoint", when the current checkpoint/fork exits,
8922 linux-fork.c automatically switches to another fork from
8923 within target_mourn_inferior. */
8924 if (inferior_ptid != null_ptid)
8925 finish_ptid = ptid_t (inferior_ptid.pid ());
8926 }
8927 else if (last.kind () != TARGET_WAITKIND_NO_RESUMED)
8928 finish_ptid = inferior_ptid;
8929
8930 gdb::optional<scoped_finish_thread_state> maybe_finish_thread_state;
8931 if (finish_ptid != null_ptid)
8932 {
8933 maybe_finish_thread_state.emplace
8934 (user_visible_resume_target (finish_ptid), finish_ptid);
8935 }
8936
8937 /* As we're presenting a stop, and potentially removing breakpoints,
8938 update the thread list so we can tell whether there are threads
8939 running on the target. With target remote, for example, we can
8940 only learn about new threads when we explicitly update the thread
8941 list. Do this before notifying the interpreters about signal
8942 stops, end of stepping ranges, etc., so that the "new thread"
8943 output is emitted before e.g., "Program received signal FOO",
8944 instead of after. */
8946
8948 notify_signal_received (inferior_thread ()->stop_signal ());
8949
8950 /* As with the notification of thread events, we want to delay
8951 notifying the user that we've switched thread context until
8952 the inferior actually stops.
8953
8954 There's no point in saying anything if the inferior has exited.
8955 Note that SIGNALLED here means "exited with a signal", not
8956 "received a signal".
8957
8958 Also skip saying anything in non-stop mode. In that mode, as we
8959 don't want GDB to switch threads behind the user's back, to avoid
8960 races where the user is typing a command to apply to thread x,
8961 but GDB switches to thread y before the user finishes entering
8962 the command, fetch_inferior_event installs a cleanup to restore
8963 the current thread back to the thread the user had selected right
8964 after this event is handled, so we're not really switching, only
8965 informing of a stop. */
8966 if (!non_stop)
8967 {
8968 if ((last.kind () != TARGET_WAITKIND_SIGNALLED
8969 && last.kind () != TARGET_WAITKIND_EXITED
8970 && last.kind () != TARGET_WAITKIND_NO_RESUMED)
8973 {
8975 {
8977 gdb_printf (_("[Switching to %s]\n"),
8978 target_pid_to_str (inferior_ptid).c_str ());
8980 }
8981 }
8982
8984 }
8985
8986 if (last.kind () == TARGET_WAITKIND_NO_RESUMED)
8987 {
8990 {
8992 gdb_printf (_("No unwaited-for children left.\n"));
8993 }
8994 }
8995
8996 /* Note: this depends on the update_thread_list call above. */
8998
8999 /* If an auto-display called a function and that got a signal,
9000 delete that auto-display to avoid an infinite recursion. */
9001
9004
9006 {
9008 }
9009
9010 /* Let the user/frontend see the threads as stopped. */
9011 maybe_finish_thread_state.reset ();
9012
9013 /* Select innermost stack frame - i.e., current frame is frame 0,
9014 and current location is based on that. Handle the case where the
9015 dummy call is returning after being stopped. E.g. the dummy call
9016 previously hit a breakpoint. (If the dummy call returns
9017 normally, we won't reach here.) Do this before the stop hook is
9018 run, so that it doesn't get to see the temporary dummy frame,
9019 which is not where we'll present the stop. */
9020 if (has_stack_frames ())
9021 {
9023 {
9024 /* Pop the empty frame that contains the stack dummy. This
9025 also restores inferior state prior to the call (struct
9026 infcall_suspend_state). */
9028
9029 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
9030 frame_pop (frame);
9031 /* frame_pop calls reinit_frame_cache as the last thing it
9032 does which means there's now no selected frame. */
9033 }
9034
9036
9037 /* Set the current source location. */
9039 }
9040
9041 /* Look up the hook_stop and run it (CLI internally handles problem
9042 of stop_command's pre-hook not existing). */
9043 stop_context saved_context;
9044
9045 try
9046 {
9048 }
9049 catch (const gdb_exception_error &ex)
9050 {
9052 "Error while running hook_stop:\n");
9053 }
9054
9055 /* If the stop hook resumes the target, then there's no point in
9056 trying to notify about the previous stop; its context is
9057 gone. Likewise if the command switches thread or inferior --
9058 the observers would print a stop for the wrong
9059 thread/inferior. */
9060 if (saved_context.changed ())
9061 return true;
9062
9063 /* Notify observers about the stop. This is where the interpreters
9064 print the stop event. */
9065 notify_normal_stop ((inferior_ptid != null_ptid
9066 ? inferior_thread ()->control.stop_bpstat
9067 : nullptr),
9070
9071 if (target_has_execution ())
9072 {
9073 if (last.kind () != TARGET_WAITKIND_SIGNALLED
9074 && last.kind () != TARGET_WAITKIND_EXITED
9075 && last.kind () != TARGET_WAITKIND_NO_RESUMED)
9076 /* Delete the breakpoint we stopped at, if it wants to be deleted.
9077 Delete any breakpoint that is to be deleted at the next stop. */
9078 breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
9079 }
9080
9081 return false;
9082}
9083
9084int
9086{
9087 return signal_stop[signo];
9088}
9089
9090int
9092{
9093 return signal_print[signo];
9094}
9095
9096int
9098{
9099 return signal_program[signo];
9100}
9101
9102static void
9104{
9105 if (signo == -1)
9106 {
9107 for (signo = 0; signo < (int) GDB_SIGNAL_LAST; signo++)
9108 signal_cache_update (signo);
9109
9110 return;
9111 }
9112
9113 signal_pass[signo] = (signal_stop[signo] == 0
9114 && signal_print[signo] == 0
9115 && signal_program[signo] == 1
9116 && signal_catch[signo] == 0);
9117}
9118
9119int
9120signal_stop_update (int signo, int state)
9121{
9122 int ret = signal_stop[signo];
9123
9124 signal_stop[signo] = state;
9125 signal_cache_update (signo);
9126 return ret;
9127}
9128
9129int
9130signal_print_update (int signo, int state)
9131{
9132 int ret = signal_print[signo];
9133
9134 signal_print[signo] = state;
9135 signal_cache_update (signo);
9136 return ret;
9137}
9138
9139int
9140signal_pass_update (int signo, int state)
9141{
9142 int ret = signal_program[signo];
9143
9144 signal_program[signo] = state;
9145 signal_cache_update (signo);
9146 return ret;
9147}
9148
9149/* Update the global 'signal_catch' from INFO and notify the
9150 target. */
9151
9152void
9153signal_catch_update (const unsigned int *info)
9154{
9155 int i;
9156
9157 for (i = 0; i < GDB_SIGNAL_LAST; ++i)
9158 signal_catch[i] = info[i] > 0;
9161}
9162
9163static void
9164sig_print_header (void)
9165{
9166  gdb_printf (_("Signal        Stop\tPrint\tPass "
9167 "to program\tDescription\n"));
9168}
9169
9170static void
9171sig_print_info (enum gdb_signal oursig)
9172{
9173 const char *name = gdb_signal_to_name (oursig);
9174 int name_padding = 13 - strlen (name);
9175
9176 if (name_padding <= 0)
9177 name_padding = 0;
9178
9179 gdb_printf ("%s", name);
9180 gdb_printf ("%*.*s ", name_padding, name_padding, " ");
9181 gdb_printf ("%s\t", signal_stop[oursig] ? "Yes" : "No");
9182 gdb_printf ("%s\t", signal_print[oursig] ? "Yes" : "No");
9183 gdb_printf ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
9184 gdb_printf ("%s\n", gdb_signal_to_string (oursig));
9185}
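/* Taken together, sig_print_header and sig_print_info produce the
   table shown by "info signals"; a typical row looks roughly like:

     Signal        Stop	Print	Pass to program	Description
     SIGHUP        Yes	Yes	Yes		Hangup

   with the exact spacing depending on the tab stops and on the length
   of the signal name (padded to 13 columns above).  */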
9186
9187/* Specify how various signals in the inferior should be handled. */
9188
9189static void
9190handle_command (const char *args, int from_tty)
9191{
9192 int digits, wordlen;
9193 int sigfirst, siglast;
9194 enum gdb_signal oursig;
9195 int allsigs;
9196
9197 if (args == nullptr)
9198 {
9199 error_no_arg (_("signal to handle"));
9200 }
9201
9202 /* Allocate and zero an array of flags for which signals to handle. */
9203
9204 const size_t nsigs = GDB_SIGNAL_LAST;
9205 unsigned char sigs[nsigs] {};
9206
9207 /* Break the command line up into args. */
9208
9209 gdb_argv built_argv (args);
9210
9211 /* Walk through the args, looking for signal oursigs, signal names, and
9212 actions. Signal numbers and signal names may be interspersed with
9213 actions, with the actions being performed for all signals cumulatively
9214 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
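  /* For example, "handle SIGUSR1 nostop noprint pass" applies all three
     actions to SIGUSR1, while "handle 5-10 stop print" applies "stop" and
     "print" to each signal in the numeric range 5-10, as interpreted by
     gdb_signal_from_command below.  */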
9215
9216 for (char *arg : built_argv)
9217 {
9218 wordlen = strlen (arg);
9219 for (digits = 0; isdigit (arg[digits]); digits++)
9220 {;
9221 }
9222 allsigs = 0;
9223 sigfirst = siglast = -1;
9224
9225 if (wordlen >= 1 && !strncmp (arg, "all", wordlen))
9226 {
9227 /* Apply action to all signals except those used by the
9228 debugger. Silently skip those. */
9229 allsigs = 1;
9230 sigfirst = 0;
9231 siglast = nsigs - 1;
9232 }
9233 else if (wordlen >= 1 && !strncmp (arg, "stop", wordlen))
9234 {
9235 SET_SIGS (nsigs, sigs, signal_stop);
9236 SET_SIGS (nsigs, sigs, signal_print);
9237 }
9238 else if (wordlen >= 1 && !strncmp (arg, "ignore", wordlen))
9239 {
9240 UNSET_SIGS (nsigs, sigs, signal_program);
9241 }
9242 else if (wordlen >= 2 && !strncmp (arg, "print", wordlen))
9243 {
9244 SET_SIGS (nsigs, sigs, signal_print);
9245 }
9246 else if (wordlen >= 2 && !strncmp (arg, "pass", wordlen))
9247 {
9248 SET_SIGS (nsigs, sigs, signal_program);
9249 }
9250 else if (wordlen >= 3 && !strncmp (arg, "nostop", wordlen))
9251 {
9252 UNSET_SIGS (nsigs, sigs, signal_stop);
9253 }
9254 else if (wordlen >= 3 && !strncmp (arg, "noignore", wordlen))
9255 {
9256 SET_SIGS (nsigs, sigs, signal_program);
9257 }
9258 else if (wordlen >= 4 && !strncmp (arg, "noprint", wordlen))
9259 {
9260 UNSET_SIGS (nsigs, sigs, signal_print);
9261 UNSET_SIGS (nsigs, sigs, signal_stop);
9262 }
9263 else if (wordlen >= 4 && !strncmp (arg, "nopass", wordlen))
9264 {
9265 UNSET_SIGS (nsigs, sigs, signal_program);
9266 }
9267 else if (digits > 0)
9268 {
9269 /* It is numeric. The numeric signal refers to our own
9270	     internal signal numbering from target.h, not to the host/target
9271 signal number. This is a feature; users really should be
9272 using symbolic names anyway, and the common ones like
9273 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
9274
9275 sigfirst = siglast = (int)
9276 gdb_signal_from_command (atoi (arg));
9277 if (arg[digits] == '-')
9278 {
9279 siglast = (int)
9280 gdb_signal_from_command (atoi (arg + digits + 1));
9281 }
9282 if (sigfirst > siglast)
9283 {
9284 /* Bet he didn't figure we'd think of this case... */
9285 std::swap (sigfirst, siglast);
9286 }
9287 }
9288 else
9289 {
9290 oursig = gdb_signal_from_name (arg);
9291 if (oursig != GDB_SIGNAL_UNKNOWN)
9292 {
9293 sigfirst = siglast = (int) oursig;
9294 }
9295 else
9296 {
9297 /* Not a number and not a recognized flag word => complain. */
9298 error (_("Unrecognized or ambiguous flag word: \"%s\"."), arg);
9299 }
9300 }
9301
9302 /* If any signal numbers or symbol names were found, set flags for
9303 which signals to apply actions to. */
9304
9305 for (int signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
9306 {
9307 switch ((enum gdb_signal) signum)
9308 {
9309 case GDB_SIGNAL_TRAP:
9310 case GDB_SIGNAL_INT:
9311 if (!allsigs && !sigs[signum])
9312 {
9313 if (query (_("%s is used by the debugger.\n\
9314Are you sure you want to change it? "),
9315 gdb_signal_to_name ((enum gdb_signal) signum)))
9316 {
9317 sigs[signum] = 1;
9318 }
9319 else
9320 gdb_printf (_("Not confirmed, unchanged.\n"));
9321 }
9322 break;
9323 case GDB_SIGNAL_0:
9324 case GDB_SIGNAL_DEFAULT:
9325 case GDB_SIGNAL_UNKNOWN:
9326 /* Make sure that "all" doesn't print these. */
9327 break;
9328 default:
9329 sigs[signum] = 1;
9330 break;
9331 }
9332 }
9333 }
9334
9335 for (int signum = 0; signum < nsigs; signum++)
9336 if (sigs[signum])
9337 {
9341
9342 if (from_tty)
9343 {
9344 /* Show the results. */
9346 for (; signum < nsigs; signum++)
9347 if (sigs[signum])
9348 sig_print_info ((enum gdb_signal) signum);
9349 }
9350
9351 break;
9352 }
9353}
9354
9355/* Complete the "handle" command. */
9356
9357static void
9359 completion_tracker &tracker,
9360 const char *text, const char *word)
9361{
9362 static const char * const keywords[] =
9363 {
9364 "all",
9365 "stop",
9366 "ignore",
9367 "print",
9368 "pass",
9369 "nostop",
9370 "noignore",
9371 "noprint",
9372 "nopass",
9373 nullptr,
9374 };
9375
9376 signal_completer (ignore, tracker, text, word);
9377 complete_on_enum (tracker, keywords, word, word);
9378}
9379
9380enum gdb_signal
9382{
9383 if (num >= 1 && num <= 15)
9384 return (enum gdb_signal) num;
9385 error (_("Only signals 1-15 are valid as numeric signals.\n\
9386Use \"info signals\" for a list of symbolic signals."));
9387}
9388
9389/* Print current contents of the tables set by the handle command.
9390 It is possible we should just be printing signals actually used
9391 by the current target (but for things to work right when switching
9392 targets, all signals should be in the signal tables). */
9393
9394static void
9395info_signals_command (const char *signum_exp, int from_tty)
9396{
9397 enum gdb_signal oursig;
9398
9400
9401 if (signum_exp)
9402 {
9403 /* First see if this is a symbol name. */
9404 oursig = gdb_signal_from_name (signum_exp);
9405 if (oursig == GDB_SIGNAL_UNKNOWN)
9406 {
9407 /* No, try numeric. */
9408 oursig =
9410 }
9411 sig_print_info (oursig);
9412 return;
9413 }
9414
9415 gdb_printf ("\n");
9416 /* These ugly casts brought to you by the native VAX compiler. */
9417 for (oursig = GDB_SIGNAL_FIRST;
9418 (int) oursig < (int) GDB_SIGNAL_LAST;
9419 oursig = (enum gdb_signal) ((int) oursig + 1))
9420 {
9421 QUIT;
9422
9423 if (oursig != GDB_SIGNAL_UNKNOWN
9424 && oursig != GDB_SIGNAL_DEFAULT && oursig != GDB_SIGNAL_0)
9425 sig_print_info (oursig);
9426 }
9427
9428 gdb_printf (_("\nUse the \"handle\" command "
9429 "to change these tables.\n"));
9430}
9431
9432/* The $_siginfo convenience variable is a bit special. We don't know
9433 for sure the type of the value until we actually have a chance to
9434 fetch the data. The type can change depending on gdbarch, so it is
9435   also dependent on which thread you have selected.  We deal with
9436   this in two parts:
9437 1. making $_siginfo be an internalvar that creates a new value on
9438 access.
9439
9440 2. making the value of $_siginfo be an lval_computed value. */
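/* In practice this means that "print $_siginfo" (or an assignment to
   one of its members) is re-evaluated against the currently selected
   thread on every access, with reads and writes routed through the
   lval_computed hooks defined below.  */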
9441
9442/* This function implements the lval_computed support for reading a
9443 $_siginfo value. */
9444
9445static void
9447{
9448 LONGEST transferred;
9449
9450  /* If we can access registers, we can also access $_siginfo, and
9451     vice versa.  */
9453
9454 transferred =
9455 target_read (current_inferior ()->top_target (),
9457 nullptr,
9458 v->contents_all_raw ().data (),
9459 v->offset (),
9460 v->type ()->length ());
9461
9462 if (transferred != v->type ()->length ())
9463 error (_("Unable to read siginfo"));
9464}
9465
9466/* This function implements the lval_computed support for writing a
9467 $_siginfo value. */
9468
9469static void
9470siginfo_value_write (struct value *v, struct value *fromval)
9471{
9472 LONGEST transferred;
9473
9474  /* If we can access registers, we can also access $_siginfo, and
9475     vice versa.  */
9477
9478 transferred = target_write (current_inferior ()->top_target (),
9480 nullptr,
9481 fromval->contents_all_raw ().data (),
9482 v->offset (),
9483 fromval->type ()->length ());
9484
9485 if (transferred != fromval->type ()->length ())
9486 error (_("Unable to write siginfo"));
9487}
9488
9489static const struct lval_funcs siginfo_value_funcs =
9490 {
9493 };
9494
9495/* Return a new value with the correct type for the siginfo object of
9496 the current thread using architecture GDBARCH. Return a void value
9497 if there's no object available. */
9498
9499static struct value *
9501 void *ignore)
9502{
9503 if (target_has_stack ()
9504 && inferior_ptid != null_ptid
9506 {
9508
9510 }
9511
9512 return value::allocate (builtin_type (gdbarch)->builtin_void);
9513}
9514
9515
9516/* infcall_suspend_state contains state about the program itself like its
9517 registers and any signal it received when it last stopped.
9518 This state must be restored regardless of how the inferior function call
9519 ends (either successfully, or after it hits a breakpoint or signal)
9520 if the program is to properly continue where it left off. */
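/* The intended usage, roughly, is for the inferior-call code to capture
   this state before the call and put it back when the call is finished
   or abandoned, e.g.:

     infcall_suspend_state_up saved = save_infcall_suspend_state ();
     ...run the inferior function call...
     restore_infcall_suspend_state (saved.release ());

   using the save/restore/discard entry points defined further below.  */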
9521
9523{
9524public:
9525 /* Capture state from GDBARCH, TP, and REGCACHE that must be restored
9526 once the inferior function call has finished. */
9528 const struct thread_info *tp,
9529 struct regcache *regcache)
9531 {
9533
9534 gdb::unique_xmalloc_ptr<gdb_byte> siginfo_data;
9535
9537 {
9539 size_t len = type->length ();
9540
9541 siginfo_data.reset ((gdb_byte *) xmalloc (len));
9542
9543 if (target_read (current_inferior ()->top_target (),
9545 siginfo_data.get (), 0, len) != len)
9546 {
9547 /* Errors ignored. */
9548 siginfo_data.reset (nullptr);
9549 }
9550 }
9551
9552 if (siginfo_data)
9553 {
9555 m_siginfo_data = std::move (siginfo_data);
9556 }
9557 }
9558
9559 /* Return a pointer to the stored register state. */
9560
9562 {
9563 return m_registers.get ();
9564 }
9565
9566 /* Restores the stored state into GDBARCH, TP, and REGCACHE. */
9567
9568 void restore (struct gdbarch *gdbarch,
9569 struct thread_info *tp,
9570 struct regcache *regcache) const
9571 {
9573
9575 {
9577
9578 /* Errors ignored. */
9579 target_write (current_inferior ()->top_target (),
9581 m_siginfo_data.get (), 0, type->length ());
9582 }
9583
9584 /* The inferior can be gone if the user types "print exit(0)"
9585 (and perhaps other times). */
9586 if (target_has_execution ())
9587 /* NB: The register write goes through to the target. */
9589 }
9590
9591private:
9592 /* How the current thread stopped before the inferior function call was
9593 executed. */
9595
9596 /* The registers before the inferior function call was executed. */
9597 std::unique_ptr<readonly_detached_regcache> m_registers;
9598
9599  /* The gdbarch that defines the format of M_SIGINFO_DATA, or NULL if it
9600     is not present.  */
9600 struct gdbarch *m_siginfo_gdbarch = nullptr;
9601
9602  /* The siginfo data.  Its format depends on M_SIGINFO_GDBARCH and it has
9603     a length of gdbarch_get_siginfo_type ()->length ().  For a different
9604     gdbarch the content would be invalid.  */
9605 gdb::unique_xmalloc_ptr<gdb_byte> m_siginfo_data;
9606};
9607
9610{
9611 struct thread_info *tp = inferior_thread ();
9613 struct gdbarch *gdbarch = regcache->arch ();
9614
9615 infcall_suspend_state_up inf_state
9616 (new struct infcall_suspend_state (gdbarch, tp, regcache));
9617
9618 /* Having saved the current state, adjust the thread state, discarding
9619 any stop signal information. The stop signal is not useful when
9620 starting an inferior function call, and run_inferior_call will not use
9621 the signal due to its `proceed' call with GDB_SIGNAL_0. */
9622 tp->set_stop_signal (GDB_SIGNAL_0);
9623
9624 return inf_state;
9625}
9626
9627/* Restore inferior session state to INF_STATE. */
9628
9629void
9631{
9632 struct thread_info *tp = inferior_thread ();
9634 struct gdbarch *gdbarch = regcache->arch ();
9635
9636 inf_state->restore (gdbarch, tp, regcache);
9638}
9639
9640void
9642{
9643 delete inf_state;
9644}
9645
9648{
9649 return inf_state->registers ();
9650}
9651
9652/* infcall_control_state contains state regarding gdb's control of the
9653 inferior itself like stepping control. It also contains session state like
9654 the user's currently selected frame. */
9655
9657{
9660
9661 /* Other fields: */
9664
9665 /* ID and level of the selected frame when the inferior function
9666 call was made. */
9669};
9670
9671/* Save all of the information associated with the inferior<==>gdb
9672 connection. */
9673
9676{
9677 infcall_control_state_up inf_status (new struct infcall_control_state);
9678 struct thread_info *tp = inferior_thread ();
9679 struct inferior *inf = current_inferior ();
9680
9681 inf_status->thread_control = tp->control;
9682 inf_status->inferior_control = inf->control;
9683
9684 tp->control.step_resume_breakpoint = nullptr;
9686
9687 /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
9688 chain. If caller's caller is walking the chain, they'll be happier if we
9689 hand them back the original chain when restore_infcall_control_state is
9690 called. */
9692
9693 /* Other fields: */
9694 inf_status->stop_stack_dummy = stop_stack_dummy;
9695 inf_status->stopped_by_random_signal = stopped_by_random_signal;
9696
9697 save_selected_frame (&inf_status->selected_frame_id,
9698 &inf_status->selected_frame_level);
9699
9700 return inf_status;
9701}
9702
9703/* Restore inferior session state to INF_STATUS. */
9704
9705void
9707{
9708 struct thread_info *tp = inferior_thread ();
9709 struct inferior *inf = current_inferior ();
9710
9713
9717
9718 /* Handle the bpstat_copy of the chain. */
9720
9721 tp->control = inf_status->thread_control;
9722 inf->control = inf_status->inferior_control;
9723
9724 /* Other fields: */
9725 stop_stack_dummy = inf_status->stop_stack_dummy;
9727
9728 if (target_has_stack ())
9729 {
9731 inf_status->selected_frame_level);
9732 }
9733
9734 delete inf_status;
9735}
9736
9737void
9739{
9740 if (inf_status->thread_control.step_resume_breakpoint)
9743
9747
9748 /* See save_infcall_control_state for info on stop_bpstat. */
9750
9751 delete inf_status;
9752}
9753
9754/* See infrun.h. */
9755
9756void
9758{
9759 clear_internalvar (lookup_internalvar ("_exitsignal"));
9760 clear_internalvar (lookup_internalvar ("_exitcode"));
9761}
9762
9763
9764/* User interface for reverse debugging:
9765 Set exec-direction / show exec-direction commands
9766 (returns error unless target implements to_set_exec_direction method). */
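/* For example:

     (gdb) set exec-direction reverse
     (gdb) show exec-direction
     Reverse.

   Setting "reverse" on a target that cannot execute backwards fails
   with "Target does not support this operation.".  */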
9767
9769static const char exec_forward[] = "forward";
9770static const char exec_reverse[] = "reverse";
9771static const char *exec_direction = exec_forward;
9772static const char *const exec_direction_names[] = {
9775 nullptr
9776};
9777
9778static void
9779set_exec_direction_func (const char *args, int from_tty,
9780 struct cmd_list_element *cmd)
9781{
9783 {
9784 if (!strcmp (exec_direction, exec_forward))
9786 else if (!strcmp (exec_direction, exec_reverse))
9788 }
9789 else
9790 {
9792 error (_("Target does not support this operation."));
9793 }
9794}
9795
9796static void
9797show_exec_direction_func (struct ui_file *out, int from_tty,
9798 struct cmd_list_element *cmd, const char *value)
9799{
9800 switch (execution_direction) {
9801 case EXEC_FORWARD:
9802 gdb_printf (out, _("Forward.\n"));
9803 break;
9804 case EXEC_REVERSE:
9805 gdb_printf (out, _("Reverse.\n"));
9806 break;
9807 default:
9808 internal_error (_("bogus execution_direction value: %d"),
9809 (int) execution_direction);
9810 }
9811}
9812
9813static void
9814show_schedule_multiple (struct ui_file *file, int from_tty,
9815 struct cmd_list_element *c, const char *value)
9816{
9817 gdb_printf (file, _("Resuming the execution of threads "
9818 "of all processes is %s.\n"), value);
9819}
9820
9821/* Implementation of `siginfo' variable. */
9822
9824{
9826 nullptr,
9827};
9828
9829/* Callback for infrun's target events source. This is marked when a
9830 thread has a pending status to process. */
9831
9832static void
9838
9839#if GDB_SELF_TEST
9840namespace selftests
9841{
9842
9843/* Verify that when two threads with the same ptid exist (from two different
9844 targets) and one of them changes ptid, we only update inferior_ptid if
9845 it is appropriate. */
9846
9847static void
9849{
9850 gdbarch *arch = current_inferior ()->gdbarch;
9851
9852 /* The thread which inferior_ptid represents changes ptid. */
9853 {
9855
9856 scoped_mock_context<test_target_ops> target1 (arch);
9857 scoped_mock_context<test_target_ops> target2 (arch);
9858
9859 ptid_t old_ptid (111, 222);
9860 ptid_t new_ptid (111, 333);
9861
9862 target1.mock_inferior.pid = old_ptid.pid ();
9863 target1.mock_thread.ptid = old_ptid;
9864 target1.mock_inferior.ptid_thread_map.clear ();
9865 target1.mock_inferior.ptid_thread_map[old_ptid] = &target1.mock_thread;
9866
9867 target2.mock_inferior.pid = old_ptid.pid ();
9868 target2.mock_thread.ptid = old_ptid;
9869 target2.mock_inferior.ptid_thread_map.clear ();
9870 target2.mock_inferior.ptid_thread_map[old_ptid] = &target2.mock_thread;
9871
9872 auto restore_inferior_ptid = make_scoped_restore (&inferior_ptid, old_ptid);
9873 set_current_inferior (&target1.mock_inferior);
9874
9875 thread_change_ptid (&target1.mock_target, old_ptid, new_ptid);
9876
9877 gdb_assert (inferior_ptid == new_ptid);
9878 }
9879
9880 /* A thread with the same ptid as inferior_ptid, but from another target,
9881 changes ptid. */
9882 {
9884
9885 scoped_mock_context<test_target_ops> target1 (arch);
9886 scoped_mock_context<test_target_ops> target2 (arch);
9887
9888 ptid_t old_ptid (111, 222);
9889 ptid_t new_ptid (111, 333);
9890
9891 target1.mock_inferior.pid = old_ptid.pid ();
9892 target1.mock_thread.ptid = old_ptid;
9893 target1.mock_inferior.ptid_thread_map.clear ();
9894 target1.mock_inferior.ptid_thread_map[old_ptid] = &target1.mock_thread;
9895
9896 target2.mock_inferior.pid = old_ptid.pid ();
9897 target2.mock_thread.ptid = old_ptid;
9898 target2.mock_inferior.ptid_thread_map.clear ();
9899 target2.mock_inferior.ptid_thread_map[old_ptid] = &target2.mock_thread;
9900
9901 auto restore_inferior_ptid = make_scoped_restore (&inferior_ptid, old_ptid);
9902 set_current_inferior (&target2.mock_inferior);
9903
9904 thread_change_ptid (&target1.mock_target, old_ptid, new_ptid);
9905
9906 gdb_assert (inferior_ptid == old_ptid);
9907 }
9908}
9909
9910} /* namespace selftests */
9911
9912#endif /* GDB_SELF_TEST */
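Both test blocks lean on make_scoped_restore so that inferior_ptid is put back no matter how the block exits. Below is a minimal standalone sketch of that save-and-restore-on-scope-exit idiom; scoped_restore_value is an invented stand-in for illustration, not GDB's scoped_restore.

#include <cassert>

/* Save *VAR, install VALUE, and put the saved value back when the object
   goes out of scope, even on early return or exception.  */
template<typename T>
class scoped_restore_value
{
public:
  scoped_restore_value (T *var, T value)
    : m_var (var), m_saved (*var)
  { *var = value; }

  ~scoped_restore_value ()
  { *m_var = m_saved; }

private:
  T *m_var;
  T m_saved;
};

static int current_ptid = 0;

int
main ()
{
  {
    scoped_restore_value<int> restore (&current_ptid, 222);
    assert (current_ptid == 222);	/* the test runs with the override */
  }
  assert (current_ptid == 0);		/* restored when the block ends */
}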
9913
9914void _initialize_infrun ();
9915void
9916_initialize_infrun ()
9917{
9918 struct cmd_list_element *c;
9919
9920 /* Register extra event sources in the event loop. */
9921 infrun_async_inferior_event_token
9922 = create_async_event_handler (infrun_async_inferior_event_handler, NULL,
9923 "infrun");
9924
9925 cmd_list_element *info_signals_cmd
9926 = add_info ("signals", info_signals_command, _("\
9927What debugger does when program gets various signals.\n\
9928Specify a signal as argument to print info on that signal only."));
9929 add_info_alias ("handle", info_signals_cmd, 0);
9930
9931 c = add_com ("handle", class_run, handle_command, _("\
9932Specify how to handle signals.\n\
9933Usage: handle SIGNAL [ACTIONS]\n\
9934Args are signals and actions to apply to those signals.\n\
9935If no actions are specified, the current settings for the specified signals\n\
9936will be displayed instead.\n\
9937\n\
9938Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
9939from 1-15 are allowed for compatibility with old versions of GDB.\n\
9940Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
9941The special arg \"all\" is recognized to mean all signals except those\n\
9942used by the debugger, typically SIGTRAP and SIGINT.\n\
9943\n\
9944Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
9945\"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
9946Stop means reenter debugger if this signal happens (implies print).\n\
9947Print means print a message if this signal happens.\n\
9948Pass means let program see this signal; otherwise program doesn't know.\n\
9949Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
9950Pass and Stop may be combined.\n\
9951\n\
9952Multiple signals may be specified. Signal numbers and signal names\n\
9953may be interspersed with actions, with the actions being performed for\n\
9954all signals cumulatively specified."));
9955 set_cmd_completer (c, handle_completer);
9956
9957 stop_command = add_cmd ("stop", class_obscure,
9958 not_just_help_class_command, _("\
9959There is no `stop' command, but you can set a hook on `stop'.\n\
9960This allows you to set a list of commands to be run each time execution\n\
9961of the program stops."), &cmdlist);
9962
9963 add_setshow_boolean_cmd
9964 ("infrun", class_maintenance, &debug_infrun,
9965 _("Set inferior debugging."),
9966 _("Show inferior debugging."),
9967 _("When non-zero, inferior specific debugging is enabled."),
9968 nullptr, show_debug_infrun,
9969 &setdebuglist, &showdebuglist);
9970 add_setshow_boolean_cmd ("non-stop", no_class,
9971 &non_stop_1, _("\
9972Set whether gdb controls the inferior in non-stop mode."), _("\
9973Show whether gdb controls the inferior in non-stop mode."), _("\
9974When debugging a multi-threaded program and this setting is\n\
9975off (the default, also called all-stop mode), when one thread stops\n\
9976(for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
9977all other threads in the program while you interact with the thread of\n\
9978interest. When you continue or step a thread, you can allow the other\n\
9979threads to run, or have them remain stopped, but while you inspect any\n\
9980thread's state, all threads stop.\n\
9981\n\
9982In non-stop mode, when one thread stops, other threads can continue\n\
9983to run freely. You'll be able to step each thread independently,\n\
9984leave it stopped or free to run as needed."),
9985 set_non_stop,
9986 show_non_stop,
9987 &setlist,
9988 &showlist);
9989
9990 for (size_t i = 0; i < GDB_SIGNAL_LAST; i++)
9991 {
9992 signal_stop[i] = 1;
9993 signal_print[i] = 1;
9994 signal_program[i] = 1;
9995 signal_catch[i] = 0;
9996 }
9997
9998 /* Signals caused by debugger's own actions should not be given to
9999 the program afterwards.
10000
10001 Do not deliver GDB_SIGNAL_TRAP by default, except when the user
10002 explicitly specifies that it should be delivered to the target
10003 program. Typically, that would occur when a user is debugging a
10004 target monitor on a simulator: the target monitor sets a
10005 breakpoint; the simulator encounters this breakpoint and halts
10006 the simulation handing control to GDB; GDB, noting that the stop
10007 address doesn't map to any known breakpoint, returns control back
10008 to the simulator; the simulator then delivers the hardware
10009 equivalent of a GDB_SIGNAL_TRAP to the program being
10010 debugged. */
10011 signal_program[GDB_SIGNAL_TRAP] = 0;
10012 signal_program[GDB_SIGNAL_INT] = 0;
10013
10014 /* Signals that are not errors should not normally enter the debugger. */
10015 signal_stop[GDB_SIGNAL_ALRM] = 0;
10016 signal_print[GDB_SIGNAL_ALRM] = 0;
10017 signal_stop[GDB_SIGNAL_VTALRM] = 0;
10018 signal_print[GDB_SIGNAL_VTALRM] = 0;
10019 signal_stop[GDB_SIGNAL_PROF] = 0;
10020 signal_print[GDB_SIGNAL_PROF] = 0;
10021 signal_stop[GDB_SIGNAL_CHLD] = 0;
10022 signal_print[GDB_SIGNAL_CHLD] = 0;
10023 signal_stop[GDB_SIGNAL_IO] = 0;
10024 signal_print[GDB_SIGNAL_IO] = 0;
10025 signal_stop[GDB_SIGNAL_POLL] = 0;
10026 signal_print[GDB_SIGNAL_POLL] = 0;
10027 signal_stop[GDB_SIGNAL_URG] = 0;
10028 signal_print[GDB_SIGNAL_URG] = 0;
10029 signal_stop[GDB_SIGNAL_WINCH] = 0;
10030 signal_print[GDB_SIGNAL_WINCH] = 0;
10031 signal_stop[GDB_SIGNAL_PRIO] = 0;
10032 signal_print[GDB_SIGNAL_PRIO] = 0;
10033
10034 /* These signals are used internally by user-level thread
10035 implementations. (See signal(5) on Solaris.) Like the above
10036 signals, a healthy program receives and handles them as part of
10037 its normal operation. */
10038 signal_stop[GDB_SIGNAL_LWP] = 0;
10039 signal_print[GDB_SIGNAL_LWP] = 0;
10040 signal_stop[GDB_SIGNAL_WAITING] = 0;
10041 signal_print[GDB_SIGNAL_WAITING] = 0;
10042 signal_stop[GDB_SIGNAL_CANCEL] = 0;
10043 signal_print[GDB_SIGNAL_CANCEL] = 0;
10044 signal_stop[GDB_SIGNAL_LIBRT] = 0;
10045 signal_print[GDB_SIGNAL_LIBRT] = 0;
10046
10047 /* Update cached state. */
10048 signal_cache_update (-1);
10049
10050 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
10051 &stop_on_solib_events, _("\
10052Set stopping for shared library events."), _("\
10053Show stopping for shared library events."), _("\
10054If nonzero, gdb will give control to the user when the dynamic linker\n\
10055notifies gdb of shared library events. The most common event of interest\n\
10056to the user would be loading/unloading of a new library."),
10057 set_stop_on_solib_events,
10058 show_stop_on_solib_events,
10059 &setlist, &showlist);
10060
10061 add_setshow_enum_cmd ("follow-fork-mode", class_run,
10062 follow_fork_mode_kind_names,
10063 &follow_fork_mode_string, _("\
10064Set debugger response to a program call of fork or vfork."), _("\
10065Show debugger response to a program call of fork or vfork."), _("\
10066A fork or vfork creates a new process. follow-fork-mode can be:\n\
10067 parent - the original process is debugged after a fork\n\
10068 child - the new process is debugged after a fork\n\
10069The unfollowed process will continue to run.\n\
10070By default, the debugger will follow the parent process."),
10071 nullptr,
10072 show_follow_fork_mode_string,
10073 &setlist, &showlist);
10074
10075 add_setshow_enum_cmd ("follow-exec-mode", class_run,
10076 follow_exec_mode_names,
10077 &follow_exec_mode_string, _("\
10078Set debugger response to a program call of exec."), _("\
10079Show debugger response to a program call of exec."), _("\
10080An exec call replaces the program image of a process.\n\
10081\n\
10082follow-exec-mode can be:\n\
10083\n\
10084 new - the debugger creates a new inferior and rebinds the process\n\
10085to this new inferior. The program the process was running before\n\
10086the exec call can be restarted afterwards by restarting the original\n\
10087inferior.\n\
10088\n\
10089 same - the debugger keeps the process bound to the same inferior.\n\
10090The new executable image replaces the previous executable loaded in\n\
10091the inferior. Restarting the inferior after the exec call restarts\n\
10092the executable the process was running after the exec call.\n\
10093\n\
10094By default, the debugger will use the same inferior."),
10095 nullptr,
10096 show_follow_exec_mode_string,
10097 &setlist, &showlist);
10098
10099 add_setshow_enum_cmd ("scheduler-locking", class_run,
10100 scheduler_enums, &scheduler_mode, _("\
10101Set mode for locking scheduler during execution."), _("\
10102Show mode for locking scheduler during execution."), _("\
10103off == no locking (threads may preempt at any time)\n\
10104on == full locking (no thread except the current thread may run)\n\
10105 This applies to both normal execution and replay mode.\n\
10106step == scheduler locked during stepping commands (step, next, stepi, nexti).\n\
10107 In this mode, other threads may run during other commands.\n\
10108 This applies to both normal execution and replay mode.\n\
10109replay == scheduler locked in replay mode and unlocked during normal execution."),
10110 set_schedlock_func, /* traps on target vector */
10111 show_scheduler_mode,
10112 &setlist, &showlist);
10113
10114 add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
10115Set mode for resuming threads of all processes."), _("\
10116Show mode for resuming threads of all processes."), _("\
10117When on, execution commands (such as 'continue' or 'next') resume all\n\
10118threads of all processes. When off (which is the default), execution\n\
10119commands only resume the threads of the current process. The set of\n\
10120threads that are resumed is further refined by the scheduler-locking\n\
10121mode (see help set scheduler-locking)."),
10122 nullptr,
10123 show_schedule_multiple,
10124 &setlist, &showlist);
10125
10126 add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
10127Set mode of the step operation."), _("\
10128Show mode of the step operation."), _("\
10129When set, doing a step over a function without debug line information\n\
10130will stop at the first instruction of that function. Otherwise, the\n\
10131function is skipped and the step command stops at a different source line."),
10132 nullptr,
10133 show_step_stop_if_no_debug,
10134 &setlist, &showlist);
10135
10136 add_setshow_auto_boolean_cmd ("displaced-stepping", class_run,
10137 &can_use_displaced_stepping, _("\
10138Set debugger's willingness to use displaced stepping."), _("\
10139Show debugger's willingness to use displaced stepping."), _("\
10140If on, gdb will use displaced stepping to step over breakpoints if it is\n\
10141supported by the target architecture. If off, gdb will not use displaced\n\
10142stepping to step over breakpoints, even if such is supported by the target\n\
10143architecture. If auto (which is the default), gdb will use displaced stepping\n\
10144if the target architecture supports it and non-stop mode is active, but will not\n\
10145use it in all-stop mode (see help set non-stop)."),
10146 nullptr,
10147 show_can_use_displaced_stepping,
10148 &setlist, &showlist);
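The help text just registered spells out the displaced-stepping policy: "on" means use it whenever the architecture supports it, "off" means never, and "auto" means only when non-stop mode is also active. The following standalone sketch merely restates that documented policy as a predicate; it is an illustration, not GDB's use_displaced_stepping().

#include <cassert>

enum class setting { on, off, automatic };

/* Restate the "set displaced-stepping" help text above as code.  */
static bool
would_use_displaced_stepping (setting s, bool arch_supports_it,
			      bool non_stop_mode)
{
  switch (s)
    {
    case setting::on:			/* use it whenever the arch can */
      return arch_supports_it;
    case setting::off:			/* never use it */
      return false;
    case setting::automatic:		/* only together with non-stop mode */
    default:
      return arch_supports_it && non_stop_mode;
    }
}

int
main ()
{
  assert (would_use_displaced_stepping (setting::automatic, true, true));
  assert (!would_use_displaced_stepping (setting::automatic, true, false));
  assert (!would_use_displaced_stepping (setting::on, false, true));
}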
10149
10150 add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
10151 &exec_direction, _("Set direction of execution.\n\
10152Options are 'forward' or 'reverse'."),
10153 _("Show direction of execution (forward/reverse)."),
10154 _("Tells gdb whether to execute forward or backward."),
10155 set_exec_direction_func, show_exec_direction_func,
10156 &setlist, &showlist);
10157
10158 /* Set/show detach-on-fork: user-settable mode. */
10159
10160 add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
10161Set whether gdb will detach the child of a fork."), _("\
10162Show whether gdb will detach the child of a fork."), _("\
10163Tells gdb whether to detach the child of a fork."),
10164 nullptr, nullptr, &setlist, &showlist);
10165
10166 /* Set/show disable address space randomization mode. */
10167
10168 add_setshow_boolean_cmd ("disable-randomization", class_support,
10169 &disable_randomization, _("\
10170Set disabling of debuggee's virtual address space randomization."), _("\
10171Show disabling of debuggee's virtual address space randomization."), _("\
10172When this mode is on (which is the default), randomization of the virtual\n\
10173address space is disabled. Standalone programs run with the randomization\n\
10174enabled by default on some platforms."),
10175 &set_disable_randomization,
10176 &show_disable_randomization,
10177 &setlist, &showlist);
10178
10179 /* ptid initializations */
10180 inferior_ptid = null_ptid;
10181 target_last_wait_ptid = minus_one_ptid;
10182
10183 gdb::observers::thread_ptid_changed.attach (infrun_thread_ptid_changed,
10184 "infrun");
10185 gdb::observers::thread_stop_requested.attach (infrun_thread_stop_requested,
10186 "infrun");
10187 gdb::observers::inferior_exit.attach (infrun_inferior_exit, "infrun");
10188 gdb::observers::inferior_execd.attach (infrun_inferior_execd, "infrun");
10189
10190 /* Explicitly create without lookup, since that tries to create a
10191 value with a void typed value, and when we get here, gdbarch
10192 isn't initialized yet. At this point, we're quite sure there
10193 isn't another convenience variable of the same name. */
10194 create_internalvar_type_lazy ("_siginfo", &siginfo_funcs, nullptr);
10195
10196 add_setshow_boolean_cmd ("observer", no_class,
10197 &observer_mode_1, _("\
10198Set whether gdb controls the inferior in observer mode."), _("\
10199Show whether gdb controls the inferior in observer mode."), _("\
10200In observer mode, GDB can get data from the inferior, but not\n\
10201affect its execution. Registers and memory may not be changed,\n\
10202breakpoints may not be set, and the program cannot be interrupted\n\
10203or signalled."),
10204 set_observer_mode,
10205 show_observer_mode,
10206 &setlist,
10207 &showlist);
10208
10209#if GDB_SELF_TEST
10210 selftests::register_test ("infrun_thread_ptid_changed",
10211 selftests::infrun_thread_ptid_changed);
10212#endif
10213}
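_initialize_infrun ends by registering the ptid-change selftest by name under GDB_SELF_TEST. Below is a standalone sketch of that register-by-name test pattern, assuming only the standard library; register_test and run_tests here are invented miniatures, not the gdbsupport/selftest.h API.

#include <cassert>
#include <cstdio>
#include <functional>
#include <map>
#include <string>

static std::map<std::string, std::function<void ()>> registered_tests;

static void
register_test (const std::string &name, std::function<void ()> fn)
{
  registered_tests[name] = std::move (fn);
}

static void
run_tests ()
{
  for (const auto &it : registered_tests)
    {
      std::printf ("Running selftest %s.\n", it.first.c_str ());
      it.second ();
    }
}

/* A trivial test in the same shape as selftests::infrun_thread_ptid_changed.  */
static void
sample_test ()
{
  assert (1 + 1 == 2);
}

int
main ()
{
  register_test ("sample_test", sample_test);
  run_tests ();		/* runs every registered test, by name */
}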
const char *const name
void * xmalloc(YYSIZE_T)
void annotate_starting(void)
Definition annotate.c:96
void annotate_exited(int exitstatus)
Definition annotate.c:110
void annotate_signalled(void)
Definition annotate.c:117
void annotate_signal_string(void)
Definition annotate.c:141
void annotate_signal(void)
Definition annotate.c:155
void annotate_signal_string_end(void)
Definition annotate.c:148
void annotate_signal_name(void)
Definition annotate.c:127
void annotate_signal_name_end(void)
Definition annotate.c:134
void annotate_stopped(void)
Definition annotate.c:103
void annotate_thread_changed(void)
Definition annotate.c:224
struct gdbarch * target_gdbarch(void)
CORE_ADDR gdbarch_skip_prologue_noexcept(gdbarch *gdbarch, CORE_ADDR pc) noexcept
void mark_async_event_handler(async_event_handler *async_handler_ptr)
async_event_handler * create_async_event_handler(async_event_handler_func *proc, gdb_client_data client_data, const char *name)
void clear_async_event_handler(async_event_handler *async_handler_ptr)
iterator_range< block_iterator_wrapper > block_iterator_range
Definition block.h:553
bool find_pc_partial_function_sym(CORE_ADDR pc, const struct general_symbol_info **sym, CORE_ADDR *address, CORE_ADDR *endaddr, const struct block **block)
Definition blockframe.c:213
struct symbol * find_pc_function(CORE_ADDR pc)
Definition blockframe.c:150
struct symbol * get_frame_function(frame_info_ptr frame)
Definition blockframe.c:118
int catch_syscall_enabled(void)
bool catching_syscall_number(int syscall_number)
void breakpoint_re_set(void)
breakpoint_up set_momentary_breakpoint_at_pc(struct gdbarch *gdbarch, CORE_ADDR pc, enum bptype type)
void check_longjmp_breakpoint_for_call_dummy(struct thread_info *tp)
void breakpoint_retire_moribund(void)
void breakpoint_init_inferior(enum inf_context context)
int software_breakpoint_inserted_here_p(const address_space *aspace, CORE_ADDR pc)
void bpstat_run_callbacks(bpstat *bs_head)
void breakpoint_re_set_thread(struct breakpoint *b)
int remove_breakpoints(void)
int breakpoints_should_be_inserted_now(void)
Definition breakpoint.c:598
bool bpstat_should_step()
int detach_breakpoints(ptid_t ptid)
struct breakpoint * clone_momentary_breakpoint(struct breakpoint *orig)
int watchpoints_triggered(const target_waitstatus &ws)
int moribund_breakpoint_here_p(const address_space *aspace, CORE_ADDR pc)
enum breakpoint_here breakpoint_here_p(const address_space *aspace, CORE_ADDR pc)
bool bpstat_explains_signal(bpstat *bsp, enum gdb_signal sig)
void breakpoint_auto_delete(bpstat *bs)
void bpstat_clear_actions(void)
bool bpstat_causes_stop(bpstat *bs)
void remove_breakpoints_inf(inferior *inf)
int hardware_breakpoint_inserted_here_p(const address_space *aspace, CORE_ADDR pc)
int pc_at_non_inline_function(const address_space *aspace, CORE_ADDR pc, const target_waitstatus &ws)
struct bpstat_what bpstat_what(bpstat *bs_head)
bpstat * bpstat_stop_status_nowatch(const address_space *aspace, CORE_ADDR bp_addr, thread_info *thread, const target_waitstatus &ws)
void bpstat_clear(bpstat **bsp)
bpstat * build_bpstat_chain(const address_space *aspace, CORE_ADDR bp_addr, const target_waitstatus &ws)
int insert_single_step_breakpoints(struct gdbarch *gdbarch)
bpstat * bpstat_stop_status(const address_space *aspace, CORE_ADDR bp_addr, thread_info *thread, const target_waitstatus &ws, bpstat *stop_chain)
int single_step_breakpoint_inserted_here_p(const address_space *aspace, CORE_ADDR pc)
int breakpoint_inserted_here_p(const address_space *aspace, CORE_ADDR pc)
bpstat * bpstat_copy(bpstat *bs)
int breakpoint_address_match(const address_space *aspace1, CORE_ADDR addr1, const address_space *aspace2, CORE_ADDR addr2)
void mark_breakpoints_out(void)
breakpoint_up set_momentary_breakpoint(struct gdbarch *gdbarch, struct symtab_and_line sal, struct frame_id frame_id, enum bptype type)
void insert_breakpoints(void)
void update_breakpoints_after_exec(void)
enum print_stop_action bpstat_print(bpstat *bs, target_waitkind kind)
void insert_single_step_breakpoint(struct gdbarch *gdbarch, const address_space *aspace, CORE_ADDR next_pc)
@ disp_del_at_next_stop
Definition breakpoint.h:238
@ BPSTAT_WHAT_STOP_NOISY
@ BPSTAT_WHAT_CLEAR_LONGJMP_RESUME
@ BPSTAT_WHAT_STOP_SILENT
@ BPSTAT_WHAT_SINGLE
@ BPSTAT_WHAT_SET_LONGJMP_RESUME
@ BPSTAT_WHAT_HP_STEP_RESUME
@ BPSTAT_WHAT_STEP_RESUME
@ BPSTAT_WHAT_KEEP_CHECKING
bptype
Definition breakpoint.h:84
@ bp_exception_resume
Definition breakpoint.h:109
@ bp_step_resume
Definition breakpoint.h:113
@ bp_hp_step_resume
Definition breakpoint.h:117
@ bp_longjmp_resume
Definition breakpoint.h:96
@ inf_starting
@ inf_execd
void disable_current_display(void)
Definition printcmd.c:2274
@ PRINT_SRC_AND_LOC
Definition breakpoint.h:548
@ PRINT_NOTHING
Definition breakpoint.h:556
@ PRINT_UNKNOWN
Definition breakpoint.h:544
@ PRINT_SRC_ONLY
Definition breakpoint.h:552
@ ordinary_breakpoint_here
@ permanent_breakpoint_here
stop_stack_kind
@ STOP_NONE
@ STOP_STACK_DUMMY
void do_displays(void)
Definition printcmd.c:2252
CORE_ADDR address
Definition breakpoint.h:437
struct gdbarch * m_siginfo_gdbarch
Definition infrun.c:9600
void restore(struct gdbarch *gdbarch, struct thread_info *tp, struct regcache *regcache) const
Definition infrun.c:9568
struct thread_suspend_state m_thread_suspend
Definition infrun.c:9594
std::unique_ptr< readonly_detached_regcache > m_registers
Definition infrun.c:9597
infcall_suspend_state(struct gdbarch *gdbarch, const struct thread_info *tp, struct regcache *regcache)
Definition infrun.c:9527
readonly_detached_regcache * registers() const
Definition infrun.c:9561
gdb::unique_xmalloc_ptr< gdb_byte > m_siginfo_data
Definition infrun.c:9605
inferior_control_state control
Definition inferior.h:570
target_desc_info tdesc_info
Definition inferior.h:648
int pid
Definition inferior.h:561
inferior * vfork_parent
Definition inferior.h:597
thread_info * find_thread(ptid_t ptid)
Definition inferior.c:238
bool has_exit_code
Definition inferior.h:639
symfile_add_flags symfile_flags
Definition inferior.h:644
bool pending_detach
Definition inferior.h:607
displaced_step_inferior_state displaced_step_state
Definition inferior.h:664
bool has_execution()
Definition inferior.h:456
struct process_stratum_target * process_target()
Definition inferior.h:449
bool needs_setup
Definition inferior.h:621
thread_info * thread_waiting_for_vfork_done
Definition inferior.h:612
const std::string & args() const
Definition inferior.h:533
bool removable
Definition inferior.h:576
struct address_space * aspace
Definition inferior.h:579
bool detaching
Definition inferior.h:615
struct gdbarch * gdbarch
Definition inferior.h:661
LONGEST exit_code
Definition inferior.h:640
inferior * vfork_child
Definition inferior.h:603
bool attach_flag
Definition inferior.h:593
inf_threads_range threads()
Definition inferior.h:472
intrusive_list< thread_info > thread_list
Definition inferior.h:460
inferior(int pid)
Definition inferior.c:85
int num
Definition inferior.h:557
struct program_space * pspace
Definition inferior.h:582
virtual void on_about_to_proceed()
Definition interps.h:157
virtual void on_sync_execution_done()
Definition interps.h:111
Definition probe.h:115
thread_info * find_thread(ptid_t ptid)
bool has_resumed_with_pending_wait_status() const
thread_info * random_resumed_with_pending_wait_status(inferior *inf, ptid_t filter_ptid)
friend class regcache
Definition regcache.h:269
gdbarch * arch() const
Definition regcache.c:231
const address_space * aspace() const
Definition regcache.h:343
void restore(readonly_detached_regcache *src)
Definition regcache.c:277
std::string release()
Definition ui-file.h:204
const std::string & string()
Definition ui-file.h:198
static void inferior()
Definition target.c:952
static void ours_for_output()
Definition target.c:1088
static bool is_ours()
Definition target.h:189
static void ours()
Definition target.c:1070
int stepping_over_watchpoint
Definition gdbthread.h:500
void set_pending_waitstatus(const target_waitstatus &ws)
Definition thread.c:408
bool stop_pc_p() const
Definition gdbthread.h:387
void set_resumed(bool resumed)
Definition thread.c:385
CORE_ADDR stop_pc() const
Definition gdbthread.h:364
void set_thread_fsm(std::unique_ptr< struct thread_fsm > fsm)
Definition gdbthread.h:470
std::unique_ptr< struct thread_fsm > release_thread_fsm()
Definition gdbthread.h:461
enum thread_state state
Definition gdbthread.h:339
struct symtab * current_symtab
Definition gdbthread.h:477
void set_running(bool running)
Definition thread.c:884
ptid_t ptid
Definition gdbthread.h:259
void restore_suspend_from(const thread_suspend_state &suspend)
Definition gdbthread.h:354
gdb_signal stop_signal() const
Definition gdbthread.h:424
void save_suspend_to(thread_suspend_state &suspend) const
Definition gdbthread.h:347
void set_executing(bool executing)
Definition thread.c:375
bool resumed() const
Definition gdbthread.h:326
int current_line
Definition gdbthread.h:476
bool has_pending_waitstatus() const
Definition gdbthread.h:394
void set_stop_reason(target_stop_reason reason)
Definition gdbthread.h:445
void clear_pending_waitstatus()
Definition thread.c:422
struct frame_id initiating_frame
Definition gdbthread.h:525
struct target_waitstatus pending_follow
Definition gdbthread.h:516
const target_waitstatus & pending_waitstatus() const
Definition gdbthread.h:403
target_stop_reason stop_reason() const
Definition gdbthread.h:438
int step_after_step_resume_breakpoint
Definition gdbthread.h:511
int stepping_over_breakpoint
Definition gdbthread.h:495
struct thread_fsm * thread_fsm() const
Definition gdbthread.h:452
displaced_step_thread_state displaced_step_state
Definition gdbthread.h:552
bool executing() const
Definition gdbthread.h:319
int stepped_breakpoint
Definition gdbthread.h:492
int stop_requested
Definition gdbthread.h:519
struct inferior * inf
Definition gdbthread.h:301
void set_stop_pc(CORE_ADDR stop_pc)
Definition gdbthread.h:372
void set_stop_signal(gdb_signal sig)
Definition gdbthread.h:431
CORE_ADDR prev_pc
Definition gdbthread.h:487
thread_control_state control
Definition gdbthread.h:343
void field_string(const char *fldname, const char *string, const ui_file_style &style=ui_file_style())
Definition ui-out.c:511
void text(const char *string)
Definition ui-out.c:566
bool is_mi_like_p() const
Definition ui-out.c:810
void message(const char *format,...) ATTRIBUTE_PRINTF(2
Definition ui-out.c:774
struct cmd_list_element * showlist
Definition cli-cmds.c:127
void error_no_arg(const char *why)
Definition cli-cmds.c:206
struct cmd_list_element * cmdlist
Definition cli-cmds.c:87
struct cmd_list_element * setlist
Definition cli-cmds.c:119
struct cmd_list_element * showdebuglist
Definition cli-cmds.c:167
struct cmd_list_element * setdebuglist
Definition cli-cmds.c:165
set_show_commands add_setshow_zinteger_cmd(const char *name, enum command_class theclass, int *var, const char *set_doc, const char *show_doc, const char *help_doc, cmd_func_ftype *set_func, show_value_ftype *show_func, struct cmd_list_element **set_list, struct cmd_list_element **show_list)
struct cmd_list_element * add_cmd(const char *name, enum command_class theclass, const char *doc, struct cmd_list_element **list)
Definition cli-decode.c:233
void set_cmd_completer(struct cmd_list_element *cmd, completer_ftype *completer)
Definition cli-decode.c:117
struct cmd_list_element * add_com(const char *name, enum command_class theclass, cmd_simple_func_ftype *fun, const char *doc)
set_show_commands add_setshow_enum_cmd(const char *name, enum command_class theclass, const char *const *enumlist, const char **var, const char *set_doc, const char *show_doc, const char *help_doc, cmd_func_ftype *set_func, show_value_ftype *show_func, struct cmd_list_element **set_list, struct cmd_list_element **show_list)
Definition cli-decode.c:688
void not_just_help_class_command(const char *args, int from_tty)
Definition cli-decode.c:483
set_show_commands add_setshow_boolean_cmd(const char *name, enum command_class theclass, bool *var, const char *set_doc, const char *show_doc, const char *help_doc, cmd_func_ftype *set_func, show_value_ftype *show_func, struct cmd_list_element **set_list, struct cmd_list_element **show_list)
Definition cli-decode.c:809
cmd_list_element * add_info_alias(const char *name, cmd_list_element *target, int abbrev_flag)
set_show_commands add_setshow_auto_boolean_cmd(const char *name, enum command_class theclass, enum auto_boolean *var, const char *set_doc, const char *show_doc, const char *help_doc, cmd_func_ftype *set_func, show_value_ftype *show_func, struct cmd_list_element **set_list, struct cmd_list_element **show_list)
Definition cli-decode.c:752
void complete_on_enum(completion_tracker &tracker, const char *const *enumlist, const char *text, const char *word)
struct cmd_list_element * add_info(const char *name, cmd_simple_func_ftype *fun, const char *doc)
void execute_cmd_pre_hook(struct cmd_list_element *c)
Definition cli-script.c:379
@ class_obscure
Definition command.h:64
@ class_maintenance
Definition command.h:65
@ class_support
Definition command.h:58
@ class_run
Definition command.h:54
@ no_class
Definition command.h:53
void signal_completer(struct cmd_list_element *ignore, completion_tracker &tracker, const char *text, const char *word)
Definition completer.c:1756
void read_memory(CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
Definition corefile.c:238
int check_quit_flag(void)
Definition extension.c:857
quit_handler_ftype * quit_handler
Definition event-top.c:1080
auto_boolean
Definition defs.h:247
@ AUTO_BOOLEAN_TRUE
Definition defs.h:248
@ AUTO_BOOLEAN_AUTO
Definition defs.h:250
@ AUTO_BOOLEAN_FALSE
Definition defs.h:249
@ language_asm
Definition defs.h:221
#define QUIT
Definition defs.h:187
void(* deprecated_context_hook)(int)
Definition top.c:256
int gdb_print_insn(struct gdbarch *gdbarch, CORE_ADDR memaddr, struct ui_file *stream, int *branch_delay_insns)
Definition disasm.c:1217
bool debug_displaced
#define displaced_debug_printf(fmt,...)
displaced_step_prepare_status
@ DISPLACED_STEP_PREPARE_STATUS_CANT
@ DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE
@ DISPLACED_STEP_PREPARE_STATUS_OK
displaced_step_finish_status
@ DISPLACED_STEP_FINISH_STATUS_OK
@ DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED
LONGEST parse_and_eval_long(const char *exp)
Definition eval.c:62
void async_enable_stdin(void)
Definition event-top.c:507
bool exec_done_display_p
Definition event-top.c:98
int interruptible_select(int n, fd_set *readfds, fd_set *writefds, fd_set *exceptfds, struct timeval *timeout)
Definition event-top.c:1103
void gdb_rl_callback_handler_reinstall(void)
Definition event-top.c:368
void async_disable_stdin(void)
Definition event-top.c:523
void exception_fprintf(struct ui_file *file, const struct gdb_exception &e, const char *prefix,...)
Definition exceptions.c:116
void exception_print(struct ui_file *file, const struct gdb_exception &e)
Definition exceptions.c:106
void try_open_exec_file(const char *exec_file_host, struct inferior *inf, symfile_add_flags add_flags)
Definition exec.c:166
void exec_on_vfork(inferior *vfork_child)
Definition exec.c:687
struct value * read_var_value(struct symbol *var, const struct block *var_block, frame_info_ptr frame)
Definition findvar.c:739
const struct frame_id null_frame_id
Definition frame.c:688
const struct frame_id outer_frame_id
Definition frame.c:689
struct program_space * get_frame_program_space(frame_info_ptr frame)
Definition frame.c:2965
void restore_selected_frame(frame_id frame_id, int frame_level) noexcept
Definition frame.c:1761
void select_frame(frame_info_ptr fi)
Definition frame.c:1927
CORE_ADDR frame_unwind_caller_pc(frame_info_ptr this_frame)
Definition frame.c:1042
CORE_ADDR get_frame_pc(frame_info_ptr frame)
Definition frame.c:2712
void reinit_frame_cache(void)
Definition frame.c:2107
void frame_pop(frame_info_ptr this_frame)
Definition frame.c:1126
struct frame_id get_stack_frame_id(frame_info_ptr next_frame)
Definition frame.c:662
const address_space * get_frame_address_space(frame_info_ptr frame)
Definition frame.c:2982
bool frame_id_p(frame_id l)
Definition frame.c:781
void save_selected_frame(frame_id *frame_id, int *frame_level) noexcept
Definition frame.c:1751
struct gdbarch * get_frame_arch(frame_info_ptr this_frame)
Definition frame.c:3027
enum frame_type get_frame_type(frame_info_ptr frame)
Definition frame.c:2955
struct frame_id frame_unwind_caller_id(frame_info_ptr next_frame)
Definition frame.c:668
struct program_space * frame_unwind_program_space(frame_info_ptr this_frame)
Definition frame.c:2971
bool has_stack_frames()
Definition frame.c:1859
frame_info_ptr get_selected_frame(const char *message)
Definition frame.c:1888
frame_info_ptr frame_find_by_id(struct frame_id id)
Definition frame.c:916
frame_info_ptr get_current_frame(void)
Definition frame.c:1670
struct frame_id get_frame_id(frame_info_ptr fi)
Definition frame.c:631
frame_info_ptr get_prev_frame(frame_info_ptr this_frame)
Definition frame.c:2614
symtab_and_line find_frame_sal(frame_info_ptr frame)
Definition frame.c:2821
struct gdbarch * frame_unwind_caller_arch(frame_info_ptr next_frame)
Definition frame.c:3059
print_what
Definition frame.h:803
@ SRC_AND_LOC
Definition frame.h:811
@ SRC_LINE
Definition frame.h:805
@ DUMMY_FRAME
Definition frame.h:190
@ SIGTRAMP_FRAME
Definition frame.h:198
@ INLINE_FRAME
Definition frame.h:193
void set_current_sal_from_frame(frame_info_ptr)
Definition stack.c:921
void print_stack_frame(frame_info_ptr, int print_level, enum print_what print_what, int set_current_sal)
Definition stack.c:353
void gdbarch_skip_permanent_breakpoint(struct gdbarch *gdbarch, struct regcache *regcache)
Definition gdbarch.c:4058
int gdbarch_have_nonsteppable_watchpoint(struct gdbarch *gdbarch)
Definition gdbarch.c:3564
CORE_ADDR gdbarch_adjust_breakpoint_address(struct gdbarch *gdbarch, CORE_ADDR bpaddr)
Definition gdbarch.c:2852
bool gdbarch_displaced_step_hw_singlestep(struct gdbarch *gdbarch)
Definition gdbarch.c:4124
int gdbarch_single_step_through_delay(struct gdbarch *gdbarch, frame_info_ptr frame)
Definition gdbarch.c:3319
bool gdbarch_adjust_breakpoint_address_p(struct gdbarch *gdbarch)
Definition gdbarch.c:2845
bool gdbarch_in_indirect_branch_thunk(struct gdbarch *gdbarch, CORE_ADDR pc)
Definition gdbarch.c:3421
bool gdbarch_program_breakpoint_here_p(struct gdbarch *gdbarch, CORE_ADDR address)
Definition gdbarch.c:5157
bool gdbarch_get_siginfo_type_p(struct gdbarch *gdbarch)
Definition gdbarch.c:4444
CORE_ADDR gdbarch_skip_trampoline_code(struct gdbarch *gdbarch, frame_info_ptr frame, CORE_ADDR pc)
Definition gdbarch.c:3353
CORE_ADDR gdbarch_deprecated_function_start_offset(struct gdbarch *gdbarch)
Definition gdbarch.c:2920
void gdbarch_displaced_step_restore_all_in_ptid(struct gdbarch *gdbarch, inferior *parent_inf, ptid_t child_ptid)
Definition gdbarch.c:4223
bool gdbarch_single_step_through_delay_p(struct gdbarch *gdbarch)
Definition gdbarch.c:3312
bool gdbarch_displaced_step_prepare_p(struct gdbarch *gdbarch)
Definition gdbarch.c:4158
int gdbarch_get_longjmp_target(struct gdbarch *gdbarch, frame_info_ptr frame, CORE_ADDR *pc)
Definition gdbarch.c:2443
bool gdbarch_skip_entrypoint_p(struct gdbarch *gdbarch)
Definition gdbarch.c:2736
displaced_step_prepare_status gdbarch_displaced_step_prepare(struct gdbarch *gdbarch, thread_info *thread, CORE_ADDR &displaced_pc)
Definition gdbarch.c:4165
int gdbarch_gdb_signal_to_target(struct gdbarch *gdbarch, enum gdb_signal signal)
Definition gdbarch.c:4427
CORE_ADDR gdbarch_skip_entrypoint(struct gdbarch *gdbarch, CORE_ADDR ip)
Definition gdbarch.c:2743
void gdbarch_report_signal_info(struct gdbarch *gdbarch, struct ui_out *uiout, enum gdb_signal siggnal)
Definition gdbarch.c:2020
bool gdbarch_report_signal_info_p(struct gdbarch *gdbarch)
Definition gdbarch.c:2013
displaced_step_finish_status gdbarch_displaced_step_finish(struct gdbarch *gdbarch, thread_info *thread, const target_waitstatus &ws)
Definition gdbarch.c:4182
bool gdbarch_gdb_signal_to_target_p(struct gdbarch *gdbarch)
Definition gdbarch.c:4420
CORE_ADDR gdbarch_skip_solib_resolver(struct gdbarch *gdbarch, CORE_ADDR pc)
Definition gdbarch.c:3387
int gdbarch_cannot_step_breakpoint(struct gdbarch *gdbarch)
Definition gdbarch.c:3547
CORE_ADDR gdbarch_addr_bits_remove(struct gdbarch *gdbarch, CORE_ADDR addr)
Definition gdbarch.c:3152
CORE_ADDR gdbarch_decr_pc_after_break(struct gdbarch *gdbarch)
Definition gdbarch.c:2903
bool gdbarch_get_longjmp_target_p(struct gdbarch *gdbarch)
Definition gdbarch.c:2436
ULONGEST gdbarch_displaced_step_buffer_length(struct gdbarch *gdbarch)
Definition gdbarch.c:4240
bool gdbarch_software_single_step_p(struct gdbarch *gdbarch)
Definition gdbarch.c:3288
struct type * gdbarch_get_siginfo_type(struct gdbarch *gdbarch)
Definition gdbarch.c:4451
int gdbarch_in_solib_return_trampoline(struct gdbarch *gdbarch, CORE_ADDR pc, const char *name)
Definition gdbarch.c:3404
struct thread_info * any_live_thread_of_inferior(inferior *inf)
Definition thread.c:663
all_threads_safe_range all_threads_safe()
Definition gdbthread.h:770
int thread_step_over_chain_length(const thread_step_over_list &l)
Definition thread.c:443
struct thread_info * add_thread(process_stratum_target *targ, ptid_t ptid)
Definition thread.c:336
void global_thread_step_over_chain_remove(thread_info *tp)
Definition thread.c:476
void global_thread_step_over_chain_enqueue_chain(thread_step_over_list &&list)
Definition thread.c:468
all_matching_threads_range all_threads(process_stratum_target *proc_target=nullptr, ptid_t filter_ptid=minus_one_ptid)
Definition gdbthread.h:742
void delete_thread(thread_info *thread)
Definition thread.c:527
iterator_range< thread_step_over_list_safe_iterator > thread_step_over_list_safe_range
Definition gdbthread.h:950
void switch_to_thread_no_regs(struct thread_info *thread)
Definition thread.c:1328
all_non_exited_threads_range all_non_exited_threads(process_stratum_target *proc_target=nullptr, ptid_t filter_ptid=minus_one_ptid)
Definition gdbthread.h:753
void validate_registers_access(void)
Definition thread.c:958
@ THREAD_STOPPED
Definition gdbthread.h:72
@ THREAD_RUNNING
Definition gdbthread.h:75
@ THREAD_EXITED
Definition gdbthread.h:79
int show_thread_that_caused_stop(void)
Definition thread.c:1450
bool pc_in_thread_step_range(CORE_ADDR pc, struct thread_info *thread)
Definition thread.c:1000
static thread_step_over_list_safe_range make_thread_step_over_list_safe_range(thread_step_over_list &list)
Definition gdbthread.h:953
struct thread_info * find_thread_global_id(int global_id)
Definition thread.c:539
void global_thread_step_over_chain_enqueue(thread_info *tp)
Definition thread.c:456
void update_thread_list(void)
Definition thread.c:2086
void set_executing(process_stratum_target *targ, ptid_t ptid, bool executing)
Definition thread.c:908
struct thread_info * inferior_thread(void)
Definition thread.c:85
void switch_to_thread(struct thread_info *thr)
Definition thread.c:1360
void set_running(process_stratum_target *targ, ptid_t ptid, bool running)
Definition thread.c:891
int thread_has_single_step_breakpoint_here(struct thread_info *tp, const address_space *aspace, CORE_ADDR addr)
Definition thread.c:150
void delete_step_resume_breakpoint(struct thread_info *)
Definition thread.c:104
struct thread_info * iterate_over_threads(thread_callback_func, void *)
Definition thread.c:584
void thread_change_ptid(process_stratum_target *targ, ptid_t old_ptid, ptid_t new_ptid)
Definition thread.c:811
FORWARD_SCOPE_EXIT(finish_thread_state) scoped_finish_thread_state
Definition gdbthread.h:836
void set_resumed(process_stratum_target *targ, ptid_t ptid, bool resumed)
Definition thread.c:838
int thread_has_single_step_breakpoints_set(struct thread_info *tp)
Definition thread.c:142
const char * thread_name(thread_info *thread)
Definition thread.c:2095
int thread_is_in_step_over_chain(struct thread_info *tp)
Definition thread.c:435
void delete_exception_resume_breakpoint(struct thread_info *)
Definition thread.c:111
@ STEP_OVER_NONE
Definition gdbthread.h:88
@ STEP_OVER_UNDEBUGGABLE
Definition gdbthread.h:90
@ STEP_OVER_ALL
Definition gdbthread.h:89
gdb::ref_ptr< struct thread_info, refcounted_object_ref_policy > thread_info_ref
Definition gdbthread.h:592
intrusive_list< thread_info, thread_step_over_list_node > thread_step_over_list
Definition gdbthread.h:947
void delete_single_step_breakpoints(struct thread_info *tp)
Definition thread.c:120
void switch_to_no_thread()
Definition thread.c:1345
const char * print_thread_id(struct thread_info *thr)
Definition thread.c:1470
static struct inf * cur_inf(void)
Definition gnu-nat.c:2084
mach_port_t mach_port_t name mach_port_t mach_port_t name kern_return_t int int rusage_t pid_t pid
Definition gnu-nat.c:1791
mach_port_t mach_port_t name mach_port_t mach_port_t name kern_return_t int status
Definition gnu-nat.c:1790
void inferior_event_handler(enum inferior_event_type event_type)
Definition inf-loop.c:37
ptid_t inferior_ptid
Definition infcmd.c:74
int stopped_by_random_signal
Definition infcmd.c:83
void setup_inferior(int from_tty)
Definition infcmd.c:2538
enum stop_stack_kind stop_stack_dummy
Definition infcmd.c:78
void post_create_inferior(int from_tty)
Definition infcmd.c:232
void print_return_value(struct ui_out *uiout, struct return_value_info *rv)
Definition infcmd.c:1571
struct inferior * add_inferior_with_spaces(void)
Definition inferior.c:832
struct inferior * find_inferior_ptid(process_stratum_target *targ, ptid_t ptid)
Definition inferior.c:406
struct inferior * find_inferior_pid(process_stratum_target *targ, int pid)
Definition inferior.c:389
void set_current_inferior(struct inferior *inf)
Definition inferior.c:61
bool print_inferior_events
Definition inferior.c:47
struct inferior * current_inferior(void)
Definition inferior.c:55
struct inferior * add_inferior(int pid)
Definition inferior.c:218
void switch_to_inferior_no_thread(inferior *inf)
Definition inferior.c:712
void exit_inferior(struct inferior *inf)
Definition inferior.c:307
void prune_inferiors(void)
Definition inferior.c:471
intrusive_list< inferior > inferior_list
Definition inferior.c:43
std::unique_ptr< infcall_control_state, infcall_control_state_deleter > infcall_control_state_up
Definition inferior.h:119
all_inferiors_range all_inferiors(process_stratum_target *proc_target=nullptr)
Definition inferior.h:821
stop_kind
Definition inferior.h:293
@ NO_STOP_QUIETLY
Definition inferior.h:294
@ STOP_QUIETLY_REMOTE
Definition inferior.h:296
@ STOP_QUIETLY_NO_SIGSTOP
Definition inferior.h:297
@ STOP_QUIETLY
Definition inferior.h:295
std::unique_ptr< infcall_suspend_state, infcall_suspend_state_deleter > infcall_suspend_state_up
Definition inferior.h:103
all_non_exited_inferiors_range all_non_exited_inferiors(process_stratum_target *proc_target=nullptr)
Definition inferior.h:830
void copy_terminal_info(struct inferior *to, struct inferior *from)
Definition inflow.c:621
void swap_terminal_info(inferior *a, inferior *b)
Definition inflow.c:642
#define UNSET_SIGS(nsigs, sigs, flags)
Definition infrun.c:348
static unsigned char signal_pass[GDB_SIGNAL_LAST]
Definition infrun.c:338
int thread_is_stepping_over_breakpoint(int thread)
Definition infrun.c:1498
static bool step_over_info_valid_p(void)
Definition infrun.c:1515
static displaced_step_prepare_status displaced_step_prepare_throw(thread_info *tp)
Definition infrun.c:1770
static const struct internalvar_funcs siginfo_funcs
Definition infrun.c:9823
static void set_observer_mode(const char *args, int from_tty, struct cmd_list_element *c)
Definition infrun.c:259
static void for_each_just_stopped_thread(for_each_just_stopped_thread_callback_func func)
Definition infrun.c:3744
static void stop_all_threads_if_all_stop_mode()
Definition infrun.c:4154
static ptid_t do_target_wait_1(inferior *inf, ptid_t ptid, target_waitstatus *status, target_wait_flags options)
Definition infrun.c:3824
static const char exec_forward[]
Definition infrun.c:9769
static void mark_non_executing_threads(process_stratum_target *target, ptid_t event_ptid, const target_waitstatus &ws)
Definition infrun.c:5131
static enum stop_kind get_inferior_stop_soon(execution_control_state *ecs)
Definition infrun.c:4977
static bool handle_syscall_event(struct execution_control_state *ecs)
Definition infrun.c:4884
static bool gdbarch_supports_displaced_stepping(gdbarch *arch)
Definition infrun.c:1696
void print_no_history_reason(struct ui_out *uiout)
Definition infrun.c:8720
static void set_exec_direction_func(const char *args, int from_tty, struct cmd_list_element *cmd)
Definition infrun.c:9779
void set_step_info(thread_info *tp, frame_info_ptr frame, struct symtab_and_line sal)
Definition infrun.c:4575
static void insert_hp_step_resume_breakpoint_at_frame(frame_info_ptr)
Definition infrun.c:8221
void update_signals_program_target(void)
Definition infrun.c:360
static void infrun_quit_handler()
Definition infrun.c:4317
int stepping_past_instruction_at(struct address_space *aspace, CORE_ADDR address)
Definition infrun.c:1486
static process_stratum_target * target_last_proc_target
Definition infrun.c:402
static bool observer_mode
Definition infrun.c:255
void insert_step_resume_breakpoint_at_sal(struct gdbarch *gdbarch, struct symtab_and_line sr_sal, struct frame_id sr_id)
Definition infrun.c:8204
static void show_debug_infrun(struct ui_file *file, int from_tty, struct cmd_list_element *c, const char *value)
Definition infrun.c:189
void maybe_call_commit_resumed_all_targets()
Definition infrun.c:3085
static void set_disable_randomization(const char *args, int from_tty, struct cmd_list_element *c)
Definition infrun.c:215
static void adjust_pc_after_break(struct thread_info *thread, const target_waitstatus &ws)
Definition infrun.c:4665
static const struct lval_funcs siginfo_value_funcs
Definition infrun.c:9489
static void insert_step_resume_breakpoint_at_caller(frame_info_ptr)
Definition infrun.c:8252
static void show_follow_exec_mode_string(struct ui_file *file, int from_tty, struct cmd_list_element *c, const char *value)
Definition infrun.c:1203
static void fill_in_stop_func(struct gdbarch *gdbarch, struct execution_control_state *ecs)
Definition infrun.c:4927
static void follow_exec(ptid_t ptid, const char *exec_file_target)
Definition infrun.c:1212
void mark_infrun_async_event_handler(void)
Definition infrun.c:139
void print_stop_event(struct ui_out *uiout, bool displays)
Definition infrun.c:8791
static unsigned char signal_print[GDB_SIGNAL_LAST]
Definition infrun.c:327
static bool maybe_software_singlestep(struct gdbarch *gdbarch)
Definition infrun.c:2257
static void clear_proceed_status_thread(struct thread_info *tp)
Definition infrun.c:2859
void print_signal_exited_reason(struct ui_out *uiout, enum gdb_signal siggnal)
Definition infrun.c:8620
static ptid_t target_last_wait_ptid
Definition infrun.c:403
static void process_event_stop_test(struct execution_control_state *ecs)
Definition infrun.c:6896
static bool displaced_step_in_progress(inferior *inf)
Definition infrun.c:1621
static wait_one_event wait_one()
Definition infrun.c:5012
static int infrun_is_async
Definition infrun.c:116
static step_over_what thread_still_needs_step_over(struct thread_info *tp)
Definition infrun.c:2995
void maybe_remove_breakpoints(void)
Definition infrun.c:8823
static bool restart_stepped_thread(process_stratum_target *resume_target, ptid_t resume_ptid)
Definition infrun.c:7848
void notify_signal_received(gdb_signal sig)
Definition infrun.c:6360
static void show_can_use_displaced_stepping(struct ui_file *file, int from_tty, struct cmd_list_element *c, const char *value)
Definition infrun.c:1677
bool normal_stop()
Definition infrun.c:8899
int signal_print_state(int signo)
Definition infrun.c:9091
static displaced_step_finish_status displaced_step_finish(thread_info *event_thread, const target_waitstatus &event_status)
Definition infrun.c:1969
static const char *const follow_exec_mode_names[]
Definition infrun.c:1194
static void stop_waiting(struct execution_control_state *ecs)
Definition infrun.c:8435
infcall_suspend_state_up save_infcall_suspend_state()
Definition infrun.c:9609
static void signal_cache_update(int signo)
Definition infrun.c:9103
static void infrun_async_inferior_event_handler(gdb_client_data data)
Definition infrun.c:9833
static void proceed_resume_thread_checked(thread_info *tp)
Definition infrun.c:3293
static bool switch_back_to_stepped_thread(struct execution_control_state *ecs)
Definition infrun.c:7764
void start_remote(int from_tty)
Definition infrun.c:3607
void nullify_last_target_wait_ptid(void)
Definition infrun.c:4635
static void set_last_target_status_stopped(thread_info *tp)
Definition infrun.c:729
static void new_stop_id(void)
Definition infrun.c:2850
static void show_stop_on_solib_events(struct ui_file *file, int from_tty, struct cmd_list_element *c, const char *value)
Definition infrun.c:388
static unsigned char signal_program[GDB_SIGNAL_LAST]
Definition infrun.c:328
static void end_stepping_range(struct execution_control_state *ecs)
Definition infrun.c:8602
static void show_schedule_multiple(struct ui_file *file, int from_tty, struct cmd_list_element *c, const char *value)
Definition infrun.c:9814
static void check_multi_target_resumption(process_stratum_target *resume_target)
Definition infrun.c:3242
static void set_non_stop(const char *args, int from_tty, struct cmd_list_element *c)
Definition infrun.c:230
bool step_stop_if_no_debug
Definition infrun.c:147
static void wait_for_inferior(inferior *inf)
Definition infrun.c:4168
static void insert_longjmp_resume_breakpoint(struct gdbarch *, CORE_ADDR)
Definition infrun.c:8276
static bool stop_print_frame
Definition infrun.c:397
static void save_waitstatus(struct thread_info *tp, const target_waitstatus &ws)
Definition infrun.c:5085
static void handle_vfork_child_exec_or_exit(int exec)
Definition infrun.c:1011
static void delete_just_stopped_threads_single_step_breakpoints(void)
Definition infrun.c:3775
static bool enable_commit_resumed
Definition infrun.c:3108
thread_info * get_previous_thread()
Definition infrun.c:175
static void siginfo_value_write(struct value *v, struct value *fromval)
Definition infrun.c:9470
static struct async_event_handler * infrun_async_inferior_event_token
Definition infrun.c:112
static bool currently_stepping(struct thread_info *tp)
Definition infrun.c:8057
static void insert_step_resume_breakpoint_at_sal_1(struct gdbarch *gdbarch, struct symtab_and_line sr_sal, struct frame_id sr_id, enum bptype sr_type)
Definition infrun.c:8185
infcall_control_state_up save_infcall_control_state()
Definition infrun.c:9675
static void maybe_set_commit_resumed_all_targets()
Definition infrun.c:3028
static int resumed_thread_with_pending_status(struct thread_info *tp, void *arg)
Definition infrun.c:6241
static void show_observer_mode(struct ui_file *file, int from_tty, struct cmd_list_element *c, const char *value)
Definition infrun.c:295
static bool schedlock_applies(struct thread_info *tp)
Definition infrun.c:3013
static void handle_command(const char *args, int from_tty)
Definition infrun.c:9190
static void do_target_resume(ptid_t resume_ptid, bool step, enum gdb_signal sig)
Definition infrun.c:2403
int signal_print_update(int signo, int state)
Definition infrun.c:9130
int signal_pass_update(int signo, int state)
Definition infrun.c:9140
static unsigned char signal_catch[GDB_SIGNAL_LAST]
Definition infrun.c:333
static const char *const scheduler_enums[]
Definition infrun.c:2217
static bool thread_still_needs_step_over_bp(struct thread_info *tp)
Definition infrun.c:2973
static void show_step_stop_if_no_debug(struct ui_file *file, int from_tty, struct cmd_list_element *c, const char *value)
Definition infrun.c:149
static void resume_1(enum gdb_signal sig)
Definition infrun.c:2453
static const char * follow_fork_mode_string
Definition infrun.c:417
static ptid_t poll_one_curr_target(struct target_waitstatus *ws)
Definition infrun.c:4989
static bool handle_no_resumed(struct execution_control_state *ecs)
Definition infrun.c:5451
static enum auto_boolean can_use_displaced_stepping
Definition infrun.c:1674
void(* for_each_just_stopped_thread_callback_func)(struct thread_info *tp)
Definition infrun.c:3741
process_stratum_target * user_visible_resume_target(ptid_t resume_ptid)
Definition infrun.c:2313
static int finish_step_over(struct execution_control_state *ecs)
Definition infrun.c:6254
static void handle_vfork_done(thread_info *event_thread)
Definition infrun.c:1144
static thread_info_ref previous_thread
Definition infrun.c:159
static bool detach_fork
Definition infrun.c:185
void notify_user_selected_context_changed(user_selected_what selection)
Definition infrun.c:6377
void restart_after_all_stop_detach(process_stratum_target *proc_target)
Definition infrun.c:7911
static void displaced_step_reset(displaced_step_thread_state *displaced)
Definition infrun.c:1741
#define RESUME_ALL
Definition infrun.c:367
static const char schedlock_on[]
Definition infrun.c:2214
void clear_proceed_status(int step)
Definition infrun.c:2924
static bool handle_one(const wait_one_event &event)
Definition infrun.c:5176
static bool stepped_in_from(frame_info_ptr frame, struct frame_id step_frame_id)
Definition infrun.c:4807
int signal_stop_state(int signo)
Definition infrun.c:9085
static const char *const exec_direction_names[]
Definition infrun.c:9772
int stepping_past_nonsteppable_watchpoint(void)
Definition infrun.c:1507
static bool observer_mode_1
Definition infrun.c:256
FORWARD_SCOPE_EXIT(displaced_step_reset) displaced_step_reset_cleanup
Definition infrun.c:1749
void set_last_target_status(process_stratum_target *target, ptid_t ptid, const target_waitstatus &status)
Definition infrun.c:4610
static void infrun_thread_ptid_changed(process_stratum_target *target, ptid_t old_ptid, ptid_t new_ptid)
Definition infrun.c:2203
void clear_exit_convenience_vars(void)
Definition infrun.c:9757
static void siginfo_value_read(struct value *v)
Definition infrun.c:9446
static void delete_just_stopped_threads_infrun_breakpoints(void)
Definition infrun.c:3766
static bool keep_going_stepped_thread(struct thread_info *tp)
Definition infrun.c:7964
static void follow_inferior_reset_breakpoints(void)
Definition infrun.c:948
static struct value * siginfo_make_value(struct gdbarch *gdbarch, struct internalvar *var, void *ignore)
Definition infrun.c:9500
static void check_exception_resume(struct execution_control_state *, frame_info_ptr)
Definition infrun.c:8373
static void print_stop_location(const target_waitstatus &ws)
Definition infrun.c:8734
static void keep_going(struct execution_control_state *ecs)
Definition infrun.c:8568
bool debug_infrun
Definition infrun.c:187
static void show_scheduler_mode(struct ui_file *file, int from_tty, struct cmd_list_element *c, const char *value)
Definition infrun.c:2226
static void insert_exception_resume_breakpoint(struct thread_info *tp, const struct block *b, frame_info_ptr frame, struct symbol *sym)
Definition infrun.c:8297
enum exec_direction_kind execution_direction
Definition infrun.c:9768
static void prepare_to_wait(struct execution_control_state *ecs)
Definition infrun.c:8584
static const char schedlock_step[]
Definition infrun.c:2215
void notify_normal_stop(bpstat *bs, int print_frame)
Definition infrun.c:6369
bool sched_multi
Definition infrun.c:2249
void discard_infcall_control_state(struct infcall_control_state *inf_status)
Definition infrun.c:9738
static void infrun_thread_stop_requested(ptid_t ptid)
Definition infrun.c:3675
void infrun_async(int enable)
Definition infrun.c:121
void restore_infcall_control_state(struct infcall_control_state *inf_status)
Definition infrun.c:9706
bool non_stop
Definition infrun.c:226
static struct cmd_list_element * stop_command
Definition infrun.c:371
static void delete_thread_infrun_breakpoints(struct thread_info *tp)
Definition infrun.c:3729
static void keep_going_pass_signal(struct execution_control_state *ecs)
Definition infrun.c:8447
void update_previous_thread()
Definition infrun.c:164
static bool follow_fork()
Definition infrun.c:740
static void show_exec_direction_func(struct ui_file *out, int from_tty, struct cmd_list_element *cmd, const char *value)
Definition infrun.c:9797
static void show_follow_fork_mode_string(struct ui_file *file, int from_tty, struct cmd_list_element *c, const char *value)
Definition infrun.c:419
thread_step_over_list global_thread_step_over_list
Definition infrun.c:1393
void prepare_for_detach(void)
Definition infrun.c:4059
void update_observer_mode(void)
Definition infrun.c:308
bool disable_randomization
Definition infrun.c:197
static const char * follow_exec_mode_string
Definition infrun.c:1201
static const char exec_reverse[]
Definition infrun.c:9770
void _initialize_infrun()
Definition infrun.c:9916
static bool follow_fork_inferior(bool follow_child, bool detach_fork)
Definition infrun.c:436
DEF_ENUM_FLAGS_TYPE(enum step_over_what_flag, step_over_what)
#define SET_SIGS(nsigs, sigs, flags)
Definition infrun.c:340
static bool use_displaced_stepping(thread_info *tp)
Definition infrun.c:1707
static const char schedlock_replay[]
Definition infrun.c:2216
int signal_pass_state(int signo)
Definition infrun.c:9097
static bool displaced_step_in_progress_any_thread()
Definition infrun.c:1629
static void show_disable_randomization(struct ui_file *file, int from_tty, struct cmd_list_element *c, const char *value)
Definition infrun.c:200
static void sig_print_header(void)
Definition infrun.c:9164
static void info_signals_command(const char *signum_exp, int from_tty)
Definition infrun.c:9395
static void context_switch(execution_control_state *ecs)
Definition infrun.c:4645
static void set_step_over_info(const address_space *aspace, CORE_ADDR address, int nonsteppable_watchpoint_p, int thread)
Definition infrun.c:1460
static thread_info * find_thread_waiting_for_vfork_done()
Definition infrun.c:2324
static displaced_step_prepare_status displaced_step_prepare(thread_info *thread)
Definition infrun.c:1926
static struct target_waitstatus target_last_waitstatus
Definition infrun.c:404
static const char schedlock_off[]
Definition infrun.c:2213
int signal_stop_update(int signo, int state)
Definition infrun.c:9120
static bool handle_stop_requested(struct execution_control_state *ecs)
Definition infrun.c:4867
void all_uis_on_sync_execution_starting(void)
Definition infrun.c:4304
static void handle_completer(struct cmd_list_element *ignore, completion_tracker &tracker, const char *text, const char *word)
Definition infrun.c:9358
static unsigned char signal_stop[GDB_SIGNAL_LAST]
Definition infrun.c:326
void init_thread_stepping_state(struct thread_info *tss)
Definition infrun.c:4599
static const char * exec_direction
Definition infrun.c:9771
static const char follow_fork_mode_parent[]
Definition infrun.c:409
int stop_on_solib_events
Definition infrun.c:375
enum gdb_signal gdb_signal_from_command(int num)
Definition infrun.c:9381
void all_uis_check_sync_execution_done(void)
Definition infrun.c:4293
static void infrun_inferior_exit(struct inferior *inf)
Definition infrun.c:1641
static void resume(gdb_signal sig)
Definition infrun.c:2806
void stop_all_threads(const char *reason, inferior *inf)
Definition infrun.c:5315
static void handle_inferior_event(struct execution_control_state *ecs)
Definition infrun.c:5590
void init_wait_for_inferior(void)
Definition infrun.c:3639
static const char *const follow_fork_mode_kind_names[]
Definition infrun.c:411
static const char follow_exec_mode_new[]
Definition infrun.c:1192
static const char * scheduler_mode
Definition infrun.c:2224
static void proceed_after_vfork_done(thread_info *thread)
Definition infrun.c:991
static bool do_target_wait(execution_control_state *ecs, target_wait_flags options)
Definition infrun.c:3949
step_over_what_flag
Definition infrun.c:1398
@ STEP_OVER_BREAKPOINT
Definition infrun.c:1400
@ STEP_OVER_WATCHPOINT
Definition infrun.c:1405
static void insert_exception_resume_from_probe(struct thread_info *tp, const struct bound_probe *probe, frame_info_ptr frame)
Definition infrun.c:8343
static bool displaced_step_in_progress_thread(thread_info *thread)
Definition infrun.c:1611
static void set_stop_on_solib_events(const char *args, int from_tty, struct cmd_list_element *c)
Definition infrun.c:381
void print_exited_reason(struct ui_out *uiout, int exitstatus)
Definition infrun.c:8641
static void check_curr_ui_sync_execution_done(void)
Definition infrun.c:4276
static void clean_up_just_stopped_threads_fsms(struct execution_control_state *ecs)
Definition infrun.c:4244
void print_target_wait_results(ptid_t waiton_ptid, ptid_t result_ptid, const struct target_waitstatus &ws)
Definition infrun.c:3783
void restore_infcall_suspend_state(struct infcall_suspend_state *inf_state)
Definition infrun.c:9630
static struct thread_info * random_pending_event_thread(inferior *inf, ptid_t waiton_ptid)
Definition infrun.c:3799
static void infrun_inferior_execd(inferior *exec_inf, inferior *follow_inf)
Definition infrun.c:1648
ULONGEST get_stop_id(void)
Definition infrun.c:2842
static ULONGEST current_stop_id
Definition infrun.c:2837
static bool inline_frame_is_marked_for_skip(bool prev_frame, struct thread_info *tp)
Definition infrun.c:4830
static void reinstall_readline_callback_handler_cleanup()
Definition infrun.c:4222
static void handle_signal_stop(struct execution_control_state *ecs)
Definition infrun.c:6386
static const char follow_exec_mode_same[]
Definition infrun.c:1193
static bool non_stop_1
Definition infrun.c:227
static void set_schedlock_func(const char *args, int from_tty, struct cmd_list_element *c)
Definition infrun.c:2236
void discard_infcall_suspend_state(struct infcall_suspend_state *inf_state)
Definition infrun.c:9641
static void handle_step_into_function(struct gdbarch *gdbarch, struct execution_control_state *ecs)
Definition infrun.c:8071
static void sig_print_info(enum gdb_signal)
Definition infrun.c:9171
static const char follow_fork_mode_child[]
Definition infrun.c:408
static bool start_step_over(void)
Definition infrun.c:2035
static void restart_threads(struct thread_info *event_thread, inferior *inf=nullptr)
Definition infrun.c:6144
static void clear_step_over_info(void)
Definition infrun.c:1474
void print_signal_received_reason(struct ui_out *uiout, enum gdb_signal siggnal)
Definition infrun.c:8668
void get_last_target_status(process_stratum_target **target, ptid_t *ptid, target_waitstatus *status)
Definition infrun.c:4621
void proceed(CORE_ADDR addr, enum gdb_signal siggnal)
Definition infrun.c:3395
void signal_catch_update(const unsigned int *info)
Definition infrun.c:9153
void fetch_inferior_event()
Definition infrun.c:4361
static void show_non_stop(struct ui_file *file, int from_tty, struct cmd_list_element *c, const char *value)
Definition infrun.c:243
ptid_t user_visible_resume_ptid(int step)
Definition infrun.c:2271
static ptid_t internal_resume_ptid(int user_step)
Definition infrun.c:2350
static void handle_step_into_function_backward(struct gdbarch *gdbarch, struct execution_control_state *ecs)
Definition infrun.c:8148
readonly_detached_regcache * get_infcall_suspend_state_regcache(struct infcall_suspend_state *inf_state)
Definition infrun.c:9647
static void notify_about_to_proceed()
Definition infrun.c:2917
void maybe_call_commit_resumed_all_targets()
Definition infrun.c:3085
exec_direction_kind
Definition infrun.h:112
@ EXEC_REVERSE
Definition infrun.h:114
@ EXEC_FORWARD
Definition infrun.h:113
#define INFRUN_SCOPED_DEBUG_ENTER_EXIT
Definition infrun.h:48
#define infrun_debug_printf(fmt,...)
Definition infrun.h:38
static void infrun_debug_show_threads(const char *title, ThreadRange threads)
Definition infrun.h:61
#define INFRUN_SCOPED_DEBUG_START_END(fmt,...)
Definition infrun.h:43
void clear_inline_frame_state(process_stratum_target *target, ptid_t filter_ptid)
int inline_skipped_frames(thread_info *thread)
void step_into_inline_frame(thread_info *thread)
void skip_inline_frames(thread_info *thread, bpstat *stop_chain)
void interps_notify_no_history()
Definition interps.c:392
struct interp * top_level_interpreter(void)
Definition interps.c:345
void interps_notify_signal_received(gdb_signal sig)
Definition interps.c:376
void interps_notify_normal_stop(bpstat *bs, int print_frame)
Definition interps.c:400
void interps_notify_signal_exited(gdb_signal sig)
Definition interps.c:384
void interps_notify_user_selected_context_changed(user_selected_what selection)
Definition interps.c:416
void interps_notify_exited(int status)
Definition interps.c:408
CORE_ADDR skip_language_trampoline(const frame_info_ptr &frame, CORE_ADDR pc)
Definition language.c:526
static const char * range
Definition language.c:96
int return_child_result_value
Definition main.c:93
const char * async_reason_lookup(enum async_reply_reason reason)
Definition mi-common.c:50
@ EXEC_ASYNC_NO_HISTORY
Definition mi-common.h:45
@ EXEC_ASYNC_SIGNAL_RECEIVED
Definition mi-common.h:38
@ EXEC_ASYNC_EXITED_NORMALLY
Definition mi-common.h:37
@ EXEC_ASYNC_EXITED_SIGNALLED
Definition mi-common.h:35
@ EXEC_ASYNC_EXITED
Definition mi-common.h:36
observable< ptid_t > thread_stop_requested
observable< struct inferior * > inferior_exit
observable< inferior *, inferior * > inferior_execd
observable< inferior *, inferior *, target_waitkind > inferior_forked
observable< user_selected_what > user_selected_context_changed
observable< process_stratum_target *, ptid_t, ptid_t > thread_ptid_changed
observable about_to_proceed
observable< struct bpstat *, int > normal_stop
observable< enum gdb_signal > signal_received
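The observables listed above are the notification points infrun fires (about_to_proceed, normal_stop, signal_received, and so on). A minimal sketch of attaching an observer to normal_stop, assuming the gdb::observers namespace and the attach (callback, name) interface from gdbsupport/observable.h; the observer and module names are made up:

static void
on_normal_stop (struct bpstat *bs, int print_frame)
{
  /* Called each time the inferior comes to a normal stop.  */
}

static void
attach_example ()
{
  gdb::observers::normal_stop.attach (on_normal_stop, "example-module");
}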
struct value * probe_safe_evaluate_at_pc(frame_info_ptr frame, unsigned n)
Definition probe.c:789
struct bound_probe find_probe_by_pc(CORE_ADDR pc)
Definition probe.c:243
std::set< process_stratum_target * > all_non_exited_process_targets()
void switch_to_target_no_thread(process_stratum_target *target)
struct program_space * current_program_space
Definition progspace.c:40
void set_current_program_space(struct program_space *pspace)
Definition progspace.c:243
struct program_space * clone_program_space(struct program_space *dest, struct program_space *src)
Definition progspace.c:222
struct address_space * maybe_new_address_space(void)
Definition progspace.c:59
int value
Definition py-param.c:79
int record_full_is_used(void)
scoped_restore_tmpl< int > record_full_gdb_operation_disable_set(void)
struct target_ops * find_record_target(void)
Definition record.c:64
CORE_ADDR regcache_read_pc(struct regcache *regcache)
Definition regcache.c:1333
struct regcache * get_thread_arch_aspace_regcache(inferior *inf_for_target_calls, ptid_t ptid, gdbarch *arch, struct address_space *aspace)
Definition regcache.c:350
CORE_ADDR regcache_read_pc_protected(regcache *regcache)
Definition regcache.c:1361
void regcache_write_pc(struct regcache *regcache, CORE_ADDR pc)
Definition regcache.c:1377
struct regcache * get_current_regcache(void)
Definition regcache.c:429
struct regcache * get_thread_regcache(process_stratum_target *target, ptid_t ptid)
Definition regcache.c:400
void(* func)(remote_target *remote, char *)
#define enable()
Definition ser-go32.c:239
bool function_name_is_marked_for_skip(const char *function_name, const symtab_and_line &function_sal)
Definition skip.c:614
gdb::unique_xmalloc_ptr< char > exec_file_find(const char *in_pathname, int *fd)
Definition solib.c:334
void no_shared_libraries(const char *ignored, int from_tty)
Definition solib.c:1284
bool in_solib_dynsym_resolve_code(CORE_ADDR pc)
Definition solib.c:1262
void update_solib_breakpoints(void)
Definition solib.c:1298
void handle_solib_event(void)
Definition solib.c:1309
static void print_frame(const frame_print_options &opts, frame_info_ptr frame, int print_level, enum print_what print_what, int print_args, struct symtab_and_line sal)
const struct block * block
Definition symtab.h:1537
struct symbol * symbol
Definition symtab.h:1533
Definition block.h:109
CORE_ADDR entry_pc() const
Definition block.h:195
enum stop_stack_kind call_dummy
enum bpstat_what_main_action main_action
bptype type
Definition breakpoint.h:798
bp_location & first_loc()
Definition breakpoint.h:683
bpdisp disposition
Definition breakpoint.h:802
enum language language() const
Definition symtab.c:426
gdbarch * get_original_gdbarch() const
void set(gdbarch *original_gdbarch)
const char * stop_func_name
Definition infrun.c:2016
process_stratum_target * target
Definition infrun.c:2005
struct thread_info * event_thread
Definition infrun.c:2009
CORE_ADDR stop_func_end
Definition infrun.c:2015
CORE_ADDR stop_func_alt_start
Definition infrun.c:2013
execution_control_state(thread_info *thr=nullptr)
Definition infrun.c:1999
CORE_ADDR stop_func_start
Definition infrun.c:2014
struct target_waitstatus ws
Definition infrun.c:2011
const char * print_name() const
Definition symtab.h:475
const char * search_name() const
Definition symtab.c:1106
Definition gnu-nat.c:153
pid_t pid
Definition gnu-nat.c:165
struct thread_control_state thread_control
Definition infrun.c:9658
enum stop_stack_kind stop_stack_dummy
Definition infrun.c:9662
struct inferior_control_state inferior_control
Definition infrun.c:9659
enum stop_kind stop_soon
Definition inferior.h:326
int breakpoints_not_allowed
Definition progspace.h:353
Definition value.h:90
scoped_disable_commit_resumed(const char *reason)
Definition infrun.c:3113
scoped_enable_commit_resumed(const char *reason)
Definition infrun.c:3193
const address_space * aspace
Definition infrun.c:1417
CORE_ADDR address
Definition infrun.c:1418
int nonsteppable_watchpoint_p
Definition infrun.c:1422
ULONGEST stop_id
Definition infrun.c:8848
bool changed() const
Definition infrun.c:8883
thread_info_ref thread
Definition infrun.c:8856
DISABLE_COPY_AND_ASSIGN(stop_context)
ptid_t ptid
Definition infrun.c:8852
const block * value_block() const
Definition symtab.h:1549
struct obj_section * section
Definition symtab.h:2330
struct symtab * symtab
Definition symtab.h:2328
CORE_ADDR pc
Definition symtab.h:2337
CORE_ADDR end
Definition symtab.h:2338
struct program_space * pspace
Definition symtab.h:2326
const char * filename
Definition symtab.h:1725
virtual int async_wait_fd() TARGET_DEFAULT_NORETURN(noprocess())
const char * shortname() const
Definition target.h:456
virtual bool is_async_p() TARGET_DEFAULT_RETURN(false)
target_waitstatus & set_spurious()
Definition waitstatus.h:300
target_waitstatus & set_no_resumed()
Definition waitstatus.h:321
target_waitstatus & set_stopped(gdb_signal sig)
Definition waitstatus.h:230
enum gdb_signal sig
Definition waitstatus.h:414
target_waitstatus & set_ignore()
Definition waitstatus.h:307
target_waitkind kind() const
Definition waitstatus.h:345
std::string to_string() const
Definition waitstatus.c:26
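A target_waitstatus is populated with the set_* methods above and inspected through kind () and to_string (). A minimal sketch:

target_waitstatus ws;
ws.set_stopped (GDB_SIGNAL_TRAP);

if (ws.kind () == TARGET_WAITKIND_STOPPED)
  infrun_debug_printf ("event: %s", ws.to_string ().c_str ());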
CORE_ADDR step_range_start
Definition gdbthread.h:124
CORE_ADDR step_range_end
Definition gdbthread.h:125
enum step_over_calls_kind step_over_calls
Definition gdbthread.h:161
struct symbol * step_start_function
Definition gdbthread.h:128
struct breakpoint * exception_resume_breakpoint
Definition gdbthread.h:105
struct breakpoint * step_resume_breakpoint
Definition gdbthread.h:102
struct breakpoint * single_step_breakpoints
Definition gdbthread.h:112
virtual struct return_value_info * return_value()
Definition thread-fsm.h:63
bool finished_p() const
Definition thread-fsm.h:87
virtual void clean_up(struct thread_info *thread)
Definition thread-fsm.h:48
virtual bool should_stop(struct thread_info *thread)=0
virtual bool should_notify_stop()
Definition thread-fsm.h:77
ULONGEST length() const
Definition gdbtypes.h:983
Definition ui.h:55
ui
int command_editing
Definition ui.h:87
int async
Definition ui.h:106
enum prompt_state prompt_state
Definition ui.h:136
void register_file_handler()
Definition ui.c:164
Definition value.h:130
value
static struct value * allocate_computed(struct type *type, const struct lval_funcs *funcs, void *closure)
Definition value.c:981
static struct value * allocate(struct type *type)
Definition value.c:957
struct type * type() const
Definition value.h:180
LONGEST offset() const
Definition value.h:222
gdb::array_view< gdb_byte > contents_all_raw()
Definition value.c:1021
bool optimized_out()
Definition value.c:1279
ptid_t ptid
Definition infrun.c:4045
target_waitstatus ws
Definition infrun.c:4048
process_stratum_target * target
Definition infrun.c:4042
@ SYMFILE_NO_READ
@ SYMFILE_DEFER_BP_RESET
struct obj_section * find_pc_overlay(CORE_ADDR pc)
Definition symfile.c:3174
int overlay_cache_invalid
Definition symfile.c:2976
struct compunit_symtab * find_pc_compunit_symtab(CORE_ADDR pc)
Definition symtab.c:2946
struct block_symbol lookup_symbol_search_name(const char *search_name, const struct block *block, domain_enum domain)
Definition symtab.c:1976
struct symtab_and_line find_pc_line(CORE_ADDR pc, int notcurrent)
Definition symtab.c:3295
@ VAR_DOMAIN
Definition symtab.h:910
std::string make_target_connection_string(process_stratum_target *t)
void target_dcache_invalidate(void)
void target_find_description(void)
void target_clear_description(void)
bool target_have_steppable_watchpoint()
Definition target.c:508
bool target_is_async_p()
Definition target.c:402
ptid_t target_wait(ptid_t ptid, struct target_waitstatus *status, target_wait_flags options)
Definition target.c:2586
bool target_has_pending_events()
Definition target.c:2692
int target_has_stack()
Definition target.c:177
exec_direction_kind target_execution_direction()
Definition target.c:410
void target_async(bool enable)
Definition target.c:4337
bool target_supports_stopped_by_hw_breakpoint()
Definition target.c:498
bool target_can_execute_reverse()
Definition target.c:596
int target_record_is_replaying(ptid_t ptid)
Definition target.c:4132
void target_detach(inferior *inf, int from_tty)
Definition target.c:2531
bool target_can_async_p()
Definition target.c:384
void target_pass_signals(gdb::array_view< const unsigned char > pass_signals)
Definition target.c:2698
bool target_can_lock_scheduler()
Definition target.c:374
void target_program_signals(gdb::array_view< const unsigned char > program_signals)
Definition target.c:2704
void target_record_stop_replaying(void)
Definition target.c:4148
bool target_stopped_by_hw_breakpoint()
Definition target.c:492
void target_follow_fork(inferior *child_inf, ptid_t child_ptid, target_waitkind fork_kind, bool follow_child, bool detach_fork)
Definition target.c:2721
void target_commit_resumed()
Definition target.c:2683
bool target_has_execution(inferior *inf)
Definition target.c:201
int target_record_will_replay(ptid_t ptid, int dir)
Definition target.c:4140
bool target_supports_stopped_by_sw_breakpoint()
Definition target.c:484
int target_supports_multi_process(void)
Definition target.c:3000
void target_thread_events(int enable)
Definition target.c:4349
void target_stop(ptid_t ptid)
Definition target.c:3782
LONGEST target_read(struct target_ops *ops, enum target_object object, const char *annex, gdb_byte *buf, ULONGEST offset, LONGEST len)
Definition target.c:1970
bool target_is_non_stop_p()
Definition target.c:4394
bool exists_non_stop_target()
Definition target.c:4406
std::string target_pid_to_str(ptid_t ptid)
Definition target.c:2623
void target_resume(ptid_t scope_ptid, int step, enum gdb_signal signal)
Definition target.c:2657
bool target_stopped_by_watchpoint()
Definition target.c:470
LONGEST target_write(struct target_ops *ops, enum target_object object, const char *annex, const gdb_byte *buf, ULONGEST offset, LONGEST len)
Definition target.c:2249
bool target_stopped_by_sw_breakpoint()
Definition target.c:478
void target_pass_ctrlc(void)
Definition target.c:3812
const char * target_shortname()
Definition target.c:216
void target_follow_exec(inferior *follow_inf, ptid_t ptid, const char *execd_pathname)
Definition target.c:2744
void target_mourn_inferior(ptid_t ptid)
Definition target.c:2758
int target_thread_alive(ptid_t ptid)
Definition target.c:3770
int target_supports_disable_randomization(void)
Definition target.c:2992
void update_target_permissions(void)
Definition target.c:4473
#define target_stopped_data_address(target, addr_p)
Definition target.h:2106
bool may_insert_fast_tracepoints
@ INF_REG_EVENT
Definition target.h:134
@ INF_EXEC_COMPLETE
Definition target.h:136
bool may_write_registers
@ TARGET_OBJECT_SIGNAL_INFO
Definition target.h:187
bool may_insert_tracepoints
bool may_insert_breakpoints
bool may_write_memory
bool may_stop
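The target_* wrappers listed above are the layer infrun drives when resuming and waiting for the inferior. A stripped-down sketch of the resume-then-wait pattern, for illustration only; the real flow in infrun.c goes through resume, do_target_wait and the commit-resumed machinery:

/* Single-step the current thread, delivering no signal.  */
target_resume (inferior_ptid, 1, GDB_SIGNAL_0);
target_commit_resumed ();

/* Block until some resumed thread reports an event.  */
target_waitstatus ws;
ptid_t event_ptid = target_wait (minus_one_ptid, &ws, 0);
infrun_debug_printf ("%s reported %s",
		     target_pid_to_str (event_ptid).c_str (),
		     ws.to_string ().c_str ());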
int gdb_in_secondary_prompt_p(struct ui *ui)
Definition top.c:942
void set_current_traceframe(int num)
#define current_uiout
Definition ui-out.h:40
static string_field_s * string_field(const char *name, const char *str, string_field_s &&tmp={})
Definition ui-out.h:126
struct ui * main_ui
Definition ui.c:34
struct ui * current_ui
Definition ui.c:35
@ PROMPT_BLOCKED
Definition ui.h:35
@ PROMPT_NEEDED
Definition ui.h:39
#define SWITCH_THRU_ALL_UIS()
Definition ui.h:215
static ui_range all_uis()
Definition ui.h:222
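SWITCH_THRU_ALL_UIS () iterates over every UI, making each one in turn the current_ui inside the loop body; infrun uses it when a stop or resumption must be reflected on all UIs. A sketch of the idiom; the printed message is made up:

SWITCH_THRU_ALL_UIS ()
  {
    if (current_ui->prompt_state == PROMPT_BLOCKED)
      gdb_printf (gdb_stdout, "this UI is waiting on the inferior\n");
  }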
int query(const char *ctlstr,...)
Definition utils.c:943
const char * paddress(struct gdbarch *gdbarch, CORE_ADDR addr)
Definition utils.c:3166
bool pagination_enabled
Definition utils.c:122
void gdb_printf(struct ui_file *stream, const char *format,...)
Definition utils.c:1886
void gdb_flush(struct ui_file *stream)
Definition utils.c:1498
void gdb_puts(const char *linebuffer, struct ui_file *stream)
Definition utils.c:1809
#define gdb_stderr
Definition utils.h:187
#define gdb_stdout
Definition utils.h:182
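gdb_printf, gdb_puts and gdb_flush above are the stream-aware output helpers used for user-visible output, with gdb_stdout and gdb_stderr as the default streams. A trivial usage sketch; the message text is invented:

gdb_printf (gdb_stderr, "warning: thread %d exited during step-over\n", 42);
gdb_puts ("done\n", gdb_stdout);
gdb_flush (gdb_stdout);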
void clear_internalvar(struct internalvar *var)
Definition value.c:2265
CORE_ADDR value_as_address(struct value *val)
Definition value.c:2636
struct internalvar * create_internalvar_type_lazy(const char *name, const struct internalvar_funcs *funcs, void *data)
Definition value.c:1966
struct internalvar * lookup_internalvar(const char *name)
Definition value.c:2001
void set_internalvar_integer(struct internalvar *var, LONGEST l)
Definition value.c:2232
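The internalvar helpers above back GDB's convenience variables; infrun uses them to record, for example, the inferior's exit status. A sketch assuming the conventional "_exitcode" name (exposed to the user as $_exitcode):

/* Publish an exit status as $_exitcode, then clear it again.  */
set_internalvar_integer (lookup_internalvar ("_exitcode"), 0);
clear_internalvar (lookup_internalvar ("_exitcode"));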
@ TARGET_WNOHANG
Definition wait.h:32
target_waitkind
Definition waitstatus.h:30
@ TARGET_WAITKIND_NO_RESUMED
Definition waitstatus.h:96
@ TARGET_WAITKIND_THREAD_EXITED
Definition waitstatus.h:102
@ TARGET_WAITKIND_SPURIOUS
Definition waitstatus.h:78
@ TARGET_WAITKIND_VFORK_DONE
Definition waitstatus.h:66
@ TARGET_WAITKIND_THREAD_CREATED
Definition waitstatus.h:99
@ TARGET_WAITKIND_LOADED
Definition waitstatus.h:44
@ TARGET_WAITKIND_SIGNALLED
Definition waitstatus.h:40
@ TARGET_WAITKIND_STOPPED
Definition waitstatus.h:36
@ TARGET_WAITKIND_EXITED
Definition waitstatus.h:32
@ TARGET_WAITKIND_SYSCALL_RETURN
Definition waitstatus.h:73
@ TARGET_WAITKIND_SYSCALL_ENTRY
Definition waitstatus.h:72
@ TARGET_WAITKIND_NO_HISTORY
Definition waitstatus.h:93
@ TARGET_WAITKIND_FORKED
Definition waitstatus.h:49
@ TARGET_WAITKIND_VFORKED
Definition waitstatus.h:53
@ TARGET_WAITKIND_EXECD
Definition waitstatus.h:57
@ TARGET_WAITKIND_IGNORE
Definition waitstatus.h:89
@ TARGET_STOPPED_BY_SW_BREAKPOINT
Definition waitstatus.h:434
@ TARGET_STOPPED_BY_SINGLE_STEP
Definition waitstatus.h:443
@ TARGET_STOPPED_BY_WATCHPOINT
Definition waitstatus.h:440
@ TARGET_STOPPED_BY_HW_BREAKPOINT
Definition waitstatus.h:437
@ TARGET_STOPPED_BY_NO_REASON
Definition waitstatus.h:431
#define nullptr
Definition x86-cpuid.h:28