GDB (xrefs)
Loading...
Searching...
No Matches
ravenscar-thread.c
Go to the documentation of this file.
1/* Ada Ravenscar thread support.
2
3 Copyright (C) 2004-2023 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20#include "defs.h"
21#include "gdbcore.h"
22#include "gdbthread.h"
23#include "ada-lang.h"
24#include "target.h"
25#include "inferior.h"
26#include "command.h"
27#include "ravenscar-thread.h"
28#include "observable.h"
29#include "gdbcmd.h"
30#include "top.h"
31#include "regcache.h"
32#include "objfiles.h"
33#include <unordered_map>
34
35/* This module provides support for "Ravenscar" tasks (Ada) when
36 debugging on bare-metal targets.
37
38 The typical situation is when debugging a bare-metal target over
39 the remote protocol. In that situation, the system does not know
40 about high-level concepts such as threads, only about some code
41 running on one or more CPUs. And since the remote protocol does not
42 provide any handling for CPUs, the de facto standard for handling
43 them is to have one thread per CPU, where the thread's ptid has
44 its lwp field set to the CPU number (eg: 1 for the first CPU,
45 2 for the second one, etc). This module will make that assumption.
46
47 This module then creates and maintains the list of threads based
48 on the list of Ada tasks, with one thread per Ada task. The convention
49 is that threads corresponding to the CPUs (see assumption above)
50 have a ptid_t of the form (PID, LWP, 0), while threads corresponding
51 to our Ada tasks have a ptid_t of the form (PID, 0, TID) where TID
52 is the Ada task's ID as extracted from Ada runtime information.
53
54 Switching to a given Ada task (or its underlying thread) is performed
55 by fetching the registers of that task from the memory area where
56 the registers were saved. For any of the other operations, the
57 operation is performed by first finding the CPU on which the task
58 is running, switching to its corresponding ptid, and then performing
59 the operation on that ptid using the target beneath us. */
60
61/* If true, ravenscar task support is enabled. */
62static bool ravenscar_task_support = true;
63
64static const char running_thread_name[] = "__gnat_running_thread_table";
65
66static const char known_tasks_name[] = "system__tasking__debug__known_tasks";
67static const char first_task_name[] = "system__tasking__debug__first_task";
68
70 = "system__bb__threads__initialize";
71
73 "ravenscar",
74 N_("Ravenscar tasks."),
75 N_("Ravenscar tasks support.")
76};
77
79{
84
85 const target_info &info () const override
86 { return ravenscar_target_info; }
87
88 strata stratum () const override { return thread_stratum; }
89
90 ptid_t wait (ptid_t, struct target_waitstatus *, target_wait_flags) override;
91 void resume (ptid_t, int, enum gdb_signal) override;
92
93 void fetch_registers (struct regcache *, int) override;
94 void store_registers (struct regcache *, int) override;
95
96 void prepare_to_store (struct regcache *) override;
97
98 bool stopped_by_sw_breakpoint () override;
99
100 bool stopped_by_hw_breakpoint () override;
101
102 bool stopped_by_watchpoint () override;
103
104 bool stopped_data_address (CORE_ADDR *) override;
105
107 const char *annex,
108 gdb_byte *readbuf,
109 const gdb_byte *writebuf,
110 ULONGEST offset, ULONGEST len,
111 ULONGEST *xfered_len) override;
112
113 bool thread_alive (ptid_t ptid) override;
114
115 int core_of_thread (ptid_t ptid) override;
116
117 void update_thread_list () override;
118
119 std::string pid_to_str (ptid_t) override;
120
121 ptid_t get_ada_task_ptid (long lwp, ULONGEST thread) override;
122
123 struct btrace_target_info *enable_btrace (thread_info *tp,
124 const struct btrace_config *conf)
125 override
126 {
127 process_stratum_target *proc_target
129 ptid_t underlying = get_base_thread_from_ravenscar_task (tp->ptid);
130 tp = proc_target->find_thread (underlying);
131
132 return beneath ()->enable_btrace (tp, conf);
133 }
134
135 void mourn_inferior () override;
136
137 void close () override
138 {
139 delete this;
140 }
141
143
144private:
145
146 /* PTID of the last thread that received an event.
147 This can be useful to determine the associated task that received
148 the event, to make it the current task. */
150
151 ptid_t active_task (int cpu);
152 bool task_is_currently_active (ptid_t ptid);
153 bool runtime_initialized ();
154 int get_thread_base_cpu (ptid_t ptid);
155 ptid_t get_base_thread_from_ravenscar_task (ptid_t ptid);
156 void add_thread (struct ada_task_info *task);
157
158 /* Like switch_to_thread, but uses the base ptid for the thread. */
160 {
161 process_stratum_target *proc_target
163 ptid_t underlying = get_base_thread_from_ravenscar_task (ptid);
164 switch_to_thread (proc_target->find_thread (underlying));
165 }
166
167 /* Some targets use lazy FPU initialization. On these, the FP
168 registers for a given task might be uninitialized, or stored in
169 the per-task context, or simply be the live registers on the CPU.
170 This enum is used to encode this information. */
172 {
173 /* This target doesn't do anything special for FP registers -- if
174 any exist, they are treated just identical to non-FP
175 registers. */
177 /* This target uses the lazy FP scheme, and the FP registers are
178 taken from the CPU. This can happen for any task, because if a
179 task switch occurs, the registers aren't immediately written to
180 the per-task context -- this is deferred until the current task
181 causes an FPU trap. */
183 /* This target uses the lazy FP scheme, and the FP registers are
184 not available. Maybe this task never initialized the FPU, or
185 maybe GDB couldn't find the required symbol. */
187 };
188
189 /* Return the FPU state. */
191 const ravenscar_arch_ops *arch_ops);
192
193 /* This maps a TID to the CPU on which it was running. This is
194 needed because sometimes the runtime will report an active task
195 that hasn't yet been put on the list of tasks that is read by
196 ada-tasks.c. */
197 std::unordered_map<ULONGEST, int> m_cpu_map;
198};
199
200/* Return true iff PTID corresponds to a ravenscar task. */
201
202static bool
203is_ravenscar_task (ptid_t ptid)
204{
205 /* By construction, ravenscar tasks have their LWP set to zero.
206 Also make sure that the TID is nonzero, as some remotes, when
207 asked for the list of threads, will return the first thread
208 as having its TID set to zero. For instance, TSIM version
209 2.0.48 for LEON3 sends 'm0' as a reply to the 'qfThreadInfo'
210 query, which the remote protocol layer then treats as a thread
211 whose TID is 0. This is obviously not a ravenscar task. */
212 return ptid.lwp () == 0 && ptid.tid () != 0;
213}
214
215/* Given PTID, which can be either a ravenscar task or a CPU thread,
216 return which CPU that ptid is running on.
217
218 This assume that PTID is a valid ptid_t. Otherwise, a gdb_assert
219 will be triggered. */
220
221int
223{
224 int base_cpu;
225
226 if (is_ravenscar_task (ptid))
227 {
228 /* Prefer to not read inferior memory if possible, to avoid
229 reentrancy problems with xfer_partial. */
230 auto iter = m_cpu_map.find (ptid.tid ());
231
232 if (iter != m_cpu_map.end ())
233 base_cpu = iter->second;
234 else
235 {
236 struct ada_task_info *task_info = ada_get_task_info_from_ptid (ptid);
237
238 gdb_assert (task_info != NULL);
239 base_cpu = task_info->base_cpu;
240 }
241 }
242 else
243 {
244 /* We assume that the LWP of the PTID is equal to the CPU number. */
245 base_cpu = ptid.lwp ();
246 }
247
248 return base_cpu;
249}
250
251/* Given a ravenscar task (identified by its ptid_t PTID), return true
252 if this task is the currently active task on the cpu that task is
253 running on.
254
255 In other words, this function determine which CPU this task is
256 currently running on, and then return nonzero if the CPU in question
257 is executing the code for that task. If that's the case, then
258 that task's registers are in the CPU bank. Otherwise, the task
259 is currently suspended, and its registers have been saved in memory. */
260
261bool
263{
264 ptid_t active_task_ptid = active_task (get_thread_base_cpu (ptid));
265
266 return ptid == active_task_ptid;
267}
268
269/* Return the CPU thread (as a ptid_t) on which the given ravenscar
270 task is running.
271
272 This is the thread that corresponds to the CPU on which the task
273 is running. */
274
275ptid_t
277{
278 int base_cpu;
279
280 if (!is_ravenscar_task (ptid))
281 return ptid;
282
283 base_cpu = get_thread_base_cpu (ptid);
284 return ptid_t (ptid.pid (), base_cpu);
285}
286
287/* Fetch the ravenscar running thread from target memory, make sure
288 there's a corresponding thread in the thread list, and return it.
289 If the runtime is not initialized, return NULL. */
290
293{
294 process_stratum_target *proc_target
296
297 int base_cpu;
298
299 gdb_assert (!is_ravenscar_task (m_base_ptid));
300 base_cpu = get_thread_base_cpu (m_base_ptid);
301
302 if (!runtime_initialized ())
303 return nullptr;
304
305 /* It's possible for runtime_initialized to return true but for it
306 not to be fully initialized. For example, this can happen for a
307 breakpoint placed at the task's beginning. */
308 ptid_t active_ptid = active_task (base_cpu);
309 if (active_ptid == null_ptid)
310 return nullptr;
311
312 /* The running thread may not have been added to
313 system.tasking.debug's list yet; so ravenscar_update_thread_list
314 may not always add it to the thread list. Add it here. */
315 thread_info *active_thr = proc_target->find_thread (active_ptid);
316 if (active_thr == nullptr)
317 {
318 active_thr = ::add_thread (proc_target, active_ptid);
319 m_cpu_map[active_ptid.tid ()] = base_cpu;
320 }
321 return active_thr;
322}
323
324/* The Ravenscar Runtime exports a symbol which contains the ID of
325 the thread that is currently running. Try to locate that symbol
326 and return its associated minimal symbol.
327 Return NULL if not found. */
328
329static struct bound_minimal_symbol
331{
332 struct bound_minimal_symbol msym;
333
334 msym = lookup_minimal_symbol (running_thread_name, NULL, NULL);
335 if (!msym.minsym)
336 /* Older versions of the GNAT runtime were using a different
337 (less ideal) name for the symbol where the active thread ID
338 is stored. If we couldn't find the symbol using the latest
339 name, then try the old one. */
340 msym = lookup_minimal_symbol ("running_thread", NULL, NULL);
341
342 return msym;
343}
344
345/* Return True if the Ada Ravenscar run-time can be found in the
346 application. */
347
348static bool
350{
351 struct bound_minimal_symbol msym_ravenscar_runtime_initializer
353 struct bound_minimal_symbol msym_known_tasks
355 struct bound_minimal_symbol msym_first_task
357 struct bound_minimal_symbol msym_running_thread
359
360 return (msym_ravenscar_runtime_initializer.minsym
361 && (msym_known_tasks.minsym || msym_first_task.minsym)
362 && msym_running_thread.minsym);
363}
364
365/* Return True if the Ada Ravenscar run-time can be found in the
366 application, and if it has been initialized on target. */
367
368bool
370{
371 return active_task (1) != null_ptid;
372}
373
374/* Return the ID of the thread that is currently running.
375 Return 0 if the ID could not be determined. */
376
377static CORE_ADDR
379{
380 struct bound_minimal_symbol object_msym = get_running_thread_msymbol ();
381 int object_size;
382 int buf_size;
383 gdb_byte *buf;
384 CORE_ADDR object_addr;
385 struct type *builtin_type_void_data_ptr
387
388 if (!object_msym.minsym)
389 return 0;
390
391 object_size = builtin_type_void_data_ptr->length ();
392 object_addr = (object_msym.value_address ()
393 + (cpu - 1) * object_size);
394 buf_size = object_size;
395 buf = (gdb_byte *) alloca (buf_size);
396 read_memory (object_addr, buf, buf_size);
397 return extract_typed_address (buf, builtin_type_void_data_ptr);
398}
399
400void
401ravenscar_thread_target::resume (ptid_t ptid, int step,
402 enum gdb_signal siggnal)
403{
404 /* If we see a wildcard resume, we simply pass that on. Otherwise,
405 arrange to resume the base ptid. */
407 if (ptid.is_pid ())
408 {
409 /* We only have one process, so resume all threads of it. */
410 ptid = minus_one_ptid;
411 }
412 else if (ptid != minus_one_ptid)
413 ptid = m_base_ptid;
414 beneath ()->resume (ptid, step, siggnal);
415}
416
417ptid_t
420 target_wait_flags options)
421{
423 = as_process_stratum_target (this->beneath ());
424 ptid_t event_ptid;
425
426 if (ptid != minus_one_ptid)
427 ptid = m_base_ptid;
428 event_ptid = beneath->wait (ptid, status, 0);
429 /* Find any new threads that might have been created, and return the
430 active thread.
431
432 Only do it if the program is still alive, though. Otherwise,
433 this causes problems when debugging through the remote protocol,
434 because we might try switching threads (and thus sending packets)
435 after the remote has disconnected. */
436 if (status->kind () != TARGET_WAITKIND_EXITED
437 && status->kind () != TARGET_WAITKIND_SIGNALLED
439 {
440 m_base_ptid = event_ptid;
441 this->update_thread_list ();
442 thread_info *thr = this->add_active_thread ();
443 if (thr != nullptr)
444 return thr->ptid;
445 }
446 return event_ptid;
447}
448
449/* Add the thread associated to the given TASK to the thread list
450 (if the thread has already been added, this is a no-op). */
451
452void
454{
455 if (current_inferior ()->find_thread (task->ptid) == NULL)
456 {
457 ::add_thread (current_inferior ()->process_target (), task->ptid);
458 m_cpu_map[task->ptid.tid ()] = task->base_cpu;
459 }
460}
461
462void
464{
465 /* iterate_over_live_ada_tasks requires that inferior_ptid be set,
466 but this isn't always the case in target methods. So, we ensure
467 it here. */
468 scoped_restore save_ptid = make_scoped_restore (&inferior_ptid,
470
471 /* Do not clear the thread list before adding the Ada task, to keep
472 the thread that the process stratum has included into it
473 (m_base_ptid) and the running thread, that may not have been included
474 to system.tasking.debug's list yet. */
475
476 iterate_over_live_ada_tasks ([this] (struct ada_task_info *task)
477 {
478 this->add_thread (task);
479 });
480}
481
482ptid_t
484{
485 CORE_ADDR tid = get_running_thread_id (cpu);
486
487 if (tid == 0)
488 return null_ptid;
489 else
490 return ptid_t (m_base_ptid.pid (), 0, tid);
491}
492
493bool
495{
496 /* Ravenscar tasks are non-terminating. */
497 return true;
498}
499
500std::string
502{
503 if (!is_ravenscar_task (ptid))
504 return beneath ()->pid_to_str (ptid);
505
506 return string_printf ("Ravenscar Thread 0x%s",
507 phex_nz (ptid.tid (), sizeof (ULONGEST)));
508}
509
510CORE_ADDR
512{
513 struct gdbarch *gdbarch = regcache->arch ();
514 const int sp_regnum = gdbarch_sp_regnum (gdbarch);
515 ULONGEST stack_address;
517 return (CORE_ADDR) stack_address;
518}
519
520void
522 int regnum,
523 CORE_ADDR descriptor,
524 CORE_ADDR stack_base) const
525{
526 CORE_ADDR addr;
528 addr = stack_base;
529 else
530 addr = descriptor;
531 addr += offsets[regnum];
532
533 struct gdbarch *gdbarch = regcache->arch ();
535 gdb_byte *buf = (gdb_byte *) alloca (size);
536 read_memory (addr, buf, size);
537 regcache->raw_supply (regnum, buf);
538}
539
540void
542 int regnum) const
543{
544 gdb_assert (regnum != -1);
545
546 struct gdbarch *gdbarch = regcache->arch ();
547 /* The tid is the thread_id field, which is a pointer to the thread. */
548 CORE_ADDR thread_descriptor_address
549 = (CORE_ADDR) regcache->ptid ().tid ();
550
551 int sp_regno = -1;
552 CORE_ADDR stack_address = 0;
554 {
555 /* We must supply SP for get_stack_base, so recurse. */
556 sp_regno = gdbarch_sp_regnum (gdbarch);
557 gdb_assert (!(sp_regno >= first_stack_register
558 && sp_regno <= last_stack_register));
559 fetch_register (regcache, sp_regno);
560 stack_address = get_stack_base (regcache);
561 }
562
563 if (regnum < offsets.size () && offsets[regnum] != -1)
564 supply_one_register (regcache, regnum, thread_descriptor_address,
565 stack_address);
566}
567
568void
570 CORE_ADDR descriptor,
571 CORE_ADDR stack_base) const
572{
573 CORE_ADDR addr;
575 addr = stack_base;
576 else
577 addr = descriptor;
578 addr += offsets[regnum];
579
580 struct gdbarch *gdbarch = regcache->arch ();
582 gdb_byte *buf = (gdb_byte *) alloca (size);
584 write_memory (addr, buf, size);
585}
586
587void
589 int regnum) const
590{
591 gdb_assert (regnum != -1);
592
593 /* The tid is the thread_id field, which is a pointer to the thread. */
594 CORE_ADDR thread_descriptor_address
595 = (CORE_ADDR) regcache->ptid ().tid ();
596
597 CORE_ADDR stack_address = 0;
599 stack_address = get_stack_base (regcache);
600
601 if (regnum < offsets.size () && offsets[regnum] != -1)
602 store_one_register (regcache, regnum, thread_descriptor_address,
603 stack_address);
604}
605
606/* Temporarily set the ptid of a regcache to some other value. When
607 this object is destroyed, the regcache's original ptid is
608 restored. */
609
611{
612public:
613
616 m_save_ptid (regcache->ptid ())
617 {
618 m_regcache->set_ptid (new_ptid);
619 }
620
625
626private:
627
628 /* The regcache. */
630 /* The saved ptid. */
632};
633
636 const ravenscar_arch_ops *arch_ops)
637{
638 /* We want to return true if the special FP register handling is
639 needed. If this target doesn't have lazy FP, then no special
640 treatment is ever needed. */
641 if (!arch_ops->on_demand_fp ())
642 return NOTHING_SPECIAL;
643
644 bound_minimal_symbol fpu_context
645 = lookup_minimal_symbol ("system__bb__cpu_primitives__current_fpu_context",
646 nullptr, nullptr);
647 /* If the symbol can't be found, just fall back. */
648 if (fpu_context.minsym == nullptr)
649 return NO_FP_REGISTERS;
650
651 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
652 ptr_type = lookup_pointer_type (ptr_type);
653 value *val = value_from_pointer (ptr_type, fpu_context.value_address ());
654
655 int cpu = get_thread_base_cpu (regcache->ptid ());
656 /* The array index type has a lower bound of 1 -- it is Ada code --
657 so subtract 1 here. */
658 val = value_ptradd (val, cpu - 1);
659
660 val = value_ind (val);
661 CORE_ADDR fpu_task = value_as_long (val);
662
663 /* The tid is the thread_id field, which is a pointer to the thread. */
664 CORE_ADDR thread_descriptor_address
665 = (CORE_ADDR) regcache->ptid ().tid ();
666 if (fpu_task == (thread_descriptor_address
667 + arch_ops->get_fpu_context_offset ()))
668 return LIVE_FP_REGISTERS;
669
670 int v_init_offset = arch_ops->get_v_init_offset ();
671 gdb_byte init = 0;
672 read_memory (thread_descriptor_address + v_init_offset, &init, 1);
673 return init ? NOTHING_SPECIAL : NO_FP_REGISTERS;
674}
675
676void
678 int regnum)
679{
680 ptid_t ptid = regcache->ptid ();
681
682 if (runtime_initialized () && is_ravenscar_task (ptid))
683 {
684 struct gdbarch *gdbarch = regcache->arch ();
685 bool is_active = task_is_currently_active (ptid);
687 gdb::optional<fpu_state> fp_state;
688
689 int low_reg = regnum == -1 ? 0 : regnum;
690 int high_reg = regnum == -1 ? gdbarch_num_regs (gdbarch) : regnum + 1;
691
692 ptid_t base = get_base_thread_from_ravenscar_task (ptid);
693 for (int i = low_reg; i < high_reg; ++i)
694 {
695 bool use_beneath = false;
696 if (arch_ops->is_fp_register (i))
697 {
698 if (!fp_state.has_value ())
699 fp_state = get_fpu_state (regcache, arch_ops);
700 if (*fp_state == NO_FP_REGISTERS)
701 continue;
702 if (*fp_state == LIVE_FP_REGISTERS
703 || (is_active && *fp_state == NOTHING_SPECIAL))
704 use_beneath = true;
705 }
706 else
707 use_beneath = is_active;
708
709 if (use_beneath)
710 {
713 }
714 else
715 arch_ops->fetch_register (regcache, i);
716 }
717 }
718 else
720}
721
722void
724 int regnum)
725{
726 ptid_t ptid = regcache->ptid ();
727
728 if (runtime_initialized () && is_ravenscar_task (ptid))
729 {
730 struct gdbarch *gdbarch = regcache->arch ();
731 bool is_active = task_is_currently_active (ptid);
733 gdb::optional<fpu_state> fp_state;
734
735 int low_reg = regnum == -1 ? 0 : regnum;
736 int high_reg = regnum == -1 ? gdbarch_num_regs (gdbarch) : regnum + 1;
737
738 ptid_t base = get_base_thread_from_ravenscar_task (ptid);
739 for (int i = low_reg; i < high_reg; ++i)
740 {
741 bool use_beneath = false;
742 if (arch_ops->is_fp_register (i))
743 {
744 if (!fp_state.has_value ())
745 fp_state = get_fpu_state (regcache, arch_ops);
746 if (*fp_state == NO_FP_REGISTERS)
747 continue;
748 if (*fp_state == LIVE_FP_REGISTERS
749 || (is_active && *fp_state == NOTHING_SPECIAL))
750 use_beneath = true;
751 }
752 else
753 use_beneath = is_active;
754
755 if (use_beneath)
756 {
759 }
760 else
761 arch_ops->store_register (regcache, i);
762 }
763 }
764 else
766}
767
768void
770{
771 ptid_t ptid = regcache->ptid ();
772
773 if (runtime_initialized () && is_ravenscar_task (ptid))
774 {
775 if (task_is_currently_active (ptid))
776 {
777 ptid_t base = get_base_thread_from_ravenscar_task (ptid);
780 }
781 else
782 {
783 /* Nothing. */
784 }
785 }
786 else
788}
789
790/* Implement the to_stopped_by_sw_breakpoint target_ops "method". */
791
792bool
799
800/* Implement the to_stopped_by_hw_breakpoint target_ops "method". */
801
802bool
809
810/* Implement the to_stopped_by_watchpoint target_ops "method". */
811
812bool
819
820/* Implement the to_stopped_data_address target_ops "method". */
821
822bool
829
830void
832{
833 m_base_ptid = null_ptid;
834 target_ops *beneath = this->beneath ();
836 beneath->mourn_inferior ();
837}
838
839/* Implement the to_core_of_thread target_ops "method". */
840
841int
848
849/* Implement the target xfer_partial method. */
850
853 const char *annex,
854 gdb_byte *readbuf,
855 const gdb_byte *writebuf,
856 ULONGEST offset, ULONGEST len,
857 ULONGEST *xfered_len)
858{
859 scoped_restore save_ptid = make_scoped_restore (&inferior_ptid);
860 /* Calling get_base_thread_from_ravenscar_task can read memory from
861 the inferior. However, that function is written to prefer our
862 internal map, so it should not result in recursive calls in
863 practice. */
865 return beneath ()->xfer_partial (object, annex, readbuf, writebuf,
866 offset, len, xfered_len);
867}
868
869/* Observer on inferior_created: push ravenscar thread stratum if needed. */
870
871static void
873{
874 const char *err_msg;
875
879 return;
880
881 err_msg = ada_get_tcb_types_info ();
882 if (err_msg != NULL)
883 {
884 warning (_("%s. Task/thread support disabled."), err_msg);
885 return;
886 }
887
889 inf->push_target (target_ops_up (rtarget));
890 thread_info *thr = rtarget->add_active_thread ();
891 if (thr != nullptr)
892 switch_to_thread (thr);
893}
894
895ptid_t
897{
898 return ptid_t (m_base_ptid.pid (), 0, thread);
899}
900
/* Command-list for the "set/show ravenscar" prefix command.  */
static struct cmd_list_element *set_ravenscar_list;
static struct cmd_list_element *show_ravenscar_list;

905/* Implement the "show ravenscar task-switching" command. */
906
907static void
909 struct cmd_list_element *c,
910 const char *value)
911{
913 gdb_printf (file, _("\
914Support for Ravenscar task/thread switching is enabled\n"));
915 else
916 gdb_printf (file, _("\
917Support for Ravenscar task/thread switching is disabled\n"));
918}
919
920/* Module startup initialization function, automagically called by
921 init.c. */
922
924void
926{
927 /* Notice when the inferior is created in order to push the
928 ravenscar ops if needed. */
930 "ravenscar-thread");
931
933 ("ravenscar", no_class,
934 _("Prefix command for changing Ravenscar-specific settings."),
935 _("Prefix command for showing Ravenscar-specific settings."),
937 &setlist, &showlist);
938
939 add_setshow_boolean_cmd ("task-switching", class_obscure,
941Enable or disable support for GNAT Ravenscar tasks."), _("\
942Show whether support for GNAT Ravenscar tasks is enabled."),
943 _("\
944Enable or disable support for task/thread switching with the GNAT\n\
945Ravenscar run-time library for bareboard configuration."),
948}
int regnum
struct ada_task_info * ada_get_task_info_from_ptid(ptid_t ptid)
Definition ada-tasks.c:392
const char * ada_get_tcb_types_info(void)
Definition ada-tasks.c:503
void iterate_over_live_ada_tasks(ada_task_list_iterator_ftype iterator)
Definition ada-tasks.c:412
struct gdbarch * target_gdbarch(void)
int unpush_target(struct target_ops *t)
Definition inferior.c:96
thread_info * find_thread(ptid_t ptid)
gdbarch * arch() const
Definition regcache.c:231
void raw_collect(int regnum, void *buf) const override
Definition regcache.c:1127
void raw_supply(int regnum, const void *buf) override
Definition regcache.c:1062
ptid_t ptid() const
Definition regcache.h:408
void set_ptid(const ptid_t ptid)
Definition regcache.h:415
temporarily_change_regcache_ptid(struct regcache *regcache, ptid_t new_ptid)
ptid_t ptid
Definition gdbthread.h:259
struct cmd_list_element * showlist
Definition cli-cmds.c:127
struct cmd_list_element * setlist
Definition cli-cmds.c:119
set_show_commands add_setshow_prefix_cmd(const char *name, command_class theclass, const char *set_doc, const char *show_doc, cmd_list_element **set_subcommands_list, cmd_list_element **show_subcommands_list, cmd_list_element **set_list, cmd_list_element **show_list)
Definition cli-decode.c:428
set_show_commands add_setshow_boolean_cmd(const char *name, enum command_class theclass, bool *var, const char *set_doc, const char *show_doc, const char *help_doc, cmd_func_ftype *set_func, show_value_ftype *show_func, struct cmd_list_element **set_list, struct cmd_list_element **show_list)
Definition cli-decode.c:809
@ class_obscure
Definition command.h:64
@ no_class
Definition command.h:53
void write_memory(CORE_ADDR memaddr, const bfd_byte *myaddr, ssize_t len)
Definition corefile.c:347
void read_memory(CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
Definition corefile.c:238
CORE_ADDR extract_typed_address(const gdb_byte *buf, struct type *type)
Definition findvar.c:152
@ sp_regnum
Definition frv-tdep.h:35
int gdbarch_num_regs(struct gdbarch *gdbarch)
Definition gdbarch.c:1930
int gdbarch_sp_regnum(struct gdbarch *gdbarch)
Definition gdbarch.c:2037
struct ravenscar_arch_ops * gdbarch_ravenscar_ops(struct gdbarch *gdbarch)
Definition gdbarch.c:5089
void switch_to_thread(struct thread_info *thr)
Definition thread.c:1360
struct type * lookup_pointer_type(struct type *type)
Definition gdbtypes.c:430
const struct builtin_type * builtin_type(struct gdbarch *gdbarch)
Definition gdbtypes.c:6168
mach_port_t mach_port_t name mach_port_t mach_port_t name kern_return_t int status
Definition gnu-nat.c:1790
size_t size
Definition go32-nat.c:239
ptid_t inferior_ptid
Definition infcmd.c:74
struct inferior * current_inferior(void)
Definition inferior.c:55
struct bound_minimal_symbol lookup_minimal_symbol(const char *name, const char *sfile, struct objfile *objf)
Definition minsyms.c:363
struct bound_minimal_symbol lookup_minimal_symbol(const char *, const char *, struct objfile *)
Definition minsyms.c:363
observable< inferior * > inferior_created
static process_stratum_target * as_process_stratum_target(target_ops *target)
static void show_ravenscar_task_switching_command(struct ui_file *file, int from_tty, struct cmd_list_element *c, const char *value)
static bool ravenscar_task_support
static bool has_ravenscar_runtime()
static const target_info ravenscar_target_info
void _initialize_ravenscar()
static void ravenscar_inferior_created(inferior *inf)
static struct cmd_list_element * set_ravenscar_list
static const char first_task_name[]
static bool is_ravenscar_task(ptid_t ptid)
static CORE_ADDR get_running_thread_id(int cpu)
static const char running_thread_name[]
static struct bound_minimal_symbol get_running_thread_msymbol()
static const char ravenscar_runtime_initializer[]
static const char known_tasks_name[]
static struct cmd_list_element * show_ravenscar_list
int register_size(struct gdbarch *gdbarch, int regnum)
Definition regcache.c:170
enum register_status regcache_cooked_read_unsigned(struct regcache *regcache, int regnum, ULONGEST *val)
Definition regcache.c:796
ptid_t ptid
Definition ada-lang.h:120
CORE_ADDR value_address() const
Definition minsyms.h:41
struct minimal_symbol * minsym
Definition minsyms.h:49
struct type * builtin_data_ptr
Definition gdbtypes.h:2135
Definition gnu-nat.c:153
bool on_demand_fp() const
const int first_stack_register
void supply_one_register(struct regcache *regcache, int regnum, CORE_ADDR descriptor, CORE_ADDR stack_base) const
const gdb::array_view< const int > offsets
bool is_fp_register(int regnum) const
void store_one_register(struct regcache *regcache, int regnum, CORE_ADDR descriptor, CORE_ADDR stack_base) const
CORE_ADDR get_stack_base(struct regcache *) const
int get_fpu_context_offset() const
const int last_stack_register
int get_v_init_offset() const
void fetch_register(struct regcache *recache, int regnum) const
void store_register(struct regcache *recache, int regnum) const
void set_base_thread_from_ravenscar_task(ptid_t ptid)
void prepare_to_store(struct regcache *) override
void mourn_inferior() override
std::unordered_map< ULONGEST, int > m_cpu_map
bool task_is_currently_active(ptid_t ptid)
enum target_xfer_status xfer_partial(enum target_object object, const char *annex, gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST offset, ULONGEST len, ULONGEST *xfered_len) override
std::string pid_to_str(ptid_t) override
strata stratum() const override
fpu_state get_fpu_state(struct regcache *regcache, const ravenscar_arch_ops *arch_ops)
bool stopped_by_sw_breakpoint() override
ptid_t wait(ptid_t, struct target_waitstatus *, target_wait_flags) override
void resume(ptid_t, int, enum gdb_signal) override
bool stopped_by_hw_breakpoint() override
ptid_t get_ada_task_ptid(long lwp, ULONGEST thread) override
int get_thread_base_cpu(ptid_t ptid)
void add_thread(struct ada_task_info *task)
ptid_t get_base_thread_from_ravenscar_task(ptid_t ptid)
void store_registers(struct regcache *, int) override
thread_info * add_active_thread()
bool stopped_data_address(CORE_ADDR *) override
void fetch_registers(struct regcache *, int) override
void update_thread_list() override
int core_of_thread(ptid_t ptid) override
bool stopped_by_watchpoint() override
const target_info & info() const override
struct btrace_target_info * enable_btrace(thread_info *tp, const struct btrace_config *conf) override
bool thread_alive(ptid_t ptid) override
virtual ptid_t wait(ptid_t, struct target_waitstatus *, target_wait_flags options) TARGET_DEFAULT_FUNC(default_target_wait)
virtual int core_of_thread(ptid_t ptid) TARGET_DEFAULT_RETURN(-1)
virtual std::string pid_to_str(ptid_t) TARGET_DEFAULT_FUNC(default_pid_to_str)
virtual void fetch_registers(struct regcache *, int) TARGET_DEFAULT_IGNORE()
virtual bool stopped_by_sw_breakpoint() TARGET_DEFAULT_RETURN(false)
virtual bool stopped_by_watchpoint() TARGET_DEFAULT_RETURN(false)
target_ops * beneath() const
Definition target.c:3041
virtual bool stopped_data_address(CORE_ADDR *) TARGET_DEFAULT_RETURN(false)
virtual struct btrace_target_info * enable_btrace(thread_info *tp, const struct btrace_config *conf) TARGET_DEFAULT_NORETURN(tcomplain())
virtual void store_registers(struct regcache *, int) TARGET_DEFAULT_NORETURN(noprocess())
virtual enum target_xfer_status xfer_partial(enum target_object object, const char *annex, gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST offset, ULONGEST len, ULONGEST *xfered_len) TARGET_DEFAULT_RETURN(TARGET_XFER_E_IO)
virtual bool stopped_by_hw_breakpoint() TARGET_DEFAULT_RETURN(false)
virtual void resume(ptid_t, int TARGET_DEBUG_PRINTER(target_debug_print_step), enum gdb_signal) TARGET_DEFAULT_NORETURN(noprocess())
virtual void prepare_to_store(struct regcache *) TARGET_DEFAULT_NORETURN(noprocess())
virtual void mourn_inferior() TARGET_DEFAULT_FUNC(default_mourn_inferior)
ULONGEST length() const
Definition gdbtypes.h:983
Definition value.h:130
target_xfer_status
Definition target.h:219
target_object
Definition target.h:143
strata
Definition target.h:94
@ thread_stratum
Definition target.h:98
std::unique_ptr< target_ops, target_ops_deleter > target_ops_up
Definition target.h:1347
void gdb_printf(struct ui_file *stream, const char *format,...)
Definition utils.c:1886
struct value * value_ptradd(struct value *arg1, LONGEST arg2)
Definition valarith.c:79
struct value * value_ind(struct value *arg1)
Definition valops.c:1630
LONGEST value_as_long(struct value *val)
Definition value.c:2554
struct value * value_from_pointer(struct type *type, CORE_ADDR addr)
Definition value.c:3500
@ TARGET_WAITKIND_SIGNALLED
Definition waitstatus.h:40
@ TARGET_WAITKIND_EXITED
Definition waitstatus.h:32