/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
 *
 * lttng-context-callstack-stackwalk-impl.h
 *
 * LTTng callstack event context, stackwalk implementation. Targets
 * kernels and architectures using the stacktrace common infrastructure
 * introduced in the upstream Linux kernel by commit 214d8ca6ee
 * "stacktrace: Provide common infrastructure" (merged in Linux 5.2,
 * then gradually introduced within architectures).
 *
 * Copyright (C) 2014-2019 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (C) 2014 Francis Giraldeau <francis.giraldeau@gmail.com>
 */
/* Maximum number of stack frames recorded per callstack capture. */
#define MAX_ENTRIES 128
/*
 * Which address space the callstack context samples: the kernel stack or
 * the interrupted userspace stack. Enumerators are referenced by
 * init_type() and stack_trace_context() below.
 */
enum lttng_cs_ctx_modes {
	CALLSTACK_KERNEL = 0,
	CALLSTACK_USER = 1,
	NR_CALLSTACK_MODES,
};
23 struct lttng_stack_trace
{
24 unsigned long entries
[MAX_ENTRIES
];
25 unsigned int nr_entries
;
29 struct lttng_stack_trace stack_trace
[RING_BUFFER_MAX_NESTING
];
33 struct lttng_cs __percpu
*cs_percpu
;
34 enum lttng_cs_ctx_modes mode
;
/*
 * Function pointers resolved at init time through kallsyms, because
 * stack_trace_save()/stack_trace_save_user() are not exported to modules.
 * The kernel variant takes an extra skipnr argument (frames to skip);
 * signatures match the upstream <linux/stacktrace.h> prototypes.
 */
static unsigned int (*save_func_kernel)(unsigned long *store,
				unsigned int size, unsigned int skipnr);
static unsigned int (*save_func_user)(unsigned long *store,
				unsigned int size);
44 int init_type_callstack_kernel(void)
47 const char *func_name
= "stack_trace_save";
51 func
= kallsyms_lookup_funcptr(func_name
);
53 printk(KERN_WARNING
"LTTng: symbol lookup failed: %s\n",
57 save_func_kernel
= (void *) func
;
62 int init_type_callstack_user(void)
65 const char *func_name
= "stack_trace_save_user";
69 func
= kallsyms_lookup_funcptr(func_name
);
71 printk(KERN_WARNING
"LTTng: symbol lookup failed: %s\n",
75 save_func_user
= (void *) func
;
80 int init_type(enum lttng_cs_ctx_modes mode
)
83 case CALLSTACK_KERNEL
:
84 return init_type_callstack_kernel();
86 return init_type_callstack_user();
93 void lttng_cs_set_init(struct lttng_cs __percpu
*cs_set
)
97 /* Keep track of nesting inside userspace callstack context code */
98 DEFINE_PER_CPU(int, callstack_user_nesting
);
101 * Note: these callbacks expect to be invoked with preemption disabled across
102 * get_size and record due to its use of a per-cpu stack.
105 struct lttng_stack_trace
*stack_trace_context(struct field_data
*fdata
, int cpu
)
107 int buffer_nesting
, cs_user_nesting
;
111 * Do not gather the userspace callstack context when the event was
112 * triggered by the userspace callstack context saving mechanism.
114 cs_user_nesting
= per_cpu(callstack_user_nesting
, cpu
);
116 if (fdata
->mode
== CALLSTACK_USER
&& cs_user_nesting
>= 1)
120 * get_cpu() is not required, preemption is already
121 * disabled while event is written.
123 * max nesting is checked in lib_ring_buffer_get_cpu().
124 * Check it again as a safety net.
126 cs
= per_cpu_ptr(fdata
->cs_percpu
, cpu
);
127 buffer_nesting
= per_cpu(lib_ring_buffer_nesting
, cpu
) - 1;
128 if (buffer_nesting
>= RING_BUFFER_MAX_NESTING
)
131 return &cs
->stack_trace
[buffer_nesting
];
135 size_t lttng_callstack_length_get_size(void *priv
, struct lttng_kernel_probe_ctx
*probe_ctx
, size_t offset
)
137 size_t orig_offset
= offset
;
139 offset
+= lib_ring_buffer_align(offset
, lttng_alignof(unsigned int));
140 offset
+= sizeof(unsigned int);
141 return offset
- orig_offset
;
145 * In order to reserve the correct size, the callstack is computed. The
146 * resulting callstack is saved to be accessed in the record step.
149 size_t lttng_callstack_sequence_get_size(void *priv
, struct lttng_kernel_probe_ctx
*probe_ctx
, size_t offset
)
151 struct lttng_stack_trace
*trace
;
152 struct field_data
*fdata
= (struct field_data
*) priv
;
153 size_t orig_offset
= offset
;
154 int cpu
= smp_processor_id();
156 /* do not write data if no space is available */
157 trace
= stack_trace_context(fdata
, cpu
);
158 if (unlikely(!trace
)) {
159 offset
+= lib_ring_buffer_align(offset
, lttng_alignof(unsigned long));
160 return offset
- orig_offset
;
163 /* reset stack trace, no need to clear memory */
164 trace
->nr_entries
= 0;
166 switch (fdata
->mode
) {
167 case CALLSTACK_KERNEL
:
168 /* do the real work and reserve space */
169 trace
->nr_entries
= save_func_kernel(trace
->entries
,
173 ++per_cpu(callstack_user_nesting
, cpu
);
174 /* do the real work and reserve space */
175 trace
->nr_entries
= save_func_user(trace
->entries
,
177 per_cpu(callstack_user_nesting
, cpu
)--;
184 * If the array is filled, add our own marker to show that the
185 * stack is incomplete.
187 offset
+= lib_ring_buffer_align(offset
, lttng_alignof(unsigned long));
188 offset
+= sizeof(unsigned long) * trace
->nr_entries
;
189 /* Add our own ULONG_MAX delimiter to show incomplete stack. */
190 if (trace
->nr_entries
== MAX_ENTRIES
)
191 offset
+= sizeof(unsigned long);
192 return offset
- orig_offset
;
196 void lttng_callstack_length_record(void *priv
, struct lttng_kernel_probe_ctx
*probe_ctx
,
197 struct lttng_kernel_ring_buffer_ctx
*ctx
,
198 struct lttng_kernel_channel_buffer
*chan
)
200 int cpu
= ctx
->priv
.reserve_cpu
;
201 struct field_data
*fdata
= (struct field_data
*) priv
;
202 struct lttng_stack_trace
*trace
= stack_trace_context(fdata
, cpu
);
203 unsigned int nr_seq_entries
;
205 if (unlikely(!trace
)) {
208 nr_seq_entries
= trace
->nr_entries
;
209 if (trace
->nr_entries
== MAX_ENTRIES
)
212 chan
->ops
->event_write(ctx
, &nr_seq_entries
, sizeof(unsigned int), lttng_alignof(unsigned int));
216 void lttng_callstack_sequence_record(void *priv
, struct lttng_kernel_probe_ctx
*probe_ctx
,
217 struct lttng_kernel_ring_buffer_ctx
*ctx
,
218 struct lttng_kernel_channel_buffer
*chan
)
220 int cpu
= ctx
->priv
.reserve_cpu
;
221 struct field_data
*fdata
= (struct field_data
*) priv
;
222 struct lttng_stack_trace
*trace
= stack_trace_context(fdata
, cpu
);
223 unsigned int nr_seq_entries
;
225 if (unlikely(!trace
)) {
226 /* We need to align even if there are 0 elements. */
227 lib_ring_buffer_align_ctx(ctx
, lttng_alignof(unsigned long));
230 nr_seq_entries
= trace
->nr_entries
;
231 if (trace
->nr_entries
== MAX_ENTRIES
)
233 chan
->ops
->event_write(ctx
, trace
->entries
,
234 sizeof(unsigned long) * trace
->nr_entries
, lttng_alignof(unsigned long));
235 /* Add our own ULONG_MAX delimiter to show incomplete stack. */
236 if (trace
->nr_entries
== MAX_ENTRIES
) {
237 unsigned long delim
= ULONG_MAX
;
239 chan
->ops
->event_write(ctx
, &delim
, sizeof(unsigned long), 1);