/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
 *
 * lttng-context-callstack-stackwalk-impl.h
 *
 * LTTng callstack event context, stackwalk implementation. Targets
 * kernels and architectures using the stacktrace common infrastructure
 * introduced in the upstream Linux kernel by commit 214d8ca6ee
 * "stacktrace: Provide common infrastructure" (merged in Linux 5.2,
 * then gradually introduced within architectures).
 *
 * Copyright (C) 2014-2019 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (C) 2014 Francis Giraldeau <francis.giraldeau@gmail.com>
 */
/*
 * Upper bound on the number of stack entries saved per callstack
 * sample. A completely filled array (nr_entries == MAX_ENTRIES) is
 * flagged at record time with a ULONG_MAX delimiter to show that the
 * stack is incomplete.
 */
#define MAX_ENTRIES 128

/*
 * Callstack context modes: sample either the kernel or the userspace
 * call stack of the current thread.
 */
enum lttng_cs_ctx_modes {
	CALLSTACK_KERNEL = 0,
	CALLSTACK_USER = 1,
	/* NOTE(review): enumerator list reconstructed from uses below —
	 * confirm against upstream lttng-modules. */
	NR_CALLSTACK_MODES,
};
23 struct lttng_stack_trace
{
24 unsigned long entries
[MAX_ENTRIES
];
25 unsigned int nr_entries
;
29 struct lttng_stack_trace stack_trace
[RING_BUFFER_MAX_NESTING
];
33 struct lttng_cs __percpu
*cs_percpu
;
34 enum lttng_cs_ctx_modes mode
;
/*
 * Pointers to the kernel's stacktrace save functions, resolved through
 * kallsyms at init time (see init_type_callstack_*) since they may not
 * be exported to modules. Signatures match stack_trace_save() and
 * stack_trace_save_user().
 * NOTE(review): the kernel variant's third parameter name (skipnr) is
 * reconstructed from the stack_trace_save() prototype — the original
 * line is truncated in this view.
 */
static unsigned int (*save_func_kernel)(unsigned long *store,
		unsigned int size, unsigned int skipnr);
static unsigned int (*save_func_user)(unsigned long *store,
		unsigned int size);
44 int init_type_callstack_kernel(void)
47 const char *func_name
= "stack_trace_save";
51 func
= kallsyms_lookup_funcptr(func_name
);
53 printk(KERN_WARNING
"LTTng: symbol lookup failed: %s\n",
57 save_func_kernel
= (void *) func
;
62 int init_type_callstack_user(void)
65 const char *func_name
= "stack_trace_save_user";
69 func
= kallsyms_lookup_funcptr(func_name
);
71 printk(KERN_WARNING
"LTTng: symbol lookup failed: %s\n",
75 save_func_user
= (void *) func
;
80 int init_type(enum lttng_cs_ctx_modes mode
)
83 case CALLSTACK_KERNEL
:
84 return init_type_callstack_kernel();
86 return init_type_callstack_user();
93 void lttng_cs_set_init(struct lttng_cs __percpu
*cs_set
)
97 /* Keep track of nesting inside userspace callstack context code */
98 DEFINE_PER_CPU(int, callstack_user_nesting
);
101 struct lttng_stack_trace
*stack_trace_context(struct lttng_kernel_ctx_field
*field
,
102 struct lib_ring_buffer_ctx
*ctx
)
104 int buffer_nesting
, cs_user_nesting
;
106 struct field_data
*fdata
= field
->priv
;
109 * Do not gather the userspace callstack context when the event was
110 * triggered by the userspace callstack context saving mechanism.
112 cs_user_nesting
= per_cpu(callstack_user_nesting
, ctx
->cpu
);
114 if (fdata
->mode
== CALLSTACK_USER
&& cs_user_nesting
>= 1)
118 * get_cpu() is not required, preemption is already
119 * disabled while event is written.
121 * max nesting is checked in lib_ring_buffer_get_cpu().
122 * Check it again as a safety net.
124 cs
= per_cpu_ptr(fdata
->cs_percpu
, ctx
->cpu
);
125 buffer_nesting
= per_cpu(lib_ring_buffer_nesting
, ctx
->cpu
) - 1;
126 if (buffer_nesting
>= RING_BUFFER_MAX_NESTING
)
129 return &cs
->stack_trace
[buffer_nesting
];
133 size_t lttng_callstack_length_get_size(size_t offset
, struct lttng_kernel_ctx_field
*field
,
134 struct lib_ring_buffer_ctx
*ctx
,
135 struct lttng_channel
*chan
)
137 size_t orig_offset
= offset
;
139 offset
+= lib_ring_buffer_align(offset
, lttng_alignof(unsigned int));
140 offset
+= sizeof(unsigned int);
141 return offset
- orig_offset
;
145 * In order to reserve the correct size, the callstack is computed. The
146 * resulting callstack is saved to be accessed in the record step.
149 size_t lttng_callstack_sequence_get_size(size_t offset
, struct lttng_kernel_ctx_field
*field
,
150 struct lib_ring_buffer_ctx
*ctx
,
151 struct lttng_channel
*chan
)
153 struct lttng_stack_trace
*trace
;
154 struct field_data
*fdata
= field
->priv
;
155 size_t orig_offset
= offset
;
157 /* do not write data if no space is available */
158 trace
= stack_trace_context(field
, ctx
);
159 if (unlikely(!trace
)) {
160 offset
+= lib_ring_buffer_align(offset
, lttng_alignof(unsigned long));
161 return offset
- orig_offset
;
164 /* reset stack trace, no need to clear memory */
165 trace
->nr_entries
= 0;
167 switch (fdata
->mode
) {
168 case CALLSTACK_KERNEL
:
169 /* do the real work and reserve space */
170 trace
->nr_entries
= save_func_kernel(trace
->entries
,
174 ++per_cpu(callstack_user_nesting
, ctx
->cpu
);
175 /* do the real work and reserve space */
176 trace
->nr_entries
= save_func_user(trace
->entries
,
178 per_cpu(callstack_user_nesting
, ctx
->cpu
)--;
185 * If the array is filled, add our own marker to show that the
186 * stack is incomplete.
188 offset
+= lib_ring_buffer_align(offset
, lttng_alignof(unsigned long));
189 offset
+= sizeof(unsigned long) * trace
->nr_entries
;
190 /* Add our own ULONG_MAX delimiter to show incomplete stack. */
191 if (trace
->nr_entries
== MAX_ENTRIES
)
192 offset
+= sizeof(unsigned long);
193 return offset
- orig_offset
;
197 void lttng_callstack_length_record(struct lttng_kernel_ctx_field
*field
,
198 struct lib_ring_buffer_ctx
*ctx
,
199 struct lttng_channel
*chan
)
201 struct lttng_stack_trace
*trace
= stack_trace_context(field
, ctx
);
202 unsigned int nr_seq_entries
;
204 lib_ring_buffer_align_ctx(ctx
, lttng_alignof(unsigned int));
205 if (unlikely(!trace
)) {
208 nr_seq_entries
= trace
->nr_entries
;
209 if (trace
->nr_entries
== MAX_ENTRIES
)
212 chan
->ops
->event_write(ctx
, &nr_seq_entries
, sizeof(unsigned int));
216 void lttng_callstack_sequence_record(struct lttng_kernel_ctx_field
*field
,
217 struct lib_ring_buffer_ctx
*ctx
,
218 struct lttng_channel
*chan
)
220 struct lttng_stack_trace
*trace
= stack_trace_context(field
, ctx
);
221 unsigned int nr_seq_entries
;
223 lib_ring_buffer_align_ctx(ctx
, lttng_alignof(unsigned long));
224 if (unlikely(!trace
)) {
227 nr_seq_entries
= trace
->nr_entries
;
228 if (trace
->nr_entries
== MAX_ENTRIES
)
230 chan
->ops
->event_write(ctx
, trace
->entries
,
231 sizeof(unsigned long) * trace
->nr_entries
);
232 /* Add our own ULONG_MAX delimiter to show incomplete stack. */
233 if (trace
->nr_entries
== MAX_ENTRIES
) {
234 unsigned long delim
= ULONG_MAX
;
236 chan
->ops
->event_write(ctx
, &delim
, sizeof(unsigned long));