/*
 * lttng-context-callstack.c
 *
 * LTTng callstack event context.
 *
 * Copyright (C) 2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (C) 2014 Francis Giraldeau <francis.giraldeau@gmail.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA
 *
 * The callstack context can be added to any kernel event. It records
 * either the kernel or the userspace callstack, up to a maximum depth.
 * The context is a CTF sequence, so it uses only the space required for
 * the actual number of callstack entries.
 *
 * It allocates callstack buffers per CPU, one for each of up to 4
 * interrupt nesting levels. This nesting limit is the same as the one
 * defined in the ring buffer. The feature therefore uses a fixed amount
 * of memory, proportional to the number of CPUs:
 *
 *   size = cpus * nest * depth * sizeof(unsigned long)
 *
 * which is about 800 bytes per CPU on a 64-bit host with a depth of 25.
 * The allocation is done at initialization to avoid memory allocation
 * overhead while tracing, using a shallow stack.
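 *
 * For example (illustrative arithmetic only), with the default depth of
 * 25 entries, 4 nesting levels and 8-byte longs:
 *
 *   4 * 25 * 8 = 800 bytes per CPU
 *
 * so a hypothetical 8-CPU host would pre-allocate about 8 * 800 = 6400
 * bytes of callstack buffers for one instance of this context.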
 *
 * The kernel callstack is recovered using save_stack_trace(), and the
 * userspace callstack uses save_stack_trace_user(). Both rely on frame
 * pointers. These are usually available for the kernel, but the compiler
 * option -fomit-frame-pointer, frequently used in popular Linux
 * distributions, may make the userspace callstack unreliable; this is a
 * known limitation of the approach. If frame pointers are not available,
 * no error is produced, but the callstack will be empty. The feature is
 * still provided, because it works well for runtime environments that
 * keep frame pointers. In the future, unwind support and/or last branch
 * record may provide a solution to this problem.
 *
 * The symbol name resolution is left to the trace reader.
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/utsname.h>
#include <linux/stacktrace.h>
#include <linux/spinlock.h>
#include "lttng-events.h"
#include "wrapper/ringbuffer/backend.h"
#include "wrapper/ringbuffer/frontend.h"
#include "wrapper/vmalloc.h"
#include "lttng-tracer.h"

#define MAX_ENTRIES 25 /* BUG: saving more than 30 entries causes trace corruption */

struct lttng_cs {
	struct stack_trace items[RING_BUFFER_MAX_NESTING];
};

struct field_data {
	struct lttng_cs __percpu *cs_percpu;
	int mode;
};

struct lttng_cs_type {
	const char *name;
	const char *save_func_name;
	void (*save_func)(struct stack_trace *trace);
};

enum lttng_cs_ctx_modes {
	CALLSTACK_KERNEL = 0,
	CALLSTACK_USER = 1,
};

static struct lttng_cs_type cs_types[] = {
	{
		.name		= "callstack_kernel",
		.save_func_name	= "save_stack_trace",
		.save_func	= NULL,
	},
	{
		.name		= "callstack_user",
		.save_func_name	= "save_stack_trace_user",
		.save_func	= NULL,
	},
};

static
int init_type(int mode)
{
	unsigned long func;

	if (cs_types[mode].save_func)
		return 0;
	func = kallsyms_lookup_funcptr(cs_types[mode].save_func_name);
	if (!func) {
		printk(KERN_WARNING "LTTng: symbol lookup failed: %s\n",
				cs_types[mode].save_func_name);
		return -EINVAL;
	}
	cs_types[mode].save_func = (void *) func;
	return 0;
}

static
struct stack_trace *stack_trace_context(struct lttng_ctx_field *field,
					struct lib_ring_buffer_ctx *ctx)
{
	int nesting;
	struct lttng_cs *cs;
	struct field_data *fdata = field->private;

	/*
	 * get_cpu() is not required, preemption is already
	 * disabled while event is written.
	 *
	 * max nesting is checked in lib_ring_buffer_get_cpu().
	 * Check it again as a safety net.
	 */
	cs = per_cpu_ptr(fdata->cs_percpu, ctx->cpu);
	nesting = per_cpu(lib_ring_buffer_nesting, ctx->cpu) - 1;
	if (nesting >= RING_BUFFER_MAX_NESTING) {
		return NULL;
	}
	return &cs->items[nesting];
}

/*
 * In order to reserve the correct size, the callstack is computed. The
 * resulting callstack is saved to be accessed in the record step.
 */
static
size_t lttng_callstack_get_size(size_t offset, struct lttng_ctx_field *field,
				struct lib_ring_buffer_ctx *ctx,
				struct lttng_channel *chan)
{
	size_t size = 0;
	struct stack_trace *trace;
	struct field_data *fdata = field->private;

	/* do not write data if no space is available */
	trace = stack_trace_context(field, ctx);
	if (!trace)
		return 0;

	/* reset stack trace, no need to clear memory */
	trace->nr_entries = 0;

	/* do the real work and reserve space */
	cs_types[fdata->mode].save_func(trace);
	size += lib_ring_buffer_align(offset, lttng_alignof(unsigned int));
	size += sizeof(unsigned int);
	size += lib_ring_buffer_align(offset, lttng_alignof(unsigned long));
	size += sizeof(unsigned long) * trace->nr_entries;
	return size;
}

static
void lttng_callstack_record(struct lttng_ctx_field *field,
			struct lib_ring_buffer_ctx *ctx,
			struct lttng_channel *chan)
{
	struct stack_trace *trace = stack_trace_context(field, ctx);

	if (!trace)
		return;
	lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned int));
	chan->ops->event_write(ctx, &trace->nr_entries, sizeof(unsigned int));
	lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned long));
	chan->ops->event_write(ctx, trace->entries,
			sizeof(unsigned long) * trace->nr_entries);
}
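
/*
 * Recorded layout, as implied by the get_size/record pair above (sketch,
 * numbers are illustrative): an unsigned int entry count, padding up to the
 * alignment of unsigned long, then nr_entries addresses stored as unsigned
 * long. On a 64-bit host, a callstack of 10 entries thus takes
 * 4 (count) + 4 (padding) + 10 * 8 (entries) = 88 bytes in the ring buffer.
 */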

static
void field_data_free(struct field_data *fdata)
{
	int cpu, i;
	struct lttng_cs *cs;

	if (!fdata)
		return;
	for_each_possible_cpu(cpu) {
		cs = per_cpu_ptr(fdata->cs_percpu, cpu);
		for (i = 0; i < RING_BUFFER_MAX_NESTING; i++) {
			kfree(cs->items[i].entries);
		}
	}
	free_percpu(fdata->cs_percpu);
	kfree(fdata);
}

static
struct field_data __percpu *field_data_create(unsigned int entries, int type)
{
	int cpu, i;
	struct stack_trace *item;
	struct lttng_cs *cs;
	struct lttng_cs __percpu *cs_set;
	struct field_data *fdata;

	fdata = kzalloc(sizeof(*fdata), GFP_KERNEL);
	if (!fdata)
		return NULL;
	cs_set = alloc_percpu(struct lttng_cs);
	if (!cs_set)
		goto error_alloc;

	fdata->cs_percpu = cs_set;
	for_each_possible_cpu(cpu) {
		cs = per_cpu_ptr(cs_set, cpu);
		for (i = 0; i < RING_BUFFER_MAX_NESTING; i++) {
			item = &cs->items[i];
			item->entries = kzalloc(sizeof(unsigned long) * entries,
						GFP_KERNEL);
			if (!item->entries)
				goto error_alloc;
			item->max_entries = entries;
		}
	}
	fdata->mode = type;
	return fdata;

error_alloc:
	field_data_free(fdata);
	return NULL;
}

static
void lttng_callstack_destroy(struct lttng_ctx_field *field)
{
	struct field_data *fdata = field->private;

	field_data_free(fdata);
}

static
int __lttng_add_callstack_generic(struct lttng_ctx **ctx, int mode)
{
	const char *ctx_name = cs_types[mode].name;
	struct lttng_ctx_field *field;
	struct field_data *fdata;
	int ret;

	ret = init_type(mode);
	if (ret)
		return ret;

	field = lttng_append_context(ctx);
	if (!field)
		return -ENOMEM;
	if (lttng_find_context(*ctx, ctx_name)) {
		printk("%s lttng_find_context failed\n", ctx_name);
		ret = -EEXIST;
		goto error_find;
	}
	fdata = field_data_create(MAX_ENTRIES, mode);
	if (!fdata) {
		ret = -ENOMEM;
		goto error_create;
	}

	field->event_field.name = ctx_name;
	field->event_field.type.atype = atype_sequence;
	field->event_field.type.u.sequence.elem_type.atype = atype_integer;
	field->event_field.type.u.sequence.elem_type.u.basic.integer.size = sizeof(unsigned long) * CHAR_BIT;
	field->event_field.type.u.sequence.elem_type.u.basic.integer.alignment = lttng_alignof(long) * CHAR_BIT;
	field->event_field.type.u.sequence.elem_type.u.basic.integer.signedness = lttng_is_signed_type(unsigned long);
	field->event_field.type.u.sequence.elem_type.u.basic.integer.reverse_byte_order = 0;
	field->event_field.type.u.sequence.elem_type.u.basic.integer.base = 16;
	field->event_field.type.u.sequence.elem_type.u.basic.integer.encoding = lttng_encode_none;

	field->event_field.type.u.sequence.length_type.atype = atype_integer;
	field->event_field.type.u.sequence.length_type.u.basic.integer.size = sizeof(unsigned int) * CHAR_BIT;
	field->event_field.type.u.sequence.length_type.u.basic.integer.alignment = lttng_alignof(unsigned int) * CHAR_BIT;
	field->event_field.type.u.sequence.length_type.u.basic.integer.signedness = lttng_is_signed_type(unsigned int);
	field->event_field.type.u.sequence.length_type.u.basic.integer.reverse_byte_order = 0;
	field->event_field.type.u.sequence.length_type.u.basic.integer.base = 10;
	field->event_field.type.u.sequence.length_type.u.basic.integer.encoding = lttng_encode_none;
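
	/*
	 * Conceptually (sketch only, field names illustrative), the context
	 * field declared above is a CTF sequence equivalent to:
	 *
	 *	struct {
	 *		unsigned int  length;            // base 10
	 *		unsigned long entries[length];   // addresses, base 16
	 *	};
	 */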

	field->get_size_arg = lttng_callstack_get_size;
	field->record = lttng_callstack_record;
	field->private = fdata;
	field->destroy = lttng_callstack_destroy;
	wrapper_vmalloc_sync_all();
	printk("lttng add-context %s\n", ctx_name);
	return 0;

error_create:
	field_data_free(fdata);
error_find:
	lttng_remove_context_field(ctx, field);
	return ret;
}

/**
 * lttng_add_callstack_to_ctx - add callstack event context
 *
 * @ctx: the lttng_ctx pointer to initialize
 * @type: the context type
 *
 * Supported callstack types:
 *	LTTNG_KERNEL_CONTEXT_CALLSTACK_KERNEL
 *		Records the callstack of the kernel
 *	LTTNG_KERNEL_CONTEXT_CALLSTACK_USER
 *		Records the callstack of the userspace program (from the kernel)
 *
 * Return 0 for success, or error code.
 */
int lttng_add_callstack_to_ctx(struct lttng_ctx **ctx, int type)
{
	switch (type) {
	case LTTNG_KERNEL_CONTEXT_CALLSTACK_KERNEL:
		return __lttng_add_callstack_generic(ctx, CALLSTACK_KERNEL);
	case LTTNG_KERNEL_CONTEXT_CALLSTACK_USER:
		return __lttng_add_callstack_generic(ctx, CALLSTACK_USER);
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(lttng_add_callstack_to_ctx);
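
/*
 * Example (sketch, error handling illustrative only): a kernel-side user
 * holding a struct lttng_ctx pointer could request both callstack contexts
 * with:
 *
 *	struct lttng_ctx *my_ctx = NULL;	// hypothetical context chain
 *	int ret;
 *
 *	ret = lttng_add_callstack_to_ctx(&my_ctx,
 *			LTTNG_KERNEL_CONTEXT_CALLSTACK_KERNEL);
 *	if (!ret)
 *		ret = lttng_add_callstack_to_ctx(&my_ctx,
 *				LTTNG_KERNEL_CONTEXT_CALLSTACK_USER);
 */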

MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Francis Giraldeau");
MODULE_DESCRIPTION("Linux Trace Toolkit Callstack Support");