/* SPDX-License-Identifier: (GPL-2.0 or LGPL-2.1)
 *
 * lttng-context-callstack-stackwalk-impl.h
 *
 * LTTng callstack event context, stackwalk implementation. Targets
 * kernels and architectures using the stacktrace common infrastructure
 * introduced in the upstream Linux kernel by commit 214d8ca6ee
 * "stacktrace: Provide common infrastructure" (merged in Linux 5.2,
 * then gradually introduced within architectures).
 *
 * Copyright (C) 2014-2019 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (C) 2014 Francis Giraldeau <francis.giraldeau@gmail.com>
 */

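/*
 * Upper bound on the number of stack entries captured for a single event.
 * When a stack reaches this limit, the get_size/record callbacks below
 * append a ULONG_MAX delimiter to flag the callstack as truncated.
 */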
#define MAX_ENTRIES 128

enum lttng_cs_ctx_modes {
	CALLSTACK_KERNEL = 0,
	CALLSTACK_USER = 1,
	NR_CALLSTACK_MODES,
};

struct lttng_stack_trace {
	unsigned long entries[MAX_ENTRIES];
	unsigned int nr_entries;
};

struct lttng_cs {
	struct lttng_stack_trace stack_trace[RING_BUFFER_MAX_NESTING];
};

struct field_data {
	struct lttng_cs __percpu *cs_percpu;
	enum lttng_cs_ctx_modes mode;
};

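/*
 * Pointers to the kernel's stack_trace_save() and stack_trace_save_user()
 * helpers. They are resolved at initialization time through kallsyms (see
 * init_type_callstack_kernel() and init_type_callstack_user() below) rather
 * than referenced directly.
 */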
static
unsigned int (*save_func_kernel)(unsigned long *store, unsigned int size,
		unsigned int skipnr);
static
unsigned int (*save_func_user)(unsigned long *store, unsigned int size);

static
const char *lttng_cs_ctx_mode_name(enum lttng_cs_ctx_modes mode)
{
	switch (mode) {
	case CALLSTACK_KERNEL:
		return "callstack_kernel";
	case CALLSTACK_USER:
		return "callstack_user";
	default:
		return NULL;
	}
}

static
int init_type_callstack_kernel(void)
{
	unsigned long func;
	const char *func_name = "stack_trace_save";

	if (save_func_kernel)
		return 0;
	func = kallsyms_lookup_funcptr(func_name);
	if (!func) {
		printk(KERN_WARNING "LTTng: symbol lookup failed: %s\n",
				func_name);
		return -EINVAL;
	}
	save_func_kernel = (void *) func;
	return 0;
}

static
int init_type_callstack_user(void)
{
	unsigned long func;
	const char *func_name = "stack_trace_save_user";

	if (save_func_user)
		return 0;
	func = kallsyms_lookup_funcptr(func_name);
	if (!func) {
		printk(KERN_WARNING "LTTng: symbol lookup failed: %s\n",
				func_name);
		return -EINVAL;
	}
	save_func_user = (void *) func;
	return 0;
}

static
int init_type(enum lttng_cs_ctx_modes mode)
{
	switch (mode) {
	case CALLSTACK_KERNEL:
		return init_type_callstack_kernel();
	case CALLSTACK_USER:
		return init_type_callstack_user();
	default:
		return -EINVAL;
	}
}

static
void lttng_cs_set_init(struct lttng_cs __percpu *cs_set)
{
	/* Nothing to initialize for the stackwalk implementation. */
}

/* Keep track of nesting inside userspace callstack context code */
DEFINE_PER_CPU(int, callstack_user_nesting);

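/*
 * Return the per-CPU stack trace buffer matching the current ring buffer
 * nesting level, or NULL when the callstack must not be gathered (userspace
 * callstack recursion, or nesting beyond RING_BUFFER_MAX_NESTING).
 */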
static
struct lttng_stack_trace *stack_trace_context(struct lttng_ctx_field *field,
					struct lib_ring_buffer_ctx *ctx)
{
	int buffer_nesting, cs_user_nesting;
	struct lttng_cs *cs;
	struct field_data *fdata = field->priv;

	/*
	 * Do not gather the userspace callstack context when the event was
	 * triggered by the userspace callstack context saving mechanism.
	 */
	cs_user_nesting = per_cpu(callstack_user_nesting, ctx->cpu);

	if (fdata->mode == CALLSTACK_USER && cs_user_nesting >= 1)
		return NULL;

	/*
	 * get_cpu() is not required, preemption is already
	 * disabled while the event is written.
	 *
	 * max nesting is checked in lib_ring_buffer_get_cpu().
	 * Check it again as a safety net.
	 */
	cs = per_cpu_ptr(fdata->cs_percpu, ctx->cpu);
	buffer_nesting = per_cpu(lib_ring_buffer_nesting, ctx->cpu) - 1;
	if (buffer_nesting >= RING_BUFFER_MAX_NESTING)
		return NULL;

	return &cs->stack_trace[buffer_nesting];
}

/*
 * In order to reserve the correct size, the callstack is computed. The
 * resulting callstack is saved to be accessed in the record step.
 */
static
size_t lttng_callstack_get_size(size_t offset, struct lttng_ctx_field *field,
				struct lib_ring_buffer_ctx *ctx,
				struct lttng_channel *chan)
{
	struct lttng_stack_trace *trace;
	struct field_data *fdata = field->priv;
	size_t orig_offset = offset;

	/* do not write data if no space is available */
	trace = stack_trace_context(field, ctx);
	if (unlikely(!trace)) {
		offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned int));
		offset += sizeof(unsigned int);
		offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned long));
		return offset - orig_offset;
	}

	/* reset stack trace, no need to clear memory */
	trace->nr_entries = 0;

	switch (fdata->mode) {
	case CALLSTACK_KERNEL:
		/* do the real work and reserve space */
		trace->nr_entries = save_func_kernel(trace->entries,
						MAX_ENTRIES, 0);
		break;
	case CALLSTACK_USER:
		++per_cpu(callstack_user_nesting, ctx->cpu);
		/* do the real work and reserve space */
		trace->nr_entries = save_func_user(trace->entries,
						MAX_ENTRIES);
		per_cpu(callstack_user_nesting, ctx->cpu)--;
		break;
	default:
		WARN_ON_ONCE(1);
	}

	/*
	 * If the array is filled, add our own marker to show that the
	 * stack is incomplete.
	 */
	offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned int));
	offset += sizeof(unsigned int);
	offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned long));
	offset += sizeof(unsigned long) * trace->nr_entries;
	/* Add our own ULONG_MAX delimiter to show incomplete stack. */
	if (trace->nr_entries == MAX_ENTRIES)
		offset += sizeof(unsigned long);
	return offset - orig_offset;
}

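/*
 * The context field is recorded as an unsigned int sequence length followed
 * by the entries as unsigned long values. When the stack was truncated at
 * MAX_ENTRIES, the length is incremented by one and a trailing ULONG_MAX
 * delimiter is written to mark the callstack as incomplete.
 */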
static
void lttng_callstack_record(struct lttng_ctx_field *field,
			struct lib_ring_buffer_ctx *ctx,
			struct lttng_channel *chan)
{
	struct lttng_stack_trace *trace = stack_trace_context(field, ctx);
	unsigned int nr_seq_entries;

	if (unlikely(!trace)) {
		nr_seq_entries = 0;
		lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned int));
		chan->ops->event_write(ctx, &nr_seq_entries, sizeof(unsigned int));
		lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned long));
		return;
	}
	lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned int));
	nr_seq_entries = trace->nr_entries;
	if (trace->nr_entries == MAX_ENTRIES)
		nr_seq_entries++;
	chan->ops->event_write(ctx, &nr_seq_entries, sizeof(unsigned int));
	lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned long));
	chan->ops->event_write(ctx, trace->entries,
			sizeof(unsigned long) * trace->nr_entries);
	/* Add our own ULONG_MAX delimiter to show incomplete stack. */
	if (trace->nr_entries == MAX_ENTRIES) {
		unsigned long delim = ULONG_MAX;

		chan->ops->event_write(ctx, &delim, sizeof(unsigned long));
	}
}