/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
 *
 * lttng-context-callstack-stackwalk-impl.h
 *
 * LTTng callstack event context, stackwalk implementation. Targets
 * kernels and architectures using the stacktrace common infrastructure
 * introduced in the upstream Linux kernel by commit 214d8ca6ee
 * "stacktrace: Provide common infrastructure" (merged in Linux 5.2,
 * then gradually introduced within architectures).
 *
 * Copyright (C) 2014-2019 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (C) 2014 Francis Giraldeau <francis.giraldeau@gmail.com>
 */
14 | ||
15 | #define MAX_ENTRIES 128 | |
16 | ||
17 | enum lttng_cs_ctx_modes { | |
18 | CALLSTACK_KERNEL = 0, | |
19 | CALLSTACK_USER = 1, | |
20 | NR_CALLSTACK_MODES, | |
21 | }; | |
22 | ||
23 | struct lttng_stack_trace { | |
24 | unsigned long entries[MAX_ENTRIES]; | |
25 | unsigned int nr_entries; | |
26 | }; | |
27 | ||
28 | struct lttng_cs { | |
29 | struct lttng_stack_trace stack_trace[RING_BUFFER_MAX_NESTING]; | |
30 | }; | |
31 | ||
32 | struct field_data { | |
33 | struct lttng_cs __percpu *cs_percpu; | |
34 | enum lttng_cs_ctx_modes mode; | |
35 | }; | |
36 | ||
37 | static | |
38 | unsigned int (*save_func_kernel)(unsigned long *store, unsigned int size, | |
39 | unsigned int skipnr); | |
40 | static | |
41 | unsigned int (*save_func_user)(unsigned long *store, unsigned int size); | |
42 | ||
b29c6286 MD |
43 | static |
44 | int init_type_callstack_kernel(void) | |
45 | { | |
46 | unsigned long func; | |
47 | const char *func_name = "stack_trace_save"; | |
48 | ||
49 | if (save_func_kernel) | |
50 | return 0; | |
51 | func = kallsyms_lookup_funcptr(func_name); | |
52 | if (!func) { | |
53 | printk(KERN_WARNING "LTTng: symbol lookup failed: %s\n", | |
54 | func_name); | |
55 | return -EINVAL; | |
56 | } | |
57 | save_func_kernel = (void *) func; | |
58 | return 0; | |
59 | } | |
60 | ||
61 | static | |
62 | int init_type_callstack_user(void) | |
63 | { | |
64 | unsigned long func; | |
65 | const char *func_name = "stack_trace_save_user"; | |
66 | ||
67 | if (save_func_user) | |
68 | return 0; | |
69 | func = kallsyms_lookup_funcptr(func_name); | |
70 | if (!func) { | |
71 | printk(KERN_WARNING "LTTng: symbol lookup failed: %s\n", | |
72 | func_name); | |
73 | return -EINVAL; | |
74 | } | |
75 | save_func_user = (void *) func; | |
76 | return 0; | |
77 | } | |
78 | ||
79 | static | |
80 | int init_type(enum lttng_cs_ctx_modes mode) | |
81 | { | |
82 | switch (mode) { | |
83 | case CALLSTACK_KERNEL: | |
84 | return init_type_callstack_kernel(); | |
85 | case CALLSTACK_USER: | |
86 | return init_type_callstack_user(); | |
87 | default: | |
88 | return -EINVAL; | |
89 | } | |
90 | } | |
91 | ||
92 | static | |
93 | void lttng_cs_set_init(struct lttng_cs __percpu *cs_set) | |
94 | { | |
95 | } | |
96 | ||
/*
 * Keep track of nesting inside userspace callstack context code.
 * Non-zero while the user stack is being saved, so that events triggered
 * by the saving mechanism itself do not recursively gather a user
 * callstack (see stack_trace_context()).
 */
DEFINE_PER_CPU(int, callstack_user_nesting);
99 | ||
2dc781e0 MD |
100 | /* |
101 | * Note: these callbacks expect to be invoked with preemption disabled across | |
102 | * get_size and record due to its use of a per-cpu stack. | |
103 | */ | |
b29c6286 | 104 | static |
2dc781e0 | 105 | struct lttng_stack_trace *stack_trace_context(struct field_data *fdata, int cpu) |
b29c6286 MD |
106 | { |
107 | int buffer_nesting, cs_user_nesting; | |
108 | struct lttng_cs *cs; | |
b29c6286 MD |
109 | |
110 | /* | |
111 | * Do not gather the userspace callstack context when the event was | |
112 | * triggered by the userspace callstack context saving mechanism. | |
113 | */ | |
2dc781e0 | 114 | cs_user_nesting = per_cpu(callstack_user_nesting, cpu); |
b29c6286 MD |
115 | |
116 | if (fdata->mode == CALLSTACK_USER && cs_user_nesting >= 1) | |
117 | return NULL; | |
118 | ||
119 | /* | |
120 | * get_cpu() is not required, preemption is already | |
121 | * disabled while event is written. | |
122 | * | |
123 | * max nesting is checked in lib_ring_buffer_get_cpu(). | |
124 | * Check it again as a safety net. | |
125 | */ | |
2dc781e0 MD |
126 | cs = per_cpu_ptr(fdata->cs_percpu, cpu); |
127 | buffer_nesting = per_cpu(lib_ring_buffer_nesting, cpu) - 1; | |
b29c6286 MD |
128 | if (buffer_nesting >= RING_BUFFER_MAX_NESTING) |
129 | return NULL; | |
130 | ||
131 | return &cs->stack_trace[buffer_nesting]; | |
132 | } | |
133 | ||
ceabb767 | 134 | static |
a92e844e | 135 | size_t lttng_callstack_length_get_size(void *priv, struct lttng_kernel_probe_ctx *probe_ctx, size_t offset) |
ceabb767 MD |
136 | { |
137 | size_t orig_offset = offset; | |
138 | ||
139 | offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned int)); | |
140 | offset += sizeof(unsigned int); | |
141 | return offset - orig_offset; | |
142 | } | |
143 | ||
b29c6286 MD |
144 | /* |
145 | * In order to reserve the correct size, the callstack is computed. The | |
146 | * resulting callstack is saved to be accessed in the record step. | |
147 | */ | |
148 | static | |
a92e844e | 149 | size_t lttng_callstack_sequence_get_size(void *priv, struct lttng_kernel_probe_ctx *probe_ctx, size_t offset) |
b29c6286 MD |
150 | { |
151 | struct lttng_stack_trace *trace; | |
2dc781e0 | 152 | struct field_data *fdata = (struct field_data *) priv; |
b29c6286 | 153 | size_t orig_offset = offset; |
2dc781e0 | 154 | int cpu = smp_processor_id(); |
b29c6286 MD |
155 | |
156 | /* do not write data if no space is available */ | |
2dc781e0 | 157 | trace = stack_trace_context(fdata, cpu); |
b29c6286 | 158 | if (unlikely(!trace)) { |
b29c6286 MD |
159 | offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned long)); |
160 | return offset - orig_offset; | |
161 | } | |
162 | ||
163 | /* reset stack trace, no need to clear memory */ | |
164 | trace->nr_entries = 0; | |
165 | ||
166 | switch (fdata->mode) { | |
167 | case CALLSTACK_KERNEL: | |
168 | /* do the real work and reserve space */ | |
169 | trace->nr_entries = save_func_kernel(trace->entries, | |
170 | MAX_ENTRIES, 0); | |
171 | break; | |
172 | case CALLSTACK_USER: | |
2dc781e0 | 173 | ++per_cpu(callstack_user_nesting, cpu); |
b29c6286 MD |
174 | /* do the real work and reserve space */ |
175 | trace->nr_entries = save_func_user(trace->entries, | |
176 | MAX_ENTRIES); | |
2dc781e0 | 177 | per_cpu(callstack_user_nesting, cpu)--; |
b29c6286 MD |
178 | break; |
179 | default: | |
180 | WARN_ON_ONCE(1); | |
181 | } | |
182 | ||
183 | /* | |
184 | * If the array is filled, add our own marker to show that the | |
185 | * stack is incomplete. | |
186 | */ | |
b29c6286 MD |
187 | offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned long)); |
188 | offset += sizeof(unsigned long) * trace->nr_entries; | |
189 | /* Add our own ULONG_MAX delimiter to show incomplete stack. */ | |
190 | if (trace->nr_entries == MAX_ENTRIES) | |
191 | offset += sizeof(unsigned long); | |
192 | return offset - orig_offset; | |
193 | } | |
194 | ||
195 | static | |
a92e844e | 196 | void lttng_callstack_length_record(void *priv, struct lttng_kernel_probe_ctx *probe_ctx, |
8a57ec02 | 197 | struct lttng_kernel_ring_buffer_ctx *ctx, |
b29c6286 MD |
198 | struct lttng_channel *chan) |
199 | { | |
2dc781e0 MD |
200 | int cpu = ctx->priv.reserve_cpu; |
201 | struct field_data *fdata = (struct field_data *) priv; | |
202 | struct lttng_stack_trace *trace = stack_trace_context(fdata, cpu); | |
b29c6286 MD |
203 | unsigned int nr_seq_entries; |
204 | ||
ceabb767 | 205 | lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned int)); |
b29c6286 MD |
206 | if (unlikely(!trace)) { |
207 | nr_seq_entries = 0; | |
ceabb767 MD |
208 | } else { |
209 | nr_seq_entries = trace->nr_entries; | |
210 | if (trace->nr_entries == MAX_ENTRIES) | |
211 | nr_seq_entries++; | |
212 | } | |
213 | chan->ops->event_write(ctx, &nr_seq_entries, sizeof(unsigned int)); | |
214 | } | |
215 | ||
216 | static | |
a92e844e | 217 | void lttng_callstack_sequence_record(void *priv, struct lttng_kernel_probe_ctx *probe_ctx, |
8a57ec02 | 218 | struct lttng_kernel_ring_buffer_ctx *ctx, |
ceabb767 MD |
219 | struct lttng_channel *chan) |
220 | { | |
2dc781e0 MD |
221 | int cpu = ctx->priv.reserve_cpu; |
222 | struct field_data *fdata = (struct field_data *) priv; | |
223 | struct lttng_stack_trace *trace = stack_trace_context(fdata, cpu); | |
ceabb767 MD |
224 | unsigned int nr_seq_entries; |
225 | ||
226 | lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned long)); | |
227 | if (unlikely(!trace)) { | |
b29c6286 MD |
228 | return; |
229 | } | |
b29c6286 MD |
230 | nr_seq_entries = trace->nr_entries; |
231 | if (trace->nr_entries == MAX_ENTRIES) | |
232 | nr_seq_entries++; | |
b29c6286 MD |
233 | chan->ops->event_write(ctx, trace->entries, |
234 | sizeof(unsigned long) * trace->nr_entries); | |
235 | /* Add our own ULONG_MAX delimiter to show incomplete stack. */ | |
236 | if (trace->nr_entries == MAX_ENTRIES) { | |
237 | unsigned long delim = ULONG_MAX; | |
238 | ||
239 | chan->ops->event_write(ctx, &delim, sizeof(unsigned long)); | |
240 | } | |
241 | } |