/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
 *
 * lttng-context-callstack-legacy-impl.h
 *
 * LTTng callstack event context, legacy implementation. Targets
 * kernels and architectures not yet using the stacktrace common
 * infrastructure introduced in the upstream Linux kernel by commit
 * 214d8ca6ee "stacktrace: Provide common infrastructure" (merged in
 * Linux 5.2, then gradually introduced within architectures).
 *
 * Copyright (C) 2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (C) 2014 Francis Giraldeau <francis.giraldeau@gmail.com>
 */

#define MAX_ENTRIES 128

enum lttng_cs_ctx_modes {
	CALLSTACK_KERNEL = 0,
	CALLSTACK_USER = 1,
	NR_CALLSTACK_MODES,
};

struct lttng_cs_dispatch {
	struct stack_trace stack_trace;
	unsigned long entries[MAX_ENTRIES];
};

struct lttng_cs {
	struct lttng_cs_dispatch dispatch[RING_BUFFER_MAX_NESTING];
};

struct field_data {
	struct lttng_cs __percpu *cs_percpu;
	enum lttng_cs_ctx_modes mode;
};

struct lttng_cs_type {
	const char *name;
	const char *length_name;
	const char *save_func_name;
	void (*save_func)(struct stack_trace *trace);
};

static struct lttng_cs_type cs_types[] = {
	{
		.name = "callstack_kernel",
		.length_name = "_callstack_kernel_length",
		.save_func_name = "save_stack_trace",
		.save_func = NULL,
	},
	{
		.name = "callstack_user",
		.length_name = "_callstack_user_length",
		.save_func_name = "save_stack_trace_user",
		.save_func = NULL,
	},
};

static
const char *lttng_cs_ctx_mode_name(enum lttng_cs_ctx_modes mode)
{
	return cs_types[mode].name;
}

static
const char *lttng_cs_ctx_mode_length_name(enum lttng_cs_ctx_modes mode)
{
	return cs_types[mode].length_name;
}

static
int init_type(enum lttng_cs_ctx_modes mode)
{
	unsigned long func;

	if (cs_types[mode].save_func)
		return 0;
	func = kallsyms_lookup_funcptr(cs_types[mode].save_func_name);
	if (!func) {
		printk(KERN_WARNING "LTTng: symbol lookup failed: %s\n",
			cs_types[mode].save_func_name);
		return -EINVAL;
	}
	cs_types[mode].save_func = (void *) func;
	return 0;
}

static
void lttng_cs_set_init(struct lttng_cs __percpu *cs_set)
{
	int cpu, i;

	for_each_possible_cpu(cpu) {
		struct lttng_cs *cs;

		cs = per_cpu_ptr(cs_set, cpu);
		for (i = 0; i < RING_BUFFER_MAX_NESTING; i++) {
			struct lttng_cs_dispatch *dispatch;

			dispatch = &cs->dispatch[i];
			dispatch->stack_trace.entries = dispatch->entries;
			dispatch->stack_trace.max_entries = MAX_ENTRIES;
		}
	}
}

/* Keep track of nesting inside userspace callstack context code */
DEFINE_PER_CPU(int, callstack_user_nesting);

static
struct stack_trace *stack_trace_context(struct lttng_ctx_field *field,
					struct lib_ring_buffer_ctx *ctx)
{
	int buffer_nesting, cs_user_nesting;
	struct lttng_cs *cs;
	struct field_data *fdata = field->priv;

	/*
	 * Do not gather the userspace callstack context when the event was
	 * triggered by the userspace callstack context saving mechanism.
	 */
	cs_user_nesting = per_cpu(callstack_user_nesting, ctx->cpu);

	if (fdata->mode == CALLSTACK_USER && cs_user_nesting >= 1)
		return NULL;

	/*
	 * get_cpu() is not required: preemption is already disabled
	 * while the event is written.
	 *
	 * The maximum nesting level is checked in
	 * lib_ring_buffer_get_cpu(). Check it again as a safety net.
	 */
	cs = per_cpu_ptr(fdata->cs_percpu, ctx->cpu);
	buffer_nesting = per_cpu(lib_ring_buffer_nesting, ctx->cpu) - 1;
	if (buffer_nesting >= RING_BUFFER_MAX_NESTING)
		return NULL;

	return &cs->dispatch[buffer_nesting].stack_trace;
}

static
size_t lttng_callstack_length_get_size(size_t offset, struct lttng_ctx_field *field,
				struct lib_ring_buffer_ctx *ctx,
				struct lttng_channel *chan)
{
	size_t orig_offset = offset;

	offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned int));
	offset += sizeof(unsigned int);
	return offset - orig_offset;
}

/*
 * The callstack is captured at size-computation time so that the
 * correct amount of space can be reserved. The resulting callstack is
 * kept in per-cpu storage to be written out in the record step.
 */
static
size_t lttng_callstack_sequence_get_size(size_t offset, struct lttng_ctx_field *field,
					struct lib_ring_buffer_ctx *ctx,
					struct lttng_channel *chan)
{
	struct stack_trace *trace;
	struct field_data *fdata = field->priv;
	size_t orig_offset = offset;

	/* do not write data if no space is available */
	trace = stack_trace_context(field, ctx);
	if (unlikely(!trace)) {
		offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned long));
		return offset - orig_offset;
	}

	/* reset stack trace, no need to clear memory */
	trace->nr_entries = 0;

	if (fdata->mode == CALLSTACK_USER)
		++per_cpu(callstack_user_nesting, ctx->cpu);

	/* do the real work and reserve space */
	cs_types[fdata->mode].save_func(trace);

	if (fdata->mode == CALLSTACK_USER)
		per_cpu(callstack_user_nesting, ctx->cpu)--;

	/*
	 * Remove the final ULONG_MAX delimiter. If we cannot find it,
	 * add our own marker to show that the stack is incomplete. This
	 * is more compact for a trace.
	 */
	if (trace->nr_entries > 0
			&& trace->entries[trace->nr_entries - 1] == ULONG_MAX) {
		trace->nr_entries--;
	}
	offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned long));
	offset += sizeof(unsigned long) * trace->nr_entries;
	/* Add our own ULONG_MAX delimiter to show incomplete stack. */
	if (trace->nr_entries == trace->max_entries)
		offset += sizeof(unsigned long);
	return offset - orig_offset;
}

static
void lttng_callstack_length_record(struct lttng_ctx_field *field,
			struct lib_ring_buffer_ctx *ctx,
			struct lttng_channel *chan)
{
	struct stack_trace *trace = stack_trace_context(field, ctx);
	unsigned int nr_seq_entries;

	lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned int));
	if (unlikely(!trace)) {
		nr_seq_entries = 0;
	} else {
		nr_seq_entries = trace->nr_entries;
		if (trace->nr_entries == trace->max_entries)
			nr_seq_entries++;
	}
	chan->ops->event_write(ctx, &nr_seq_entries, sizeof(unsigned int));
}

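/*
 * Write the saved callstack addresses, appending a ULONG_MAX delimiter
 * when the stack filled all max_entries slots to flag it as possibly
 * truncated.
 */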
static
void lttng_callstack_sequence_record(struct lttng_ctx_field *field,
			struct lib_ring_buffer_ctx *ctx,
			struct lttng_channel *chan)
{
	struct stack_trace *trace = stack_trace_context(field, ctx);
	unsigned int nr_seq_entries;

	lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned long));
	if (unlikely(!trace)) {
		return;
	}
	nr_seq_entries = trace->nr_entries;
	if (trace->nr_entries == trace->max_entries)
		nr_seq_entries++;
	chan->ops->event_write(ctx, trace->entries,
			sizeof(unsigned long) * trace->nr_entries);
	/* Add our own ULONG_MAX delimiter to show incomplete stack. */
	if (trace->nr_entries == trace->max_entries) {
		unsigned long delim = ULONG_MAX;

		chan->ops->event_write(ctx, &delim, sizeof(unsigned long));
	}
}