/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
 *
 * lttng-context-callstack-legacy-impl.h
 *
 * LTTng callstack event context, legacy implementation. Targets
 * kernels and architectures not yet using the stacktrace common
 * infrastructure introduced in the upstream Linux kernel by commit
 * 214d8ca6ee "stacktrace: Provide common infrastructure" (merged in
 * Linux 5.2, then gradually introduced within architectures).
 *
 * Copyright (C) 2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (C) 2014 Francis Giraldeau <francis.giraldeau@gmail.com>
 */
/* Maximum number of stack entries stored per callstack sample. */
#define MAX_ENTRIES 128
/*
 * Callstack context modes: one for the kernel stack, one for the
 * userspace stack of the current task. NR_CALLSTACK_MODES is the
 * count, used to size cs_types[].
 */
enum lttng_cs_ctx_modes {
	CALLSTACK_KERNEL = 0,
	CALLSTACK_USER = 1,
	NR_CALLSTACK_MODES,
};
22 | ||
/*
 * Storage for one saved callstack: the struct stack_trace is pointed
 * at the fixed-size entries[] array below (wired up in
 * lttng_cs_set_init()).
 */
struct lttng_cs_dispatch {
	struct stack_trace stack_trace;
	unsigned long entries[MAX_ENTRIES];
};
27 | ||
/*
 * Per-cpu callstack storage, one slot per ring buffer nesting level so
 * nested event records do not overwrite each other's saved stack.
 */
struct lttng_cs {
	struct lttng_cs_dispatch dispatch[RING_BUFFER_MAX_NESTING];
};
31 | ||
/*
 * Private data attached to a callstack context field: the per-cpu
 * stack storage and the mode (kernel or user) this field records.
 */
struct field_data {
	struct lttng_cs __percpu *cs_percpu;
	enum lttng_cs_ctx_modes mode;
};
36 | ||
/*
 * Static description of one callstack mode: the context field names
 * and the kernel stack-saving function, resolved lazily by name (see
 * init_type()) because it may not be exported to modules.
 */
struct lttng_cs_type {
	const char *name;		/* callstack sequence field name */
	const char *length_name;	/* callstack length field name */
	const char *save_func_name;	/* symbol looked up via kallsyms */
	void (*save_func)(struct stack_trace *trace);	/* NULL until resolved */
};
43 | ||
/*
 * One entry per lttng_cs_ctx_modes value, indexed by mode. save_func
 * starts NULL and is filled in by init_type() through kallsyms lookup
 * of save_func_name.
 */
static struct lttng_cs_type cs_types[] = {
	{
		.name		= "callstack_kernel",
		.length_name	= "_callstack_kernel_length",
		.save_func_name	= "save_stack_trace",
		.save_func	= NULL,
	},
	{
		.name		= "callstack_user",
		.length_name	= "_callstack_user_length",
		.save_func_name	= "save_stack_trace_user",
		.save_func	= NULL,
	},
};
58 | ||
59 | static | |
60 | int init_type(enum lttng_cs_ctx_modes mode) | |
61 | { | |
62 | unsigned long func; | |
63 | ||
64 | if (cs_types[mode].save_func) | |
65 | return 0; | |
66 | func = kallsyms_lookup_funcptr(cs_types[mode].save_func_name); | |
67 | if (!func) { | |
68 | printk(KERN_WARNING "LTTng: symbol lookup failed: %s\n", | |
69 | cs_types[mode].save_func_name); | |
70 | return -EINVAL; | |
71 | } | |
72 | cs_types[mode].save_func = (void *) func; | |
73 | return 0; | |
74 | } | |
75 | ||
b5a89a3f MD |
76 | static |
77 | void lttng_cs_set_init(struct lttng_cs __percpu *cs_set) | |
78 | { | |
79 | int cpu, i; | |
80 | ||
81 | for_each_possible_cpu(cpu) { | |
82 | struct lttng_cs *cs; | |
83 | ||
84 | cs = per_cpu_ptr(cs_set, cpu); | |
85 | for (i = 0; i < RING_BUFFER_MAX_NESTING; i++) { | |
86 | struct lttng_cs_dispatch *dispatch; | |
87 | ||
88 | dispatch = &cs->dispatch[i]; | |
89 | dispatch->stack_trace.entries = dispatch->entries; | |
90 | dispatch->stack_trace.max_entries = MAX_ENTRIES; | |
91 | } | |
92 | } | |
93 | } | |
94 | ||
/*
 * Keep track of nesting inside userspace callstack context code.
 * Consulted by stack_trace_context() to avoid gathering a userspace
 * callstack for events triggered by the saving mechanism itself.
 */
DEFINE_PER_CPU(int, callstack_user_nesting);
97 | ||
ea2d95e4 MD |
98 | /* |
99 | * Note: these callbacks expect to be invoked with preemption disabled across | |
100 | * get_size and record due to its use of a per-cpu stack. | |
101 | */ | |
b6ee48d2 | 102 | static |
ea2d95e4 | 103 | struct stack_trace *stack_trace_context(struct field_data *fdata, int cpu) |
b6ee48d2 MD |
104 | { |
105 | int buffer_nesting, cs_user_nesting; | |
106 | struct lttng_cs *cs; | |
b6ee48d2 MD |
107 | |
108 | /* | |
109 | * Do not gather the userspace callstack context when the event was | |
110 | * triggered by the userspace callstack context saving mechanism. | |
111 | */ | |
ea2d95e4 | 112 | cs_user_nesting = per_cpu(callstack_user_nesting, cpu); |
b6ee48d2 MD |
113 | |
114 | if (fdata->mode == CALLSTACK_USER && cs_user_nesting >= 1) | |
115 | return NULL; | |
116 | ||
117 | /* | |
118 | * get_cpu() is not required, preemption is already | |
119 | * disabled while event is written. | |
120 | * | |
121 | * max nesting is checked in lib_ring_buffer_get_cpu(). | |
122 | * Check it again as a safety net. | |
123 | */ | |
ea2d95e4 MD |
124 | cs = per_cpu_ptr(fdata->cs_percpu, cpu); |
125 | buffer_nesting = per_cpu(lib_ring_buffer_nesting, cpu) - 1; | |
b6ee48d2 MD |
126 | if (buffer_nesting >= RING_BUFFER_MAX_NESTING) |
127 | return NULL; | |
128 | ||
129 | return &cs->dispatch[buffer_nesting].stack_trace; | |
130 | } | |
131 | ||
ceabb767 | 132 | static |
346cb5ee | 133 | size_t lttng_callstack_length_get_size(void *priv, struct lttng_kernel_probe_ctx *probe_ctx, size_t offset) |
ceabb767 MD |
134 | { |
135 | size_t orig_offset = offset; | |
136 | ||
137 | offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned int)); | |
138 | offset += sizeof(unsigned int); | |
139 | return offset - orig_offset; | |
140 | } | |
141 | ||
/*
 * In order to reserve the correct size, the callstack is computed. The
 * resulting callstack is saved to be accessed in the record step.
 *
 * Returns the number of bytes to reserve for the callstack sequence
 * field, starting at @offset. Must run with preemption disabled (see
 * stack_trace_context()); the same per-cpu slot is read back by the
 * record callbacks.
 */
static
size_t lttng_callstack_sequence_get_size(void *priv, struct lttng_kernel_probe_ctx *probe_ctx, size_t offset)
{
	struct stack_trace *trace;
	struct field_data *fdata = (struct field_data *) priv;
	size_t orig_offset = offset;
	int cpu = smp_processor_id();

	/* do not write data if no space is available */
	trace = stack_trace_context(fdata, cpu);
	if (unlikely(!trace)) {
		/* Only alignment padding is reserved; record writes nothing. */
		offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned long));
		return offset - orig_offset;
	}

	/* reset stack trace, no need to clear memory */
	trace->nr_entries = 0;

	/*
	 * Bump the per-cpu nesting counter around the save so that any
	 * event triggered by the userspace stack walk itself skips
	 * callstack gathering (checked in stack_trace_context()).
	 */
	if (fdata->mode == CALLSTACK_USER)
		++per_cpu(callstack_user_nesting, cpu);

	/* do the real work and reserve space */
	cs_types[fdata->mode].save_func(trace);

	if (fdata->mode == CALLSTACK_USER)
		per_cpu(callstack_user_nesting, cpu)--;

	/*
	 * Remove final ULONG_MAX delimiter. If we cannot find it, add
	 * our own marker to show that the stack is incomplete. This is
	 * more compact for a trace.
	 */
	if (trace->nr_entries > 0
			&& trace->entries[trace->nr_entries - 1] == ULONG_MAX) {
		trace->nr_entries--;
	}
	offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned long));
	offset += sizeof(unsigned long) * trace->nr_entries;
	/* Add our own ULONG_MAX delimiter to show incomplete stack. */
	if (trace->nr_entries == trace->max_entries)
		offset += sizeof(unsigned long);
	return offset - orig_offset;
}
189 | ||
190 | static | |
346cb5ee | 191 | void lttng_callstack_length_record(void *priv, struct lttng_kernel_probe_ctx *probe_ctx, |
b6ee48d2 MD |
192 | struct lib_ring_buffer_ctx *ctx, |
193 | struct lttng_channel *chan) | |
194 | { | |
ea2d95e4 MD |
195 | int cpu = ctx->priv.reserve_cpu; |
196 | struct field_data *fdata = (struct field_data *) priv; | |
197 | struct stack_trace *trace = stack_trace_context(fdata, cpu); | |
b6ee48d2 MD |
198 | unsigned int nr_seq_entries; |
199 | ||
ceabb767 | 200 | lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned int)); |
b6ee48d2 MD |
201 | if (unlikely(!trace)) { |
202 | nr_seq_entries = 0; | |
ceabb767 MD |
203 | } else { |
204 | nr_seq_entries = trace->nr_entries; | |
205 | if (trace->nr_entries == trace->max_entries) | |
206 | nr_seq_entries++; | |
207 | } | |
208 | chan->ops->event_write(ctx, &nr_seq_entries, sizeof(unsigned int)); | |
209 | } | |
210 | static | |
346cb5ee | 211 | void lttng_callstack_sequence_record(void *priv, struct lttng_kernel_probe_ctx *probe_ctx, |
ceabb767 MD |
212 | struct lib_ring_buffer_ctx *ctx, |
213 | struct lttng_channel *chan) | |
214 | { | |
ea2d95e4 MD |
215 | int cpu = ctx->priv.reserve_cpu; |
216 | struct field_data *fdata = (struct field_data *) priv; | |
217 | struct stack_trace *trace = stack_trace_context(fdata, cpu); | |
ceabb767 MD |
218 | unsigned int nr_seq_entries; |
219 | ||
220 | lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned long)); | |
221 | if (unlikely(!trace)) { | |
b6ee48d2 MD |
222 | return; |
223 | } | |
b6ee48d2 MD |
224 | nr_seq_entries = trace->nr_entries; |
225 | if (trace->nr_entries == trace->max_entries) | |
226 | nr_seq_entries++; | |
b6ee48d2 MD |
227 | chan->ops->event_write(ctx, trace->entries, |
228 | sizeof(unsigned long) * trace->nr_entries); | |
229 | /* Add our own ULONG_MAX delimiter to show incomplete stack. */ | |
230 | if (trace->nr_entries == trace->max_entries) { | |
231 | unsigned long delim = ULONG_MAX; | |
232 | ||
233 | chan->ops->event_write(ctx, &delim, sizeof(unsigned long)); | |
234 | } | |
235 | } |