/*
 * lttng-context-perf-counters.c
 *
 * LTTng performance monitoring counters (perf-counters) integration module.
 *
 * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/cpu.h>
#include <lttng-events.h>
#include <wrapper/ringbuffer/frontend_types.h>
#include <wrapper/vmalloc.h>
#include <wrapper/perf.h>
#include <lttng-tracer.h>

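/*
 * Size callback: one 64-bit counter value, aligned on uint64_t, is
 * reserved in the ring buffer for each recorded event.
 */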
static
size_t perf_counter_get_size(size_t offset)
{
	size_t size = 0;

	size += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
	size += sizeof(uint64_t);
	return size;
}

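/*
 * Record callback: read the perf counter attached to the current CPU and
 * write its value into the event payload. A value of 0 is written when
 * the counter is in error state or has not been created yet.
 */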
static
void perf_counter_record(struct lttng_ctx_field *field,
		struct lib_ring_buffer_ctx *ctx,
		struct lttng_channel *chan)
{
	struct perf_event *event;
	uint64_t value;

	event = field->u.perf_counter->e[ctx->cpu];
	if (likely(event)) {
		if (unlikely(event->state == PERF_EVENT_STATE_ERROR)) {
			value = 0;
		} else {
			event->pmu->read(event);
			value = local64_read(&event->count);
		}
	} else {
		/*
		 * Perf chooses not to be clever and not to support enabling a
		 * perf counter before the cpu is brought up. Therefore, we need
		 * to support having events coming (e.g. scheduler events)
		 * before the counter is set up. Write an arbitrary 0 in this
		 * case.
		 */
		value = 0;
	}
	lib_ring_buffer_align_ctx(ctx, lttng_alignof(value));
	chan->ops->event_write(ctx, &value, sizeof(value));
}

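/*
 * Counters are read synchronously from the record callback above, so the
 * mandatory overflow handler does nothing. Older kernels (the #else branch
 * below) use a prototype with an extra "nmi" argument.
 */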
#if defined(CONFIG_PERF_EVENTS) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,99))
static
void overflow_callback(struct perf_event *event,
		struct perf_sample_data *data,
		struct pt_regs *regs)
{
}
#else
static
void overflow_callback(struct perf_event *event, int nmi,
		struct perf_sample_data *data,
		struct pt_regs *regs)
{
}
#endif

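/*
 * Field teardown: stop tracking CPU hotplug (cpuhp instances on kernels
 * >= 4.10, CPU notifier otherwise), release every per-CPU perf event,
 * then free the field's allocations.
 */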
static
void lttng_destroy_perf_counter_field(struct lttng_ctx_field *field)
{
	struct perf_event **events = field->u.perf_counter->e;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
	{
		int ret;

		ret = cpuhp_state_remove_instance(lttng_hp_online,
				&field->u.perf_counter->cpuhp_online.node);
		WARN_ON(ret);
		ret = cpuhp_state_remove_instance(lttng_hp_prepare,
				&field->u.perf_counter->cpuhp_prepare.node);
		WARN_ON(ret);
	}
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
	{
		int cpu;

		get_online_cpus();
		for_each_online_cpu(cpu)
			perf_event_release_kernel(events[cpu]);
		put_online_cpus();
#ifdef CONFIG_HOTPLUG_CPU
		unregister_cpu_notifier(&field->u.perf_counter->nb);
#endif
	}
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
	kfree(field->event_field.name);
	kfree(field->u.perf_counter->attr);
	kfree(events);
	kfree(field->u.perf_counter);
}

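/*
 * CPU hotplug state machine callbacks (kernels >= 4.10): create the
 * pinned counter when a CPU comes online, release it when the CPU dies.
 */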
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))

int lttng_cpuhp_perf_counter_online(unsigned int cpu,
		struct lttng_cpuhp_node *node)
{
	struct lttng_perf_counter_field *perf_field =
		container_of(node, struct lttng_perf_counter_field,
				cpuhp_online);
	struct perf_event **events = perf_field->e;
	struct perf_event_attr *attr = perf_field->attr;
	struct perf_event *pevent;

	pevent = wrapper_perf_event_create_kernel_counter(attr,
			cpu, NULL, overflow_callback);
	if (!pevent || IS_ERR(pevent))
		return -EINVAL;
	if (pevent->state == PERF_EVENT_STATE_ERROR) {
		perf_event_release_kernel(pevent);
		return -EINVAL;
	}
	barrier();	/* Create perf counter before setting event */
	events[cpu] = pevent;
	return 0;
}

int lttng_cpuhp_perf_counter_dead(unsigned int cpu,
		struct lttng_cpuhp_node *node)
{
	struct lttng_perf_counter_field *perf_field =
		container_of(node, struct lttng_perf_counter_field,
				cpuhp_prepare);
	struct perf_event **events = perf_field->e;
	struct perf_event *pevent;

	pevent = events[cpu];
	events[cpu] = NULL;
	barrier();	/* NULLify event before perf counter teardown */
	perf_event_release_kernel(pevent);
	return 0;
}

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */

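/*
 * Pre-4.10 kernels: an old-style CPU notifier performs the equivalent
 * per-CPU counter creation and teardown.
 */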
#ifdef CONFIG_HOTPLUG_CPU

/**
 *	lttng_perf_counter_cpu_hp_callback - CPU hotplug callback
 *	@nb: notifier block
 *	@action: hotplug action to take
 *	@hcpu: CPU number
 *
 *	Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
 *
 * We can set up perf counters when the cpu is online (up prepare seems to be
 * too soon).
 */
static
int lttng_perf_counter_cpu_hp_callback(struct notifier_block *nb,
		unsigned long action,
		void *hcpu)
{
	unsigned int cpu = (unsigned long) hcpu;
	struct lttng_perf_counter_field *perf_field =
		container_of(nb, struct lttng_perf_counter_field, nb);
	struct perf_event **events = perf_field->e;
	struct perf_event_attr *attr = perf_field->attr;
	struct perf_event *pevent;

	if (!perf_field->hp_enable)
		return NOTIFY_OK;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		pevent = wrapper_perf_event_create_kernel_counter(attr,
				cpu, NULL, overflow_callback);
		if (!pevent || IS_ERR(pevent))
			return NOTIFY_BAD;
		if (pevent->state == PERF_EVENT_STATE_ERROR) {
			perf_event_release_kernel(pevent);
			return NOTIFY_BAD;
		}
		barrier();	/* Create perf counter before setting event */
		events[cpu] = pevent;
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		pevent = events[cpu];
		events[cpu] = NULL;
		barrier();	/* NULLify event before perf counter teardown */
		perf_event_release_kernel(pevent);
		break;
	}
	return NOTIFY_OK;
}

#endif

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */

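/*
 * Add a perf counter context field to the context: allocate the per-CPU
 * event array and the perf attribute, hook into CPU hotplug so a pinned
 * kernel counter exists for each CPU, and describe the field for the
 * trace metadata.
 */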
int lttng_add_perf_counter_to_ctx(uint32_t type,
				  uint64_t config,
				  const char *name,
				  struct lttng_ctx **ctx)
{
	struct lttng_ctx_field *field;
	struct lttng_perf_counter_field *perf_field;
	struct perf_event **events;
	struct perf_event_attr *attr;
	int ret;
	char *name_alloc;

	events = kzalloc(num_possible_cpus() * sizeof(*events), GFP_KERNEL);
	if (!events)
		return -ENOMEM;

	attr = kzalloc(sizeof(struct perf_event_attr), GFP_KERNEL);
	if (!attr) {
		ret = -ENOMEM;
		goto error_attr;
	}

	attr->type = type;
	attr->config = config;
	attr->size = sizeof(struct perf_event_attr);
	attr->pinned = 1;
	attr->disabled = 0;

	perf_field = kzalloc(sizeof(struct lttng_perf_counter_field), GFP_KERNEL);
	if (!perf_field) {
		ret = -ENOMEM;
		goto error_alloc_perf_field;
	}
	perf_field->e = events;
	perf_field->attr = attr;

	name_alloc = kstrdup(name, GFP_KERNEL);
	if (!name_alloc) {
		ret = -ENOMEM;
		goto name_alloc_error;
	}

	field = lttng_append_context(ctx);
	if (!field) {
		ret = -ENOMEM;
		goto append_context_error;
	}
	if (lttng_find_context(*ctx, name_alloc)) {
		ret = -EEXIST;
		goto find_error;
	}

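	/*
	 * Create the per-CPU counters: kernels >= 4.10 register cpuhp
	 * instances so counters follow CPU hotplug state; older kernels
	 * register a CPU notifier and create one pinned counter per
	 * currently online CPU.
	 */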
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))

	perf_field->cpuhp_prepare.component = LTTNG_CONTEXT_PERF_COUNTERS;
	ret = cpuhp_state_add_instance(lttng_hp_prepare,
		&perf_field->cpuhp_prepare.node);
	if (ret)
		goto cpuhp_prepare_error;

	perf_field->cpuhp_online.component = LTTNG_CONTEXT_PERF_COUNTERS;
	ret = cpuhp_state_add_instance(lttng_hp_online,
		&perf_field->cpuhp_online.node);
	if (ret)
		goto cpuhp_online_error;

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
	{
		int cpu;

#ifdef CONFIG_HOTPLUG_CPU
		perf_field->nb.notifier_call =
			lttng_perf_counter_cpu_hp_callback;
		perf_field->nb.priority = 0;
		register_cpu_notifier(&perf_field->nb);
#endif
		get_online_cpus();
		for_each_online_cpu(cpu) {
			events[cpu] = wrapper_perf_event_create_kernel_counter(attr,
					cpu, NULL, overflow_callback);
			if (!events[cpu] || IS_ERR(events[cpu])) {
				ret = -EINVAL;
				goto counter_error;
			}
			if (events[cpu]->state == PERF_EVENT_STATE_ERROR) {
				ret = -EBUSY;
				goto counter_busy;
			}
		}
		put_online_cpus();
		perf_field->hp_enable = 1;
	}
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */

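	/*
	 * Fill in the field description: destroy/record callbacks and an
	 * unsigned, base-10, 64-bit integer type.
	 */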
	field->destroy = lttng_destroy_perf_counter_field;

	field->event_field.name = name_alloc;
	field->event_field.type.atype = atype_integer;
	field->event_field.type.u.basic.integer.size = sizeof(uint64_t) * CHAR_BIT;
	field->event_field.type.u.basic.integer.alignment = lttng_alignof(uint64_t) * CHAR_BIT;
	field->event_field.type.u.basic.integer.signedness = lttng_is_signed_type(uint64_t);
	field->event_field.type.u.basic.integer.reverse_byte_order = 0;
	field->event_field.type.u.basic.integer.base = 10;
	field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
	field->get_size = perf_counter_get_size;
	field->record = perf_counter_record;
	field->u.perf_counter = perf_field;
	lttng_context_update(*ctx);

	wrapper_vmalloc_sync_all();
	return 0;

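	/* Error paths: unwind in the reverse order of the setup above. */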
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
cpuhp_online_error:
	{
		int remove_ret;

		remove_ret = cpuhp_state_remove_instance(lttng_hp_prepare,
				&perf_field->cpuhp_prepare.node);
		WARN_ON(remove_ret);
	}
cpuhp_prepare_error:
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
counter_busy:
counter_error:
	{
		int cpu;

		for_each_online_cpu(cpu) {
			if (events[cpu] && !IS_ERR(events[cpu]))
				perf_event_release_kernel(events[cpu]);
		}
		put_online_cpus();
#ifdef CONFIG_HOTPLUG_CPU
		unregister_cpu_notifier(&perf_field->nb);
#endif
	}
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
find_error:
	lttng_remove_context_field(ctx, field);
append_context_error:
	kfree(name_alloc);
name_alloc_error:
	kfree(perf_field);
error_alloc_perf_field:
	kfree(attr);
error_attr:
	kfree(events);
	return ret;
}
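
/*
 * Usage sketch (illustrative, not part of this file): a caller such as the
 * LTTng ABI layer could add a CPU-cycles counter field to a channel context
 * along these lines; the field name and context pointer are hypothetical.
 *
 *	ret = lttng_add_perf_counter_to_ctx(PERF_TYPE_HARDWARE,
 *			PERF_COUNT_HW_CPU_CYCLES,
 *			"perf_cpu_cycles", &chan->ctx);
 *	if (ret)
 *		return ret;
 */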