/*
 * lttng-context-perf-counters.c
 *
 * LTTng performance monitoring counters (perf-counters) integration module.
 *
 * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/cpu.h>
#include <lttng-events.h>
#include <wrapper/ringbuffer/frontend_types.h>
#include <wrapper/vmalloc.h>
#include <wrapper/perf.h>
#include <lttng-tracer.h>
static
size_t perf_counter_get_size(size_t offset)
{
	size_t size = 0;

	size += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
	size += sizeof(uint64_t);
	return size;
}
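/*
 * Illustrative note (not in the original source): with
 * lttng_alignof(uint64_t) == 8, a record offset of 12 needs 4 bytes of
 * padding before the 8-byte counter value, so the function returns
 * 4 + 8 = 12 bytes to reserve for this field.
 */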
static
void perf_counter_record(struct lttng_ctx_field *field,
		struct lib_ring_buffer_ctx *ctx,
		struct lttng_channel *chan)
{
	struct perf_event *event;
	uint64_t value;

	event = field->u.perf_counter->e[ctx->cpu];
	if (likely(event)) {
		if (unlikely(event->state == PERF_EVENT_STATE_ERROR)) {
			value = 0;
		} else {
			event->pmu->read(event);
			value = local64_read(&event->count);
		}
	} else {
		/*
		 * Perf chooses not to be clever and not to support enabling a
		 * perf counter before the cpu is brought up. Therefore, we need
		 * to support having events coming (e.g. scheduler events)
		 * before the counter is setup. Write an arbitrary 0 in this
		 * case.
		 */
		value = 0;
	}
	lib_ring_buffer_align_ctx(ctx, lttng_alignof(value));
	chan->ops->event_write(ctx, &value, sizeof(value));
}
#if defined(CONFIG_PERF_EVENTS) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,99))
static
void overflow_callback(struct perf_event *event,
		struct perf_sample_data *data,
		struct pt_regs *regs)
{
}
#else
static
void overflow_callback(struct perf_event *event, int nmi,
		struct perf_sample_data *data,
		struct pt_regs *regs)
{
}
#endif
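/*
 * Note (inferred from this file, not an authoritative statement of the
 * perf API): the overflow callbacks above are intentionally empty. They
 * are only passed to wrapper_perf_event_create_kernel_counter() below;
 * LTTng samples the counters synchronously in perf_counter_record() at
 * each tracepoint hit and does not use overflow-driven sampling.
 */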
static
void lttng_destroy_perf_counter_field(struct lttng_ctx_field *field)
{
	struct perf_event **events = field->u.perf_counter->e;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
	{
		int ret;

		ret = cpuhp_state_remove_instance(lttng_hp_online,
			&field->u.perf_counter->cpuhp_online.node);
		WARN_ON(ret);
		ret = cpuhp_state_remove_instance(lttng_hp_prepare,
			&field->u.perf_counter->cpuhp_prepare.node);
		WARN_ON(ret);
	}
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
	{
		int cpu;

		get_online_cpus();
		for_each_online_cpu(cpu)
			perf_event_release_kernel(events[cpu]);
		put_online_cpus();
#ifdef CONFIG_HOTPLUG_CPU
		unregister_cpu_notifier(&field->u.perf_counter->nb);
#endif
	}
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
	kfree(field->event_field.name);
	kfree(field->u.perf_counter->attr);
	kfree(events);
	kfree(field->u.perf_counter);
}
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))

int lttng_cpuhp_perf_counter_online(unsigned int cpu,
		struct lttng_cpuhp_node *node)
{
	struct lttng_perf_counter_field *perf_field =
		container_of(node, struct lttng_perf_counter_field,
				cpuhp_online);
	struct perf_event **events = perf_field->e;
	struct perf_event_attr *attr = perf_field->attr;
	struct perf_event *pevent;

	pevent = wrapper_perf_event_create_kernel_counter(attr,
			cpu, NULL, overflow_callback);
	if (!pevent || IS_ERR(pevent))
		return -EINVAL;
	if (pevent->state == PERF_EVENT_STATE_ERROR) {
		perf_event_release_kernel(pevent);
		return -EINVAL;
	}
	barrier();	/* Create perf counter before setting event */
	events[cpu] = pevent;
	return 0;
}
int lttng_cpuhp_perf_counter_dead(unsigned int cpu,
		struct lttng_cpuhp_node *node)
{
	struct lttng_perf_counter_field *perf_field =
		container_of(node, struct lttng_perf_counter_field,
				cpuhp_prepare);
	struct perf_event **events = perf_field->e;
	struct perf_event *pevent;

	pevent = events[cpu];
	events[cpu] = NULL;
	barrier();	/* NULLify event before perf counter teardown */
	perf_event_release_kernel(pevent);
	return 0;
}
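/*
 * Note (inferred from this file): events[cpu] is published and cleared
 * with only compiler barriers, no locks. The read side,
 * perf_counter_record(), tolerates a NULL entry (it records 0), and
 * these callbacks run while the target CPU is being brought up or torn
 * down, so no tracepoint probe on that CPU races with the counter.
 */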
#else	/* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */

#ifdef CONFIG_HOTPLUG_CPU

/**
 *	lttng_perf_counter_cpu_hp_callback - CPU hotplug callback
 *	@nb: notifier block
 *	@action: hotplug action to take
 *	@hcpu: CPU number
 *
 *	Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
 *
 *	We can set up perf counters when the cpu is online (up prepare seems to
 *	be too soon).
 */
static
int lttng_perf_counter_cpu_hp_callback(struct notifier_block *nb,
		unsigned long action,
		void *hcpu)
{
	unsigned int cpu = (unsigned long) hcpu;
	struct lttng_perf_counter_field *perf_field =
		container_of(nb, struct lttng_perf_counter_field, nb);
	struct perf_event **events = perf_field->e;
	struct perf_event_attr *attr = perf_field->attr;
	struct perf_event *pevent;

	if (!perf_field->hp_enable)
		return NOTIFY_OK;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		pevent = wrapper_perf_event_create_kernel_counter(attr,
				cpu, NULL, overflow_callback);
		if (!pevent || IS_ERR(pevent))
			return NOTIFY_BAD;
		if (pevent->state == PERF_EVENT_STATE_ERROR) {
			perf_event_release_kernel(pevent);
			return NOTIFY_BAD;
		}
		barrier();	/* Create perf counter before setting event */
		events[cpu] = pevent;
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		pevent = events[cpu];
		events[cpu] = NULL;
		barrier();	/* NULLify event before perf counter teardown */
		perf_event_release_kernel(pevent);
		break;
	}
	return NOTIFY_OK;
}

#endif

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
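/*
 * Note (inferred from this file): the two sections above implement the
 * same hotplug policy for two kernel generations. Kernels >= 4.10 use
 * cpuhp state machine instances (lttng_cpuhp_perf_counter_online and
 * lttng_cpuhp_perf_counter_dead); older kernels use the raw notifier
 * callback keyed on CPU_ONLINE/CPU_DEAD actions.
 */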
int lttng_add_perf_counter_to_ctx(uint32_t type,
		uint64_t config,
		const char *name,
		struct lttng_ctx **ctx)
{
	struct lttng_ctx_field *field;
	struct lttng_perf_counter_field *perf_field;
	struct perf_event **events;
	struct perf_event_attr *attr;
	int ret;
	char *name_alloc;

	events = kzalloc(num_possible_cpus() * sizeof(*events), GFP_KERNEL);
	if (!events)
		return -ENOMEM;

	attr = kzalloc(sizeof(struct perf_event_attr), GFP_KERNEL);
	if (!attr) {
		ret = -ENOMEM;
		goto error_attr;
	}

	attr->type = type;
	attr->config = config;
	attr->size = sizeof(struct perf_event_attr);
	attr->pinned = 1;
	attr->disabled = 0;

	perf_field = kzalloc(sizeof(struct lttng_perf_counter_field), GFP_KERNEL);
	if (!perf_field) {
		ret = -ENOMEM;
		goto error_alloc_perf_field;
	}
	perf_field->e = events;
	perf_field->attr = attr;

	name_alloc = kstrdup(name, GFP_KERNEL);
	if (!name_alloc) {
		ret = -ENOMEM;
		goto name_alloc_error;
	}

	field = lttng_append_context(ctx);
	if (!field) {
		ret = -ENOMEM;
		goto append_context_error;
	}
	if (lttng_find_context(*ctx, name_alloc)) {
		ret = -EEXIST;
		goto find_error;
	}
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))

	perf_field->cpuhp_prepare.component = LTTNG_CONTEXT_PERF_COUNTERS;
	ret = cpuhp_state_add_instance(lttng_hp_prepare,
		&perf_field->cpuhp_prepare.node);
	if (ret)
		goto cpuhp_prepare_error;

	perf_field->cpuhp_online.component = LTTNG_CONTEXT_PERF_COUNTERS;
	ret = cpuhp_state_add_instance(lttng_hp_online,
		&perf_field->cpuhp_online.node);
	if (ret)
		goto cpuhp_online_error;

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */

	{
		int cpu;

#ifdef CONFIG_HOTPLUG_CPU
		perf_field->nb.notifier_call =
			lttng_perf_counter_cpu_hp_callback;
		perf_field->nb.priority = 0;
		register_cpu_notifier(&perf_field->nb);
#endif
		get_online_cpus();
		for_each_online_cpu(cpu) {
			events[cpu] = wrapper_perf_event_create_kernel_counter(attr,
					cpu, NULL, overflow_callback);
			if (!events[cpu] || IS_ERR(events[cpu])) {
				ret = -EINVAL;
				goto counter_error;
			}
			if (events[cpu]->state == PERF_EVENT_STATE_ERROR) {
				ret = -EBUSY;
				goto counter_busy;
			}
		}
		put_online_cpus();
		perf_field->hp_enable = 1;
	}

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
	field->destroy = lttng_destroy_perf_counter_field;

	field->event_field.name = name_alloc;
	field->event_field.type.atype = atype_integer;
	field->event_field.type.u.basic.integer.size = sizeof(uint64_t) * CHAR_BIT;
	field->event_field.type.u.basic.integer.alignment = lttng_alignof(uint64_t) * CHAR_BIT;
	field->event_field.type.u.basic.integer.signedness = lttng_is_signed_type(uint64_t);
	field->event_field.type.u.basic.integer.reverse_byte_order = 0;
	field->event_field.type.u.basic.integer.base = 10;
	field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
	field->get_size = perf_counter_get_size;
	field->record = perf_counter_record;
	field->u.perf_counter = perf_field;
	lttng_context_update(*ctx);

	wrapper_vmalloc_sync_all();
	return 0;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
cpuhp_online_error:
	{
		int remove_ret;

		remove_ret = cpuhp_state_remove_instance(lttng_hp_prepare,
				&perf_field->cpuhp_prepare.node);
		WARN_ON(remove_ret);
	}
cpuhp_prepare_error:
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
counter_busy:
counter_error:
	{
		int cpu;

		for_each_online_cpu(cpu) {
			if (events[cpu] && !IS_ERR(events[cpu]))
				perf_event_release_kernel(events[cpu]);
		}
		put_online_cpus();
#ifdef CONFIG_HOTPLUG_CPU
		unregister_cpu_notifier(&perf_field->nb);
#endif
	}
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
find_error:
	lttng_remove_context_field(ctx, field);
append_context_error:
	kfree(name_alloc);
name_alloc_error:
	kfree(perf_field);
error_alloc_perf_field:
	kfree(attr);
error_attr:
	kfree(events);
	return ret;
}
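/*
 * Example usage (illustrative sketch, not part of this file): the
 * type/config pairs come from <linux/perf_event.h>, and "chan" stands
 * for a hypothetical struct lttng_channel owning the target context.
 *
 *	ret = lttng_add_perf_counter_to_ctx(PERF_TYPE_HARDWARE,
 *			PERF_COUNT_HW_CPU_CYCLES,
 *			"perf_cpu_cycles", &chan->ctx);
 */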