/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
 *
 * lttng-context-perf-counters.c
 *
 * LTTng performance monitoring counters (perf-counters) integration module.
 *
 * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */
10 #include <linux/module.h>
11 #include <linux/slab.h>
12 #include <linux/perf_event.h>
13 #include <linux/list.h>
14 #include <linux/string.h>
15 #include <linux/cpu.h>
16 #include <lttng/events.h>
17 #include <lttng/events-internal.h>
18 #include <ringbuffer/frontend_types.h>
19 #include <wrapper/vmalloc.h>
20 #include <wrapper/perf.h>
21 #include <lttng/tracer.h>
24 size_t perf_counter_get_size(void *priv
, struct lttng_kernel_probe_ctx
*probe_ctx
, size_t offset
)
28 size
+= lib_ring_buffer_align(offset
, lttng_alignof(uint64_t));
29 size
+= sizeof(uint64_t);
34 void perf_counter_record(void *priv
, struct lttng_kernel_probe_ctx
*probe_ctx
,
35 struct lttng_kernel_ring_buffer_ctx
*ctx
,
36 struct lttng_kernel_channel_buffer
*chan
)
38 struct lttng_perf_counter_field
*perf_field
= (struct lttng_perf_counter_field
*) priv
;
39 struct perf_event
*event
;
42 event
= perf_field
->e
[ctx
->priv
.reserve_cpu
];
44 if (unlikely(event
->state
== PERF_EVENT_STATE_ERROR
)) {
47 event
->pmu
->read(event
);
48 value
= local64_read(&event
->count
);
52 * Perf chooses not to be clever and not to support enabling a
53 * perf counter before the cpu is brought up. Therefore, we need
54 * to support having events coming (e.g. scheduler events)
55 * before the counter is setup. Write an arbitrary 0 in this
60 chan
->ops
->event_write(ctx
, &value
, sizeof(value
), lttng_alignof(value
));
63 #if defined(CONFIG_PERF_EVENTS) && (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,1,0))
65 void overflow_callback(struct perf_event
*event
,
66 struct perf_sample_data
*data
,
72 void overflow_callback(struct perf_event
*event
, int nmi
,
73 struct perf_sample_data
*data
,
80 void lttng_destroy_perf_counter_ctx_field(void *priv
)
82 struct lttng_perf_counter_field
*perf_field
= priv
;
83 struct perf_event
**events
= perf_field
->e
;
85 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
89 ret
= cpuhp_state_remove_instance(lttng_hp_online
,
90 &perf_field
->cpuhp_online
.node
);
92 ret
= cpuhp_state_remove_instance(lttng_hp_prepare
,
93 &perf_field
->cpuhp_prepare
.node
);
96 #else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
101 for_each_online_cpu(cpu
)
102 perf_event_release_kernel(events
[cpu
]);
104 #ifdef CONFIG_HOTPLUG_CPU
105 unregister_cpu_notifier(&perf_field
->nb
);
108 #endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
109 kfree(perf_field
->name
);
110 kfree(perf_field
->attr
);
111 kfree(perf_field
->event_field
);
112 lttng_kvfree(events
);
116 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
118 int lttng_cpuhp_perf_counter_online(unsigned int cpu
,
119 struct lttng_cpuhp_node
*node
)
121 struct lttng_perf_counter_field
*perf_field
=
122 container_of(node
, struct lttng_perf_counter_field
,
124 struct perf_event
**events
= perf_field
->e
;
125 struct perf_event_attr
*attr
= perf_field
->attr
;
126 struct perf_event
*pevent
;
128 pevent
= wrapper_perf_event_create_kernel_counter(attr
,
129 cpu
, NULL
, overflow_callback
);
130 if (!pevent
|| IS_ERR(pevent
))
132 if (pevent
->state
== PERF_EVENT_STATE_ERROR
) {
133 perf_event_release_kernel(pevent
);
136 barrier(); /* Create perf counter before setting event */
137 events
[cpu
] = pevent
;
141 int lttng_cpuhp_perf_counter_dead(unsigned int cpu
,
142 struct lttng_cpuhp_node
*node
)
144 struct lttng_perf_counter_field
*perf_field
=
145 container_of(node
, struct lttng_perf_counter_field
,
147 struct perf_event
**events
= perf_field
->e
;
148 struct perf_event
*pevent
;
150 pevent
= events
[cpu
];
152 barrier(); /* NULLify event before perf counter teardown */
153 perf_event_release_kernel(pevent
);
157 #else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
#ifdef CONFIG_HOTPLUG_CPU

/**
 *	lttng_perf_counter_cpu_hp_callback - CPU hotplug callback
 *	@nb: notifier block
 *	@action: hotplug action to take
 *	@hcpu: CPU number
 *
 *	Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
 *
 *	We can setup perf counters when the cpu is online (up prepare seems to be too
 *	soon).
 */
static
int lttng_perf_counter_cpu_hp_callback(struct notifier_block *nb,
		unsigned long action,
		void *hcpu)
{
	unsigned int cpu = (unsigned long) hcpu;
	struct lttng_perf_counter_field *perf_field =
		container_of(nb, struct lttng_perf_counter_field, nb);
	struct perf_event **events = perf_field->e;
	struct perf_event_attr *attr = perf_field->attr;
	struct perf_event *pevent;

	/* Ignore notifications until setup has fully published the field. */
	if (!perf_field->hp_enable)
		return NOTIFY_OK;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		pevent = wrapper_perf_event_create_kernel_counter(attr,
				cpu, NULL, overflow_callback);
		if (!pevent || IS_ERR(pevent))
			return NOTIFY_BAD;
		if (pevent->state == PERF_EVENT_STATE_ERROR) {
			perf_event_release_kernel(pevent);
			return NOTIFY_BAD;
		}
		barrier();	/* Create perf counter before setting event */
		events[cpu] = pevent;
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		pevent = events[cpu];
		events[cpu] = NULL;
		barrier();	/* NULLify event before perf counter teardown */
		perf_event_release_kernel(pevent);
		break;
	}
	return NOTIFY_OK;
}

#endif
216 #endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
218 static const struct lttng_kernel_type_common
*field_type
=
219 lttng_kernel_static_type_integer_from_type(uint64_t, __BYTE_ORDER
, 10);
221 int lttng_add_perf_counter_to_ctx(uint32_t type
,
224 struct lttng_kernel_ctx
**ctx
)
226 struct lttng_kernel_ctx_field ctx_field
= { 0 };
227 struct lttng_kernel_event_field
*event_field
;
228 struct lttng_perf_counter_field
*perf_field
;
229 struct perf_event
**events
;
230 struct perf_event_attr
*attr
;
234 if (lttng_kernel_find_context(*ctx
, name
))
236 name_alloc
= kstrdup(name
, GFP_KERNEL
);
239 goto name_alloc_error
;
241 event_field
= kzalloc(sizeof(*event_field
), GFP_KERNEL
);
244 goto event_field_alloc_error
;
246 event_field
->name
= name_alloc
;
247 event_field
->type
= field_type
;
249 events
= lttng_kvzalloc(num_possible_cpus() * sizeof(*events
), GFP_KERNEL
);
252 goto event_alloc_error
;
255 attr
= kzalloc(sizeof(struct perf_event_attr
), GFP_KERNEL
);
262 attr
->config
= config
;
263 attr
->size
= sizeof(struct perf_event_attr
);
267 perf_field
= kzalloc(sizeof(struct lttng_perf_counter_field
), GFP_KERNEL
);
270 goto error_alloc_perf_field
;
272 perf_field
->e
= events
;
273 perf_field
->attr
= attr
;
274 perf_field
->name
= name_alloc
;
275 perf_field
->event_field
= event_field
;
277 ctx_field
.event_field
= event_field
;
278 ctx_field
.get_size
= perf_counter_get_size
;
279 ctx_field
.record
= perf_counter_record
;
280 ctx_field
.destroy
= lttng_destroy_perf_counter_ctx_field
;
281 ctx_field
.priv
= perf_field
;
283 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
285 perf_field
->cpuhp_prepare
.component
= LTTNG_CONTEXT_PERF_COUNTERS
;
286 ret
= cpuhp_state_add_instance(lttng_hp_prepare
,
287 &perf_field
->cpuhp_prepare
.node
);
289 goto cpuhp_prepare_error
;
291 perf_field
->cpuhp_online
.component
= LTTNG_CONTEXT_PERF_COUNTERS
;
292 ret
= cpuhp_state_add_instance(lttng_hp_online
,
293 &perf_field
->cpuhp_online
.node
);
295 goto cpuhp_online_error
;
297 #else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
301 #ifdef CONFIG_HOTPLUG_CPU
302 perf_field
->nb
.notifier_call
=
303 lttng_perf_counter_cpu_hp_callback
;
304 perf_field
->nb
.priority
= 0;
305 register_cpu_notifier(&perf_field
->nb
);
308 for_each_online_cpu(cpu
) {
309 events
[cpu
] = wrapper_perf_event_create_kernel_counter(attr
,
310 cpu
, NULL
, overflow_callback
);
311 if (!events
[cpu
] || IS_ERR(events
[cpu
])) {
315 if (events
[cpu
]->state
== PERF_EVENT_STATE_ERROR
) {
321 perf_field
->hp_enable
= 1;
323 #endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
325 ret
= lttng_kernel_context_append(ctx
, &ctx_field
);
328 goto append_context_error
;
332 /* Error handling. */
333 append_context_error
:
334 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
339 remove_ret
= cpuhp_state_remove_instance(lttng_hp_prepare
,
340 &perf_field
->cpuhp_prepare
.node
);
344 #else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
350 for_each_online_cpu(cpu
) {
351 if (events
[cpu
] && !IS_ERR(events
[cpu
]))
352 perf_event_release_kernel(events
[cpu
]);
355 #ifdef CONFIG_HOTPLUG_CPU
356 unregister_cpu_notifier(&perf_field
->nb
);
359 #endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
361 error_alloc_perf_field
:
364 lttng_kvfree(events
);
367 event_field_alloc_error
: