/* SPDX-License-Identifier: (GPL-2.0 or LGPL-2.1)
 *
 * lttng-context-perf-counters.c
 *
 * LTTng performance monitoring counters (perf-counters) integration module.
 *
 * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */
10 #include <linux/module.h>
11 #include <linux/slab.h>
12 #include <linux/perf_event.h>
13 #include <linux/list.h>
14 #include <linux/string.h>
15 #include <linux/cpu.h>
16 #include <lttng-events.h>
17 #include <wrapper/ringbuffer/frontend_types.h>
18 #include <wrapper/vmalloc.h>
19 #include <wrapper/perf.h>
20 #include <lttng-tracer.h>
23 size_t perf_counter_get_size(size_t offset
)
27 size
+= lib_ring_buffer_align(offset
, lttng_alignof(uint64_t));
28 size
+= sizeof(uint64_t);
33 void perf_counter_record(struct lttng_ctx_field
*field
,
34 struct lib_ring_buffer_ctx
*ctx
,
35 struct lttng_channel
*chan
)
37 struct perf_event
*event
;
40 event
= field
->u
.perf_counter
->e
[ctx
->cpu
];
42 if (unlikely(event
->state
== PERF_EVENT_STATE_ERROR
)) {
45 event
->pmu
->read(event
);
46 value
= local64_read(&event
->count
);
50 * Perf chooses not to be clever and not to support enabling a
51 * perf counter before the cpu is brought up. Therefore, we need
52 * to support having events coming (e.g. scheduler events)
53 * before the counter is setup. Write an arbitrary 0 in this
58 lib_ring_buffer_align_ctx(ctx
, lttng_alignof(value
));
59 chan
->ops
->event_write(ctx
, &value
, sizeof(value
));
62 #if defined(CONFIG_PERF_EVENTS) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,99))
64 void overflow_callback(struct perf_event
*event
,
65 struct perf_sample_data
*data
,
71 void overflow_callback(struct perf_event
*event
, int nmi
,
72 struct perf_sample_data
*data
,
79 void lttng_destroy_perf_counter_field(struct lttng_ctx_field
*field
)
81 struct perf_event
**events
= field
->u
.perf_counter
->e
;
83 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
87 ret
= cpuhp_state_remove_instance(lttng_hp_online
,
88 &field
->u
.perf_counter
->cpuhp_online
.node
);
90 ret
= cpuhp_state_remove_instance(lttng_hp_prepare
,
91 &field
->u
.perf_counter
->cpuhp_prepare
.node
);
94 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
99 for_each_online_cpu(cpu
)
100 perf_event_release_kernel(events
[cpu
]);
102 #ifdef CONFIG_HOTPLUG_CPU
103 unregister_cpu_notifier(&field
->u
.perf_counter
->nb
);
106 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
107 kfree(field
->event_field
.name
);
108 kfree(field
->u
.perf_counter
->attr
);
109 lttng_kvfree(events
);
110 kfree(field
->u
.perf_counter
);
113 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
115 int lttng_cpuhp_perf_counter_online(unsigned int cpu
,
116 struct lttng_cpuhp_node
*node
)
118 struct lttng_perf_counter_field
*perf_field
=
119 container_of(node
, struct lttng_perf_counter_field
,
121 struct perf_event
**events
= perf_field
->e
;
122 struct perf_event_attr
*attr
= perf_field
->attr
;
123 struct perf_event
*pevent
;
125 pevent
= wrapper_perf_event_create_kernel_counter(attr
,
126 cpu
, NULL
, overflow_callback
);
127 if (!pevent
|| IS_ERR(pevent
))
129 if (pevent
->state
== PERF_EVENT_STATE_ERROR
) {
130 perf_event_release_kernel(pevent
);
133 barrier(); /* Create perf counter before setting event */
134 events
[cpu
] = pevent
;
138 int lttng_cpuhp_perf_counter_dead(unsigned int cpu
,
139 struct lttng_cpuhp_node
*node
)
141 struct lttng_perf_counter_field
*perf_field
=
142 container_of(node
, struct lttng_perf_counter_field
,
144 struct perf_event
**events
= perf_field
->e
;
145 struct perf_event
*pevent
;
147 pevent
= events
[cpu
];
149 barrier(); /* NULLify event before perf counter teardown */
150 perf_event_release_kernel(pevent
);
154 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
156 #ifdef CONFIG_HOTPLUG_CPU
159 * lttng_perf_counter_hp_callback - CPU hotplug callback
160 * @nb: notifier block
161 * @action: hotplug action to take
164 * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
166 * We can setup perf counters when the cpu is online (up prepare seems to be too
170 int lttng_perf_counter_cpu_hp_callback(struct notifier_block
*nb
,
171 unsigned long action
,
174 unsigned int cpu
= (unsigned long) hcpu
;
175 struct lttng_perf_counter_field
*perf_field
=
176 container_of(nb
, struct lttng_perf_counter_field
, nb
);
177 struct perf_event
**events
= perf_field
->e
;
178 struct perf_event_attr
*attr
= perf_field
->attr
;
179 struct perf_event
*pevent
;
181 if (!perf_field
->hp_enable
)
186 case CPU_ONLINE_FROZEN
:
187 pevent
= wrapper_perf_event_create_kernel_counter(attr
,
188 cpu
, NULL
, overflow_callback
);
189 if (!pevent
|| IS_ERR(pevent
))
191 if (pevent
->state
== PERF_EVENT_STATE_ERROR
) {
192 perf_event_release_kernel(pevent
);
195 barrier(); /* Create perf counter before setting event */
196 events
[cpu
] = pevent
;
198 case CPU_UP_CANCELED
:
199 case CPU_UP_CANCELED_FROZEN
:
201 case CPU_DEAD_FROZEN
:
202 pevent
= events
[cpu
];
204 barrier(); /* NULLify event before perf counter teardown */
205 perf_event_release_kernel(pevent
);
213 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
215 int lttng_add_perf_counter_to_ctx(uint32_t type
,
218 struct lttng_ctx
**ctx
)
220 struct lttng_ctx_field
*field
;
221 struct lttng_perf_counter_field
*perf_field
;
222 struct perf_event
**events
;
223 struct perf_event_attr
*attr
;
227 events
= lttng_kvzalloc(num_possible_cpus() * sizeof(*events
), GFP_KERNEL
);
231 attr
= kzalloc(sizeof(struct perf_event_attr
), GFP_KERNEL
);
238 attr
->config
= config
;
239 attr
->size
= sizeof(struct perf_event_attr
);
243 perf_field
= kzalloc(sizeof(struct lttng_perf_counter_field
), GFP_KERNEL
);
246 goto error_alloc_perf_field
;
248 perf_field
->e
= events
;
249 perf_field
->attr
= attr
;
251 name_alloc
= kstrdup(name
, GFP_KERNEL
);
254 goto name_alloc_error
;
257 field
= lttng_append_context(ctx
);
260 goto append_context_error
;
262 if (lttng_find_context(*ctx
, name_alloc
)) {
267 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
269 perf_field
->cpuhp_prepare
.component
= LTTNG_CONTEXT_PERF_COUNTERS
;
270 ret
= cpuhp_state_add_instance(lttng_hp_prepare
,
271 &perf_field
->cpuhp_prepare
.node
);
273 goto cpuhp_prepare_error
;
275 perf_field
->cpuhp_online
.component
= LTTNG_CONTEXT_PERF_COUNTERS
;
276 ret
= cpuhp_state_add_instance(lttng_hp_online
,
277 &perf_field
->cpuhp_online
.node
);
279 goto cpuhp_online_error
;
281 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
285 #ifdef CONFIG_HOTPLUG_CPU
286 perf_field
->nb
.notifier_call
=
287 lttng_perf_counter_cpu_hp_callback
;
288 perf_field
->nb
.priority
= 0;
289 register_cpu_notifier(&perf_field
->nb
);
292 for_each_online_cpu(cpu
) {
293 events
[cpu
] = wrapper_perf_event_create_kernel_counter(attr
,
294 cpu
, NULL
, overflow_callback
);
295 if (!events
[cpu
] || IS_ERR(events
[cpu
])) {
299 if (events
[cpu
]->state
== PERF_EVENT_STATE_ERROR
) {
305 perf_field
->hp_enable
= 1;
307 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
309 field
->destroy
= lttng_destroy_perf_counter_field
;
311 field
->event_field
.name
= name_alloc
;
312 field
->event_field
.type
.atype
= atype_integer
;
313 field
->event_field
.type
.u
.basic
.integer
.size
= sizeof(uint64_t) * CHAR_BIT
;
314 field
->event_field
.type
.u
.basic
.integer
.alignment
= lttng_alignof(uint64_t) * CHAR_BIT
;
315 field
->event_field
.type
.u
.basic
.integer
.signedness
= lttng_is_signed_type(uint64_t);
316 field
->event_field
.type
.u
.basic
.integer
.reverse_byte_order
= 0;
317 field
->event_field
.type
.u
.basic
.integer
.base
= 10;
318 field
->event_field
.type
.u
.basic
.integer
.encoding
= lttng_encode_none
;
319 field
->get_size
= perf_counter_get_size
;
320 field
->record
= perf_counter_record
;
321 field
->u
.perf_counter
= perf_field
;
322 lttng_context_update(*ctx
);
324 wrapper_vmalloc_sync_mappings();
327 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
332 remove_ret
= cpuhp_state_remove_instance(lttng_hp_prepare
,
333 &perf_field
->cpuhp_prepare
.node
);
337 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
343 for_each_online_cpu(cpu
) {
344 if (events
[cpu
] && !IS_ERR(events
[cpu
]))
345 perf_event_release_kernel(events
[cpu
]);
348 #ifdef CONFIG_HOTPLUG_CPU
349 unregister_cpu_notifier(&perf_field
->nb
);
352 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
354 lttng_remove_context_field(ctx
, field
);
355 append_context_error
:
359 error_alloc_perf_field
:
362 lttng_kvfree(events
);