/*
 * lttng-context-perf-counters.c
 *
 * LTTng UST performance monitoring counters (perf-counters) integration.
 *
 * Copyright (C) 2009-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define _GNU_SOURCE
#define _LGPL_SOURCE
#include <sys/types.h>
#include <unistd.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>
#include <limits.h>
#include <errno.h>
#include <pthread.h>
#include <signal.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <lttng/ust-events.h>
#include <lttng/ust-tracer.h>
#include <lttng/ringbuffer-config.h>
#include <urcu/system.h>
#include <urcu/arch.h>
#include <urcu/rculist.h>
#include <helper.h>
#include <usterr-signal-safe.h>
#include <urcu/tls-compat.h>
#include "perf_event.h"
#include "lttng-tracer-core.h"

/*
 * We use a global perf counter key and iterate on per-thread RCU lists
 * of fields in the fast path, even though this is not strictly speaking
 * what would provide the best fast-path complexity, to ensure teardown
 * of sessions vs thread exit is handled racelessly.
 *
 * Updates and traversals of thread_list are protected by UST lock.
 * Updates to rcu_field_list are protected by UST lock.
 */

struct lttng_perf_counter_thread_field {
	struct lttng_perf_counter_field *field;	/* Back reference */
	struct perf_event_mmap_page *pc;
	struct cds_list_head thread_field_node;	/* Per-field list of thread fields (node) */
	struct cds_list_head rcu_field_node;	/* RCU per-thread list of fields (node) */
	int fd;					/* Perf FD */
};

struct lttng_perf_counter_thread {
	struct cds_list_head rcu_field_list;	/* RCU per-thread list of fields */
};

struct lttng_perf_counter_field {
	struct perf_event_attr attr;
	struct cds_list_head thread_field_list;	/* Per-field list of thread fields */
};
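
/*
 * Key for the per-thread lttng_perf_counter_thread structure,
 * allocated lazily on first use by each thread. Its destructor,
 * lttng_destroy_perf_thread_key (registered in
 * lttng_perf_counter_init), tears the thread's counters down at
 * thread exit.
 */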

static pthread_key_t perf_counter_key;

/*
 * lttng_perf_lock - Protect lttng-ust perf counter data structures
 *
 * Nests within the ust_lock, and therefore within the libc dl lock.
 * Therefore, we need to fixup the TLS before nesting into this lock.
 * Nests inside RCU bp read-side lock. Protects against concurrent
 * fork.
 */
static pthread_mutex_t ust_perf_mutex = PTHREAD_MUTEX_INITIALIZER;

/*
 * Cancel state when grabbing the ust_perf_mutex. Saved when locking,
 * restored on unlock. Protected by ust_perf_mutex.
 */
static int ust_perf_saved_cancelstate;

/*
 * Track whether we are tracing from a signal handler nested on an
 * application thread.
 */
static DEFINE_URCU_TLS(int, ust_perf_mutex_nest);

/*
 * Force a read (imply TLS fixup for dlopen) of TLS variables.
 */
void lttng_ust_fixup_perf_counter_tls(void)
{
	asm volatile ("" : : "m" (URCU_TLS(ust_perf_mutex_nest)));
}

void lttng_perf_lock(void)
{
	sigset_t sig_all_blocked, orig_mask;
	int ret, oldstate;

	ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
	if (ret) {
		ERR("pthread_setcancelstate: %s", strerror(ret));
	}
	sigfillset(&sig_all_blocked);
	ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
	if (!URCU_TLS(ust_perf_mutex_nest)++) {
		/*
		 * Ensure the compiler doesn't move the store after the
		 * close() call, in case close() would be marked as leaf.
		 */
		cmm_barrier();
		pthread_mutex_lock(&ust_perf_mutex);
		ust_perf_saved_cancelstate = oldstate;
	}
	ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
}

void lttng_perf_unlock(void)
{
	sigset_t sig_all_blocked, orig_mask;
	int ret, newstate, oldstate;
	bool restore_cancel = false;

	sigfillset(&sig_all_blocked);
	ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
	/*
	 * Ensure the compiler doesn't move the store before the close()
	 * call, in case close() would be marked as leaf.
	 */
	cmm_barrier();
	if (!--URCU_TLS(ust_perf_mutex_nest)) {
		newstate = ust_perf_saved_cancelstate;
		restore_cancel = true;
		pthread_mutex_unlock(&ust_perf_mutex);
	}
	ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
	if (restore_cancel) {
		ret = pthread_setcancelstate(newstate, &oldstate);
		if (ret) {
			ERR("pthread_setcancelstate: %s", strerror(ret));
		}
	}
}

static
size_t perf_counter_get_size(struct lttng_ctx_field *field, size_t offset)
{
	size_t size = 0;

	size += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
	size += sizeof(uint64_t);
	return size;
}

static
uint64_t read_perf_counter_syscall(
		struct lttng_perf_counter_thread_field *thread_field)
{
	uint64_t count;

	if (caa_unlikely(thread_field->fd < 0))
		return 0;

	if (caa_unlikely(read(thread_field->fd, &count, sizeof(count))
				< sizeof(count)))
		return 0;

	return count;
}
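
/*
 * Note: with no read_format flags set in the perf_event_attr, read(2)
 * on a perf event fd returns a single u64 counter value. Any failure,
 * including an fd that was never opened or already closed, reads as 0
 * so the probe site never fails.
 */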

#if defined(__x86_64__) || defined(__i386__)

static
uint64_t rdpmc(unsigned int counter)
{
	unsigned int low, high;

	asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter));

	return low | ((uint64_t) high) << 32;
}
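
/*
 * rdpmc loads the performance monitoring counter selected by ECX into
 * EDX:EAX. Callers follow the perf mmap ABI: pc->index is the hardware
 * counter number plus one (zero means rdpmc is unavailable), hence the
 * rdpmc(idx - 1) below.
 */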

static
bool has_rdpmc(struct perf_event_mmap_page *pc)
{
	if (caa_unlikely(!pc->cap_bit0_is_deprecated))
		return false;
	/* Since Linux kernel 3.12. */
	return pc->cap_user_rdpmc;
}

static
uint64_t arch_read_perf_counter(
		struct lttng_perf_counter_thread_field *thread_field)
{
	uint32_t seq, idx;
	uint64_t count;
	struct perf_event_mmap_page *pc = thread_field->pc;

	if (caa_unlikely(!pc))
		return 0;

	do {
		seq = CMM_LOAD_SHARED(pc->lock);
		cmm_barrier();

		idx = pc->index;
		if (caa_likely(has_rdpmc(pc) && idx)) {
			int64_t pmcval;

			pmcval = rdpmc(idx - 1);
			/* Sign-extend the pmc register result. */
			pmcval <<= 64 - pc->pmc_width;
			pmcval >>= 64 - pc->pmc_width;
			count = pc->offset + pmcval;
		} else {
			/* Fall-back on system call if rdpmc cannot be used. */
			return read_perf_counter_syscall(thread_field);
		}

		cmm_barrier();
	} while (CMM_LOAD_SHARED(pc->lock) != seq);

	return count;
}
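
/*
 * The loop above is the read side of the seqcount documented in
 * <linux/perf_event.h>: the kernel increments pc->lock around updates
 * of the page, so a reader retries whenever the sequence number
 * changed under it. pc->offset holds the counter value accumulated up
 * to the last context switch, and the shifts sign-extend PMC values
 * narrower than 64 bits.
 */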

static
int arch_perf_keep_fd(struct lttng_perf_counter_thread_field *thread_field)
{
	struct perf_event_mmap_page *pc = thread_field->pc;

	if (!pc)
		return 0;
	return !has_rdpmc(pc);
}

#else /* defined(__x86_64__) || defined(__i386__) */

/* Generic (slow) implementation using a read system call. */
static
uint64_t arch_read_perf_counter(
		struct lttng_perf_counter_thread_field *thread_field)
{
	return read_perf_counter_syscall(thread_field);
}

static
int arch_perf_keep_fd(struct lttng_perf_counter_thread_field *thread_field)
{
	return 1;
}

#endif /* defined(__x86_64__) || defined(__i386__) */

static
int sys_perf_event_open(struct perf_event_attr *attr,
		pid_t pid, int cpu, int group_fd,
		unsigned long flags)
{
	return syscall(SYS_perf_event_open, attr, pid, cpu,
			group_fd, flags);
}
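
/*
 * glibc provides no wrapper for perf_event_open(2), so the raw
 * syscall(2) interface is used; see the perf_event_open(2) man page
 * for the argument semantics.
 */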

static
int open_perf_fd(struct perf_event_attr *attr)
{
	int fd;

	fd = sys_perf_event_open(attr, 0, -1, -1, 0);
	if (fd < 0)
		return -1;

	return fd;
}
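
/*
 * pid = 0, cpu = -1: count for the calling thread, on any CPU. Each
 * thread therefore owns its own counter, which is what the per-thread
 * field lists above rely on.
 */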

static
void close_perf_fd(int fd)
{
	int ret;

	if (fd < 0)
		return;

	ret = close(fd);
	if (ret) {
		perror("Error closing LTTng-UST perf memory mapping FD");
	}
}

static void setup_perf(struct lttng_perf_counter_thread_field *thread_field)
{
	void *perf_addr;

	perf_addr = mmap(NULL, sizeof(struct perf_event_mmap_page),
			PROT_READ, MAP_SHARED, thread_field->fd, 0);
	if (perf_addr == MAP_FAILED)
		perf_addr = NULL;
	thread_field->pc = perf_addr;

	if (!arch_perf_keep_fd(thread_field)) {
		close_perf_fd(thread_field->fd);
		thread_field->fd = -1;
	}
}
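
/*
 * Rationale: when user-space rdpmc is usable, the mmap'd page keeps a
 * reference on the perf event, so the file descriptor can be closed
 * immediately and one fd per counter per thread is saved.
 * Architectures that must read the counter through read(2) keep the
 * fd open instead.
 */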

static
void unmap_perf_page(struct perf_event_mmap_page *pc)
{
	int ret;

	if (!pc)
		return;
	ret = munmap(pc, sizeof(struct perf_event_mmap_page));
	if (ret < 0) {
		PERROR("Error in munmap");
		abort();
	}
}

static
struct lttng_perf_counter_thread *alloc_perf_counter_thread(void)
{
	struct lttng_perf_counter_thread *perf_thread;
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();
	/* Check again with signals disabled */
	perf_thread = pthread_getspecific(perf_counter_key);
	if (perf_thread)
		goto skip;
	perf_thread = zmalloc(sizeof(*perf_thread));
	if (!perf_thread)
		abort();
	CDS_INIT_LIST_HEAD(&perf_thread->rcu_field_list);
	ret = pthread_setspecific(perf_counter_key, perf_thread);
	if (ret)
		abort();
skip:
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
	return perf_thread;
}
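
/*
 * Signals are blocked across the check-and-create sequence above (and
 * in add_thread_field() below) because a signal handler running on
 * this thread may itself trace and re-enter these functions; the
 * re-check under blocked signals makes the test-then-update atomic
 * with respect to such nesting.
 */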

static
struct lttng_perf_counter_thread_field *
	add_thread_field(struct lttng_perf_counter_field *perf_field,
		struct lttng_perf_counter_thread *perf_thread)
{
	struct lttng_perf_counter_thread_field *thread_field;
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();
	/* Check again with signals disabled */
	cds_list_for_each_entry_rcu(thread_field, &perf_thread->rcu_field_list,
			rcu_field_node) {
		if (thread_field->field == perf_field)
			goto skip;
	}
	thread_field = zmalloc(sizeof(*thread_field));
	if (!thread_field)
		abort();
	thread_field->field = perf_field;
	thread_field->fd = open_perf_fd(&perf_field->attr);
	if (thread_field->fd >= 0)
		setup_perf(thread_field);
	/*
	 * Note: thread_field->pc can be NULL if setup_perf() fails.
	 * Also, thread_field->fd can be -1 if open_perf_fd() fails.
	 */
	lttng_perf_lock();
	cds_list_add_rcu(&thread_field->rcu_field_node,
			&perf_thread->rcu_field_list);
	cds_list_add(&thread_field->thread_field_node,
			&perf_field->thread_field_list);
	lttng_perf_unlock();
skip:
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
	return thread_field;
}

static
struct lttng_perf_counter_thread_field *
	get_thread_field(struct lttng_perf_counter_field *field)
{
	struct lttng_perf_counter_thread *perf_thread;
	struct lttng_perf_counter_thread_field *thread_field;

	perf_thread = pthread_getspecific(perf_counter_key);
	if (!perf_thread)
		perf_thread = alloc_perf_counter_thread();
	cds_list_for_each_entry_rcu(thread_field, &perf_thread->rcu_field_list,
			rcu_field_node) {
		if (thread_field->field == field)
			return thread_field;
	}
	/* perf_counter_thread_field not found, need to add one */
	return add_thread_field(field, perf_thread);
}
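
/*
 * Counter lookup done on every recorded event: a walk of this thread's
 * RCU field list, with the allocation/open slow path taken only the
 * first time a given context field is hit on a given thread.
 */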

static
uint64_t wrapper_perf_counter_read(struct lttng_ctx_field *field)
{
	struct lttng_perf_counter_field *perf_field;
	struct lttng_perf_counter_thread_field *perf_thread_field;

	perf_field = field->u.perf_counter;
	perf_thread_field = get_thread_field(perf_field);
	return arch_read_perf_counter(perf_thread_field);
}

static
void perf_counter_record(struct lttng_ctx_field *field,
		struct lttng_ust_lib_ring_buffer_ctx *ctx,
		struct lttng_channel *chan)
{
	uint64_t value;

	value = wrapper_perf_counter_read(field);
	lib_ring_buffer_align_ctx(ctx, lttng_alignof(value));
	chan->ops->event_write(ctx, &value, sizeof(value));
}

static
void perf_counter_get_value(struct lttng_ctx_field *field,
		struct lttng_ctx_value *value)
{
	value->u.s64 = wrapper_perf_counter_read(field);
}

/* Called with perf lock held */
static
void lttng_destroy_perf_thread_field(
		struct lttng_perf_counter_thread_field *thread_field)
{
	close_perf_fd(thread_field->fd);
	unmap_perf_page(thread_field->pc);
	cds_list_del_rcu(&thread_field->rcu_field_node);
	cds_list_del(&thread_field->thread_field_node);
	free(thread_field);
}

static
void lttng_destroy_perf_thread_key(void *_key)
{
	struct lttng_perf_counter_thread *perf_thread = _key;
	struct lttng_perf_counter_thread_field *pos, *p;

	lttng_perf_lock();
	cds_list_for_each_entry_safe(pos, p, &perf_thread->rcu_field_list,
			rcu_field_node)
		lttng_destroy_perf_thread_field(pos);
	lttng_perf_unlock();
	free(perf_thread);
}

/* Called with UST lock held */
void lttng_destroy_perf_counter_field(struct lttng_ctx_field *field)
{
	struct lttng_perf_counter_field *perf_field;
	struct lttng_perf_counter_thread_field *pos, *p;

	free((char *) field->event_field.name);
	perf_field = field->u.perf_counter;
	/*
	 * This put is performed when no threads can concurrently
	 * perform a "get", thanks to the urcu-bp grace period. Holding
	 * the lttng perf lock protects against concurrent modification
	 * of the per-thread thread field list.
	 */
	lttng_perf_lock();
	cds_list_for_each_entry_safe(pos, p, &perf_field->thread_field_list,
			thread_field_node)
		lttng_destroy_perf_thread_field(pos);
	lttng_perf_unlock();
	free(perf_field);
}

#ifdef __ARM_ARCH_7A__

static
int perf_get_exclude_kernel(void)
{
	return 0;
}

#else /* __ARM_ARCH_7A__ */

static
int perf_get_exclude_kernel(void)
{
	return 1;
}

#endif /* __ARM_ARCH_7A__ */
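
/*
 * The ARMv7 special case presumably exists because that PMU setup
 * rejects events requesting privilege-level exclusion, making
 * perf_event_open() fail with exclude_kernel set. Elsewhere,
 * kernel-mode counting is excluded, which also keeps the counter
 * usable by unprivileged processes under the default
 * perf_event_paranoid policy.
 */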

/* Called with UST lock held */
int lttng_add_perf_counter_to_ctx(uint32_t type,
		uint64_t config,
		const char *name,
		struct lttng_ctx **ctx)
{
	struct lttng_ctx_field *field;
	struct lttng_perf_counter_field *perf_field;
	char *name_alloc;
	int ret;

	name_alloc = strdup(name);
	if (!name_alloc) {
		ret = -ENOMEM;
		goto name_alloc_error;
	}
	perf_field = zmalloc(sizeof(*perf_field));
	if (!perf_field) {
		ret = -ENOMEM;
		goto perf_field_alloc_error;
	}
	field = lttng_append_context(ctx);
	if (!field) {
		ret = -ENOMEM;
		goto append_context_error;
	}
	if (lttng_find_context(*ctx, name_alloc)) {
		ret = -EEXIST;
		goto find_error;
	}

	field->destroy = lttng_destroy_perf_counter_field;

	field->event_field.name = name_alloc;
	field->event_field.type.atype = atype_integer;
	field->event_field.type.u.basic.integer.size =
			sizeof(uint64_t) * CHAR_BIT;
	field->event_field.type.u.basic.integer.alignment =
			lttng_alignof(uint64_t) * CHAR_BIT;
	field->event_field.type.u.basic.integer.signedness =
			lttng_is_signed_type(uint64_t);
	field->event_field.type.u.basic.integer.reverse_byte_order = 0;
	field->event_field.type.u.basic.integer.base = 10;
	field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
	field->get_size = perf_counter_get_size;
	field->record = perf_counter_record;
	field->get_value = perf_counter_get_value;

	perf_field->attr.type = type;
	perf_field->attr.config = config;
	perf_field->attr.exclude_kernel = perf_get_exclude_kernel();
	CDS_INIT_LIST_HEAD(&perf_field->thread_field_list);
	field->u.perf_counter = perf_field;

	/* Ensure that this perf counter can be used in this process. */
	ret = open_perf_fd(&perf_field->attr);
	if (ret < 0) {
		ret = -ENODEV;
		goto setup_error;
	}
	close_perf_fd(ret);

	/*
	 * Contexts can only be added before tracing is started, so we
	 * don't have to synchronize against concurrent threads using
	 * the field here.
	 */

	lttng_context_update(*ctx);
	return 0;

setup_error:
find_error:
	lttng_remove_context_field(ctx, field);
append_context_error:
	free(perf_field);
perf_field_alloc_error:
	free(name_alloc);
name_alloc_error:
	return ret;
}
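
/*
 * Illustrative call sequence (constant names are from
 * <linux/perf_event.h>; the context name below is an example, the real
 * one is supplied by the caller). This is roughly what the session
 * daemon triggers for:
 *
 *   lttng add-context -u -t perf:thread:cpu-cycles
 *
 *   struct lttng_ctx *ctx = ...;
 *   ret = lttng_add_perf_counter_to_ctx(PERF_TYPE_HARDWARE,
 *           PERF_COUNT_HW_CPU_CYCLES,
 *           "perf_thread_cpu_cycles", &ctx);
 */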

int lttng_perf_counter_init(void)
{
	int ret;

	ret = pthread_key_create(&perf_counter_key,
			lttng_destroy_perf_thread_key);
	if (ret)
		return -ret;
	return 0;
}

void lttng_perf_counter_exit(void)
{
	int ret;

	ret = pthread_key_delete(perf_counter_key);
	if (ret) {
		errno = ret;
		PERROR("Error in pthread_key_delete");
	}
}