From: Mathieu Desnoyers
Date: Wed, 6 Jul 2016 19:11:57 +0000 (-0400)
Subject: Add generic fallback for perf counter read
X-Git-Tag: v2.9.0-rc1~42
X-Git-Url: https://git.lttng.org./?a=commitdiff_plain;h=a3a8d943e8ed60a6d60790d3b143ee6ac1b28ce3;p=lttng-ust.git

Add generic fallback for perf counter read

On x86, test the cap_user_rdpmc flag and fall-back on a system call if
unset.

On all other architectures, use the system call fallback.

Signed-off-by: Mathieu Desnoyers
---

diff --git a/liblttng-ust/lttng-context-perf-counters.c b/liblttng-ust/lttng-context-perf-counters.c
index a7e1b63f..dbb1e6b1 100644
--- a/liblttng-ust/lttng-context-perf-counters.c
+++ b/liblttng-ust/lttng-context-perf-counters.c
@@ -80,6 +80,22 @@ size_t perf_counter_get_size(struct lttng_ctx_field *field, size_t offset)
 	return size;
 }
 
+static
+uint64_t read_perf_counter_syscall(
+		struct lttng_perf_counter_thread_field *thread_field)
+{
+	uint64_t count;
+
+	if (caa_unlikely(thread_field->fd < 0))
+		return 0;
+
+	if (caa_unlikely(read(thread_field->fd, &count, sizeof(count))
+			< sizeof(count)))
+		return 0;
+
+	return count;
+}
+
 #if defined(__x86_64__) || defined(__i386__)
 
 static
@@ -92,13 +108,8 @@ uint64_t rdpmc(unsigned int counter)
 	return low | ((uint64_t) high) << 32;
 }
 
-static bool arch_perf_use_read(void)
-{
-	return false;
-}
-
 static
-uint64_t read_perf_counter(
+uint64_t arch_read_perf_counter(
 		struct lttng_perf_counter_thread_field *thread_field)
 {
 	uint32_t seq, idx;
@@ -113,7 +124,7 @@ uint64_t read_perf_counter(
 		cmm_barrier();
 
 		idx = pc->index;
-		if (idx) {
+		if (caa_likely(pc->cap_user_rdpmc && idx)) {
 			int64_t pmcval;
 
 			pmcval = rdpmc(idx - 1);
@@ -122,7 +133,8 @@ uint64_t read_perf_counter(
 			pmcval >>= 64 - pc->pmc_width;
 			count = pc->offset + pmcval;
 		} else {
-			count = 0;
+			/* Fall-back on system call if rdpmc cannot be used. */
+			return read_perf_counter_syscall(thread_field);
 		}
 		cmm_barrier();
 	} while (CMM_LOAD_SHARED(pc->lock) != seq);
@@ -130,34 +142,33 @@ uint64_t read_perf_counter(
 	return count;
 }
 
-#elif defined (__ARM_ARCH_7A__)
-
-static bool arch_perf_use_read(void)
-{
-	return true;
-}
-
 static
-uint64_t read_perf_counter(
-		struct lttng_perf_counter_thread_field *thread_field)
+int arch_perf_keep_fd(struct lttng_perf_counter_thread_field *thread_field)
 {
-	uint64_t count;
+	struct perf_event_mmap_page *pc = thread_field->pc;
 
-	if (caa_unlikely(thread_field->fd < 0))
+	if (!pc)
 		return 0;
+	return !pc->cap_user_rdpmc;
+}
 
-	if (caa_unlikely(read(thread_field->fd, &count, sizeof(count))
-			< sizeof(count)))
-		return 0;
+#else
 
-	return count;
+/* Generic (slow) implementation using a read system call. */
+static
+uint64_t arch_read_perf_counter(
+		struct lttng_perf_counter_thread_field *thread_field)
+{
+	return read_perf_counter_syscall(thread_field);
 }
 
-#else /* defined(__x86_64__) || defined(__i386__) || defined(__ARM_ARCH_7A__) */
-
-#error "Perf event counters are only supported on x86 and ARMv7 so far."
+static
+int arch_perf_keep_fd(struct lttng_perf_counter_thread_field *thread_field)
+{
+	return 1;
+}
 
-#endif /* #else defined(__x86_64__) || defined(__i386__) || defined(__ARM_ARCH_7A__) */
+#endif
 
 static
 int sys_perf_event_open(struct perf_event_attr *attr,
@@ -205,7 +216,7 @@ struct perf_event_mmap_page *setup_perf(
 	if (perf_addr == MAP_FAILED)
 		perf_addr = NULL;
 
-	if (!arch_perf_use_read()) {
+	if (!arch_perf_keep_fd(thread_field)) {
 		close_perf_fd(thread_field->fd);
 		thread_field->fd = -1;
 	}
@@ -330,7 +341,7 @@ uint64_t wrapper_perf_counter_read(struct lttng_ctx_field *field)
 
 	perf_field = field->u.perf_counter;
 	perf_thread_field = get_thread_field(perf_field);
-	return read_perf_counter(perf_thread_field);
+	return arch_read_perf_counter(perf_thread_field);
 }
 
 static