/doc/man/Makefile
/include/Makefile
/src/common/Makefile
-/src/libcounter/Makefile
/src/liblttng-ust-comm/Makefile
/src/lib/lttng-ust-ctl/Makefile
/src/lib/lttng-ust-cyg-profile/Makefile
doc/man/Makefile
include/Makefile
src/common/Makefile
- src/libcounter/Makefile
src/liblttng-ust-comm/Makefile
src/lib/lttng-ust-ctl/Makefile
src/lib/lttng-ust-cyg-profile/Makefile
common \
libringbuffer \
liblttng-ust-comm \
- libcounter \
liblttng-ust \
lib
noinst_LTLIBRARIES = \
+ libcounter.la \
msgpack/libmsgpack.la \
snprintf/libsnprintf.la \
libcommon.la
+# counter
+libcounter_la_SOURCES = \
+ counter/counter-api.h \
+ counter/counter.c \
+ counter/counter-config.h \
+ counter/counter.h \
+ counter/counter-internal.h \
+ counter/counter-types.h \
+ counter/shm.c \
+ counter/shm.h \
+ counter/shm_internal.h \
+ counter/shm_types.h \
+ counter/smp.c \
+ counter/smp.h
+
+libcounter_la_LIBADD = -lrt
+
+if ENABLE_NUMA
+libcounter_la_LIBADD += -lnuma
+endif
+
+libcounter_la_CFLAGS = -DUST_COMPONENT="libcounter" $(AM_CFLAGS)
+
# msgpack
msgpack_libmsgpack_la_SOURCES = \
msgpack/msgpack.c \
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * LTTng Counters API, requiring counter-config.h
+ */
+
+#ifndef _LTTNG_COUNTER_API_H
+#define _LTTNG_COUNTER_API_H
+
+#include <stdint.h>
+#include <limits.h>
+#include "counter.h"
+#include "counter-internal.h"
+#include <urcu/compiler.h>
+#include <urcu/uatomic.h>
+#include "common/bitmap.h"
+#include "libringbuffer/getcpu.h"
+
+/*
+ * Use unsigned arithmetic for the additions: unsigned overflow has
+ * defined (modular) semantics in C, whereas signed overflow is
+ * undefined behavior.
+ */
+static inline int __lttng_counter_add(const struct lib_counter_config *config,
+ enum lib_counter_config_alloc alloc,
+ enum lib_counter_config_sync sync,
+ struct lib_counter *counter,
+ const size_t *dimension_indexes, int64_t v,
+ int64_t *remainder)
+{
+ size_t index;
+ bool overflow = false, underflow = false;
+ struct lib_counter_layout *layout;
+ int64_t move_sum = 0;
+
+ if (caa_unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
+ return -EOVERFLOW;
+ index = lttng_counter_get_index(config, counter, dimension_indexes);
+
+ switch (alloc) {
+ case COUNTER_ALLOC_PER_CPU:
+ layout = &counter->percpu_counters[lttng_ust_get_cpu()];
+ break;
+ case COUNTER_ALLOC_GLOBAL:
+ layout = &counter->global_counters;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (caa_unlikely(!layout->counters))
+ return -ENODEV;
+
+ switch (config->counter_size) {
+ case COUNTER_SIZE_8_BIT:
+ {
+ int8_t *int_p = (int8_t *) layout->counters + index;
+ int8_t old, n, res;
+ int8_t global_sum_step = counter->global_sum_step.s8;
+
+ res = *int_p;
+ switch (sync) {
+ case COUNTER_SYNC_PER_CPU:
+ {
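+ /*
+ * When the per-cpu count drifts past global_sum_step in either
+ * direction, skim half a step into move_sum so the caller can
+ * flush it into the global counter.
+ */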
+ do {
+ move_sum = 0;
+ old = res;
+ n = (int8_t) ((uint8_t) old + (uint8_t) v);
+ if (caa_unlikely(n > (int8_t) global_sum_step))
+ move_sum = (int8_t) global_sum_step / 2;
+ else if (caa_unlikely(n < -(int8_t) global_sum_step))
+ move_sum = -((int8_t) global_sum_step / 2);
+ n -= move_sum;
+ res = uatomic_cmpxchg(int_p, old, n);
+ } while (old != res);
+ break;
+ }
+ case COUNTER_SYNC_GLOBAL:
+ {
+ do {
+ old = res;
+ n = (int8_t) ((uint8_t) old + (uint8_t) v);
+ res = uatomic_cmpxchg(int_p, old, n);
+ } while (old != res);
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
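+ /*
+ * Wrap-around detection: either |v| itself exceeds the 8-bit
+ * range, or the result moved against the sign of v.
+ */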
+ if (v > 0 && (v >= UINT8_MAX || n < old))
+ overflow = true;
+ else if (v < 0 && (v <= -(int64_t) UINT8_MAX || n > old))
+ underflow = true;
+ break;
+ }
+ case COUNTER_SIZE_16_BIT:
+ {
+ int16_t *int_p = (int16_t *) layout->counters + index;
+ int16_t old, n, res;
+ int16_t global_sum_step = counter->global_sum_step.s16;
+
+ res = *int_p;
+ switch (sync) {
+ case COUNTER_SYNC_PER_CPU:
+ {
+ do {
+ move_sum = 0;
+ old = res;
+ n = (int16_t) ((uint16_t) old + (uint16_t) v);
+ if (caa_unlikely(n > (int16_t) global_sum_step))
+ move_sum = (int16_t) global_sum_step / 2;
+ else if (caa_unlikely(n < -(int16_t) global_sum_step))
+ move_sum = -((int16_t) global_sum_step / 2);
+ n -= move_sum;
+ res = uatomic_cmpxchg(int_p, old, n);
+ } while (old != res);
+ break;
+ }
+ case COUNTER_SYNC_GLOBAL:
+ {
+ do {
+ old = res;
+ n = (int16_t) ((uint16_t) old + (uint16_t) v);
+ res = uatomic_cmpxchg(int_p, old, n);
+ } while (old != res);
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+ if (v > 0 && (v >= UINT16_MAX || n < old))
+ overflow = true;
+ else if (v < 0 && (v <= -(int64_t) UINT16_MAX || n > old))
+ underflow = true;
+ break;
+ }
+ case COUNTER_SIZE_32_BIT:
+ {
+ int32_t *int_p = (int32_t *) layout->counters + index;
+ int32_t old, n, res;
+ int32_t global_sum_step = counter->global_sum_step.s32;
+
+ res = *int_p;
+ switch (sync) {
+ case COUNTER_SYNC_PER_CPU:
+ {
+ do {
+ move_sum = 0;
+ old = res;
+ n = (int32_t) ((uint32_t) old + (uint32_t) v);
+ if (caa_unlikely(n > (int32_t) global_sum_step))
+ move_sum = (int32_t) global_sum_step / 2;
+ else if (caa_unlikely(n < -(int32_t) global_sum_step))
+ move_sum = -((int32_t) global_sum_step / 2);
+ n -= move_sum;
+ res = uatomic_cmpxchg(int_p, old, n);
+ } while (old != res);
+ break;
+ }
+ case COUNTER_SYNC_GLOBAL:
+ {
+ do {
+ old = res;
+ n = (int32_t) ((uint32_t) old + (uint32_t) v);
+ res = uatomic_cmpxchg(int_p, old, n);
+ } while (old != res);
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+ if (v > 0 && (v >= UINT32_MAX || n < old))
+ overflow = true;
+ else if (v < 0 && (v <= -(int64_t) UINT32_MAX || n > old))
+ underflow = true;
+ break;
+ }
+#if CAA_BITS_PER_LONG == 64
+ case COUNTER_SIZE_64_BIT:
+ {
+ int64_t *int_p = (int64_t *) layout->counters + index;
+ int64_t old, n, res;
+ int64_t global_sum_step = counter->global_sum_step.s64;
+
+ res = *int_p;
+ switch (sync) {
+ case COUNTER_SYNC_PER_CPU:
+ {
+ do {
+ move_sum = 0;
+ old = res;
+ n = (int64_t) ((uint64_t) old + (uint64_t) v);
+ if (caa_unlikely(n > (int64_t) global_sum_step))
+ move_sum = (int64_t) global_sum_step / 2;
+ else if (caa_unlikely(n < -(int64_t) global_sum_step))
+ move_sum = -((int64_t) global_sum_step / 2);
+ n -= move_sum;
+ res = uatomic_cmpxchg(int_p, old, n);
+ } while (old != res);
+ break;
+ }
+ case COUNTER_SYNC_GLOBAL:
+ {
+ do {
+ old = res;
+ n = (int64_t) ((uint64_t) old + (uint64_t) v);
+ res = uatomic_cmpxchg(int_p, old, n);
+ } while (old != res);
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+ if (v > 0 && n < old)
+ overflow = true;
+ else if (v < 0 && n > old)
+ underflow = true;
+ break;
+ }
+#endif
+ default:
+ return -EINVAL;
+ }
+ if (caa_unlikely(overflow && !lttng_bitmap_test_bit(index, layout->overflow_bitmap)))
+ lttng_bitmap_set_bit(index, layout->overflow_bitmap);
+ else if (caa_unlikely(underflow && !lttng_bitmap_test_bit(index, layout->underflow_bitmap)))
+ lttng_bitmap_set_bit(index, layout->underflow_bitmap);
+ if (remainder)
+ *remainder = move_sum;
+ return 0;
+}
+
+static inline int __lttng_counter_add_percpu(const struct lib_counter_config *config,
+ struct lib_counter *counter,
+ const size_t *dimension_indexes, int64_t v)
+{
+ int64_t move_sum;
+ int ret;
+
+ ret = __lttng_counter_add(config, COUNTER_ALLOC_PER_CPU, config->sync,
+ counter, dimension_indexes, v, &move_sum);
+ if (caa_unlikely(ret))
+ return ret;
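+ /*
+ * Flush the amount skimmed off the per-cpu counter into the
+ * global counter.
+ */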
+ if (caa_unlikely(move_sum))
+ return __lttng_counter_add(config, COUNTER_ALLOC_GLOBAL, COUNTER_SYNC_GLOBAL,
+ counter, dimension_indexes, move_sum, NULL);
+ return 0;
+}
+
+static inline int __lttng_counter_add_global(const struct lib_counter_config *config,
+ struct lib_counter *counter,
+ const size_t *dimension_indexes, int64_t v)
+{
+ return __lttng_counter_add(config, COUNTER_ALLOC_GLOBAL, config->sync, counter,
+ dimension_indexes, v, NULL);
+}
+
+static inline int lttng_counter_add(const struct lib_counter_config *config,
+ struct lib_counter *counter,
+ const size_t *dimension_indexes, int64_t v)
+{
+ switch (config->alloc) {
+ case COUNTER_ALLOC_PER_CPU: /* Fallthrough */
+ case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
+ return __lttng_counter_add_percpu(config, counter, dimension_indexes, v);
+ case COUNTER_ALLOC_GLOBAL:
+ return __lttng_counter_add_global(config, counter, dimension_indexes, v);
+ default:
+ return -EINVAL;
+ }
+}
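+
+/*
+ * Illustrative use (a sketch, not part of this header; variable names
+ * are assumptions for the example): count one event in element {7} of
+ * a one-dimensional counter.
+ *
+ *     size_t dimension_indexes[1] = { 7 };
+ *
+ *     if (lttng_counter_add(&counter->config, counter,
+ *                     dimension_indexes, 1))
+ *             abort();
+ */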
+
+static inline int lttng_counter_inc(const struct lib_counter_config *config,
+ struct lib_counter *counter,
+ const size_t *dimension_indexes)
+{
+ return lttng_counter_add(config, counter, dimension_indexes, 1);
+}
+
+static inline int lttng_counter_dec(const struct lib_counter_config *config,
+ struct lib_counter *counter,
+ const size_t *dimension_indexes)
+{
+ return lttng_counter_add(config, counter, dimension_indexes, -1);
+}
+
+#endif /* _LTTNG_COUNTER_API_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * LTTng Counters Configuration
+ */
+
+#ifndef _LTTNG_COUNTER_CONFIG_H
+#define _LTTNG_COUNTER_CONFIG_H
+
+#include <stdint.h>
+
+enum lib_counter_config_alloc {
+ COUNTER_ALLOC_PER_CPU = (1 << 0),
+ COUNTER_ALLOC_GLOBAL = (1 << 1),
+};
+
+enum lib_counter_config_sync {
+ COUNTER_SYNC_PER_CPU,
+ COUNTER_SYNC_GLOBAL,
+};
+
+struct lib_counter_config {
+ uint32_t alloc; /* enum lib_counter_config_alloc flags */
+ enum lib_counter_config_sync sync;
+ enum {
+ COUNTER_ARITHMETIC_MODULAR,
+ COUNTER_ARITHMETIC_SATURATE, /* TODO */
+ } arithmetic;
+ enum {
+ COUNTER_SIZE_8_BIT = 1,
+ COUNTER_SIZE_16_BIT = 2,
+ COUNTER_SIZE_32_BIT = 4,
+ COUNTER_SIZE_64_BIT = 8,
+ } counter_size;
+};
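+
+/*
+ * Example configuration (illustrative only, mirroring how a counter
+ * client could fill this in): per-cpu allocation with a global
+ * component, per-cpu sync, 32-bit modular counters.
+ *
+ *     static const struct lib_counter_config example_config = {
+ *             .alloc = COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL,
+ *             .sync = COUNTER_SYNC_PER_CPU,
+ *             .arithmetic = COUNTER_ARITHMETIC_MODULAR,
+ *             .counter_size = COUNTER_SIZE_32_BIT,
+ *     };
+ */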
+
+#endif /* _LTTNG_COUNTER_CONFIG_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * LTTng Counters Internal Header
+ */
+
+#ifndef _LTTNG_COUNTER_INTERNAL_H
+#define _LTTNG_COUNTER_INTERNAL_H
+
+#include <stdint.h>
+#include <lttng/ust-config.h>
+#include <urcu/compiler.h>
+#include "counter-types.h"
+
+static inline int lttng_counter_validate_indexes(
+ const struct lib_counter_config *config __attribute__((unused)),
+ struct lib_counter *counter,
+ const size_t *dimension_indexes)
+{
+ size_t nr_dimensions = counter->nr_dimensions, i;
+
+ for (i = 0; i < nr_dimensions; i++) {
+ if (caa_unlikely(dimension_indexes[i] >= counter->dimensions[i].max_nr_elem))
+ return -EOVERFLOW;
+ }
+ return 0;
+}
+
+static inline size_t lttng_counter_get_index(
+ const struct lib_counter_config *config __attribute__((unused)),
+ struct lib_counter *counter,
+ const size_t *dimension_indexes)
+{
+ size_t nr_dimensions = counter->nr_dimensions, i;
+ size_t index = 0;
+
+ for (i = 0; i < nr_dimensions; i++) {
+ struct lib_counter_dimension *dimension = &counter->dimensions[i];
+ const size_t *dimension_index = &dimension_indexes[i];
+
+ index += *dimension_index * dimension->stride;
+ }
+ return index;
+}
+
+#endif /* _LTTNG_COUNTER_INTERNAL_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * LTTng Counters Types
+ */
+
+#ifndef _LTTNG_COUNTER_TYPES_H
+#define _LTTNG_COUNTER_TYPES_H
+
+#include <stdint.h>
+#include <stddef.h>
+#include <stdbool.h>
+#include <sys/types.h>
+#include "counter-config.h"
+#include <lttng/ust-config.h>
+#include "shm_types.h"
+
+struct lib_counter_dimension {
+ /*
+ * Max. number of indexable elements.
+ */
+ size_t max_nr_elem;
+ /*
+ * The stride for a dimension is the multiplication factor which
+ * should be applied to its index to take into account other
+ * dimensions nested inside.
+ */
+ size_t stride;
+};
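+
+/*
+ * For instance (illustrative): a two-dimensional counter created with
+ * max_nr_elem = {4, 3} gets stride 3 for dimension 0 and stride 1 for
+ * dimension 1, so indexes {i, j} select linear element i * 3 + j.
+ */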
+
+struct lib_counter_layout {
+ void *counters;
+ unsigned long *overflow_bitmap;
+ unsigned long *underflow_bitmap;
+ int shm_fd;
+ size_t shm_len;
+ struct lttng_counter_shm_handle handle;
+};
+
+enum lib_counter_arithmetic {
+ LIB_COUNTER_ARITHMETIC_MODULAR,
+ LIB_COUNTER_ARITHMETIC_SATURATE,
+};
+
+struct lib_counter {
+ size_t nr_dimensions;
+ int64_t allocated_elem;
+ struct lib_counter_dimension *dimensions;
+ enum lib_counter_arithmetic arithmetic;
+ union {
+ struct {
+ int32_t max, min;
+ } limits_32_bit;
+ struct {
+ int64_t max, min;
+ } limits_64_bit;
+ } saturation;
+ union {
+ int8_t s8;
+ int16_t s16;
+ int32_t s32;
+ int64_t s64;
+ } global_sum_step; /* 0 if unused */
+ struct lib_counter_config config;
+
+ struct lib_counter_layout global_counters;
+ struct lib_counter_layout *percpu_counters;
+
+ bool is_daemon;
+ struct lttng_counter_shm_object_table *object_table;
+};
+
+#endif /* _LTTNG_COUNTER_TYPES_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * counter.c
+ *
+ * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <errno.h>
+#include "counter.h"
+#include "counter-internal.h"
+#include <urcu/system.h>
+#include <urcu/compiler.h>
+#include <stdbool.h>
+
+#include "common/macros.h"
+#include "common/align.h"
+#include "common/bitmap.h"
+
+#include "smp.h"
+#include "shm.h"
+
+static size_t lttng_counter_get_dimension_nr_elements(struct lib_counter_dimension *dimension)
+{
+ return dimension->max_nr_elem;
+}
+
+static int lttng_counter_init_stride(
+ const struct lib_counter_config *config __attribute__((unused)),
+ struct lib_counter *counter)
+{
+ size_t nr_dimensions = counter->nr_dimensions;
+ size_t stride = 1;
+ ssize_t i;
+
+ for (i = nr_dimensions - 1; i >= 0; i--) {
+ struct lib_counter_dimension *dimension = &counter->dimensions[i];
+ size_t nr_elem;
+
+ nr_elem = lttng_counter_get_dimension_nr_elements(dimension);
+ dimension->stride = stride;
+ /* nr_elem should be minimum 1 for each dimension. */
+ if (!nr_elem)
+ return -EINVAL;
+ /* Reject stride overflow before it can occur. */
+ if (stride > SIZE_MAX / nr_elem)
+ return -EINVAL;
+ stride *= nr_elem;
+ }
+ return 0;
+}
+
+static int lttng_counter_layout_init(struct lib_counter *counter, int cpu, int shm_fd)
+{
+ struct lib_counter_layout *layout;
+ size_t counter_size;
+ size_t nr_elem = counter->allocated_elem;
+ size_t shm_length = 0, counters_offset, overflow_offset, underflow_offset;
+ struct lttng_counter_shm_object *shm_object;
+
+ if (shm_fd < 0)
+ return 0; /* Skip, will be populated later. */
+
+ if (cpu == -1)
+ layout = &counter->global_counters;
+ else
+ layout = &counter->percpu_counters[cpu];
+ switch (counter->config.counter_size) {
+ case COUNTER_SIZE_8_BIT:
+ case COUNTER_SIZE_16_BIT:
+ case COUNTER_SIZE_32_BIT:
+ case COUNTER_SIZE_64_BIT:
+ counter_size = (size_t) counter->config.counter_size;
+ break;
+ default:
+ return -EINVAL;
+ }
+ layout->shm_fd = shm_fd;
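+ /*
+ * Shm layout: the counter array first, followed by one bit per
+ * element for the overflow bitmap, then one bit per element for
+ * the underflow bitmap.
+ */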
+ counters_offset = shm_length;
+ shm_length += counter_size * nr_elem;
+ overflow_offset = shm_length;
+ shm_length += LTTNG_UST_ALIGN(nr_elem, 8) / 8;
+ underflow_offset = shm_length;
+ shm_length += LTTNG_UST_ALIGN(nr_elem, 8) / 8;
+ layout->shm_len = shm_length;
+ if (counter->is_daemon) {
+ /* Allocate and clear shared memory. */
+ shm_object = lttng_counter_shm_object_table_alloc(counter->object_table,
+ shm_length, LTTNG_COUNTER_SHM_OBJECT_SHM, shm_fd, cpu);
+ if (!shm_object)
+ return -ENOMEM;
+ } else {
+ /* Map pre-existing shared memory. */
+ shm_object = lttng_counter_shm_object_table_append_shm(counter->object_table,
+ shm_fd, shm_length);
+ if (!shm_object)
+ return -ENOMEM;
+ }
+ layout->counters = shm_object->memory_map + counters_offset;
+ layout->overflow_bitmap = (unsigned long *)(shm_object->memory_map + overflow_offset);
+ layout->underflow_bitmap = (unsigned long *)(shm_object->memory_map + underflow_offset);
+ return 0;
+}
+
+int lttng_counter_set_global_shm(struct lib_counter *counter, int fd)
+{
+ struct lib_counter_config *config = &counter->config;
+ struct lib_counter_layout *layout;
+
+ if (!(config->alloc & COUNTER_ALLOC_GLOBAL))
+ return -EINVAL;
+ layout = &counter->global_counters;
+ if (layout->shm_fd >= 0)
+ return -EBUSY;
+ return lttng_counter_layout_init(counter, -1, fd);
+}
+
+int lttng_counter_set_cpu_shm(struct lib_counter *counter, int cpu, int fd)
+{
+ struct lib_counter_config *config = &counter->config;
+ struct lib_counter_layout *layout;
+
+ if (cpu < 0 || cpu >= lttng_counter_num_possible_cpus())
+ return -EINVAL;
+
+ if (!(config->alloc & COUNTER_ALLOC_PER_CPU))
+ return -EINVAL;
+ layout = &counter->percpu_counters[cpu];
+ if (layout->shm_fd >= 0)
+ return -EBUSY;
+ return lttng_counter_layout_init(counter, cpu, fd);
+}
+
+static
+int lttng_counter_set_global_sum_step(struct lib_counter *counter,
+ int64_t global_sum_step)
+{
+ if (global_sum_step < 0)
+ return -EINVAL;
+
+ switch (counter->config.counter_size) {
+ case COUNTER_SIZE_8_BIT:
+ if (global_sum_step > INT8_MAX)
+ return -EINVAL;
+ counter->global_sum_step.s8 = (int8_t) global_sum_step;
+ break;
+ case COUNTER_SIZE_16_BIT:
+ if (global_sum_step > INT16_MAX)
+ return -EINVAL;
+ counter->global_sum_step.s16 = (int16_t) global_sum_step;
+ break;
+ case COUNTER_SIZE_32_BIT:
+ if (global_sum_step > INT32_MAX)
+ return -EINVAL;
+ counter->global_sum_step.s32 = (int32_t) global_sum_step;
+ break;
+ case COUNTER_SIZE_64_BIT:
+ counter->global_sum_step.s64 = global_sum_step;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static
+int validate_args(const struct lib_counter_config *config,
+ size_t nr_dimensions __attribute__((unused)),
+ const size_t *max_nr_elem,
+ int64_t global_sum_step,
+ int global_counter_fd,
+ int nr_counter_cpu_fds,
+ const int *counter_cpu_fds)
+{
+ int nr_cpus = lttng_counter_num_possible_cpus();
+
+ if (CAA_BITS_PER_LONG != 64 && config->counter_size == COUNTER_SIZE_64_BIT) {
+ WARN_ON_ONCE(1);
+ return -1;
+ }
+ if (!max_nr_elem)
+ return -1;
+ /*
+ * The global sum step is only useful when allocating both
+ * per-cpu and global counters.
+ */
+ if (global_sum_step && (!(config->alloc & COUNTER_ALLOC_GLOBAL) ||
+ !(config->alloc & COUNTER_ALLOC_PER_CPU)))
+ return -1;
+ if (!(config->alloc & COUNTER_ALLOC_GLOBAL) && global_counter_fd >= 0)
+ return -1;
+ if (!(config->alloc & COUNTER_ALLOC_PER_CPU) && counter_cpu_fds)
+ return -1;
+ if (!(config->alloc & COUNTER_ALLOC_PER_CPU) && nr_counter_cpu_fds >= 0)
+ return -1;
+ if (counter_cpu_fds && nr_cpus != nr_counter_cpu_fds)
+ return -1;
+ return 0;
+}
+
+struct lib_counter *lttng_counter_create(const struct lib_counter_config *config,
+ size_t nr_dimensions,
+ const size_t *max_nr_elem,
+ int64_t global_sum_step,
+ int global_counter_fd,
+ int nr_counter_cpu_fds,
+ const int *counter_cpu_fds,
+ bool is_daemon)
+{
+ struct lib_counter *counter;
+ size_t dimension, nr_elem = 1;
+ int cpu, ret;
+ int nr_handles = 0;
+ int nr_cpus = lttng_counter_num_possible_cpus();
+
+ if (validate_args(config, nr_dimensions, max_nr_elem,
+ global_sum_step, global_counter_fd, nr_counter_cpu_fds,
+ counter_cpu_fds))
+ return NULL;
+ counter = zmalloc(sizeof(struct lib_counter));
+ if (!counter)
+ return NULL;
+ counter->global_counters.shm_fd = -1;
+ counter->config = *config;
+ counter->is_daemon = is_daemon;
+ if (lttng_counter_set_global_sum_step(counter, global_sum_step))
+ goto error_sum_step;
+ counter->nr_dimensions = nr_dimensions;
+ counter->dimensions = zmalloc(nr_dimensions * sizeof(*counter->dimensions));
+ if (!counter->dimensions)
+ goto error_dimensions;
+ for (dimension = 0; dimension < nr_dimensions; dimension++)
+ counter->dimensions[dimension].max_nr_elem = max_nr_elem[dimension];
+ if (config->alloc & COUNTER_ALLOC_PER_CPU) {
+ counter->percpu_counters = zmalloc(sizeof(struct lib_counter_layout) * nr_cpus);
+ if (!counter->percpu_counters)
+ goto error_alloc_percpu;
+ lttng_counter_for_each_possible_cpu(cpu)
+ counter->percpu_counters[cpu].shm_fd = -1;
+ }
+
+ if (lttng_counter_init_stride(config, counter))
+ goto error_init_stride;
+ /* TODO: set saturation values. */
+ for (dimension = 0; dimension < counter->nr_dimensions; dimension++)
+ nr_elem *= lttng_counter_get_dimension_nr_elements(&counter->dimensions[dimension]);
+ counter->allocated_elem = nr_elem;
+
+ if (config->alloc & COUNTER_ALLOC_GLOBAL)
+ nr_handles++;
+ if (config->alloc & COUNTER_ALLOC_PER_CPU)
+ nr_handles += nr_cpus;
+ /* Allocate table for global and per-cpu counters. */
+ counter->object_table = lttng_counter_shm_object_table_create(nr_handles);
+ if (!counter->object_table)
+ goto error_alloc_object_table;
+
+ if (config->alloc & COUNTER_ALLOC_GLOBAL) {
+ ret = lttng_counter_layout_init(counter, -1, global_counter_fd); /* global */
+ if (ret)
+ goto layout_init_error;
+ }
+ if ((config->alloc & COUNTER_ALLOC_PER_CPU) && counter_cpu_fds) {
+ lttng_counter_for_each_possible_cpu(cpu) {
+ ret = lttng_counter_layout_init(counter, cpu, counter_cpu_fds[cpu]);
+ if (ret)
+ goto layout_init_error;
+ }
+ }
+ return counter;
+
+layout_init_error:
+ lttng_counter_shm_object_table_destroy(counter->object_table, is_daemon);
+error_alloc_object_table:
+error_init_stride:
+ free(counter->percpu_counters);
+error_alloc_percpu:
+ free(counter->dimensions);
+error_dimensions:
+error_sum_step:
+ free(counter);
+ return NULL;
+}
+
+void lttng_counter_destroy(struct lib_counter *counter)
+{
+ struct lib_counter_config *config = &counter->config;
+
+ if (config->alloc & COUNTER_ALLOC_PER_CPU)
+ free(counter->percpu_counters);
+ lttng_counter_shm_object_table_destroy(counter->object_table, counter->is_daemon);
+ free(counter->dimensions);
+ free(counter);
+}
+
+int lttng_counter_get_global_shm(struct lib_counter *counter, int *fd, size_t *len)
+{
+ int shm_fd;
+
+ shm_fd = counter->global_counters.shm_fd;
+ if (shm_fd < 0)
+ return -1;
+ *fd = shm_fd;
+ *len = counter->global_counters.shm_len;
+ return 0;
+}
+
+int lttng_counter_get_cpu_shm(struct lib_counter *counter, int cpu, int *fd, size_t *len)
+{
+ struct lib_counter_layout *layout;
+ int shm_fd;
+
+ if (cpu < 0 || cpu >= lttng_counter_num_possible_cpus())
+ return -1;
+ layout = &counter->percpu_counters[cpu];
+ shm_fd = layout->shm_fd;
+ if (shm_fd < 0)
+ return -1;
+ *fd = shm_fd;
+ *len = layout->shm_len;
+ return 0;
+}
+
+int lttng_counter_read(const struct lib_counter_config *config,
+ struct lib_counter *counter,
+ const size_t *dimension_indexes,
+ int cpu, int64_t *value, bool *overflow,
+ bool *underflow)
+{
+ size_t index;
+ struct lib_counter_layout *layout;
+
+ if (caa_unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
+ return -EOVERFLOW;
+ index = lttng_counter_get_index(config, counter, dimension_indexes);
+
+ switch (config->alloc) {
+ case COUNTER_ALLOC_PER_CPU:
+ if (cpu < 0 || cpu >= lttng_counter_num_possible_cpus())
+ return -EINVAL;
+ layout = &counter->percpu_counters[cpu];
+ break;
+ case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
+ if (cpu >= 0) {
+ if (cpu >= lttng_counter_num_possible_cpus())
+ return -EINVAL;
+ layout = &counter->percpu_counters[cpu];
+ } else {
+ layout = &counter->global_counters;
+ }
+ break;
+ case COUNTER_ALLOC_GLOBAL:
+ if (cpu >= 0)
+ return -EINVAL;
+ layout = &counter->global_counters;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (caa_unlikely(!layout->counters))
+ return -ENODEV;
+
+ switch (config->counter_size) {
+ case COUNTER_SIZE_8_BIT:
+ {
+ int8_t *int_p = (int8_t *) layout->counters + index;
+ *value = (int64_t) CMM_LOAD_SHARED(*int_p);
+ break;
+ }
+ case COUNTER_SIZE_16_BIT:
+ {
+ int16_t *int_p = (int16_t *) layout->counters + index;
+ *value = (int64_t) CMM_LOAD_SHARED(*int_p);
+ break;
+ }
+ case COUNTER_SIZE_32_BIT:
+ {
+ int32_t *int_p = (int32_t *) layout->counters + index;
+ *value = (int64_t) CMM_LOAD_SHARED(*int_p);
+ break;
+ }
+#if CAA_BITS_PER_LONG == 64
+ case COUNTER_SIZE_64_BIT:
+ {
+ int64_t *int_p = (int64_t *) layout->counters + index;
+ *value = CMM_LOAD_SHARED(*int_p);
+ break;
+ }
+#endif
+ default:
+ return -EINVAL;
+ }
+ *overflow = lttng_bitmap_test_bit(index, layout->overflow_bitmap);
+ *underflow = lttng_bitmap_test_bit(index, layout->underflow_bitmap);
+ return 0;
+}
+
+int lttng_counter_aggregate(const struct lib_counter_config *config,
+ struct lib_counter *counter,
+ const size_t *dimension_indexes,
+ int64_t *value, bool *overflow,
+ bool *underflow)
+{
+ int cpu, ret;
+ int64_t v, sum = 0;
+ bool of, uf;
+
+ *overflow = false;
+ *underflow = false;
+
+ switch (config->alloc) {
+ case COUNTER_ALLOC_GLOBAL: /* Fallthrough */
+ case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
+ /* Read global counter. */
+ ret = lttng_counter_read(config, counter, dimension_indexes,
+ -1, &v, &of, &uf);
+ if (ret < 0)
+ return ret;
+ sum += v;
+ *overflow |= of;
+ *underflow |= uf;
+ break;
+ case COUNTER_ALLOC_PER_CPU:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (config->alloc) {
+ case COUNTER_ALLOC_GLOBAL:
+ break;
+ case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL: /* Fallthrough */
+ case COUNTER_ALLOC_PER_CPU:
+ lttng_counter_for_each_possible_cpu(cpu) {
+ int64_t old = sum;
+
+ ret = lttng_counter_read(config, counter, dimension_indexes,
+ cpu, &v, &of, &uf);
+ if (ret < 0)
+ return ret;
+ *overflow |= of;
+ *underflow |= uf;
+ /* Overflow is defined on unsigned types. */
+ sum = (int64_t) ((uint64_t) old + (uint64_t) v);
+ if (v > 0 && sum < old)
+ *overflow = true;
+ else if (v < 0 && sum > old)
+ *underflow = true;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ *value = sum;
+ return 0;
+}
+
+static
+int lttng_counter_clear_cpu(const struct lib_counter_config *config,
+ struct lib_counter *counter,
+ const size_t *dimension_indexes,
+ int cpu)
+{
+ size_t index;
+ struct lib_counter_layout *layout;
+
+ if (caa_unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
+ return -EOVERFLOW;
+ index = lttng_counter_get_index(config, counter, dimension_indexes);
+
+ switch (config->alloc) {
+ case COUNTER_ALLOC_PER_CPU:
+ if (cpu < 0 || cpu >= lttng_counter_num_possible_cpus())
+ return -EINVAL;
+ layout = &counter->percpu_counters[cpu];
+ break;
+ case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
+ if (cpu >= 0) {
+ if (cpu >= lttng_counter_num_possible_cpus())
+ return -EINVAL;
+ layout = &counter->percpu_counters[cpu];
+ } else {
+ layout = &counter->global_counters;
+ }
+ break;
+ case COUNTER_ALLOC_GLOBAL:
+ if (cpu >= 0)
+ return -EINVAL;
+ layout = &counter->global_counters;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (caa_unlikely(!layout->counters))
+ return -ENODEV;
+
+ switch (config->counter_size) {
+ case COUNTER_SIZE_8_BIT:
+ {
+ int8_t *int_p = (int8_t *) layout->counters + index;
+ CMM_STORE_SHARED(*int_p, 0);
+ break;
+ }
+ case COUNTER_SIZE_16_BIT:
+ {
+ int16_t *int_p = (int16_t *) layout->counters + index;
+ CMM_STORE_SHARED(*int_p, 0);
+ break;
+ }
+ case COUNTER_SIZE_32_BIT:
+ {
+ int32_t *int_p = (int32_t *) layout->counters + index;
+ CMM_STORE_SHARED(*int_p, 0);
+ break;
+ }
+#if CAA_BITS_PER_LONG == 64
+ case COUNTER_SIZE_64_BIT:
+ {
+ int64_t *int_p = (int64_t *) layout->counters + index;
+ CMM_STORE_SHARED(*int_p, 0);
+ break;
+ }
+#endif
+ default:
+ return -EINVAL;
+ }
+ lttng_bitmap_clear_bit(index, layout->overflow_bitmap);
+ lttng_bitmap_clear_bit(index, layout->underflow_bitmap);
+ return 0;
+}
+
+int lttng_counter_clear(const struct lib_counter_config *config,
+ struct lib_counter *counter,
+ const size_t *dimension_indexes)
+{
+ int cpu, ret;
+
+ switch (config->alloc) {
+ case COUNTER_ALLOC_PER_CPU:
+ break;
+ case COUNTER_ALLOC_GLOBAL: /* Fallthrough */
+ case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
+ /* Clear global counter. */
+ ret = lttng_counter_clear_cpu(config, counter, dimension_indexes, -1);
+ if (ret < 0)
+ return ret;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (config->alloc) {
+ case COUNTER_ALLOC_PER_CPU: /* Fallthrough */
+ case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
+ lttng_counter_for_each_possible_cpu(cpu) {
+ ret = lttng_counter_clear_cpu(config, counter, dimension_indexes, cpu);
+ if (ret < 0)
+ return ret;
+ }
+ break;
+ case COUNTER_ALLOC_GLOBAL:
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * LTTng Counters API
+ */
+
+#ifndef _LTTNG_COUNTER_H
+#define _LTTNG_COUNTER_H
+
+#include <stdint.h>
+#include <lttng/ust-config.h>
+#include "counter-types.h"
+
+/* max_nr_elem is for each dimension. */
+struct lib_counter *lttng_counter_create(const struct lib_counter_config *config,
+ size_t nr_dimensions,
+ const size_t *max_nr_elem,
+ int64_t global_sum_step,
+ int global_counter_fd,
+ int nr_counter_cpu_fds,
+ const int *counter_cpu_fds,
+ bool is_daemon)
+ __attribute__((visibility("hidden")));
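+
+/*
+ * Illustrative call (a sketch; the config and fds are assumptions): a
+ * two-dimensional 4 x 3 counter created within the consumer daemon,
+ * with one shm fd per possible cpu plus a global one.
+ *
+ *     size_t max_nr_elem[2] = { 4, 3 };
+ *     struct lib_counter *counter;
+ *
+ *     counter = lttng_counter_create(&config, 2, max_nr_elem, 0,
+ *                     global_fd, nr_cpus, cpu_fds, true);
+ */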
+
+void lttng_counter_destroy(struct lib_counter *counter)
+ __attribute__((visibility("hidden")));
+
+int lttng_counter_set_global_shm(struct lib_counter *counter, int fd)
+ __attribute__((visibility("hidden")));
+
+int lttng_counter_set_cpu_shm(struct lib_counter *counter, int cpu, int fd)
+ __attribute__((visibility("hidden")));
+
+int lttng_counter_get_global_shm(struct lib_counter *counter, int *fd, size_t *len)
+ __attribute__((visibility("hidden")));
+
+int lttng_counter_get_cpu_shm(struct lib_counter *counter, int cpu, int *fd, size_t *len)
+ __attribute__((visibility("hidden")));
+
+int lttng_counter_read(const struct lib_counter_config *config,
+ struct lib_counter *counter,
+ const size_t *dimension_indexes,
+ int cpu, int64_t *value,
+ bool *overflow, bool *underflow)
+ __attribute__((visibility("hidden")));
+
+int lttng_counter_aggregate(const struct lib_counter_config *config,
+ struct lib_counter *counter,
+ const size_t *dimension_indexes,
+ int64_t *value,
+ bool *overflow, bool *underflow)
+ __attribute__((visibility("hidden")));
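+
+/*
+ * Illustrative read (a sketch with assumed variables): sum element {7}
+ * across the global and per-cpu layouts, reporting wrap-around.
+ *
+ *     int64_t value;
+ *     bool overflow, underflow;
+ *     size_t dimension_indexes[1] = { 7 };
+ *
+ *     ret = lttng_counter_aggregate(&config, counter,
+ *                     dimension_indexes, &value, &overflow, &underflow);
+ */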
+
+int lttng_counter_clear(const struct lib_counter_config *config,
+ struct lib_counter *counter,
+ const size_t *dimension_indexes)
+ __attribute__((visibility("hidden")));
+
+#endif /* _LTTNG_COUNTER_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#define _LGPL_SOURCE
+#include "shm.h"
+#include <unistd.h>
+#include <errno.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/stat.h> /* For mode constants */
+#include <fcntl.h> /* For O_* constants */
+#include <assert.h>
+#include <stdio.h>
+#include <signal.h>
+#include <dirent.h>
+#include <limits.h>
+#include <stdbool.h>
+#include <stdint.h>
+
+#ifdef HAVE_LIBNUMA
+#include <numa.h>
+#include <numaif.h>
+#endif
+
+#include <lttng/ust-utils.h>
+
+#include "common/macros.h"
+#include "common/ust-fd.h"
+#include "libringbuffer/mmap.h"
+
+/*
+ * Ensure we have the required amount of space available by writing 0
+ * into the entire buffer. Not doing so can trigger SIGBUS when going
+ * beyond the available shm space.
+ */
+static
+int zero_file(int fd, size_t len)
+{
+ ssize_t retlen;
+ size_t written = 0;
+ char *zeropage;
+ long pagelen;
+ int ret;
+
+ pagelen = sysconf(_SC_PAGESIZE);
+ if (pagelen < 0)
+ return (int) pagelen;
+ zeropage = calloc(pagelen, 1);
+ if (!zeropage)
+ return -ENOMEM;
+
+ while (len > written) {
+ do {
+ retlen = write(fd, zeropage,
+ min_t(size_t, pagelen, len - written));
+ } while (retlen == -1 && errno == EINTR);
+ if (retlen < 0) {
+ ret = (int) retlen;
+ goto error;
+ }
+ written += retlen;
+ }
+ ret = 0;
+error:
+ free(zeropage);
+ return ret;
+}
+
+struct lttng_counter_shm_object_table *lttng_counter_shm_object_table_create(size_t max_nb_obj)
+{
+ struct lttng_counter_shm_object_table *table;
+
+ table = zmalloc(sizeof(struct lttng_counter_shm_object_table) +
+ max_nb_obj * sizeof(table->objects[0]));
+ if (!table)
+ return NULL;
+ table->size = max_nb_obj;
+ return table;
+}
+
+static
+struct lttng_counter_shm_object *_lttng_counter_shm_object_table_alloc_shm(struct lttng_counter_shm_object_table *table,
+ size_t memory_map_size,
+ int cpu_fd)
+{
+ int shmfd, ret;
+ struct lttng_counter_shm_object *obj;
+ char *memory_map;
+
+ if (cpu_fd < 0)
+ return NULL;
+ if (table->allocated_len >= table->size)
+ return NULL;
+ obj = &table->objects[table->allocated_len];
+
+ /* create shm */
+
+ shmfd = cpu_fd;
+ ret = zero_file(shmfd, memory_map_size);
+ if (ret) {
+ PERROR("zero_file");
+ goto error_zero_file;
+ }
+ ret = ftruncate(shmfd, memory_map_size);
+ if (ret) {
+ PERROR("ftruncate");
+ goto error_ftruncate;
+ }
+ /*
+ * Also ensure the file metadata is synced with the storage by using
+ * fsync(2).
+ */
+ ret = fsync(shmfd);
+ if (ret) {
+ PERROR("fsync");
+ goto error_fsync;
+ }
+ obj->shm_fd_ownership = 0;
+ obj->shm_fd = shmfd;
+
+ /* memory_map: mmap */
+ memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
+ MAP_SHARED | LTTNG_MAP_POPULATE, shmfd, 0);
+ if (memory_map == MAP_FAILED) {
+ PERROR("mmap");
+ goto error_mmap;
+ }
+ obj->type = LTTNG_COUNTER_SHM_OBJECT_SHM;
+ obj->memory_map = memory_map;
+ obj->memory_map_size = memory_map_size;
+ obj->allocated_len = 0;
+ obj->index = table->allocated_len++;
+
+ return obj;
+
+error_mmap:
+error_fsync:
+error_ftruncate:
+error_zero_file:
+ return NULL;
+}
+
+static
+struct lttng_counter_shm_object *_lttng_counter_shm_object_table_alloc_mem(struct lttng_counter_shm_object_table *table,
+ size_t memory_map_size)
+{
+ struct lttng_counter_shm_object *obj;
+ void *memory_map;
+
+ if (table->allocated_len >= table->size)
+ return NULL;
+ obj = &table->objects[table->allocated_len];
+
+ memory_map = zmalloc(memory_map_size);
+ if (!memory_map)
+ goto alloc_error;
+
+ /* no shm_fd */
+ obj->shm_fd = -1;
+ obj->shm_fd_ownership = 0;
+
+ obj->type = LTTNG_COUNTER_SHM_OBJECT_MEM;
+ obj->memory_map = memory_map;
+ obj->memory_map_size = memory_map_size;
+ obj->allocated_len = 0;
+ obj->index = table->allocated_len++;
+
+ return obj;
+
+alloc_error:
+ return NULL;
+}
+
+/*
+ * libnuma prints errors on the console even for numa_available().
+ * Work around this limitation by using get_mempolicy() directly to
+ * check whether the kernel supports mempolicy.
+ */
+#ifdef HAVE_LIBNUMA
+static bool lttng_is_numa_available(void)
+{
+ int ret;
+
+ ret = get_mempolicy(NULL, NULL, 0, NULL, 0);
+ if (ret && errno == ENOSYS) {
+ return false;
+ }
+ return numa_available() >= 0;
+}
+#endif
+
+struct lttng_counter_shm_object *lttng_counter_shm_object_table_alloc(struct lttng_counter_shm_object_table *table,
+ size_t memory_map_size,
+ enum lttng_counter_shm_object_type type,
+ int cpu_fd,
+ int cpu)
+{
+ struct lttng_counter_shm_object *shm_object;
+#ifdef HAVE_LIBNUMA
+ int oldnode = 0, node = -1;
+ bool numa_avail;
+
+ numa_avail = lttng_is_numa_available();
+ if (numa_avail) {
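+ /*
+ * Prefer allocating from the NUMA node backing this cpu while
+ * the shm object is created; the previous preferred node is
+ * restored once allocation is done.
+ */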
+ oldnode = numa_preferred();
+ if (cpu >= 0) {
+ node = numa_node_of_cpu(cpu);
+ if (node >= 0)
+ numa_set_preferred(node);
+ }
+ if (cpu < 0 || node < 0)
+ numa_set_localalloc();
+ }
+#endif /* HAVE_LIBNUMA */
+ switch (type) {
+ case LTTNG_COUNTER_SHM_OBJECT_SHM:
+ shm_object = _lttng_counter_shm_object_table_alloc_shm(table, memory_map_size,
+ cpu_fd);
+ break;
+ case LTTNG_COUNTER_SHM_OBJECT_MEM:
+ shm_object = _lttng_counter_shm_object_table_alloc_mem(table, memory_map_size);
+ break;
+ default:
+ assert(0);
+ shm_object = NULL;
+ }
+#ifdef HAVE_LIBNUMA
+ if (numa_avail)
+ numa_set_preferred(oldnode);
+#endif /* HAVE_LIBNUMA */
+ return shm_object;
+}
+
+struct lttng_counter_shm_object *lttng_counter_shm_object_table_append_shm(struct lttng_counter_shm_object_table *table,
+ int shm_fd,
+ size_t memory_map_size)
+{
+ struct lttng_counter_shm_object *obj;
+ char *memory_map;
+
+ if (table->allocated_len >= table->size)
+ return NULL;
+
+ obj = &table->objects[table->allocated_len];
+
+ obj->shm_fd = shm_fd;
+ obj->shm_fd_ownership = 1;
+
+ /* memory_map: mmap */
+ memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
+ MAP_SHARED | LTTNG_MAP_POPULATE, shm_fd, 0);
+ if (memory_map == MAP_FAILED) {
+ PERROR("mmap");
+ goto error_mmap;
+ }
+ obj->type = LTTNG_COUNTER_SHM_OBJECT_SHM;
+ obj->memory_map = memory_map;
+ obj->memory_map_size = memory_map_size;
+ obj->allocated_len = memory_map_size;
+ obj->index = table->allocated_len++;
+
+ return obj;
+
+error_mmap:
+ return NULL;
+}
+
+/*
+ * Passing ownership of mem to object.
+ */
+struct lttng_counter_shm_object *lttng_counter_shm_object_table_append_mem(struct lttng_counter_shm_object_table *table,
+ void *mem, size_t memory_map_size)
+{
+ struct lttng_counter_shm_object *obj;
+
+ if (table->allocated_len >= table->size)
+ return NULL;
+ obj = &table->objects[table->allocated_len];
+
+ obj->shm_fd = -1;
+ obj->shm_fd_ownership = 0;
+
+ obj->type = LTTNG_COUNTER_SHM_OBJECT_MEM;
+ obj->memory_map = mem;
+ obj->memory_map_size = memory_map_size;
+ obj->allocated_len = memory_map_size;
+ obj->index = table->allocated_len++;
+
+ return obj;
+}
+
+static
+void lttng_counter_shmp_object_destroy(struct lttng_counter_shm_object *obj, int consumer)
+{
+ switch (obj->type) {
+ case LTTNG_COUNTER_SHM_OBJECT_SHM:
+ {
+ int ret;
+
+ ret = munmap(obj->memory_map, obj->memory_map_size);
+ if (ret) {
+ PERROR("munmap");
+ assert(0);
+ }
+
+ if (obj->shm_fd_ownership) {
+ /* Delete FDs only if called from app (not consumer). */
+ if (!consumer) {
+ lttng_ust_lock_fd_tracker();
+ ret = close(obj->shm_fd);
+ if (!ret) {
+ lttng_ust_delete_fd_from_tracker(obj->shm_fd);
+ } else {
+ PERROR("close");
+ assert(0);
+ }
+ lttng_ust_unlock_fd_tracker();
+ } else {
+ ret = close(obj->shm_fd);
+ if (ret) {
+ PERROR("close");
+ assert(0);
+ }
+ }
+ }
+ break;
+ }
+ case LTTNG_COUNTER_SHM_OBJECT_MEM:
+ {
+ free(obj->memory_map);
+ break;
+ }
+ default:
+ assert(0);
+ }
+}
+
+void lttng_counter_shm_object_table_destroy(struct lttng_counter_shm_object_table *table, int consumer)
+{
+ int i;
+
+ for (i = 0; i < table->allocated_len; i++)
+ lttng_counter_shmp_object_destroy(&table->objects[i], consumer);
+ free(table);
+}
+
+/*
+ * lttng_counter_zalloc_shm - allocate memory within a shm object.
+ *
+ * Shared memory is already zeroed at allocation time.
+ * *NOT* multithread-safe (should be protected by mutex).
+ * Returns a -1, -1 tuple on error.
+ */
+struct lttng_counter_shm_ref lttng_counter_zalloc_shm(struct lttng_counter_shm_object *obj, size_t len)
+{
+ struct lttng_counter_shm_ref ref;
+ struct lttng_counter_shm_ref shm_ref_error = { -1, -1 };
+
+ if (obj->memory_map_size - obj->allocated_len < len)
+ return shm_ref_error;
+ ref.index = obj->index;
+ ref.offset = obj->allocated_len;
+ obj->allocated_len += len;
+ return ref;
+}
+
+void lttng_counter_align_shm(struct lttng_counter_shm_object *obj, size_t align)
+{
+ size_t offset_len = lttng_ust_offset_align(obj->allocated_len, align);
+
+ obj->allocated_len += offset_len;
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LIBCOUNTER_SHM_H
+#define _LIBCOUNTER_SHM_H
+
+#include <stddef.h>
+#include <stdint.h>
+#include <unistd.h>
+#include "common/logging.h"
+#include <urcu/compiler.h>
+#include "shm_types.h"
+
+/* lttng_counter_handle_create - for UST. */
+extern
+struct lttng_counter_shm_handle *lttng_counter_handle_create(void *data,
+ uint64_t memory_map_size, int wakeup_fd);
+/* lttng_counter_handle_add_cpu - for UST. */
+extern
+int lttng_counter_handle_add_cpu(struct lttng_counter_shm_handle *handle,
+ int shm_fd, uint32_t cpu_nr,
+ uint64_t memory_map_size);
+
+unsigned int lttng_counter_handle_get_nr_cpus(struct lttng_counter_shm_handle *handle)
+ __attribute__((visibility("hidden")));
+
+/*
+ * Pointer dereferencing. We don't trust the shm_ref, so we validate
+ * both the index and offset with known boundaries.
+ *
+ * "shmp" and "shmp_index" guarantee that it's safe to use the pointer
+ * target type, even in the presence of shm_ref modification by an
+ * untrusted process having write access to the shm_ref. We return a
+ * NULL pointer if the ranges are invalid.
+ */
+static inline
+char *_lttng_counter_shmp_offset(struct lttng_counter_shm_object_table *table,
+ struct lttng_counter_shm_ref *ref,
+ size_t idx, size_t elem_size)
+{
+ struct lttng_counter_shm_object *obj;
+ size_t objindex, ref_offset;
+
+ objindex = (size_t) ref->index;
+ if (caa_unlikely(objindex >= table->allocated_len))
+ return NULL;
+ obj = &table->objects[objindex];
+ ref_offset = (size_t) ref->offset;
+ ref_offset += idx * elem_size;
+ /* Check if part of the element returned would exceed the limits. */
+ if (caa_unlikely(ref_offset + elem_size > obj->memory_map_size))
+ return NULL;
+ return &obj->memory_map[ref_offset];
+}
+
+#define lttng_counter_shmp_index(handle, ref, index) \
+ ({ \
+ __typeof__((ref)._type) ____ptr_ret; \
+ ____ptr_ret = (__typeof__(____ptr_ret)) _lttng_counter_shmp_offset((handle)->table, &(ref)._ref, index, sizeof(*____ptr_ret)); \
+ ____ptr_ret; \
+ })
+
+#define lttng_counter_shmp(handle, ref) lttng_counter_shmp_index(handle, ref, 0)
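+
+/*
+ * Illustrative use (a sketch; struct foo is an assumption): given a
+ * field declared with DECLARE_LTTNG_COUNTER_SHMP(struct foo, ref), a
+ * validated pointer is obtained with:
+ *
+ *     struct foo *p = lttng_counter_shmp(handle, ref);
+ *
+ *     if (!p)
+ *             return -EINVAL;
+ */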
+
+static inline
+void _lttng_counter_set_shmp(struct lttng_counter_shm_ref *ref, struct lttng_counter_shm_ref src)
+{
+ *ref = src;
+}
+
+#define lttng_counter_set_shmp(ref, src) _lttng_counter_set_shmp(&(ref)._ref, src)
+
+struct lttng_counter_shm_object_table *lttng_counter_shm_object_table_create(size_t max_nb_obj)
+ __attribute__((visibility("hidden")));
+
+struct lttng_counter_shm_object *lttng_counter_shm_object_table_alloc(struct lttng_counter_shm_object_table *table,
+ size_t memory_map_size,
+ enum lttng_counter_shm_object_type type,
+ const int cpu_fd,
+ int cpu)
+ __attribute__((visibility("hidden")));
+
+struct lttng_counter_shm_object *lttng_counter_shm_object_table_append_shm(struct lttng_counter_shm_object_table *table,
+ int shm_fd, size_t memory_map_size)
+ __attribute__((visibility("hidden")));
+
+/* mem ownership is passed to lttng_counter_shm_object_table_append_mem(). */
+struct lttng_counter_shm_object *lttng_counter_shm_object_table_append_mem(struct lttng_counter_shm_object_table *table,
+ void *mem, size_t memory_map_size)
+ __attribute__((visibility("hidden")));
+
+void lttng_counter_shm_object_table_destroy(struct lttng_counter_shm_object_table *table, int consumer)
+ __attribute__((visibility("hidden")));
+
+/*
+ * lttng_counter_zalloc_shm - allocate memory within a shm object.
+ *
+ * Shared memory is already zeroed at allocation time.
+ * *NOT* multithread-safe (should be protected by mutex).
+ * Returns a -1, -1 tuple on error.
+ */
+struct lttng_counter_shm_ref lttng_counter_zalloc_shm(struct lttng_counter_shm_object *obj, size_t len)
+ __attribute__((visibility("hidden")));
+
+void lttng_counter_align_shm(struct lttng_counter_shm_object *obj, size_t align)
+ __attribute__((visibility("hidden")));
+
+static inline
+int lttng_counter_shm_get_shm_fd(struct lttng_counter_shm_handle *handle, struct lttng_counter_shm_ref *ref)
+{
+ struct lttng_counter_shm_object_table *table = handle->table;
+ struct lttng_counter_shm_object *obj;
+ size_t index;
+
+ index = (size_t) ref->index;
+ if (caa_unlikely(index >= table->allocated_len))
+ return -EPERM;
+ obj = &table->objects[index];
+ return obj->shm_fd;
+}
+
+static inline
+int lttng_counter_shm_get_shm_size(struct lttng_counter_shm_handle *handle, struct lttng_counter_shm_ref *ref,
+ uint64_t *size)
+{
+ struct lttng_counter_shm_object_table *table = handle->table;
+ struct lttng_counter_shm_object *obj;
+ size_t index;
+
+ index = (size_t) ref->index;
+ if (caa_unlikely(index >= table->allocated_len))
+ return -EPERM;
+ obj = &table->objects[index];
+ *size = obj->memory_map_size;
+ return 0;
+}
+
+#endif /* _LIBCOUNTER_SHM_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LIBCOUNTER_SHM_INTERNAL_H
+#define _LIBCOUNTER_SHM_INTERNAL_H
+
+struct lttng_counter_shm_ref {
+ volatile ssize_t index; /* within the object table */
+ volatile ssize_t offset; /* within the object */
+};
+
+#define DECLARE_LTTNG_COUNTER_SHMP(type, name) \
+ union { \
+ struct lttng_counter_shm_ref _ref; \
+ type *_type; \
+ } name
+
+#endif /* _LIBCOUNTER_SHM_INTERNAL_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LIBCOUNTER_SHM_TYPES_H
+#define _LIBCOUNTER_SHM_TYPES_H
+
+#include <stdint.h>
+#include <stddef.h>
+#include <limits.h>
+#include "shm_internal.h"
+
+enum lttng_counter_shm_object_type {
+ LTTNG_COUNTER_SHM_OBJECT_SHM,
+ LTTNG_COUNTER_SHM_OBJECT_MEM,
+};
+
+struct lttng_counter_shm_object {
+ enum lttng_counter_shm_object_type type;
+ size_t index; /* within the object table */
+ int shm_fd; /* shm fd */
+ char *memory_map;
+ size_t memory_map_size;
+ uint64_t allocated_len;
+ int shm_fd_ownership;
+};
+
+struct lttng_counter_shm_object_table {
+ size_t size;
+ size_t allocated_len;
+ struct lttng_counter_shm_object objects[];
+};
+
+struct lttng_counter_shm_handle {
+ struct lttng_counter_shm_object_table *table;
+};
+
+#endif /* _LIBCOUNTER_SHM_TYPES_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
+ */
+
+#define _LGPL_SOURCE
+
+#include <unistd.h>
+#include <pthread.h>
+#include "smp.h"
+
+int __lttng_counter_num_possible_cpus;
+
+#if (defined(__GLIBC__) || defined(__UCLIBC__))
+void _lttng_counter_get_num_possible_cpus(void)
+{
+ int result;
+
+ /*
+ * On Linux, when some processors are offline, _SC_NPROCESSORS_CONF
+ * counts the offline processors, whereas _SC_NPROCESSORS_ONLN does
+ * not. If we used _SC_NPROCESSORS_ONLN, getcpu() could return a
+ * value greater than this sysconf, in which case the arrays indexed
+ * by processor would overflow.
+ */
+ result = sysconf(_SC_NPROCESSORS_CONF);
+ if (result == -1)
+ return;
+ __lttng_counter_num_possible_cpus = result;
+}
+
+#else
+
+/*
+ * The MUSL libc implementation of the _SC_NPROCESSORS_CONF sysconf does not
+ * return the number of configured CPUs in the system but relies on the cpu
+ * affinity mask of the current task.
+ *
+ * So instead we use a strategy similar to glibc's: count the cpu
+ * directories in "/sys/devices/system/cpu" and fall back on the
+ * sysconf value if that fails.
+ */
+
+#include <dirent.h>
+#include <limits.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+
+#define __max(a,b) ((a)>(b)?(a):(b))
+
+void _lttng_counter_get_num_possible_cpus(void)
+{
+ int result, count = 0;
+ DIR *cpudir;
+ struct dirent *entry;
+
+ cpudir = opendir("/sys/devices/system/cpu");
+ if (cpudir == NULL)
+ goto end;
+
+ /*
+ * Count the number of directories named "cpu" followed by an
+ * integer. This is the same strategy as glibc uses.
+ */
+ while ((entry = readdir(cpudir))) {
+ if (entry->d_type == DT_DIR &&
+ strncmp(entry->d_name, "cpu", 3) == 0) {
+
+ char *endptr;
+ unsigned long cpu_num;
+
+ cpu_num = strtoul(entry->d_name + 3, &endptr, 10);
+ if ((cpu_num < ULONG_MAX) && (endptr != entry->d_name + 3)
+ && (*endptr == '\0')) {
+ count++;
+ }
+ }
+ }
+
+ /* Do not leak the directory handle on the success path. */
+ (void) closedir(cpudir);
+
+end:
+ /*
+ * Get the sysconf value as a fallback. Keep the highest number.
+ */
+ result = __max(sysconf(_SC_NPROCESSORS_CONF), count);
+
+ /*
+ * If both methods failed, don't store the value.
+ */
+ if (result < 1)
+ return;
+ __lttng_counter_num_possible_cpus = result;
+}
+#endif
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LIBCOUNTER_SMP_H
+#define _LIBCOUNTER_SMP_H
+
+/*
+ * 4kB of per-cpu data available.
+ */
+#define LTTNG_COUNTER_PER_CPU_MEM_SIZE 4096
+
+extern int __lttng_counter_num_possible_cpus
+ __attribute__((visibility("hidden")));
+
+extern void _lttng_counter_get_num_possible_cpus(void)
+ __attribute__((visibility("hidden")));
+
+static inline
+int lttng_counter_num_possible_cpus(void)
+{
+ if (!__lttng_counter_num_possible_cpus)
+ _lttng_counter_get_num_possible_cpus();
+ return __lttng_counter_num_possible_cpus;
+}
+
+#define lttng_counter_for_each_possible_cpu(cpu) \
+ for ((cpu) = 0; (cpu) < lttng_counter_num_possible_cpus(); (cpu)++)
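+
+/*
+ * Illustrative use (a sketch; "layouts" is an assumed array): reset
+ * the shm fd of every possible cpu.
+ *
+ *     int cpu;
+ *
+ *     lttng_counter_for_each_possible_cpu(cpu)
+ *             layouts[cpu].shm_fd = -1;
+ */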
+
+#endif /* _LIBCOUNTER_SMP_H */
#include "liblttng-ust/lttng-tracer-core.h"
#include "liblttng-ust/lttng-counter-client.h"
-#include "libcounter/shm.h"
-#include "libcounter/smp.h"
-#include "libcounter/counter.h"
+#include "common/counter/smp.h"
+#include "common/counter/counter.h"
/*
* Number of milliseconds to retry before failing metadata writes on
+++ /dev/null
-# SPDX-License-Identifier: LGPL-2.1-only
-
-AM_CFLAGS += -fno-strict-aliasing
-
-noinst_LTLIBRARIES = libcounter.la
-
-libcounter_la_SOURCES = \
- counter.c smp.c smp.h shm.c shm.h shm_internal.h shm_types.h \
- counter-api.h counter.h counter-internal.h counter-types.h \
- counter-config.h
-
-libcounter_la_LIBADD = \
- -lpthread \
- -lrt
-
-if ENABLE_NUMA
-libcounter_la_LIBADD += -lnuma
-endif
-
-libcounter_la_CFLAGS = -DUST_COMPONENT="libcounter" $(AM_CFLAGS)
+++ /dev/null
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * LTTng Counters API, requiring counter/config.h
- */
-
-#ifndef _LTTNG_COUNTER_API_H
-#define _LTTNG_COUNTER_API_H
-
-#include <stdint.h>
-#include <limits.h>
-#include "counter.h"
-#include "counter-internal.h"
-#include <urcu/compiler.h>
-#include <urcu/uatomic.h>
-#include "common/bitmap.h"
-#include "../libringbuffer/getcpu.h"
-
-/*
- * Using unsigned arithmetic because overflow is defined.
- */
-static inline int __lttng_counter_add(const struct lib_counter_config *config,
- enum lib_counter_config_alloc alloc,
- enum lib_counter_config_sync sync,
- struct lib_counter *counter,
- const size_t *dimension_indexes, int64_t v,
- int64_t *remainder)
-{
- size_t index;
- bool overflow = false, underflow = false;
- struct lib_counter_layout *layout;
- int64_t move_sum = 0;
-
- if (caa_unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
- return -EOVERFLOW;
- index = lttng_counter_get_index(config, counter, dimension_indexes);
-
- switch (alloc) {
- case COUNTER_ALLOC_PER_CPU:
- layout = &counter->percpu_counters[lttng_ust_get_cpu()];
- break;
- case COUNTER_ALLOC_GLOBAL:
- layout = &counter->global_counters;
- break;
- default:
- return -EINVAL;
- }
- if (caa_unlikely(!layout->counters))
- return -ENODEV;
-
- switch (config->counter_size) {
- case COUNTER_SIZE_8_BIT:
- {
- int8_t *int_p = (int8_t *) layout->counters + index;
- int8_t old, n, res;
- int8_t global_sum_step = counter->global_sum_step.s8;
-
- res = *int_p;
- switch (sync) {
- case COUNTER_SYNC_PER_CPU:
- {
- do {
- move_sum = 0;
- old = res;
- n = (int8_t) ((uint8_t) old + (uint8_t) v);
- if (caa_unlikely(n > (int8_t) global_sum_step))
- move_sum = (int8_t) global_sum_step / 2;
- else if (caa_unlikely(n < -(int8_t) global_sum_step))
- move_sum = -((int8_t) global_sum_step / 2);
- n -= move_sum;
- res = uatomic_cmpxchg(int_p, old, n);
- } while (old != res);
- break;
- }
- case COUNTER_SYNC_GLOBAL:
- {
- do {
- old = res;
- n = (int8_t) ((uint8_t) old + (uint8_t) v);
- res = uatomic_cmpxchg(int_p, old, n);
- } while (old != res);
- break;
- }
- default:
- return -EINVAL;
- }
- if (v > 0 && (v >= UINT8_MAX || n < old))
- overflow = true;
- else if (v < 0 && (v <= -(int64_t) UINT8_MAX || n > old))
- underflow = true;
- break;
- }
- case COUNTER_SIZE_16_BIT:
- {
- int16_t *int_p = (int16_t *) layout->counters + index;
- int16_t old, n, res;
- int16_t global_sum_step = counter->global_sum_step.s16;
-
- res = *int_p;
- switch (sync) {
- case COUNTER_SYNC_PER_CPU:
- {
- do {
- move_sum = 0;
- old = res;
- n = (int16_t) ((uint16_t) old + (uint16_t) v);
- if (caa_unlikely(n > (int16_t) global_sum_step))
- move_sum = (int16_t) global_sum_step / 2;
- else if (caa_unlikely(n < -(int16_t) global_sum_step))
- move_sum = -((int16_t) global_sum_step / 2);
- n -= move_sum;
- res = uatomic_cmpxchg(int_p, old, n);
- } while (old != res);
- break;
- }
- case COUNTER_SYNC_GLOBAL:
- {
- do {
- old = res;
- n = (int16_t) ((uint16_t) old + (uint16_t) v);
- res = uatomic_cmpxchg(int_p, old, n);
- } while (old != res);
- break;
- }
- default:
- return -EINVAL;
- }
- if (v > 0 && (v >= UINT16_MAX || n < old))
- overflow = true;
- else if (v < 0 && (v <= -(int64_t) UINT16_MAX || n > old))
- underflow = true;
- break;
- }
- case COUNTER_SIZE_32_BIT:
- {
- int32_t *int_p = (int32_t *) layout->counters + index;
- int32_t old, n, res;
- int32_t global_sum_step = counter->global_sum_step.s32;
-
- res = *int_p;
- switch (sync) {
- case COUNTER_SYNC_PER_CPU:
- {
- do {
- move_sum = 0;
- old = res;
- n = (int32_t) ((uint32_t) old + (uint32_t) v);
- if (caa_unlikely(n > (int32_t) global_sum_step))
- move_sum = (int32_t) global_sum_step / 2;
- else if (caa_unlikely(n < -(int32_t) global_sum_step))
- move_sum = -((int32_t) global_sum_step / 2);
- n -= move_sum;
- res = uatomic_cmpxchg(int_p, old, n);
- } while (old != res);
- break;
- }
- case COUNTER_SYNC_GLOBAL:
- {
- do {
- old = res;
- n = (int32_t) ((uint32_t) old + (uint32_t) v);
- res = uatomic_cmpxchg(int_p, old, n);
- } while (old != res);
- break;
- }
- default:
- return -EINVAL;
- }
- if (v > 0 && (v >= UINT32_MAX || n < old))
- overflow = true;
- else if (v < 0 && (v <= -(int64_t) UINT32_MAX || n > old))
- underflow = true;
- break;
- }
-#if CAA_BITS_PER_LONG == 64
- case COUNTER_SIZE_64_BIT:
- {
- int64_t *int_p = (int64_t *) layout->counters + index;
- int64_t old, n, res;
- int64_t global_sum_step = counter->global_sum_step.s64;
-
- res = *int_p;
- switch (sync) {
- case COUNTER_SYNC_PER_CPU:
- {
- do {
- move_sum = 0;
- old = res;
- n = (int64_t) ((uint64_t) old + (uint64_t) v);
- if (caa_unlikely(n > (int64_t) global_sum_step))
- move_sum = (int64_t) global_sum_step / 2;
- else if (caa_unlikely(n < -(int64_t) global_sum_step))
- move_sum = -((int64_t) global_sum_step / 2);
- n -= move_sum;
- res = uatomic_cmpxchg(int_p, old, n);
- } while (old != res);
- break;
- }
- case COUNTER_SYNC_GLOBAL:
- {
- do {
- old = res;
- n = (int64_t) ((uint64_t) old + (uint64_t) v);
- res = uatomic_cmpxchg(int_p, old, n);
- } while (old != res);
- break;
- }
- default:
- return -EINVAL;
- }
- if (v > 0 && n < old)
- overflow = true;
- else if (v < 0 && n > old)
- underflow = true;
- break;
- }
-#endif
- default:
- return -EINVAL;
- }
- if (caa_unlikely(overflow && !lttng_bitmap_test_bit(index, layout->overflow_bitmap)))
- lttng_bitmap_set_bit(index, layout->overflow_bitmap);
- else if (caa_unlikely(underflow && !lttng_bitmap_test_bit(index, layout->underflow_bitmap)))
- lttng_bitmap_set_bit(index, layout->underflow_bitmap);
- if (remainder)
- *remainder = move_sum;
- return 0;
-}
-
-static inline int __lttng_counter_add_percpu(const struct lib_counter_config *config,
- struct lib_counter *counter,
- const size_t *dimension_indexes, int64_t v)
-{
- int64_t move_sum;
- int ret;
-
- ret = __lttng_counter_add(config, COUNTER_ALLOC_PER_CPU, config->sync,
- counter, dimension_indexes, v, &move_sum);
- if (caa_unlikely(ret))
- return ret;
- if (caa_unlikely(move_sum))
- return __lttng_counter_add(config, COUNTER_ALLOC_GLOBAL, COUNTER_SYNC_GLOBAL,
- counter, dimension_indexes, move_sum, NULL);
- return 0;
-}
-
-static inline int __lttng_counter_add_global(const struct lib_counter_config *config,
- struct lib_counter *counter,
- const size_t *dimension_indexes, int64_t v)
-{
- return __lttng_counter_add(config, COUNTER_ALLOC_GLOBAL, config->sync, counter,
- dimension_indexes, v, NULL);
-}
-
-static inline int lttng_counter_add(const struct lib_counter_config *config,
- struct lib_counter *counter,
- const size_t *dimension_indexes, int64_t v)
-{
- switch (config->alloc) {
- case COUNTER_ALLOC_PER_CPU: /* Fallthrough */
- case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
- return __lttng_counter_add_percpu(config, counter, dimension_indexes, v);
- case COUNTER_ALLOC_GLOBAL:
- return __lttng_counter_add_global(config, counter, dimension_indexes, v);
- default:
- return -EINVAL;
- }
-}
-
-static inline int lttng_counter_inc(const struct lib_counter_config *config,
- struct lib_counter *counter,
- const size_t *dimension_indexes)
-{
- return lttng_counter_add(config, counter, dimension_indexes, 1);
-}
-
-static inline int lttng_counter_dec(const struct lib_counter_config *config,
- struct lib_counter *counter,
- const size_t *dimension_indexes)
-{
- return lttng_counter_add(config, counter, dimension_indexes, -1);
-}
-
-#endif /* _LTTNG_COUNTER_API_H */
+++ /dev/null
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * LTTng Counters Configuration
- */
-
-#ifndef _LTTNG_COUNTER_CONFIG_H
-#define _LTTNG_COUNTER_CONFIG_H
-
-#include <stdint.h>
-
-enum lib_counter_config_alloc {
- COUNTER_ALLOC_PER_CPU = (1 << 0),
- COUNTER_ALLOC_GLOBAL = (1 << 1),
-};
-
-enum lib_counter_config_sync {
- COUNTER_SYNC_PER_CPU,
- COUNTER_SYNC_GLOBAL,
-};
-
-struct lib_counter_config {
- uint32_t alloc; /* enum lib_counter_config_alloc flags */
- enum lib_counter_config_sync sync;
- enum {
- COUNTER_ARITHMETIC_MODULAR,
- COUNTER_ARITHMETIC_SATURATE, /* TODO */
- } arithmetic;
- enum {
- COUNTER_SIZE_8_BIT = 1,
- COUNTER_SIZE_16_BIT = 2,
- COUNTER_SIZE_32_BIT = 4,
- COUNTER_SIZE_64_BIT = 8,
- } counter_size;
-};
-
-#endif /* _LTTNG_COUNTER_CONFIG_H */
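For illustration, a configuration combining the fields above might look like this (a sketch; the field values are example choices, not mandated by the patch):

	static const struct lib_counter_config example_config = {
		.alloc = COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL,
		.sync = COUNTER_SYNC_PER_CPU,
		.arithmetic = COUNTER_ARITHMETIC_MODULAR,
		.counter_size = COUNTER_SIZE_32_BIT,
	};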
+++ /dev/null
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * LTTng Counters Internal Header
- */
-
-#ifndef _LTTNG_COUNTER_INTERNAL_H
-#define _LTTNG_COUNTER_INTERNAL_H
-
-#include <stdint.h>
-#include <lttng/ust-config.h>
-#include <urcu/compiler.h>
-#include "counter-types.h"
-
-static inline int lttng_counter_validate_indexes(
- const struct lib_counter_config *config __attribute__((unused)),
- struct lib_counter *counter,
- const size_t *dimension_indexes)
-{
- size_t nr_dimensions = counter->nr_dimensions, i;
-
- for (i = 0; i < nr_dimensions; i++) {
- if (caa_unlikely(dimension_indexes[i] >= counter->dimensions[i].max_nr_elem))
- return -EOVERFLOW;
- }
- return 0;
-}
-
-static inline size_t lttng_counter_get_index(
- const struct lib_counter_config *config __attribute__((unused)),
- struct lib_counter *counter,
- const size_t *dimension_indexes)
-{
- size_t nr_dimensions = counter->nr_dimensions, i;
- size_t index = 0;
-
- for (i = 0; i < nr_dimensions; i++) {
- struct lib_counter_dimension *dimension = &counter->dimensions[i];
- const size_t *dimension_index = &dimension_indexes[i];
-
- index += *dimension_index * dimension->stride;
- }
- return index;
-}
-
-#endif /* _LTTNG_COUNTER_INTERNAL_H */
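To make the index arithmetic concrete (an illustrative walk-through): for a two-dimensional counter with max_nr_elem = {4, 8}, lttng_counter_init_stride() in counter.c assigns strides {8, 1}, so dimension_indexes = {2, 3} select the linear slot 2 * 8 + 3 * 1 = 19 out of the 32 allocated elements.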
+++ /dev/null
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * LTTng Counters Types
- */
-
-#ifndef _LTTNG_COUNTER_TYPES_H
-#define _LTTNG_COUNTER_TYPES_H
-
-#include <stdint.h>
-#include <stddef.h>
-#include <stdbool.h>
-#include <sys/types.h>
-#include "counter-config.h"
-#include <lttng/ust-config.h>
-#include "shm_types.h"
-
-struct lib_counter_dimension {
- /*
- * Max. number of indexable elements.
- */
- size_t max_nr_elem;
- /*
- * The stride for a dimension is the multiplication factor which
- * should be applied to its index to take into account other
- * dimensions nested inside.
- */
- size_t stride;
-};
-
-struct lib_counter_layout {
- void *counters;
- unsigned long *overflow_bitmap;
- unsigned long *underflow_bitmap;
- int shm_fd;
- size_t shm_len;
- struct lttng_counter_shm_handle handle;
-};
-
-enum lib_counter_arithmetic {
- LIB_COUNTER_ARITHMETIC_MODULAR,
- LIB_COUNTER_ARITHMETIC_SATURATE,
-};
-
-struct lib_counter {
- size_t nr_dimensions;
- int64_t allocated_elem;
- struct lib_counter_dimension *dimensions;
- enum lib_counter_arithmetic arithmetic;
- union {
- struct {
- int32_t max, min;
- } limits_32_bit;
- struct {
- int64_t max, min;
- } limits_64_bit;
- } saturation;
- union {
- int8_t s8;
- int16_t s16;
- int32_t s32;
- int64_t s64;
- } global_sum_step; /* 0 if unused */
- struct lib_counter_config config;
-
- struct lib_counter_layout global_counters;
- struct lib_counter_layout *percpu_counters;
-
- bool is_daemon;
- struct lttng_counter_shm_object_table *object_table;
-};
-
-#endif /* _LTTNG_COUNTER_TYPES_H */
+++ /dev/null
-/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
- *
- * counter.c
- *
- * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <errno.h>
-#include "counter.h"
-#include "counter-internal.h"
-#include <urcu/system.h>
-#include <urcu/compiler.h>
-#include <stdbool.h>
-
-#include "common/macros.h"
-#include "common/align.h"
-#include "common/bitmap.h"
-
-#include "smp.h"
-#include "shm.h"
-
-static size_t lttng_counter_get_dimension_nr_elements(struct lib_counter_dimension *dimension)
-{
- return dimension->max_nr_elem;
-}
-
-static int lttng_counter_init_stride(
- const struct lib_counter_config *config __attribute__((unused)),
- struct lib_counter *counter)
-{
- size_t nr_dimensions = counter->nr_dimensions;
- size_t stride = 1;
- ssize_t i;
-
- for (i = nr_dimensions - 1; i >= 0; i--) {
- struct lib_counter_dimension *dimension = &counter->dimensions[i];
- size_t nr_elem;
-
- nr_elem = lttng_counter_get_dimension_nr_elements(dimension);
-		dimension->stride = stride;
-		/* Each dimension must hold at least one element. */
-		if (!nr_elem)
-			return -EINVAL;
-		/* Check for overflow before multiplying. */
-		if (stride > SIZE_MAX / nr_elem)
-			return -EINVAL;
-		stride *= nr_elem;
- }
- return 0;
-}
-
-static int lttng_counter_layout_init(struct lib_counter *counter, int cpu, int shm_fd)
-{
- struct lib_counter_layout *layout;
- size_t counter_size;
- size_t nr_elem = counter->allocated_elem;
- size_t shm_length = 0, counters_offset, overflow_offset, underflow_offset;
- struct lttng_counter_shm_object *shm_object;
-
- if (shm_fd < 0)
- return 0; /* Skip, will be populated later. */
-
- if (cpu == -1)
- layout = &counter->global_counters;
- else
- layout = &counter->percpu_counters[cpu];
- switch (counter->config.counter_size) {
- case COUNTER_SIZE_8_BIT:
- case COUNTER_SIZE_16_BIT:
- case COUNTER_SIZE_32_BIT:
- case COUNTER_SIZE_64_BIT:
- counter_size = (size_t) counter->config.counter_size;
- break;
- default:
- return -EINVAL;
- }
- layout->shm_fd = shm_fd;
- counters_offset = shm_length;
- shm_length += counter_size * nr_elem;
- overflow_offset = shm_length;
- shm_length += LTTNG_UST_ALIGN(nr_elem, 8) / 8;
- underflow_offset = shm_length;
- shm_length += LTTNG_UST_ALIGN(nr_elem, 8) / 8;
- layout->shm_len = shm_length;
- if (counter->is_daemon) {
- /* Allocate and clear shared memory. */
- shm_object = lttng_counter_shm_object_table_alloc(counter->object_table,
- shm_length, LTTNG_COUNTER_SHM_OBJECT_SHM, shm_fd, cpu);
- if (!shm_object)
- return -ENOMEM;
- } else {
- /* Map pre-existing shared memory. */
- shm_object = lttng_counter_shm_object_table_append_shm(counter->object_table,
- shm_fd, shm_length);
- if (!shm_object)
- return -ENOMEM;
- }
- layout->counters = shm_object->memory_map + counters_offset;
- layout->overflow_bitmap = (unsigned long *)(shm_object->memory_map + overflow_offset);
- layout->underflow_bitmap = (unsigned long *)(shm_object->memory_map + underflow_offset);
- return 0;
-}
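As a worked example of the layout arithmetic above (illustrative numbers): a 32-bit counter with 32 allocated elements places the counters (4 * 32 = 128 bytes) at offset 0, the overflow bitmap (LTTNG_UST_ALIGN(32, 8) / 8 = 4 bytes) at offset 128 and the underflow bitmap at offset 132, for a shm_len of 136 bytes.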
-
-int lttng_counter_set_global_shm(struct lib_counter *counter, int fd)
-{
- struct lib_counter_config *config = &counter->config;
- struct lib_counter_layout *layout;
-
- if (!(config->alloc & COUNTER_ALLOC_GLOBAL))
- return -EINVAL;
- layout = &counter->global_counters;
- if (layout->shm_fd >= 0)
- return -EBUSY;
- return lttng_counter_layout_init(counter, -1, fd);
-}
-
-int lttng_counter_set_cpu_shm(struct lib_counter *counter, int cpu, int fd)
-{
- struct lib_counter_config *config = &counter->config;
- struct lib_counter_layout *layout;
-
- if (cpu < 0 || cpu >= lttng_counter_num_possible_cpus())
- return -EINVAL;
-
- if (!(config->alloc & COUNTER_ALLOC_PER_CPU))
- return -EINVAL;
- layout = &counter->percpu_counters[cpu];
- if (layout->shm_fd >= 0)
- return -EBUSY;
- return lttng_counter_layout_init(counter, cpu, fd);
-}
-
-static
-int lttng_counter_set_global_sum_step(struct lib_counter *counter,
- int64_t global_sum_step)
-{
- if (global_sum_step < 0)
- return -EINVAL;
-
- switch (counter->config.counter_size) {
- case COUNTER_SIZE_8_BIT:
- if (global_sum_step > INT8_MAX)
- return -EINVAL;
- counter->global_sum_step.s8 = (int8_t) global_sum_step;
- break;
- case COUNTER_SIZE_16_BIT:
- if (global_sum_step > INT16_MAX)
- return -EINVAL;
- counter->global_sum_step.s16 = (int16_t) global_sum_step;
- break;
- case COUNTER_SIZE_32_BIT:
- if (global_sum_step > INT32_MAX)
- return -EINVAL;
- counter->global_sum_step.s32 = (int32_t) global_sum_step;
- break;
- case COUNTER_SIZE_64_BIT:
- counter->global_sum_step.s64 = global_sum_step;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static
-int validate_args(const struct lib_counter_config *config,
- size_t nr_dimensions __attribute__((unused)),
- const size_t *max_nr_elem,
- int64_t global_sum_step,
- int global_counter_fd,
- int nr_counter_cpu_fds,
- const int *counter_cpu_fds)
-{
- int nr_cpus = lttng_counter_num_possible_cpus();
-
- if (CAA_BITS_PER_LONG != 64 && config->counter_size == COUNTER_SIZE_64_BIT) {
- WARN_ON_ONCE(1);
- return -1;
- }
- if (!max_nr_elem)
- return -1;
-	/*
-	 * The global sum step is only useful when allocating both
-	 * per-cpu and global counters.
-	 */
- if (global_sum_step && (!(config->alloc & COUNTER_ALLOC_GLOBAL) ||
- !(config->alloc & COUNTER_ALLOC_PER_CPU)))
- return -1;
- if (!(config->alloc & COUNTER_ALLOC_GLOBAL) && global_counter_fd >= 0)
- return -1;
- if (!(config->alloc & COUNTER_ALLOC_PER_CPU) && counter_cpu_fds)
- return -1;
- if (!(config->alloc & COUNTER_ALLOC_PER_CPU) && nr_counter_cpu_fds >= 0)
- return -1;
- if (counter_cpu_fds && nr_cpus != nr_counter_cpu_fds)
- return -1;
- return 0;
-}
-
-struct lib_counter *lttng_counter_create(const struct lib_counter_config *config,
- size_t nr_dimensions,
- const size_t *max_nr_elem,
- int64_t global_sum_step,
- int global_counter_fd,
- int nr_counter_cpu_fds,
- const int *counter_cpu_fds,
- bool is_daemon)
-{
- struct lib_counter *counter;
- size_t dimension, nr_elem = 1;
- int cpu, ret;
- int nr_handles = 0;
- int nr_cpus = lttng_counter_num_possible_cpus();
-
- if (validate_args(config, nr_dimensions, max_nr_elem,
- global_sum_step, global_counter_fd, nr_counter_cpu_fds,
- counter_cpu_fds))
- return NULL;
- counter = zmalloc(sizeof(struct lib_counter));
- if (!counter)
- return NULL;
- counter->global_counters.shm_fd = -1;
- counter->config = *config;
- counter->is_daemon = is_daemon;
- if (lttng_counter_set_global_sum_step(counter, global_sum_step))
- goto error_sum_step;
- counter->nr_dimensions = nr_dimensions;
- counter->dimensions = zmalloc(nr_dimensions * sizeof(*counter->dimensions));
- if (!counter->dimensions)
- goto error_dimensions;
- for (dimension = 0; dimension < nr_dimensions; dimension++)
- counter->dimensions[dimension].max_nr_elem = max_nr_elem[dimension];
- if (config->alloc & COUNTER_ALLOC_PER_CPU) {
- counter->percpu_counters = zmalloc(sizeof(struct lib_counter_layout) * nr_cpus);
- if (!counter->percpu_counters)
- goto error_alloc_percpu;
- lttng_counter_for_each_possible_cpu(cpu)
- counter->percpu_counters[cpu].shm_fd = -1;
- }
-
- if (lttng_counter_init_stride(config, counter))
- goto error_init_stride;
-	/* TODO: saturation values. */
- for (dimension = 0; dimension < counter->nr_dimensions; dimension++)
- nr_elem *= lttng_counter_get_dimension_nr_elements(&counter->dimensions[dimension]);
- counter->allocated_elem = nr_elem;
-
- if (config->alloc & COUNTER_ALLOC_GLOBAL)
- nr_handles++;
- if (config->alloc & COUNTER_ALLOC_PER_CPU)
- nr_handles += nr_cpus;
- /* Allocate table for global and per-cpu counters. */
- counter->object_table = lttng_counter_shm_object_table_create(nr_handles);
- if (!counter->object_table)
- goto error_alloc_object_table;
-
- if (config->alloc & COUNTER_ALLOC_GLOBAL) {
- ret = lttng_counter_layout_init(counter, -1, global_counter_fd); /* global */
- if (ret)
- goto layout_init_error;
- }
- if ((config->alloc & COUNTER_ALLOC_PER_CPU) && counter_cpu_fds) {
- lttng_counter_for_each_possible_cpu(cpu) {
- ret = lttng_counter_layout_init(counter, cpu, counter_cpu_fds[cpu]);
- if (ret)
- goto layout_init_error;
- }
- }
- return counter;
-
-layout_init_error:
- lttng_counter_shm_object_table_destroy(counter->object_table, is_daemon);
-error_alloc_object_table:
-error_init_stride:
- free(counter->percpu_counters);
-error_alloc_percpu:
- free(counter->dimensions);
-error_dimensions:
-error_sum_step:
- free(counter);
- return NULL;
-}
-
-void lttng_counter_destroy(struct lib_counter *counter)
-{
- struct lib_counter_config *config = &counter->config;
-
- if (config->alloc & COUNTER_ALLOC_PER_CPU)
- free(counter->percpu_counters);
- lttng_counter_shm_object_table_destroy(counter->object_table, counter->is_daemon);
- free(counter->dimensions);
- free(counter);
-}
-
-int lttng_counter_get_global_shm(struct lib_counter *counter, int *fd, size_t *len)
-{
- int shm_fd;
-
- shm_fd = counter->global_counters.shm_fd;
- if (shm_fd < 0)
- return -1;
- *fd = shm_fd;
- *len = counter->global_counters.shm_len;
- return 0;
-}
-
-int lttng_counter_get_cpu_shm(struct lib_counter *counter, int cpu, int *fd, size_t *len)
-{
- struct lib_counter_layout *layout;
- int shm_fd;
-
-	if (cpu < 0 || cpu >= lttng_counter_num_possible_cpus())
- return -1;
- layout = &counter->percpu_counters[cpu];
- shm_fd = layout->shm_fd;
- if (shm_fd < 0)
- return -1;
- *fd = shm_fd;
- *len = layout->shm_len;
- return 0;
-}
-
-int lttng_counter_read(const struct lib_counter_config *config,
- struct lib_counter *counter,
- const size_t *dimension_indexes,
- int cpu, int64_t *value, bool *overflow,
- bool *underflow)
-{
- size_t index;
- struct lib_counter_layout *layout;
-
- if (caa_unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
- return -EOVERFLOW;
- index = lttng_counter_get_index(config, counter, dimension_indexes);
-
- switch (config->alloc) {
- case COUNTER_ALLOC_PER_CPU:
- if (cpu < 0 || cpu >= lttng_counter_num_possible_cpus())
- return -EINVAL;
- layout = &counter->percpu_counters[cpu];
- break;
- case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
- if (cpu >= 0) {
- if (cpu >= lttng_counter_num_possible_cpus())
- return -EINVAL;
- layout = &counter->percpu_counters[cpu];
- } else {
- layout = &counter->global_counters;
- }
- break;
- case COUNTER_ALLOC_GLOBAL:
- if (cpu >= 0)
- return -EINVAL;
- layout = &counter->global_counters;
- break;
- default:
- return -EINVAL;
- }
- if (caa_unlikely(!layout->counters))
- return -ENODEV;
-
- switch (config->counter_size) {
- case COUNTER_SIZE_8_BIT:
- {
- int8_t *int_p = (int8_t *) layout->counters + index;
- *value = (int64_t) CMM_LOAD_SHARED(*int_p);
- break;
- }
- case COUNTER_SIZE_16_BIT:
- {
- int16_t *int_p = (int16_t *) layout->counters + index;
- *value = (int64_t) CMM_LOAD_SHARED(*int_p);
- break;
- }
- case COUNTER_SIZE_32_BIT:
- {
- int32_t *int_p = (int32_t *) layout->counters + index;
- *value = (int64_t) CMM_LOAD_SHARED(*int_p);
- break;
- }
-#if CAA_BITS_PER_LONG == 64
- case COUNTER_SIZE_64_BIT:
- {
- int64_t *int_p = (int64_t *) layout->counters + index;
- *value = CMM_LOAD_SHARED(*int_p);
- break;
- }
-#endif
- default:
- return -EINVAL;
- }
- *overflow = lttng_bitmap_test_bit(index, layout->overflow_bitmap);
- *underflow = lttng_bitmap_test_bit(index, layout->underflow_bitmap);
- return 0;
-}
-
-int lttng_counter_aggregate(const struct lib_counter_config *config,
- struct lib_counter *counter,
- const size_t *dimension_indexes,
- int64_t *value, bool *overflow,
- bool *underflow)
-{
- int cpu, ret;
- int64_t v, sum = 0;
- bool of, uf;
-
- *overflow = false;
- *underflow = false;
-
- switch (config->alloc) {
- case COUNTER_ALLOC_GLOBAL: /* Fallthrough */
- case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
- /* Read global counter. */
- ret = lttng_counter_read(config, counter, dimension_indexes,
- -1, &v, &of, &uf);
- if (ret < 0)
- return ret;
- sum += v;
- *overflow |= of;
- *underflow |= uf;
- break;
- case COUNTER_ALLOC_PER_CPU:
- break;
- default:
- return -EINVAL;
- }
-
- switch (config->alloc) {
- case COUNTER_ALLOC_GLOBAL:
- break;
- case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL: /* Fallthrough */
- case COUNTER_ALLOC_PER_CPU:
- lttng_counter_for_each_possible_cpu(cpu) {
- int64_t old = sum;
-
- ret = lttng_counter_read(config, counter, dimension_indexes,
- cpu, &v, &of, &uf);
- if (ret < 0)
- return ret;
- *overflow |= of;
- *underflow |= uf;
- /* Overflow is defined on unsigned types. */
- sum = (int64_t) ((uint64_t) old + (uint64_t) v);
- if (v > 0 && sum < old)
- *overflow = true;
- else if (v < 0 && sum > old)
- *underflow = true;
- }
- break;
- default:
- return -EINVAL;
- }
- *value = sum;
- return 0;
-}
-
-static
-int lttng_counter_clear_cpu(const struct lib_counter_config *config,
- struct lib_counter *counter,
- const size_t *dimension_indexes,
- int cpu)
-{
- size_t index;
- struct lib_counter_layout *layout;
-
- if (caa_unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
- return -EOVERFLOW;
- index = lttng_counter_get_index(config, counter, dimension_indexes);
-
- switch (config->alloc) {
- case COUNTER_ALLOC_PER_CPU:
- if (cpu < 0 || cpu >= lttng_counter_num_possible_cpus())
- return -EINVAL;
- layout = &counter->percpu_counters[cpu];
- break;
- case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
- if (cpu >= 0) {
- if (cpu >= lttng_counter_num_possible_cpus())
- return -EINVAL;
- layout = &counter->percpu_counters[cpu];
- } else {
- layout = &counter->global_counters;
- }
- break;
- case COUNTER_ALLOC_GLOBAL:
- if (cpu >= 0)
- return -EINVAL;
- layout = &counter->global_counters;
- break;
- default:
- return -EINVAL;
- }
- if (caa_unlikely(!layout->counters))
- return -ENODEV;
-
- switch (config->counter_size) {
- case COUNTER_SIZE_8_BIT:
- {
- int8_t *int_p = (int8_t *) layout->counters + index;
- CMM_STORE_SHARED(*int_p, 0);
- break;
- }
- case COUNTER_SIZE_16_BIT:
- {
- int16_t *int_p = (int16_t *) layout->counters + index;
- CMM_STORE_SHARED(*int_p, 0);
- break;
- }
- case COUNTER_SIZE_32_BIT:
- {
- int32_t *int_p = (int32_t *) layout->counters + index;
- CMM_STORE_SHARED(*int_p, 0);
- break;
- }
-#if CAA_BITS_PER_LONG == 64
- case COUNTER_SIZE_64_BIT:
- {
- int64_t *int_p = (int64_t *) layout->counters + index;
- CMM_STORE_SHARED(*int_p, 0);
- break;
- }
-#endif
- default:
- return -EINVAL;
- }
- lttng_bitmap_clear_bit(index, layout->overflow_bitmap);
- lttng_bitmap_clear_bit(index, layout->underflow_bitmap);
- return 0;
-}
-
-int lttng_counter_clear(const struct lib_counter_config *config,
- struct lib_counter *counter,
- const size_t *dimension_indexes)
-{
- int cpu, ret;
-
- switch (config->alloc) {
- case COUNTER_ALLOC_PER_CPU:
- break;
- case COUNTER_ALLOC_GLOBAL: /* Fallthrough */
- case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
- /* Clear global counter. */
- ret = lttng_counter_clear_cpu(config, counter, dimension_indexes, -1);
- if (ret < 0)
- return ret;
- break;
- default:
- return -EINVAL;
- }
-
- switch (config->alloc) {
- case COUNTER_ALLOC_PER_CPU: /* Fallthrough */
- case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
- lttng_counter_for_each_possible_cpu(cpu) {
- ret = lttng_counter_clear_cpu(config, counter, dimension_indexes, cpu);
- if (ret < 0)
- return ret;
- }
- break;
- case COUNTER_ALLOC_GLOBAL:
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
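Putting this file together, a round trip as the daemon side would drive it looks roughly as follows (a hedged sketch: the shm fd and sizes are placeholders, error handling is minimal, and lttng_counter_add() comes from counter-api.h):

	/* Sketch: global-only, 1-dimension, 32-bit modular counter. */
	static int example_counter_roundtrip(int shm_fd)
	{
		static const size_t max_nr_elem[1] = { 128 };
		static const struct lib_counter_config cfg = {
			.alloc = COUNTER_ALLOC_GLOBAL,
			.sync = COUNTER_SYNC_GLOBAL,
			.arithmetic = COUNTER_ARITHMETIC_MODULAR,
			.counter_size = COUNTER_SIZE_32_BIT,
		};
		const size_t idx[1] = { 7 };
		struct lib_counter *counter;
		int64_t value;
		bool overflow, underflow;

		/* No per-cpu fds: pass nr_counter_cpu_fds = -1 and NULL. */
		counter = lttng_counter_create(&cfg, 1, max_nr_elem, 0,
				shm_fd, -1, NULL, true);
		if (!counter)
			return -1;
		(void) lttng_counter_add(&cfg, counter, idx, 1);
		(void) lttng_counter_aggregate(&cfg, counter, idx,
				&value, &overflow, &underflow);
		lttng_counter_destroy(counter);
		return 0;
	}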
+++ /dev/null
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * LTTng Counters API
- */
-
-#ifndef _LTTNG_COUNTER_H
-#define _LTTNG_COUNTER_H
-
-#include <stdint.h>
-#include <lttng/ust-config.h>
-#include "counter-types.h"
-
-/* max_nr_elem is an array of one maximum element count per dimension. */
-struct lib_counter *lttng_counter_create(const struct lib_counter_config *config,
- size_t nr_dimensions,
- const size_t *max_nr_elem,
- int64_t global_sum_step,
- int global_counter_fd,
- int nr_counter_cpu_fds,
- const int *counter_cpu_fds,
- bool is_daemon)
- __attribute__((visibility("hidden")));
-
-void lttng_counter_destroy(struct lib_counter *counter)
- __attribute__((visibility("hidden")));
-
-int lttng_counter_set_global_shm(struct lib_counter *counter, int fd)
- __attribute__((visibility("hidden")));
-
-int lttng_counter_set_cpu_shm(struct lib_counter *counter, int cpu, int fd)
- __attribute__((visibility("hidden")));
-
-int lttng_counter_get_global_shm(struct lib_counter *counter, int *fd, size_t *len)
- __attribute__((visibility("hidden")));
-
-int lttng_counter_get_cpu_shm(struct lib_counter *counter, int cpu, int *fd, size_t *len)
- __attribute__((visibility("hidden")));
-
-int lttng_counter_read(const struct lib_counter_config *config,
- struct lib_counter *counter,
- const size_t *dimension_indexes,
- int cpu, int64_t *value,
- bool *overflow, bool *underflow)
- __attribute__((visibility("hidden")));
-
-int lttng_counter_aggregate(const struct lib_counter_config *config,
- struct lib_counter *counter,
- const size_t *dimension_indexes,
- int64_t *value,
- bool *overflow, bool *underflow)
- __attribute__((visibility("hidden")));
-
-int lttng_counter_clear(const struct lib_counter_config *config,
- struct lib_counter *counter,
- const size_t *dimension_indexes)
- __attribute__((visibility("hidden")));
-
-#endif /* _LTTNG_COUNTER_H */
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#define _LGPL_SOURCE
-#include "shm.h"
-#include <unistd.h>
-#include <fcntl.h>
-#include <sys/mman.h>
-#include <sys/types.h>
-#include <sys/stat.h> /* For mode constants */
-#include <fcntl.h> /* For O_* constants */
-#include <assert.h>
-#include <stdio.h>
-#include <signal.h>
-#include <dirent.h>
-#include <limits.h>
-#include <stdbool.h>
-#include <stdint.h>
-
-#ifdef HAVE_LIBNUMA
-#include <numa.h>
-#include <numaif.h>
-#endif
-
-#include <lttng/ust-utils.h>
-
-#include "common/macros.h"
-#include "common/ust-fd.h"
-#include "../libringbuffer/mmap.h"
-
-/*
- * Ensure we have the required amount of space available by writing 0
- * into the entire buffer. Not doing so can trigger SIGBUS when going
- * beyond the available shm space.
- */
-static
-int zero_file(int fd, size_t len)
-{
- ssize_t retlen;
- size_t written = 0;
- char *zeropage;
- long pagelen;
- int ret;
-
- pagelen = sysconf(_SC_PAGESIZE);
- if (pagelen < 0)
- return (int) pagelen;
- zeropage = calloc(pagelen, 1);
- if (!zeropage)
- return -ENOMEM;
-
- while (len > written) {
- do {
- retlen = write(fd, zeropage,
- min_t(size_t, pagelen, len - written));
-		} while (retlen < 0 && errno == EINTR);
- if (retlen < 0) {
- ret = (int) retlen;
- goto error;
- }
- written += retlen;
- }
- ret = 0;
-error:
- free(zeropage);
- return ret;
-}
-
-struct lttng_counter_shm_object_table *lttng_counter_shm_object_table_create(size_t max_nb_obj)
-{
- struct lttng_counter_shm_object_table *table;
-
- table = zmalloc(sizeof(struct lttng_counter_shm_object_table) +
- max_nb_obj * sizeof(table->objects[0]));
- if (!table)
- return NULL;
- table->size = max_nb_obj;
- return table;
-}
-
-static
-struct lttng_counter_shm_object *_lttng_counter_shm_object_table_alloc_shm(struct lttng_counter_shm_object_table *table,
- size_t memory_map_size,
- int cpu_fd)
-{
- int shmfd, ret;
- struct lttng_counter_shm_object *obj;
- char *memory_map;
-
- if (cpu_fd < 0)
- return NULL;
- if (table->allocated_len >= table->size)
- return NULL;
- obj = &table->objects[table->allocated_len];
-
- /* create shm */
-
- shmfd = cpu_fd;
- ret = zero_file(shmfd, memory_map_size);
- if (ret) {
- PERROR("zero_file");
- goto error_zero_file;
- }
- ret = ftruncate(shmfd, memory_map_size);
- if (ret) {
- PERROR("ftruncate");
- goto error_ftruncate;
- }
- /*
- * Also ensure the file metadata is synced with the storage by using
- * fsync(2).
- */
- ret = fsync(shmfd);
- if (ret) {
- PERROR("fsync");
- goto error_fsync;
- }
- obj->shm_fd_ownership = 0;
- obj->shm_fd = shmfd;
-
- /* memory_map: mmap */
- memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
- MAP_SHARED | LTTNG_MAP_POPULATE, shmfd, 0);
- if (memory_map == MAP_FAILED) {
- PERROR("mmap");
- goto error_mmap;
- }
- obj->type = LTTNG_COUNTER_SHM_OBJECT_SHM;
- obj->memory_map = memory_map;
- obj->memory_map_size = memory_map_size;
- obj->allocated_len = 0;
- obj->index = table->allocated_len++;
-
- return obj;
-
-error_mmap:
-error_fsync:
-error_ftruncate:
-error_zero_file:
- return NULL;
-}
-
-static
-struct lttng_counter_shm_object *_lttng_counter_shm_object_table_alloc_mem(struct lttng_counter_shm_object_table *table,
- size_t memory_map_size)
-{
- struct lttng_counter_shm_object *obj;
- void *memory_map;
-
- if (table->allocated_len >= table->size)
- return NULL;
- obj = &table->objects[table->allocated_len];
-
- memory_map = zmalloc(memory_map_size);
- if (!memory_map)
- goto alloc_error;
-
- /* no shm_fd */
- obj->shm_fd = -1;
- obj->shm_fd_ownership = 0;
-
- obj->type = LTTNG_COUNTER_SHM_OBJECT_MEM;
- obj->memory_map = memory_map;
- obj->memory_map_size = memory_map_size;
- obj->allocated_len = 0;
- obj->index = table->allocated_len++;
-
- return obj;
-
-alloc_error:
- return NULL;
-}
-
-/*
- * libnuma prints errors on the console even for numa_available().
- * Work around this limitation by using get_mempolicy() directly to
- * check whether the kernel supports mempolicy.
- */
-#ifdef HAVE_LIBNUMA
-static bool lttng_is_numa_available(void)
-{
- int ret;
-
- ret = get_mempolicy(NULL, NULL, 0, NULL, 0);
- if (ret && errno == ENOSYS) {
- return false;
- }
-	/* numa_available() returns -1 when NUMA is unavailable. */
-	return numa_available() != -1;
-}
-#endif
-
-struct lttng_counter_shm_object *lttng_counter_shm_object_table_alloc(struct lttng_counter_shm_object_table *table,
- size_t memory_map_size,
- enum lttng_counter_shm_object_type type,
- int cpu_fd,
- int cpu)
-{
-	struct lttng_counter_shm_object *shm_object = NULL;
-#ifdef HAVE_LIBNUMA
- int oldnode = 0, node;
- bool numa_avail;
-
- numa_avail = lttng_is_numa_available();
- if (numa_avail) {
- oldnode = numa_preferred();
- if (cpu >= 0) {
- node = numa_node_of_cpu(cpu);
- if (node >= 0)
- numa_set_preferred(node);
- }
- if (cpu < 0 || node < 0)
- numa_set_localalloc();
- }
-#endif /* HAVE_LIBNUMA */
- switch (type) {
- case LTTNG_COUNTER_SHM_OBJECT_SHM:
- shm_object = _lttng_counter_shm_object_table_alloc_shm(table, memory_map_size,
- cpu_fd);
- break;
- case LTTNG_COUNTER_SHM_OBJECT_MEM:
- shm_object = _lttng_counter_shm_object_table_alloc_mem(table, memory_map_size);
- break;
- default:
- assert(0);
- }
-#ifdef HAVE_LIBNUMA
- if (numa_avail)
- numa_set_preferred(oldnode);
-#endif /* HAVE_LIBNUMA */
- return shm_object;
-}
-
-struct lttng_counter_shm_object *lttng_counter_shm_object_table_append_shm(struct lttng_counter_shm_object_table *table,
- int shm_fd,
- size_t memory_map_size)
-{
- struct lttng_counter_shm_object *obj;
- char *memory_map;
-
- if (table->allocated_len >= table->size)
- return NULL;
-
- obj = &table->objects[table->allocated_len];
-
- obj->shm_fd = shm_fd;
- obj->shm_fd_ownership = 1;
-
- /* memory_map: mmap */
- memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
- MAP_SHARED | LTTNG_MAP_POPULATE, shm_fd, 0);
- if (memory_map == MAP_FAILED) {
- PERROR("mmap");
- goto error_mmap;
- }
- obj->type = LTTNG_COUNTER_SHM_OBJECT_SHM;
- obj->memory_map = memory_map;
- obj->memory_map_size = memory_map_size;
- obj->allocated_len = memory_map_size;
- obj->index = table->allocated_len++;
-
- return obj;
-
-error_mmap:
- return NULL;
-}
-
-/*
- * Passing ownership of mem to object.
- */
-struct lttng_counter_shm_object *lttng_counter_shm_object_table_append_mem(struct lttng_counter_shm_object_table *table,
- void *mem, size_t memory_map_size)
-{
- struct lttng_counter_shm_object *obj;
-
- if (table->allocated_len >= table->size)
- return NULL;
- obj = &table->objects[table->allocated_len];
-
- obj->shm_fd = -1;
- obj->shm_fd_ownership = 0;
-
- obj->type = LTTNG_COUNTER_SHM_OBJECT_MEM;
- obj->memory_map = mem;
- obj->memory_map_size = memory_map_size;
- obj->allocated_len = memory_map_size;
- obj->index = table->allocated_len++;
-
-	return obj;
-}
-
-static
-void lttng_counter_shmp_object_destroy(struct lttng_counter_shm_object *obj, int consumer)
-{
- switch (obj->type) {
- case LTTNG_COUNTER_SHM_OBJECT_SHM:
- {
- int ret;
-
- ret = munmap(obj->memory_map, obj->memory_map_size);
- if (ret) {
-			PERROR("munmap");
- assert(0);
- }
-
- if (obj->shm_fd_ownership) {
- /* Delete FDs only if called from app (not consumer). */
- if (!consumer) {
- lttng_ust_lock_fd_tracker();
- ret = close(obj->shm_fd);
- if (!ret) {
- lttng_ust_delete_fd_from_tracker(obj->shm_fd);
- } else {
- PERROR("close");
- assert(0);
- }
- lttng_ust_unlock_fd_tracker();
- } else {
- ret = close(obj->shm_fd);
- if (ret) {
- PERROR("close");
- assert(0);
- }
- }
- }
- break;
- }
- case LTTNG_COUNTER_SHM_OBJECT_MEM:
- {
- free(obj->memory_map);
- break;
- }
- default:
- assert(0);
- }
-}
-
-void lttng_counter_shm_object_table_destroy(struct lttng_counter_shm_object_table *table, int consumer)
-{
- int i;
-
- for (i = 0; i < table->allocated_len; i++)
- lttng_counter_shmp_object_destroy(&table->objects[i], consumer);
- free(table);
-}
-
-/*
- * lttng_counter_zalloc_shm - allocate memory within a shm object.
- *
- * Memory is already zeroed (zero_file() for shm-backed objects,
- * zmalloc() for heap-backed ones).
- * *NOT* multithread-safe (should be protected by mutex).
- * Returns a -1, -1 tuple on error.
- */
-struct lttng_counter_shm_ref lttng_counter_zalloc_shm(struct lttng_counter_shm_object *obj, size_t len)
-{
- struct lttng_counter_shm_ref ref;
- struct lttng_counter_shm_ref shm_ref_error = { -1, -1 };
-
- if (obj->memory_map_size - obj->allocated_len < len)
- return shm_ref_error;
- ref.index = obj->index;
- ref.offset = obj->allocated_len;
- obj->allocated_len += len;
- return ref;
-}
-
-void lttng_counter_align_shm(struct lttng_counter_shm_object *obj, size_t align)
-{
- size_t offset_len = lttng_ust_offset_align(obj->allocated_len, align);
- obj->allocated_len += offset_len;
-}
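Callers use the two allocator helpers above roughly like this (sketch; obj is assumed to come from one of the table_alloc/append functions earlier in this file):

	/* Sketch: carve an aligned, zeroed uint64_t array out of an object. */
	static int example_alloc_array(struct lttng_counter_shm_object *obj,
			size_t nr_elem, struct lttng_counter_shm_ref *ref)
	{
		lttng_counter_align_shm(obj, __alignof__(uint64_t));
		*ref = lttng_counter_zalloc_shm(obj, nr_elem * sizeof(uint64_t));
		if (ref->index == -1)
			return -ENOMEM;	/* object space exhausted */
		return 0;
	}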
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _LIBCOUNTER_SHM_H
-#define _LIBCOUNTER_SHM_H
-
-#include <stddef.h>
-#include <stdint.h>
-#include <unistd.h>
-#include "common/logging.h"
-#include <urcu/compiler.h>
-#include "shm_types.h"
-
-/* lttng_counter_handle_create - for UST. */
-extern
-struct lttng_counter_shm_handle *lttng_counter_handle_create(void *data,
- uint64_t memory_map_size, int wakeup_fd);
-/* lttng_counter_handle_add_cpu - for UST. */
-extern
-int lttng_counter_handle_add_cpu(struct lttng_counter_shm_handle *handle,
- int shm_fd, uint32_t cpu_nr,
- uint64_t memory_map_size);
-
-unsigned int lttng_counter_handle_get_nr_cpus(struct lttng_counter_shm_handle *handle)
- __attribute__((visibility("hidden")));
-
-/*
- * Pointer dereferencing. We don't trust the shm_ref, so we validate
- * both the index and offset with known boundaries.
- *
- * "shmp" and "shmp_index" guarantee that it's safe to use the pointer
- * target type, even if the shm_ref is modified by an untrusted
- * process having write access to it. We return a NULL pointer if
- * the ranges are invalid.
- */
-static inline
-char *_lttng_counter_shmp_offset(struct lttng_counter_shm_object_table *table,
- struct lttng_counter_shm_ref *ref,
- size_t idx, size_t elem_size)
-{
- struct lttng_counter_shm_object *obj;
- size_t objindex, ref_offset;
-
- objindex = (size_t) ref->index;
- if (caa_unlikely(objindex >= table->allocated_len))
- return NULL;
- obj = &table->objects[objindex];
- ref_offset = (size_t) ref->offset;
- ref_offset += idx * elem_size;
- /* Check if part of the element returned would exceed the limits. */
- if (caa_unlikely(ref_offset + elem_size > obj->memory_map_size))
- return NULL;
- return &obj->memory_map[ref_offset];
-}
-
-#define lttng_counter_shmp_index(handle, ref, index) \
- ({ \
- __typeof__((ref)._type) ____ptr_ret; \
- ____ptr_ret = (__typeof__(____ptr_ret)) _lttng_counter_shmp_offset((handle)->table, &(ref)._ref, index, sizeof(*____ptr_ret)); \
- ____ptr_ret; \
- })
-
-#define lttng_counter_shmp(handle, ref) lttng_counter_shmp_index(handle, ref, 0)
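Together with DECLARE_LTTNG_COUNTER_SHMP() from shm_internal.h, a typical (purely illustrative) use declares a typed reference and dereferences it through the validated accessor, which yields NULL when the untrusted (index, offset) pair is out of range:

	/* Hypothetical structure holding a reference into shm. */
	struct example_data {
		DECLARE_LTTNG_COUNTER_SHMP(uint64_t, values);
	};

	static inline uint64_t *example_get(struct lttng_counter_shm_handle *handle,
			struct example_data *data, size_t i)
	{
		return lttng_counter_shmp_index(handle, data->values, i);
	}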
-
-static inline
-void _lttng_counter_set_shmp(struct lttng_counter_shm_ref *ref, struct lttng_counter_shm_ref src)
-{
- *ref = src;
-}
-
-#define lttng_counter_set_shmp(ref, src) _lttng_counter_set_shmp(&(ref)._ref, src)
-
-struct lttng_counter_shm_object_table *lttng_counter_shm_object_table_create(size_t max_nb_obj)
- __attribute__((visibility("hidden")));
-
-struct lttng_counter_shm_object *lttng_counter_shm_object_table_alloc(struct lttng_counter_shm_object_table *table,
- size_t memory_map_size,
- enum lttng_counter_shm_object_type type,
- const int cpu_fd,
- int cpu)
- __attribute__((visibility("hidden")));
-
-struct lttng_counter_shm_object *lttng_counter_shm_object_table_append_shm(struct lttng_counter_shm_object_table *table,
- int shm_fd, size_t memory_map_size)
- __attribute__((visibility("hidden")));
-
-/* mem ownership is passed to lttng_counter_shm_object_table_append_mem(). */
-struct lttng_counter_shm_object *lttng_counter_shm_object_table_append_mem(struct lttng_counter_shm_object_table *table,
- void *mem, size_t memory_map_size)
- __attribute__((visibility("hidden")));
-
-void lttng_counter_shm_object_table_destroy(struct lttng_counter_shm_object_table *table, int consumer)
- __attribute__((visibility("hidden")));
-
-/*
- * lttng_counter_zalloc_shm - allocate memory within a shm object.
- *
- * Memory is already zeroed (zero_file() for shm-backed objects,
- * zmalloc() for heap-backed ones).
- * *NOT* multithread-safe (should be protected by mutex).
- * Returns a -1, -1 tuple on error.
- */
-struct lttng_counter_shm_ref lttng_counter_zalloc_shm(struct lttng_counter_shm_object *obj, size_t len)
- __attribute__((visibility("hidden")));
-
-void lttng_counter_align_shm(struct lttng_counter_shm_object *obj, size_t align)
- __attribute__((visibility("hidden")));
-
-static inline
-int lttng_counter_shm_get_shm_fd(struct lttng_counter_shm_handle *handle, struct lttng_counter_shm_ref *ref)
-{
- struct lttng_counter_shm_object_table *table = handle->table;
- struct lttng_counter_shm_object *obj;
- size_t index;
-
- index = (size_t) ref->index;
- if (caa_unlikely(index >= table->allocated_len))
- return -EPERM;
- obj = &table->objects[index];
- return obj->shm_fd;
-}
-
-static inline
-int lttng_counter_shm_get_shm_size(struct lttng_counter_shm_handle *handle, struct lttng_counter_shm_ref *ref,
- uint64_t *size)
-{
- struct lttng_counter_shm_object_table *table = handle->table;
- struct lttng_counter_shm_object *obj;
- size_t index;
-
- index = (size_t) ref->index;
- if (caa_unlikely(index >= table->allocated_len))
- return -EPERM;
- obj = &table->objects[index];
- *size = obj->memory_map_size;
- return 0;
-}
-
-#endif /* _LIBCOUNTER_SHM_H */
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _LIBCOUNTER_SHM_INTERNAL_H
-#define _LIBCOUNTER_SHM_INTERNAL_H
-
-struct lttng_counter_shm_ref {
- volatile ssize_t index; /* within the object table */
- volatile ssize_t offset; /* within the object */
-};
-
-#define DECLARE_LTTNG_COUNTER_SHMP(type, name) \
- union { \
- struct lttng_counter_shm_ref _ref; \
- type *_type; \
- } name
-
-#endif /* _LIBCOUNTER_SHM_INTERNAL_H */
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _LIBCOUNTER_SHM_TYPES_H
-#define _LIBCOUNTER_SHM_TYPES_H
-
-#include <stdint.h>
-#include <stddef.h>
-#include <limits.h>
-#include "shm_internal.h"
-
-enum lttng_counter_shm_object_type {
- LTTNG_COUNTER_SHM_OBJECT_SHM,
- LTTNG_COUNTER_SHM_OBJECT_MEM,
-};
-
-struct lttng_counter_shm_object {
- enum lttng_counter_shm_object_type type;
- size_t index; /* within the object table */
- int shm_fd; /* shm fd */
- char *memory_map;
- size_t memory_map_size;
- uint64_t allocated_len;
- int shm_fd_ownership;
-};
-
-struct lttng_counter_shm_object_table {
- size_t size;
- size_t allocated_len;
- struct lttng_counter_shm_object objects[];
-};
-
-struct lttng_counter_shm_handle {
- struct lttng_counter_shm_object_table *table;
-};
-
-#endif /* _LIBCOUNTER_SHM_TYPES_H */
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
- */
-
-#define _LGPL_SOURCE
-
-#include <unistd.h>
-#include <pthread.h>
-#include "smp.h"
-
-int __lttng_counter_num_possible_cpus;
-
-#if (defined(__GLIBC__) || defined(__UCLIBC__))
-void _lttng_counter_get_num_possible_cpus(void)
-{
- int result;
-
-	/*
-	 * On Linux, when some processors are offline,
-	 * _SC_NPROCESSORS_CONF counts the offline processors,
-	 * whereas _SC_NPROCESSORS_ONLN does not. If we used
-	 * _SC_NPROCESSORS_ONLN, getcpu() could return a value
-	 * greater than this sysconf, in which case the arrays
-	 * indexed by processor would overflow.
-	 */
- result = sysconf(_SC_NPROCESSORS_CONF);
- if (result == -1)
- return;
- __lttng_counter_num_possible_cpus = result;
-}
-
-#else
-
-/*
- * The MUSL libc implementation of the _SC_NPROCESSORS_CONF sysconf does not
- * return the number of configured CPUs in the system but relies on the cpu
- * affinity mask of the current task.
- *
- * So instead we use a strategy similar to GLIBC's, counting the cpu
- * directories in "/sys/devices/system/cpu" and falling back on the
- * value from sysconf if that fails.
- */
-
-#include <dirent.h>
-#include <limits.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/types.h>
-
-#define __max(a,b) ((a)>(b)?(a):(b))
-
-void _lttng_counter_get_num_possible_cpus(void)
-{
- int result, count = 0;
- DIR *cpudir;
- struct dirent *entry;
-
- cpudir = opendir("/sys/devices/system/cpu");
- if (cpudir == NULL)
- goto end;
-
- /*
-	 * Count the number of directories named "cpu" followed by an
-	 * integer. This is the same strategy as glibc uses.
- */
- while ((entry = readdir(cpudir))) {
- if (entry->d_type == DT_DIR &&
- strncmp(entry->d_name, "cpu", 3) == 0) {
-
- char *endptr;
- unsigned long cpu_num;
-
- cpu_num = strtoul(entry->d_name + 3, &endptr, 10);
- if ((cpu_num < ULONG_MAX) && (endptr != entry->d_name + 3)
- && (*endptr == '\0')) {
- count++;
- }
- }
- }
-
-end:
- /*
- * Get the sysconf value as a fallback. Keep the highest number.
- */
- result = __max(sysconf(_SC_NPROCESSORS_CONF), count);
-
- /*
- * If both methods failed, don't store the value.
- */
- if (result < 1)
- return;
- __lttng_counter_num_possible_cpus = result;
-}
-#endif
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _LIBCOUNTER_SMP_H
-#define _LIBCOUNTER_SMP_H
-
-/*
- * 4kB of per-cpu data available.
- */
-#define LTTNG_COUNTER_PER_CPU_MEM_SIZE 4096
-
-extern int __lttng_counter_num_possible_cpus
- __attribute__((visibility("hidden")));
-
-extern void _lttng_counter_get_num_possible_cpus(void)
- __attribute__((visibility("hidden")));
-
-static inline
-int lttng_counter_num_possible_cpus(void)
-{
- if (!__lttng_counter_num_possible_cpus)
- _lttng_counter_get_num_possible_cpus();
- return __lttng_counter_num_possible_cpus;
-}
-
-#define lttng_counter_for_each_possible_cpu(cpu) \
- for ((cpu) = 0; (cpu) < lttng_counter_num_possible_cpus(); (cpu)++)
-
-#endif /* _LIBCOUNTER_SMP_H */
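The helpers above are used together; the first call to lttng_counter_num_possible_cpus() lazily populates the cached value. A typical traversal (sketch):

	int cpu;

	lttng_counter_for_each_possible_cpu(cpu) {
		/* e.g. initialize per-cpu state for this cpu */
	}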
lib_LTLIBRARIES = liblttng-ust-common.la liblttng-ust-tracepoint.la liblttng-ust.la
+# ust-common
liblttng_ust_common_la_SOURCES = \
lttng-ust-urcu.c \
lttng-ust-urcu-pointer.c
liblttng_ust_support_la_LIBADD = \
$(top_builddir)/src/libringbuffer/libringbuffer.la \
- $(top_builddir)/src/libcounter/libcounter.la
+ $(top_builddir)/src/common/libcounter.la
liblttng_ust_la_LIBADD = \
-lrt \
*/
#include "ust-events-internal.h"
-#include "../libcounter/counter.h"
-#include "../libcounter/counter-api.h"
+#include "common/counter/counter.h"
+#include "common/counter/counter-api.h"
#include "lttng-tracer-core.h"
#include "lttng-counter-client.h"
*/
#include "ust-events-internal.h"
-#include "../libcounter/counter.h"
-#include "../libcounter/counter-api.h"
+#include "common/counter/counter.h"
+#include "common/counter/counter-api.h"
#include "lttng-tracer-core.h"
#include "lttng-counter-client.h"
#include "../libringbuffer/shm.h"
#include "../libringbuffer/frontend_types.h"
#include "../libringbuffer/frontend.h"
-#include "../libcounter/counter.h"
+#include "common/counter/counter.h"
#include "jhash.h"
#include <lttng/ust-abi.h>
#include "context-provider-internal.h"
#include "../libringbuffer/frontend_types.h"
#include "../libringbuffer/frontend.h"
#include "../libringbuffer/shm.h"
-#include "../libcounter/counter.h"
+#include "common/counter/counter.h"
#include "tracepoint-internal.h"
#include "lttng-tracer.h"
#include "string-utils.h"