ACLOCAL_AMFLAGS = -I m4
SUBDIRS = . include snprintf libringbuffer liblttng-ust-comm \
+ libcounter \
libmsgpack \
liblttng-ust \
liblttng-ust-ctl \
include/Makefile
include/lttng/ust-version.h
snprintf/Makefile
+ libcounter/Makefile
libmsgpack/Makefile
libringbuffer/Makefile
liblttng-ust-comm/Makefile
lttng/lttng-ust-tracelog.h \
lttng/ust-clock.h \
lttng/ust-getcpu.h \
- lttng/ust-elf.h
+ lttng/ust-elf.h \
+ lttng/counter-config.h \
+ lttng/bitmap.h
# note: usterr-signal-safe.h, core.h and share.h need namespace cleanup.
--- /dev/null
+/*
+ * lttng/bitmap.h
+ *
+ * LTTng Bitmap API
+ *
+ * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _LTTNG_BITMAP_H
+#define _LTTNG_BITMAP_H
+
+#include <urcu/compiler.h>
+#include <urcu/system.h>
+#include <urcu/uatomic.h>
+#include <stdbool.h>
+
+/*
+ * Split a linear bit index into a word offset and a bit offset within
+ * that word, for a bitmap stored as an array of unsigned long.
+ */
+static inline void lttng_bitmap_index(unsigned int index, unsigned int *word,
+ unsigned int *bit)
+{
+ *word = index / CAA_BITS_PER_LONG;
+ *bit = index % CAA_BITS_PER_LONG;
+}
+
+/*
+ * Atomically set bit "index" in bitmap "p".
+ *
+ * Fix: the mask must be built from an unsigned long (1UL).  With the
+ * previous "1U << bit", bit can reach CAA_BITS_PER_LONG - 1 (63 on
+ * 64-bit), and shifting a 32-bit 1U by >= 32 is undefined behavior,
+ * so bits in the upper word half could never be set.
+ */
+static inline void lttng_bitmap_set_bit(unsigned int index, unsigned long *p)
+{
+ unsigned int word, bit;
+ unsigned long val;
+
+ lttng_bitmap_index(index, &word, &bit);
+ val = 1UL << bit;
+ uatomic_or(p + word, val);
+}
+
+/*
+ * Atomically clear bit "index" in bitmap "p".
+ *
+ * Fix: the mask must be built from an unsigned long (1UL).  The
+ * previous "~(1U << bit)" was doubly broken on 64-bit: the shift is
+ * undefined for bit >= 32, and the 32-bit complement zero-extends to
+ * unsigned long, so uatomic_and() would also clear all 32 upper bits
+ * of the word.
+ */
+static inline void lttng_bitmap_clear_bit(unsigned int index, unsigned long *p)
+{
+ unsigned int word, bit;
+ unsigned long val;
+
+ lttng_bitmap_index(index, &word, &bit);
+ val = ~(1UL << bit);
+ uatomic_and(p + word, val);
+}
+
+/*
+ * Test bit "index" in bitmap "p".  Uses CMM_LOAD_SHARED for a
+ * volatile-safe read of a word that concurrent writers may be
+ * updating with uatomic_or/uatomic_and.
+ */
+static inline bool lttng_bitmap_test_bit(unsigned int index, unsigned long *p)
+{
+ unsigned int word, bit;
+
+ lttng_bitmap_index(index, &word, &bit);
+ return (CMM_LOAD_SHARED(p[word]) >> bit) & 0x1;
+}
+
+#endif /* _LTTNG_BITMAP_H */
--- /dev/null
+/*
+ * lttng/counter-config.h
+ *
+ * LTTng Counters Configuration
+ *
+ * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _LTTNG_COUNTER_CONFIG_H
+#define _LTTNG_COUNTER_CONFIG_H
+
+#include <stdint.h>
+
+/* Allocation flags: may be OR'd together (per-cpu and/or global). */
+enum lib_counter_config_alloc {
+ COUNTER_ALLOC_PER_CPU = (1 << 0),
+ COUNTER_ALLOC_GLOBAL = (1 << 1),
+};
+
+/* Synchronization scope used when updating counter values. */
+enum lib_counter_config_sync {
+ COUNTER_SYNC_PER_CPU,
+ COUNTER_SYNC_GLOBAL,
+};
+
+/* Static configuration of a counter instance. */
+struct lib_counter_config {
+ uint32_t alloc; /* enum lib_counter_config_alloc flags */
+ enum lib_counter_config_sync sync;
+ enum {
+ COUNTER_ARITHMETIC_MODULAR,
+ COUNTER_ARITHMETIC_SATURATE, /* TODO */
+ } arithmetic;
+ /* Enumerator values are the element size in bytes. */
+ enum {
+ COUNTER_SIZE_8_BIT = 1,
+ COUNTER_SIZE_16_BIT = 2,
+ COUNTER_SIZE_32_BIT = 4,
+ COUNTER_SIZE_64_BIT = 8,
+ } counter_size;
+};
+
+#endif /* _LTTNG_COUNTER_CONFIG_H */
*/
} LTTNG_PACKED;
+
+/* ABI: counter arithmetic mode communicated between sessiond and UST. */
+enum lttng_ust_counter_arithmetic {
+ LTTNG_UST_COUNTER_ARITHMETIC_MODULAR = 0,
+ LTTNG_UST_COUNTER_ARITHMETIC_SATURATION = 1,
+};
+
+/* ABI: enumerator values encode the counter element size in bytes. */
+enum lttng_ust_counter_bitness {
+ LTTNG_UST_COUNTER_BITNESS_32BITS = 4,
+ LTTNG_UST_COUNTER_BITNESS_64BITS = 8,
+};
+
+/* ABI: one dimension of a multi-dimensional counter. */
+struct lttng_ust_counter_dimension {
+ uint64_t size;
+ uint64_t underflow_index;
+ uint64_t overflow_index;
+ uint8_t has_underflow;
+ uint8_t has_overflow;
+} LTTNG_PACKED;
+
+#define LTTNG_UST_COUNTER_DIMENSION_MAX 8
+/* ABI: full counter configuration sent over the command socket. */
+struct lttng_ust_counter_conf {
+ uint32_t arithmetic; /* enum lttng_ust_counter_arithmetic */
+ uint32_t bitness; /* enum lttng_ust_counter_bitness */
+ uint32_t number_dimensions;
+ int64_t global_sum_step;
+ struct lttng_ust_counter_dimension dimensions[LTTNG_UST_COUNTER_DIMENSION_MAX];
+} LTTNG_PACKED;
+
+/* ABI: a sampled counter value at the given dimension indexes. */
+struct lttng_ust_counter_value {
+ uint32_t number_dimensions;
+ uint64_t dimension_indexes[LTTNG_UST_COUNTER_DIMENSION_MAX];
+ int64_t value;
+} LTTNG_PACKED;
+
#define LTTNG_UST_EVENT_PADDING1 8
#define LTTNG_UST_EVENT_PADDING2 (LTTNG_UST_SYM_NAME_LEN + 32)
struct lttng_ust_event {
#define LTTNG_UST_EVENT_NOTIFIER_PADDING1 16
struct lttng_ust_event_notifier {
struct lttng_ust_event event;
+ uint64_t error_counter_index;
char padding[LTTNG_UST_EVENT_NOTIFIER_PADDING1];
} LTTNG_PACKED;
char padding[LTTNG_EVENT_NOTIFIER_NOTIFICATION_PADDING];
} LTTNG_PACKED;
+#define LTTNG_UST_COUNTER_PADDING1 (LTTNG_UST_SYM_NAME_LEN + 32)
+#define LTTNG_UST_COUNTER_DATA_MAX_LEN 4096U
+struct lttng_ust_counter {
+ uint64_t len;
+ char padding[LTTNG_UST_COUNTER_PADDING1];
+ char data[]; /* variable sized data */
+} LTTNG_PACKED;
+
+#define LTTNG_UST_COUNTER_GLOBAL_PADDING1 (LTTNG_UST_SYM_NAME_LEN + 32)
+struct lttng_ust_counter_global {
+ uint64_t len; /* shm len */
+ char padding[LTTNG_UST_COUNTER_GLOBAL_PADDING1];
+} LTTNG_PACKED;
+
+#define LTTNG_UST_COUNTER_CPU_PADDING1 (LTTNG_UST_SYM_NAME_LEN + 32)
+struct lttng_ust_counter_cpu {
+ uint64_t len; /* shm len */
+ uint32_t cpu_nr;
+ char padding[LTTNG_UST_COUNTER_CPU_PADDING1];
+} LTTNG_PACKED;
+
enum lttng_ust_field_type {
LTTNG_UST_FIELD_OTHER = 0,
LTTNG_UST_FIELD_INTEGER = 1,
LTTNG_UST_OBJECT_TYPE_CONTEXT = 3,
LTTNG_UST_OBJECT_TYPE_EVENT_NOTIFIER_GROUP = 4,
LTTNG_UST_OBJECT_TYPE_EVENT_NOTIFIER = 5,
+ LTTNG_UST_OBJECT_TYPE_COUNTER = 6,
+ LTTNG_UST_OBJECT_TYPE_COUNTER_GLOBAL = 7,
+ LTTNG_UST_OBJECT_TYPE_COUNTER_CPU = 8,
};
#define LTTNG_UST_OBJECT_DATA_PADDING1 32
int wakeup_fd;
uint32_t stream_nr;
} stream;
+ struct {
+ void *data;
+ } counter;
+ struct {
+ int shm_fd;
+ } counter_global;
+ struct {
+ int shm_fd;
+ uint32_t cpu_nr;
+ } counter_cpu;
char padding2[LTTNG_UST_OBJECT_DATA_PADDING2];
} u;
} LTTNG_PACKED;
_UST_CMDW(0xB0, struct lttng_ust_event_notifier)
#define LTTNG_UST_CAPTURE _UST_CMD(0xB1)
+/* Session and event notifier group commands */
+#define LTTNG_UST_COUNTER \
+ _UST_CMDW(0xC0, struct lttng_ust_counter)
+
+/* Counter commands */
+#define LTTNG_UST_COUNTER_GLOBAL \
+ _UST_CMDW(0xD0, struct lttng_ust_counter_global)
+#define LTTNG_UST_COUNTER_CPU \
+ _UST_CMDW(0xD1, struct lttng_ust_counter_cpu)
+
#define LTTNG_UST_ROOT_HANDLE 0
struct lttng_ust_obj;
struct {
int event_notifier_notif_fd;
} event_notifier_handle;
+ struct {
+ void *counter_data;
+ } counter;
+ struct {
+ int shm_fd;
+ } counter_shm;
};
struct lttng_ust_objd_ops {
#include <limits.h>
#include <stddef.h>
#include <stdint.h>
+#include <stdbool.h>
#include <sys/types.h>
#include <lttng/ust-abi.h>
enum ustctl_channel_header header_type,
int ret_code); /* return code. 0 ok, negative error */
+/*
+ * Counter API.
+ */
+
+enum ustctl_counter_bitness {
+ USTCTL_COUNTER_BITNESS_32 = 4,
+ USTCTL_COUNTER_BITNESS_64 = 8,
+};
+
+enum ustctl_counter_arithmetic {
+ USTCTL_COUNTER_ARITHMETIC_MODULAR = 0,
+ USTCTL_COUNTER_ARITHMETIC_SATURATION = 1,
+};
+
+/* Used as alloc flags. */
+enum ustctl_counter_alloc {
+ USTCTL_COUNTER_ALLOC_PER_CPU = (1 << 0),
+ USTCTL_COUNTER_ALLOC_GLOBAL = (1 << 1),
+};
+
+struct ustctl_daemon_counter;
+
+int ustctl_get_nr_cpu_per_counter(void);
+
+struct ustctl_counter_dimension {
+ uint64_t size;
+ uint64_t underflow_index;
+ uint64_t overflow_index;
+ uint8_t has_underflow;
+ uint8_t has_overflow;
+};
+
+struct ustctl_daemon_counter *
+ ustctl_create_counter(size_t nr_dimensions,
+ const struct ustctl_counter_dimension *dimensions,
+ int64_t global_sum_step,
+ int global_counter_fd,
+ int nr_counter_cpu_fds,
+ const int *counter_cpu_fds,
+ enum ustctl_counter_bitness bitness,
+ enum ustctl_counter_arithmetic arithmetic,
+ uint32_t alloc_flags);
+
+int ustctl_create_counter_data(struct ustctl_daemon_counter *counter,
+ struct lttng_ust_object_data **counter_data);
+
+int ustctl_create_counter_global_data(struct ustctl_daemon_counter *counter,
+ struct lttng_ust_object_data **counter_global_data);
+int ustctl_create_counter_cpu_data(struct ustctl_daemon_counter *counter, int cpu,
+ struct lttng_ust_object_data **counter_cpu_data);
+
+/*
+ * Each counter data and counter cpu data created need to be destroyed
+ * before calling ustctl_destroy_counter().
+ */
+void ustctl_destroy_counter(struct ustctl_daemon_counter *counter);
+
+int ustctl_send_counter_data_to_ust(int sock, int parent_handle,
+ struct lttng_ust_object_data *counter_data);
+int ustctl_send_counter_global_data_to_ust(int sock,
+ struct lttng_ust_object_data *counter_data,
+ struct lttng_ust_object_data *counter_global_data);
+int ustctl_send_counter_cpu_data_to_ust(int sock,
+ struct lttng_ust_object_data *counter_data,
+ struct lttng_ust_object_data *counter_cpu_data);
+
+int ustctl_counter_read(struct ustctl_daemon_counter *counter,
+ const size_t *dimension_indexes,
+ int cpu, int64_t *value,
+ bool *overflow, bool *underflow);
+int ustctl_counter_aggregate(struct ustctl_daemon_counter *counter,
+ const size_t *dimension_indexes,
+ int64_t *value,
+ bool *overflow, bool *underflow);
+int ustctl_counter_clear(struct ustctl_daemon_counter *counter,
+ const size_t *dimension_indexes);
+
#endif /* _LTTNG_UST_CTL_H */
struct lttng_ust_lib_ring_buffer_ctx;
struct lttng_ust_context_app;
struct lttng_event_field;
+struct lttng_event_notifier_group;
/*
* Data structures used by tracepoint event declarations, and by the
int tstate:1; /* Transient enable state */
};
+#define LTTNG_COUNTER_DIMENSION_MAX 8
+
+struct lttng_counter_dimension {
+ uint64_t size;
+ uint64_t underflow_index;
+ uint64_t overflow_index;
+ uint8_t has_underflow;
+ uint8_t has_overflow;
+};
+
+struct lttng_counter_ops {
+ struct lib_counter *(*counter_create)(size_t nr_dimensions,
+ const struct lttng_counter_dimension *dimensions,
+ int64_t global_sum_step,
+ int global_counter_fd,
+ int nr_counter_cpu_fds,
+ const int *counter_cpu_fds,
+ bool is_daemon);
+ void (*counter_destroy)(struct lib_counter *counter);
+ int (*counter_add)(struct lib_counter *counter,
+ const size_t *dimension_indexes, int64_t v);
+ int (*counter_read)(struct lib_counter *counter,
+ const size_t *dimension_indexes, int cpu,
+ int64_t *value, bool *overflow, bool *underflow);
+ int (*counter_aggregate)(struct lib_counter *counter,
+ const size_t *dimension_indexes, int64_t *value,
+ bool *overflow, bool *underflow);
+ int (*counter_clear)(struct lib_counter *counter, const size_t *dimension_indexes);
+};
+
#define LTTNG_UST_STACK_CTX_PADDING 32
struct lttng_stack_ctx {
struct lttng_event *event;
struct lttng_ctx *ctx; /* contexts for filters. */
};
+struct lttng_counter {
+ int objd;
+ struct lttng_event_notifier_group *event_notifier_group; /* owner */
+ struct lttng_counter_transport *transport;
+ struct lib_counter *counter;
+ struct lttng_counter_ops *ops;
+};
+
struct lttng_event_notifier_group {
int objd;
void *owner;
struct cds_list_head event_notifiers_head; /* list of event_notifiers */
struct lttng_ust_event_notifier_ht event_notifiers_ht; /* hashtable of event_notifiers */
struct lttng_ctx *ctx; /* contexts for filters. */
+
+ struct lttng_counter *error_counter;
+ size_t error_counter_len;
};
struct lttng_transport {
const struct lttng_ust_lib_ring_buffer_config *client_config;
};
+struct lttng_counter_transport {
+ char *name;
+ struct cds_list_head node;
+ struct lttng_counter_ops ops;
+ const struct lib_counter_config *client_config;
+};
+
struct lttng_session *lttng_session_create(void);
int lttng_session_enable(struct lttng_session *session);
int lttng_session_disable(struct lttng_session *session);
void lttng_transport_register(struct lttng_transport *transport);
void lttng_transport_unregister(struct lttng_transport *transport);
+void lttng_counter_transport_register(struct lttng_counter_transport *transport);
+void lttng_counter_transport_unregister(struct lttng_counter_transport *transport);
+
+struct lttng_counter *lttng_ust_counter_create(
+ const char *counter_transport_name,
+ size_t number_dimensions, const struct lttng_counter_dimension *dimensions);
+
void synchronize_trace(void);
int lttng_probe_register(struct lttng_probe_desc *desc);
extern const struct lttng_ust_client_lib_ring_buffer_client_cb *lttng_client_callbacks_overwrite;
struct lttng_transport *lttng_transport_find(const char *name);
+struct lttng_counter_transport *lttng_counter_transport_find(const char *name);
int lttng_probes_get_event_list(struct lttng_ust_tracepoint_list *list);
void lttng_probes_prune_event_list(struct lttng_ust_tracepoint_list *list);
uint32_t reloc_offset;
uint64_t seqnum;
} LTTNG_PACKED capture;
+ struct lttng_ust_counter counter;
+ struct lttng_ust_counter_global counter_global;
+ struct lttng_ust_counter_cpu counter_cpu;
char padding[USTCOMM_MSG_PADDING2];
} u;
} LTTNG_PACKED;
ssize_t ustcomm_recv_event_notifier_notif_fd_from_sessiond(int sock,
int *event_notifier_notif_fd);
+ssize_t ustcomm_recv_counter_from_sessiond(int sock,
+ void **counter_data, uint64_t len);
+int ustcomm_recv_counter_shm_from_sessiond(int sock,
+ int *shm_fd);
+
/*
* Returns 0 on success, negative error value on error.
* Returns -EPIPE or -ECONNRESET if other end has hung up.
--- /dev/null
+AM_CPPFLAGS = -I$(top_srcdir)/include -I$(top_builddir)/include
+# NOTE(review): "+=" assumes AM_CFLAGS is already defined (presumably by
+# configure via AC_SUBST) — confirm, otherwise automake rejects "+=" on
+# an undefined variable.
+AM_CFLAGS += -fno-strict-aliasing
+
+# Convenience library, linked into liblttng-ust; not installed.
+noinst_LTLIBRARIES = libcounter.la
+
+libcounter_la_SOURCES = \
+	counter.c smp.c smp.h shm.c shm.h shm_internal.h shm_types.h \
+	counter-api.h counter.h counter-internal.h counter-types.h
+
+libcounter_la_LIBADD = \
+	-lpthread \
+	-lrt
+
+if HAVE_LIBNUMA
+libcounter_la_LIBADD += -lnuma
+endif
+
+libcounter_la_CFLAGS = -DUST_COMPONENT="libcounter" $(AM_CFLAGS)
--- /dev/null
+/*
+ * counter/counter-api.h
+ *
+ * LTTng Counters API, requiring counter/config.h
+ *
+ * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _LTTNG_COUNTER_API_H
+#define _LTTNG_COUNTER_API_H
+
+#include <stdint.h>
+#include <limits.h>
+#include "counter.h"
+#include "counter-internal.h"
+#include <urcu/compiler.h>
+#include <urcu/uatomic.h>
+#include <lttng/bitmap.h>
+#include "../libringbuffer/getcpu.h"
+
+/*
+ * Using unsigned arithmetic because overflow is defined.
+ */
+/*
+ * Add v to the counter element selected by dimension_indexes, within
+ * the layout chosen by "alloc" (per-cpu or global), using a cmpxchg
+ * retry loop.  Unsigned arithmetic is used for the addition because
+ * unsigned overflow is well-defined in C, unlike signed overflow.
+ *
+ * With COUNTER_SYNC_PER_CPU, when the per-cpu value drifts past
+ * +/- global_sum_step, half of the step is carved out of the per-cpu
+ * counter and reported through *remainder (if non-NULL) so the caller
+ * can fold it into the global counter.
+ *
+ * Returns 0 on success, -EOVERFLOW on out-of-range indexes, -ENODEV
+ * when the selected layout is not mapped yet, -EINVAL on invalid
+ * alloc/counter_size values.
+ */
+static inline int __lttng_counter_add(const struct lib_counter_config *config,
+ enum lib_counter_config_alloc alloc,
+ enum lib_counter_config_sync sync,
+ struct lib_counter *counter,
+ const size_t *dimension_indexes, int64_t v,
+ int64_t *remainder)
+{
+ size_t index;
+ bool overflow = false, underflow = false;
+ struct lib_counter_layout *layout;
+ int64_t move_sum = 0;
+
+ if (caa_unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
+ return -EOVERFLOW;
+ index = lttng_counter_get_index(config, counter, dimension_indexes);
+
+ /* Select the per-cpu or global layout. */
+ switch (alloc) {
+ case COUNTER_ALLOC_PER_CPU:
+ layout = &counter->percpu_counters[lttng_ust_get_cpu()];
+ break;
+ case COUNTER_ALLOC_GLOBAL:
+ layout = &counter->global_counters;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (caa_unlikely(!layout->counters))
+ return -ENODEV;
+
+ /* Dispatch on element size; each case is the same cmpxchg loop. */
+ switch (config->counter_size) {
+ case COUNTER_SIZE_8_BIT:
+ {
+ int8_t *int_p = (int8_t *) layout->counters + index;
+ int8_t old, n, res;
+ int8_t global_sum_step = counter->global_sum_step.s8;
+
+ res = *int_p;
+ switch (sync) {
+ case COUNTER_SYNC_PER_CPU:
+ {
+ do {
+ move_sum = 0;
+ old = res;
+ n = (int8_t) ((uint8_t) old + (uint8_t) v);
+ if (caa_unlikely(n > (int8_t) global_sum_step))
+ move_sum = (int8_t) global_sum_step / 2;
+ else if (caa_unlikely(n < -(int8_t) global_sum_step))
+ move_sum = -((int8_t) global_sum_step / 2);
+ n -= move_sum;
+ res = uatomic_cmpxchg(int_p, old, n);
+ } while (old != res);
+ break;
+ }
+ case COUNTER_SYNC_GLOBAL:
+ {
+ do {
+ old = res;
+ n = (int8_t) ((uint8_t) old + (uint8_t) v);
+ res = uatomic_cmpxchg(int_p, old, n);
+ } while (old != res);
+ break;
+ }
+ }
+ /* |v| >= type max always wraps; otherwise detect by sign flip. */
+ if (v > 0 && (v >= UINT8_MAX || n < old))
+ overflow = true;
+ else if (v < 0 && (v <= -UINT8_MAX || n > old))
+ underflow = true;
+ break;
+ }
+ case COUNTER_SIZE_16_BIT:
+ {
+ int16_t *int_p = (int16_t *) layout->counters + index;
+ int16_t old, n, res;
+ int16_t global_sum_step = counter->global_sum_step.s16;
+
+ res = *int_p;
+ switch (sync) {
+ case COUNTER_SYNC_PER_CPU:
+ {
+ do {
+ move_sum = 0;
+ old = res;
+ n = (int16_t) ((uint16_t) old + (uint16_t) v);
+ if (caa_unlikely(n > (int16_t) global_sum_step))
+ move_sum = (int16_t) global_sum_step / 2;
+ else if (caa_unlikely(n < -(int16_t) global_sum_step))
+ move_sum = -((int16_t) global_sum_step / 2);
+ n -= move_sum;
+ res = uatomic_cmpxchg(int_p, old, n);
+ } while (old != res);
+ break;
+ }
+ case COUNTER_SYNC_GLOBAL:
+ {
+ do {
+ old = res;
+ n = (int16_t) ((uint16_t) old + (uint16_t) v);
+ res = uatomic_cmpxchg(int_p, old, n);
+ } while (old != res);
+ break;
+ }
+ }
+ if (v > 0 && (v >= UINT16_MAX || n < old))
+ overflow = true;
+ else if (v < 0 && (v <= -UINT16_MAX || n > old))
+ underflow = true;
+ break;
+ }
+ case COUNTER_SIZE_32_BIT:
+ {
+ int32_t *int_p = (int32_t *) layout->counters + index;
+ int32_t old, n, res;
+ int32_t global_sum_step = counter->global_sum_step.s32;
+
+ res = *int_p;
+ switch (sync) {
+ case COUNTER_SYNC_PER_CPU:
+ {
+ do {
+ move_sum = 0;
+ old = res;
+ n = (int32_t) ((uint32_t) old + (uint32_t) v);
+ if (caa_unlikely(n > (int32_t) global_sum_step))
+ move_sum = (int32_t) global_sum_step / 2;
+ else if (caa_unlikely(n < -(int32_t) global_sum_step))
+ move_sum = -((int32_t) global_sum_step / 2);
+ n -= move_sum;
+ res = uatomic_cmpxchg(int_p, old, n);
+ } while (old != res);
+ break;
+ }
+ case COUNTER_SYNC_GLOBAL:
+ {
+ do {
+ old = res;
+ n = (int32_t) ((uint32_t) old + (uint32_t) v);
+ res = uatomic_cmpxchg(int_p, old, n);
+ } while (old != res);
+ break;
+ }
+ }
+ if (v > 0 && (v >= UINT32_MAX || n < old))
+ overflow = true;
+ else if (v < 0 && (v <= -UINT32_MAX || n > old))
+ underflow = true;
+ break;
+ }
+#if CAA_BITS_PER_LONG == 64
+ /* 64-bit counters require 64-bit uatomic_cmpxchg support. */
+ case COUNTER_SIZE_64_BIT:
+ {
+ int64_t *int_p = (int64_t *) layout->counters + index;
+ int64_t old, n, res;
+ int64_t global_sum_step = counter->global_sum_step.s64;
+
+ res = *int_p;
+ switch (sync) {
+ case COUNTER_SYNC_PER_CPU:
+ {
+ do {
+ move_sum = 0;
+ old = res;
+ n = (int64_t) ((uint64_t) old + (uint64_t) v);
+ if (caa_unlikely(n > (int64_t) global_sum_step))
+ move_sum = (int64_t) global_sum_step / 2;
+ else if (caa_unlikely(n < -(int64_t) global_sum_step))
+ move_sum = -((int64_t) global_sum_step / 2);
+ n -= move_sum;
+ res = uatomic_cmpxchg(int_p, old, n);
+ } while (old != res);
+ break;
+ }
+ case COUNTER_SYNC_GLOBAL:
+ {
+ do {
+ old = res;
+ n = (int64_t) ((uint64_t) old + (uint64_t) v);
+ res = uatomic_cmpxchg(int_p, old, n);
+ } while (old != res);
+ break;
+ }
+ }
+ if (v > 0 && n < old)
+ overflow = true;
+ else if (v < 0 && n > old)
+ underflow = true;
+ break;
+ }
+#endif
+ default:
+ return -EINVAL;
+ }
+ /* Sticky flags: set the bitmap bit once, never cleared here. */
+ if (caa_unlikely(overflow && !lttng_bitmap_test_bit(index, layout->overflow_bitmap)))
+ lttng_bitmap_set_bit(index, layout->overflow_bitmap)
+ else if (caa_unlikely(underflow && !lttng_bitmap_test_bit(index, layout->underflow_bitmap)))
+ lttng_bitmap_set_bit(index, layout->underflow_bitmap);
+ if (remainder)
+ *remainder = move_sum;
+ return 0;
+}
+
+/*
+ * Per-cpu add: update the per-cpu counter first; if the update carved
+ * out a remainder (global_sum_step reached), fold it into the global
+ * counter with global synchronization.
+ */
+static inline int __lttng_counter_add_percpu(const struct lib_counter_config *config,
+ struct lib_counter *counter,
+ const size_t *dimension_indexes, int64_t v)
+{
+ int64_t move_sum;
+ int ret;
+
+ ret = __lttng_counter_add(config, COUNTER_ALLOC_PER_CPU, config->sync,
+ counter, dimension_indexes, v, &move_sum);
+ if (caa_unlikely(ret))
+ return ret;
+ if (caa_unlikely(move_sum))
+ return __lttng_counter_add(config, COUNTER_ALLOC_GLOBAL, COUNTER_SYNC_GLOBAL,
+ counter, dimension_indexes, move_sum, NULL);
+ return 0;
+}
+
+/* Global-only add: no per-cpu staging, no remainder to propagate. */
+static inline int __lttng_counter_add_global(const struct lib_counter_config *config,
+ struct lib_counter *counter,
+ const size_t *dimension_indexes, int64_t v)
+{
+ return __lttng_counter_add(config, COUNTER_ALLOC_GLOBAL, config->sync, counter,
+ dimension_indexes, v, NULL);
+}
+
+/*
+ * Public add entry point: route to the per-cpu path whenever per-cpu
+ * allocation is configured (alone or combined with global), otherwise
+ * to the global-only path.  Returns 0 or a negative errno value.
+ */
+static inline int lttng_counter_add(const struct lib_counter_config *config,
+ struct lib_counter *counter,
+ const size_t *dimension_indexes, int64_t v)
+{
+ switch (config->alloc) {
+ case COUNTER_ALLOC_PER_CPU: /* Fallthrough */
+ case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
+ return __lttng_counter_add_percpu(config, counter, dimension_indexes, v);
+ case COUNTER_ALLOC_GLOBAL:
+ return __lttng_counter_add_global(config, counter, dimension_indexes, v);
+ default:
+ return -EINVAL;
+ }
+}
+
+/* Increment by one; convenience wrapper around lttng_counter_add(). */
+static inline int lttng_counter_inc(const struct lib_counter_config *config,
+ struct lib_counter *counter,
+ const size_t *dimension_indexes)
+{
+ return lttng_counter_add(config, counter, dimension_indexes, 1);
+}
+
+/* Decrement by one; convenience wrapper around lttng_counter_add(). */
+static inline int lttng_counter_dec(const struct lib_counter_config *config,
+ struct lib_counter *counter,
+ const size_t *dimension_indexes)
+{
+ return lttng_counter_add(config, counter, dimension_indexes, -1);
+}
+
+#endif /* _LTTNG_COUNTER_API_H */
--- /dev/null
+/*
+ * counter/counter-internal.h
+ *
+ * LTTng Counters Internal Header
+ *
+ * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _LTTNG_COUNTER_INTERNAL_H
+#define _LTTNG_COUNTER_INTERNAL_H
+
+#include <stdint.h>
+#include <lttng/ust-config.h>
+#include <urcu/compiler.h>
+#include "counter-types.h"
+
+/*
+ * Bounds-check one index per dimension against max_nr_elem.
+ * Returns 0 when all indexes are in range, -EOVERFLOW otherwise.
+ * The config parameter is currently unused.
+ */
+static inline int lttng_counter_validate_indexes(const struct lib_counter_config *config,
+ struct lib_counter *counter,
+ const size_t *dimension_indexes)
+{
+ size_t nr_dimensions = counter->nr_dimensions, i;
+
+ for (i = 0; i < nr_dimensions; i++) {
+ if (caa_unlikely(dimension_indexes[i] >= counter->dimensions[i].max_nr_elem))
+ return -EOVERFLOW;
+ }
+ return 0;
+}
+
+
+/*
+ * Flatten multi-dimensional indexes into a linear element index using
+ * each dimension's precomputed stride (see lttng_counter_init_stride).
+ * Callers must validate the indexes first; no bounds check here.
+ */
+static inline size_t lttng_counter_get_index(const struct lib_counter_config *config,
+ struct lib_counter *counter,
+ const size_t *dimension_indexes)
+{
+ size_t nr_dimensions = counter->nr_dimensions, i;
+ size_t index = 0;
+
+ for (i = 0; i < nr_dimensions; i++) {
+ struct lib_counter_dimension *dimension = &counter->dimensions[i];
+ const size_t *dimension_index = &dimension_indexes[i];
+
+ index += *dimension_index * dimension->stride;
+ }
+ return index;
+}
+
+#endif /* _LTTNG_COUNTER_INTERNAL_H */
--- /dev/null
+/*
+ * counter/counter-types.h
+ *
+ * LTTng Counters Types
+ *
+ * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _LTTNG_COUNTER_TYPES_H
+#define _LTTNG_COUNTER_TYPES_H
+
+#include <stdint.h>
+#include <stddef.h>
+#include <stdbool.h>
+#include <sys/types.h>
+#include <lttng/counter-config.h>
+#include <lttng/ust-config.h>
+#include "shm_types.h"
+
+struct lib_counter_dimension {
+ /*
+ * Max. number of indexable elements.
+ */
+ size_t max_nr_elem;
+ /*
+ * The stride for a dimension is the multiplication factor which
+ * should be applied to its index to take into account other
+ * dimensions nested inside.
+ */
+ size_t stride;
+};
+
+/* One mapped shared-memory view: counters array plus the two sticky
+ * overflow/underflow bitmaps (one bit per element). */
+struct lib_counter_layout {
+ void *counters;
+ unsigned long *overflow_bitmap;
+ unsigned long *underflow_bitmap;
+ int shm_fd;
+ size_t shm_len;
+ struct lttng_counter_shm_handle handle;
+};
+
+enum lib_counter_arithmetic {
+ LIB_COUNTER_ARITHMETIC_MODULAR,
+ LIB_COUNTER_ARITHMETIC_SATURATE,
+};
+
+/* Runtime state of one counter instance. */
+struct lib_counter {
+ size_t nr_dimensions;
+ int64_t allocated_elem;
+ struct lib_counter_dimension *dimensions;
+ enum lib_counter_arithmetic arithmetic;
+ union {
+ struct {
+ int32_t max, min;
+ } limits_32_bit;
+ struct {
+ int64_t max, min;
+ } limits_64_bit;
+ } saturation;
+ /* Member selected by config.counter_size. */
+ union {
+ int8_t s8;
+ int16_t s16;
+ int32_t s32;
+ int64_t s64;
+ } global_sum_step; /* 0 if unused */
+ struct lib_counter_config config;
+
+ struct lib_counter_layout global_counters;
+ struct lib_counter_layout *percpu_counters;
+
+ /* true: this side allocates the shm; false: it maps pre-existing shm. */
+ bool is_daemon;
+ struct lttng_counter_shm_object_table *object_table;
+};
+
+#endif /* _LTTNG_COUNTER_TYPES_H */
--- /dev/null
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * counter.c
+ *
+ * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#define _GNU_SOURCE
+#include <errno.h>
+#include "counter.h"
+#include "counter-internal.h"
+#include <lttng/bitmap.h>
+#include <urcu/system.h>
+#include <urcu/compiler.h>
+#include <stdbool.h>
+#include <helper.h>
+#include <lttng/align.h>
+#include "smp.h"
+#include "shm.h"
+
+/* Number of indexable elements in one dimension. */
+static size_t lttng_counter_get_dimension_nr_elements(struct lib_counter_dimension *dimension)
+{
+ return dimension->max_nr_elem;
+}
+
+/*
+ * Compute, innermost dimension first, the stride of each dimension
+ * (product of the sizes of all dimensions nested inside it).
+ *
+ * Fix: the SIZE_MAX overflow guard was evaluated AFTER the
+ * "stride *= nr_elem" multiplication, so a product that already
+ * wrapped around could pass the check.  Test before multiplying.
+ *
+ * Returns 0 on success, -EINVAL on a zero-sized dimension or when the
+ * total element count would overflow size_t.
+ */
+static int lttng_counter_init_stride(const struct lib_counter_config *config,
+ struct lib_counter *counter)
+{
+ size_t nr_dimensions = counter->nr_dimensions;
+ size_t stride = 1;
+ ssize_t i;
+
+ for (i = nr_dimensions - 1; i >= 0; i--) {
+ struct lib_counter_dimension *dimension = &counter->dimensions[i];
+ size_t nr_elem;
+
+ nr_elem = lttng_counter_get_dimension_nr_elements(dimension);
+ dimension->stride = stride;
+ /* nr_elem should be minimum 1 for each dimension. */
+ if (!nr_elem)
+ return -EINVAL;
+ /* Reject overflow before it happens. */
+ if (stride > SIZE_MAX / nr_elem)
+ return -EINVAL;
+ stride *= nr_elem;
+ }
+ return 0;
+}
+
+/*
+ * Initialize one layout (cpu == -1 selects the global layout) by
+ * allocating (daemon side) or mapping (application side) the shared
+ * memory holding the counters array followed by the overflow and
+ * underflow bitmaps.
+ *
+ * NOTE(review): the bitmaps are accessed as unsigned long words but
+ * their offsets/length are only rounded to whole bytes
+ * (ALIGN(nr_elem, 8) / 8) — confirm alignment is guaranteed elsewhere.
+ */
+static int lttng_counter_layout_init(struct lib_counter *counter, int cpu, int shm_fd)
+{
+ struct lib_counter_layout *layout;
+ size_t counter_size;
+ size_t nr_elem = counter->allocated_elem;
+ size_t shm_length = 0, counters_offset, overflow_offset, underflow_offset;
+ struct lttng_counter_shm_object *shm_object;
+
+ if (shm_fd < 0)
+ return 0; /* Skip, will be populated later. */
+
+ if (cpu == -1)
+ layout = &counter->global_counters;
+ else
+ layout = &counter->percpu_counters[cpu];
+ switch (counter->config.counter_size) {
+ case COUNTER_SIZE_8_BIT:
+ case COUNTER_SIZE_16_BIT:
+ case COUNTER_SIZE_32_BIT:
+ case COUNTER_SIZE_64_BIT:
+ counter_size = (size_t) counter->config.counter_size;
+ break;
+ default:
+ return -EINVAL;
+ }
+ layout->shm_fd = shm_fd;
+ /* Lay out: counters array, then one bit per element per bitmap. */
+ counters_offset = shm_length;
+ shm_length += counter_size * nr_elem;
+ overflow_offset = shm_length;
+ shm_length += ALIGN(nr_elem, 8) / 8;
+ underflow_offset = shm_length;
+ shm_length += ALIGN(nr_elem, 8) / 8;
+ layout->shm_len = shm_length;
+ if (counter->is_daemon) {
+ /* Allocate and clear shared memory. */
+ shm_object = lttng_counter_shm_object_table_alloc(counter->object_table,
+ shm_length, LTTNG_COUNTER_SHM_OBJECT_SHM, shm_fd, cpu);
+ if (!shm_object)
+ return -ENOMEM;
+ } else {
+ /* Map pre-existing shared memory. */
+ shm_object = lttng_counter_shm_object_table_append_shm(counter->object_table,
+ shm_fd, shm_length);
+ if (!shm_object)
+ return -ENOMEM;
+ }
+ layout->counters = shm_object->memory_map + counters_offset;
+ layout->overflow_bitmap = (unsigned long *)(shm_object->memory_map + overflow_offset);
+ layout->underflow_bitmap = (unsigned long *)(shm_object->memory_map + underflow_offset);
+ return 0;
+}
+
+/*
+ * Attach the global-layout shared memory from fd.  Fails with -EINVAL
+ * when the counter has no global allocation, -EBUSY when the global
+ * layout already has an fd attached.
+ */
+int lttng_counter_set_global_shm(struct lib_counter *counter, int fd)
+{
+ struct lib_counter_config *config = &counter->config;
+ struct lib_counter_layout *layout;
+
+ if (!(config->alloc & COUNTER_ALLOC_GLOBAL))
+ return -EINVAL;
+ layout = &counter->global_counters;
+ if (layout->shm_fd >= 0)
+ return -EBUSY;
+ return lttng_counter_layout_init(counter, -1, fd);
+}
+
+/*
+ * Attach a shared memory file descriptor to one per-cpu counter
+ * layout. Fails with -EINVAL for an out-of-range cpu or when the
+ * counter has no per-cpu allocation, and with -EBUSY when that cpu
+ * already has a shm fd set.
+ */
+int lttng_counter_set_cpu_shm(struct lib_counter *counter, int cpu, int fd)
+{
+	if (cpu < 0 || cpu >= lttng_counter_num_possible_cpus())
+		return -EINVAL;
+	if (!(counter->config.alloc & COUNTER_ALLOC_PER_CPU))
+		return -EINVAL;
+	if (counter->percpu_counters[cpu].shm_fd >= 0)
+		return -EBUSY;
+	return lttng_counter_layout_init(counter, cpu, fd);
+}
+
+/*
+ * Record the global sum step into the union member matching the
+ * configured counter size, rejecting negative values and values that
+ * do not fit in that size.
+ * Returns 0 on success, -EINVAL on invalid step value.
+ */
+static
+int lttng_counter_set_global_sum_step(struct lib_counter *counter,
+		int64_t global_sum_step)
+{
+	if (global_sum_step < 0)
+		return -EINVAL;
+
+	switch (counter->config.counter_size) {
+	case COUNTER_SIZE_8_BIT:
+		if (global_sum_step > INT8_MAX)
+			return -EINVAL;
+		counter->global_sum_step.s8 = (int8_t) global_sum_step;
+		break;
+	case COUNTER_SIZE_16_BIT:
+		if (global_sum_step > INT16_MAX)
+			return -EINVAL;
+		counter->global_sum_step.s16 = (int16_t) global_sum_step;
+		break;
+	case COUNTER_SIZE_32_BIT:
+		if (global_sum_step > INT32_MAX)
+			return -EINVAL;
+		counter->global_sum_step.s32 = (int32_t) global_sum_step;
+		break;
+	case COUNTER_SIZE_64_BIT:
+		counter->global_sum_step.s64 = global_sum_step;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Validate the arguments of lttng_counter_create() for consistency
+ * between the configured allocation scheme (global and/or per-cpu)
+ * and the file descriptors provided by the caller.
+ * Returns 0 when valid, -1 otherwise.
+ */
+static
+int validate_args(const struct lib_counter_config *config,
+	size_t nr_dimensions,
+	const size_t *max_nr_elem,
+	int64_t global_sum_step,
+	int global_counter_fd,
+	int nr_counter_cpu_fds,
+	const int *counter_cpu_fds)
+{
+	int nr_cpus = lttng_counter_num_possible_cpus();
+
+	/* 64-bit counters require a 64-bit architecture. */
+	if (CAA_BITS_PER_LONG != 64 && config->counter_size == COUNTER_SIZE_64_BIT) {
+		WARN_ON_ONCE(1);
+		return -1;
+	}
+	if (!max_nr_elem)
+		return -1;
+	/*
+	 * global sum step is only useful with allocating both per-cpu
+	 * and global counters.
+	 */
+	if (global_sum_step && (!(config->alloc & COUNTER_ALLOC_GLOBAL) ||
+			!(config->alloc & COUNTER_ALLOC_PER_CPU)))
+		return -1;
+	if (!(config->alloc & COUNTER_ALLOC_GLOBAL) && global_counter_fd >= 0)
+		return -1;
+	if (!(config->alloc & COUNTER_ALLOC_PER_CPU) && counter_cpu_fds)
+		return -1;
+	/*
+	 * Fix: compare the fd *count* against 0, not the fd array pointer.
+	 * An ordered pointer/integer comparison is a C constraint
+	 * violation and did not express the intended check.
+	 */
+	if (!(config->alloc & COUNTER_ALLOC_PER_CPU) && nr_counter_cpu_fds >= 0)
+		return -1;
+	if (counter_cpu_fds && nr_cpus != nr_counter_cpu_fds)
+		return -1;
+	return 0;
+}
+
+/*
+ * Create a counter with nr_dimensions dimensions, each holding up to
+ * max_nr_elem[i] elements. Depending on config->alloc, global and/or
+ * per-cpu backing layouts are initialized from the provided file
+ * descriptors; an fd of -1 postpones layout initialization to a later
+ * lttng_counter_set_global_shm()/lttng_counter_set_cpu_shm() call.
+ *
+ * When is_daemon is true, shared memory is allocated and cleared;
+ * otherwise pre-existing shared memory is mapped.
+ *
+ * Returns the new counter, or NULL on error.
+ */
+struct lib_counter *lttng_counter_create(const struct lib_counter_config *config,
+		size_t nr_dimensions,
+		const size_t *max_nr_elem,
+		int64_t global_sum_step,
+		int global_counter_fd,
+		int nr_counter_cpu_fds,
+		const int *counter_cpu_fds,
+		bool is_daemon)
+{
+	struct lib_counter *counter;
+	size_t dimension, nr_elem = 1;
+	int cpu, ret;
+	int nr_handles = 0;
+	int nr_cpus = lttng_counter_num_possible_cpus();
+
+	if (validate_args(config, nr_dimensions, max_nr_elem,
+			global_sum_step, global_counter_fd, nr_counter_cpu_fds,
+			counter_cpu_fds))
+		return NULL;
+	counter = zmalloc(sizeof(struct lib_counter));
+	if (!counter)
+		return NULL;
+	/* -1 marks the global layout as not yet populated. */
+	counter->global_counters.shm_fd = -1;
+	counter->config = *config;
+	counter->is_daemon = is_daemon;
+	if (lttng_counter_set_global_sum_step(counter, global_sum_step))
+		goto error_sum_step;
+	counter->nr_dimensions = nr_dimensions;
+	counter->dimensions = zmalloc(nr_dimensions * sizeof(*counter->dimensions));
+	if (!counter->dimensions)
+		goto error_dimensions;
+	for (dimension = 0; dimension < nr_dimensions; dimension++)
+		counter->dimensions[dimension].max_nr_elem = max_nr_elem[dimension];
+	if (config->alloc & COUNTER_ALLOC_PER_CPU) {
+		counter->percpu_counters = zmalloc(sizeof(struct lib_counter_layout) * nr_cpus);
+		if (!counter->percpu_counters)
+			goto error_alloc_percpu;
+		/* Mark every per-cpu layout as not yet populated. */
+		lttng_counter_for_each_possible_cpu(cpu)
+			counter->percpu_counters[cpu].shm_fd = -1;
+	}
+
+	if (lttng_counter_init_stride(config, counter))
+		goto error_init_stride;
+	//TODO saturation values.
+	/* Total number of elements across all dimensions. */
+	for (dimension = 0; dimension < counter->nr_dimensions; dimension++)
+		nr_elem *= lttng_counter_get_dimension_nr_elements(&counter->dimensions[dimension]);
+	counter->allocated_elem = nr_elem;
+
+	/* One shm handle for the global layout, one per possible cpu. */
+	if (config->alloc & COUNTER_ALLOC_GLOBAL)
+		nr_handles++;
+	if (config->alloc & COUNTER_ALLOC_PER_CPU)
+		nr_handles += nr_cpus;
+	/* Allocate table for global and per-cpu counters. */
+	counter->object_table = lttng_counter_shm_object_table_create(nr_handles);
+	if (!counter->object_table)
+		goto error_alloc_object_table;
+
+	if (config->alloc & COUNTER_ALLOC_GLOBAL) {
+		ret = lttng_counter_layout_init(counter, -1, global_counter_fd); /* global */
+		if (ret)
+			goto layout_init_error;
+	}
+	if ((config->alloc & COUNTER_ALLOC_PER_CPU) && counter_cpu_fds) {
+		lttng_counter_for_each_possible_cpu(cpu) {
+			ret = lttng_counter_layout_init(counter, cpu, counter_cpu_fds[cpu]);
+			if (ret)
+				goto layout_init_error;
+		}
+	}
+	return counter;
+
+layout_init_error:
+	lttng_counter_shm_object_table_destroy(counter->object_table, is_daemon);
+error_alloc_object_table:
+error_init_stride:
+	free(counter->percpu_counters);
+error_alloc_percpu:
+	free(counter->dimensions);
+error_dimensions:
+error_sum_step:
+	free(counter);
+	return NULL;
+}
+
+/*
+ * Release all memory owned by a counter created with
+ * lttng_counter_create(): the shm object table, the per-cpu layout
+ * array (when per-cpu allocation was configured), the dimension
+ * descriptors, and the counter itself.
+ */
+void lttng_counter_destroy(struct lib_counter *counter)
+{
+	if (counter->config.alloc & COUNTER_ALLOC_PER_CPU)
+		free(counter->percpu_counters);
+	lttng_counter_shm_object_table_destroy(counter->object_table, counter->is_daemon);
+	free(counter->dimensions);
+	free(counter);
+}
+
+/*
+ * Retrieve the fd and mapping length of the global counter shm.
+ * Returns 0 on success, -1 when no global shm has been set.
+ */
+int lttng_counter_get_global_shm(struct lib_counter *counter, int *fd, size_t *len)
+{
+	const struct lib_counter_layout *layout = &counter->global_counters;
+
+	if (layout->shm_fd < 0)
+		return -1;
+	*fd = layout->shm_fd;
+	*len = layout->shm_len;
+	return 0;
+}
+
+/*
+ * Retrieve the fd and mapping length of one per-cpu counter shm.
+ * Returns 0 on success, -1 for an out-of-range cpu or when that cpu
+ * has no shm set.
+ */
+int lttng_counter_get_cpu_shm(struct lib_counter *counter, int cpu, int *fd, size_t *len)
+{
+	struct lib_counter_layout *layout;
+	int shm_fd;
+
+	/*
+	 * Fix: also reject negative cpu numbers, which would index
+	 * percpu_counters[] out of bounds (matches the check in
+	 * lttng_counter_set_cpu_shm()).
+	 */
+	if (cpu < 0 || cpu >= lttng_counter_num_possible_cpus())
+		return -1;
+	layout = &counter->percpu_counters[cpu];
+	shm_fd = layout->shm_fd;
+	if (shm_fd < 0)
+		return -1;
+	*fd = shm_fd;
+	*len = layout->shm_len;
+	return 0;
+}
+
+/*
+ * Read one counter element, designated by dimension_indexes, from the
+ * per-cpu layout of 'cpu' (cpu >= 0) or from the global layout
+ * (cpu < 0, only valid when a global allocation is configured).
+ * Stores the sign-extended value into *value, and the element's
+ * overflow/underflow bitmap state into *overflow / *underflow.
+ *
+ * Returns 0 on success, -EOVERFLOW on out-of-bounds indexes, -EINVAL
+ * on invalid cpu/configuration, -ENODEV when the layout has not been
+ * populated yet.
+ */
+int lttng_counter_read(const struct lib_counter_config *config,
+		struct lib_counter *counter,
+		const size_t *dimension_indexes,
+		int cpu, int64_t *value, bool *overflow,
+		bool *underflow)
+{
+	size_t index;
+	struct lib_counter_layout *layout;
+
+	if (caa_unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
+		return -EOVERFLOW;
+	index = lttng_counter_get_index(config, counter, dimension_indexes);
+
+	/* Select the layout to read from. */
+	switch (config->alloc) {
+	case COUNTER_ALLOC_PER_CPU:
+		if (cpu < 0 || cpu >= lttng_counter_num_possible_cpus())
+			return -EINVAL;
+		layout = &counter->percpu_counters[cpu];
+		break;
+	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
+		if (cpu >= 0) {
+			if (cpu >= lttng_counter_num_possible_cpus())
+				return -EINVAL;
+			layout = &counter->percpu_counters[cpu];
+		} else {
+			layout = &counter->global_counters;
+		}
+		break;
+	case COUNTER_ALLOC_GLOBAL:
+		if (cpu >= 0)
+			return -EINVAL;
+		layout = &counter->global_counters;
+		break;
+	default:
+		return -EINVAL;
+	}
+	if (caa_unlikely(!layout->counters))
+		return -ENODEV; /* Layout not populated yet. */
+
+	/* Load the element at the configured width. */
+	switch (config->counter_size) {
+	case COUNTER_SIZE_8_BIT:
+	{
+		int8_t *int_p = (int8_t *) layout->counters + index;
+		*value = (int64_t) CMM_LOAD_SHARED(*int_p);
+		break;
+	}
+	case COUNTER_SIZE_16_BIT:
+	{
+		int16_t *int_p = (int16_t *) layout->counters + index;
+		*value = (int64_t) CMM_LOAD_SHARED(*int_p);
+		break;
+	}
+	case COUNTER_SIZE_32_BIT:
+	{
+		int32_t *int_p = (int32_t *) layout->counters + index;
+		*value = (int64_t) CMM_LOAD_SHARED(*int_p);
+		break;
+	}
+#if CAA_BITS_PER_LONG == 64
+	case COUNTER_SIZE_64_BIT:
+	{
+		int64_t *int_p = (int64_t *) layout->counters + index;
+		*value = CMM_LOAD_SHARED(*int_p);
+		break;
+	}
+#endif
+	default:
+		return -EINVAL;
+	}
+	*overflow = lttng_bitmap_test_bit(index, layout->overflow_bitmap);
+	*underflow = lttng_bitmap_test_bit(index, layout->underflow_bitmap);
+	return 0;
+}
+
+/*
+ * Sum one counter element across the global layout (when configured)
+ * and every possible cpu (when configured). Reports wrap-around of the
+ * 64-bit sum itself as well as the per-layout overflow/underflow
+ * bitmap state, OR'd into *overflow / *underflow.
+ * Returns 0 on success, negative error value on error.
+ */
+int lttng_counter_aggregate(const struct lib_counter_config *config,
+		struct lib_counter *counter,
+		const size_t *dimension_indexes,
+		int64_t *value, bool *overflow,
+		bool *underflow)
+{
+	int cpu, ret;
+	int64_t v, sum = 0;
+	bool of, uf;
+
+	*overflow = false;
+	*underflow = false;
+
+	switch (config->alloc) {
+	case COUNTER_ALLOC_GLOBAL: /* Fallthrough */
+	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
+		/* Read global counter. */
+		ret = lttng_counter_read(config, counter, dimension_indexes,
+			-1, &v, &of, &uf);
+		if (ret < 0)
+			return ret;
+		sum += v;
+		*overflow |= of;
+		*underflow |= uf;
+		break;
+	case COUNTER_ALLOC_PER_CPU:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (config->alloc) {
+	case COUNTER_ALLOC_GLOBAL:
+		break;
+	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL: /* Fallthrough */
+	case COUNTER_ALLOC_PER_CPU:
+		/* Accumulate each per-cpu value into the running sum. */
+		lttng_counter_for_each_possible_cpu(cpu) {
+			int64_t old = sum;
+
+			ret = lttng_counter_read(config, counter, dimension_indexes,
+				cpu, &v, &of, &uf);
+			if (ret < 0)
+				return ret;
+			*overflow |= of;
+			*underflow |= uf;
+			/* Overflow is defined on unsigned types. */
+			sum = (int64_t) ((uint64_t) old + (uint64_t) v);
+			/* Detect signed wrap-around of the aggregate. */
+			if (v > 0 && sum < old)
+				*overflow = true;
+			else if (v < 0 && sum > old)
+				*underflow = true;
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+	*value = sum;
+	return 0;
+}
+
+/*
+ * Reset one counter element (value and overflow/underflow bits) in the
+ * per-cpu layout of 'cpu' (cpu >= 0) or in the global layout
+ * (cpu == -1, when a global allocation is configured).
+ * Returns 0 on success, -EOVERFLOW on out-of-bounds indexes, -EINVAL
+ * on invalid cpu/configuration, -ENODEV when the layout has not been
+ * populated yet.
+ */
+static
+int lttng_counter_clear_cpu(const struct lib_counter_config *config,
+		struct lib_counter *counter,
+		const size_t *dimension_indexes,
+		int cpu)
+{
+	size_t index;
+	struct lib_counter_layout *layout;
+
+	if (caa_unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
+		return -EOVERFLOW;
+	index = lttng_counter_get_index(config, counter, dimension_indexes);
+
+	/* Select the layout to clear (same scheme as lttng_counter_read). */
+	switch (config->alloc) {
+	case COUNTER_ALLOC_PER_CPU:
+		if (cpu < 0 || cpu >= lttng_counter_num_possible_cpus())
+			return -EINVAL;
+		layout = &counter->percpu_counters[cpu];
+		break;
+	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
+		if (cpu >= 0) {
+			if (cpu >= lttng_counter_num_possible_cpus())
+				return -EINVAL;
+			layout = &counter->percpu_counters[cpu];
+		} else {
+			layout = &counter->global_counters;
+		}
+		break;
+	case COUNTER_ALLOC_GLOBAL:
+		if (cpu >= 0)
+			return -EINVAL;
+		layout = &counter->global_counters;
+		break;
+	default:
+		return -EINVAL;
+	}
+	if (caa_unlikely(!layout->counters))
+		return -ENODEV; /* Layout not populated yet. */
+
+	/* Store zero at the configured width. */
+	switch (config->counter_size) {
+	case COUNTER_SIZE_8_BIT:
+	{
+		int8_t *int_p = (int8_t *) layout->counters + index;
+		CMM_STORE_SHARED(*int_p, 0);
+		break;
+	}
+	case COUNTER_SIZE_16_BIT:
+	{
+		int16_t *int_p = (int16_t *) layout->counters + index;
+		CMM_STORE_SHARED(*int_p, 0);
+		break;
+	}
+	case COUNTER_SIZE_32_BIT:
+	{
+		int32_t *int_p = (int32_t *) layout->counters + index;
+		CMM_STORE_SHARED(*int_p, 0);
+		break;
+	}
+#if CAA_BITS_PER_LONG == 64
+	case COUNTER_SIZE_64_BIT:
+	{
+		int64_t *int_p = (int64_t *) layout->counters + index;
+		CMM_STORE_SHARED(*int_p, 0);
+		break;
+	}
+#endif
+	default:
+		return -EINVAL;
+	}
+	lttng_bitmap_clear_bit(index, layout->overflow_bitmap);
+	lttng_bitmap_clear_bit(index, layout->underflow_bitmap);
+	return 0;
+}
+
+/*
+ * Reset one counter element in every layout the configuration
+ * allocates: the global layout first (when present), then each
+ * possible cpu (when present).
+ * Returns 0 on success, negative error value on error.
+ */
+int lttng_counter_clear(const struct lib_counter_config *config,
+		struct lib_counter *counter,
+		const size_t *dimension_indexes)
+{
+	int cpu, ret;
+
+	switch (config->alloc) {
+	case COUNTER_ALLOC_PER_CPU:
+		break;
+	case COUNTER_ALLOC_GLOBAL: /* Fallthrough */
+	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
+		/* Clear global counter. */
+		ret = lttng_counter_clear_cpu(config, counter, dimension_indexes, -1);
+		if (ret < 0)
+			return ret;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (config->alloc) {
+	case COUNTER_ALLOC_PER_CPU: /* Fallthrough */
+	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
+		/* Clear the element on every possible cpu. */
+		lttng_counter_for_each_possible_cpu(cpu) {
+			ret = lttng_counter_clear_cpu(config, counter, dimension_indexes, cpu);
+			if (ret < 0)
+				return ret;
+		}
+		break;
+	case COUNTER_ALLOC_GLOBAL:
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
--- /dev/null
+/*
+ * lttng/counter.h
+ *
+ * LTTng Counters API
+ *
+ * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _LTTNG_COUNTER_H
+#define _LTTNG_COUNTER_H
+
+#include <stdint.h>
+#include <lttng/ust-config.h>
+#include "counter-types.h"
+
+/* max_nr_elem is for each dimension. */
+struct lib_counter *lttng_counter_create(const struct lib_counter_config *config,
+ size_t nr_dimensions,
+ const size_t *max_nr_elem,
+ int64_t global_sum_step,
+ int global_counter_fd,
+ int nr_counter_cpu_fds,
+ const int *counter_cpu_fds,
+ bool is_daemon);
+void lttng_counter_destroy(struct lib_counter *counter);
+
+int lttng_counter_set_global_shm(struct lib_counter *counter, int fd);
+int lttng_counter_set_cpu_shm(struct lib_counter *counter, int cpu, int fd);
+
+int lttng_counter_get_global_shm(struct lib_counter *counter, int *fd, size_t *len);
+int lttng_counter_get_cpu_shm(struct lib_counter *counter, int cpu, int *fd, size_t *len);
+
+int lttng_counter_read(const struct lib_counter_config *config,
+ struct lib_counter *counter,
+ const size_t *dimension_indexes,
+ int cpu, int64_t *value,
+ bool *overflow, bool *underflow);
+int lttng_counter_aggregate(const struct lib_counter_config *config,
+ struct lib_counter *counter,
+ const size_t *dimension_indexes,
+ int64_t *value,
+ bool *overflow, bool *underflow);
+int lttng_counter_clear(const struct lib_counter_config *config,
+ struct lib_counter *counter,
+ const size_t *dimension_indexes);
+
+#endif /* _LTTNG_COUNTER_H */
--- /dev/null
+/*
+ * libcounter/shm.c
+ *
+ * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; only
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#define _LGPL_SOURCE
+#include <config.h>
+#include "shm.h"
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/stat.h> /* For mode constants */
+#include <fcntl.h> /* For O_* constants */
+#include <assert.h>
+#include <stdio.h>
+#include <signal.h>
+#include <dirent.h>
+#include <lttng/align.h>
+#include <limits.h>
+#include <stdbool.h>
+#include <stdint.h>
+#ifdef HAVE_LIBNUMA
+#include <numa.h>
+#include <numaif.h>
+#endif
+#include <helper.h>
+#include <ust-fd.h>
+#include "../libringbuffer/mmap.h"
+
+/*
+ * Ensure we have the required amount of space available by writing 0
+ * into the entire buffer. Not doing so can trigger SIGBUS when going
+ * beyond the available shm space.
+ */
+static
+int zero_file(int fd, size_t len)
+{
+	ssize_t retlen;
+	size_t written = 0;
+	char *zeropage;
+	long pagelen;
+	int ret;
+
+	pagelen = sysconf(_SC_PAGESIZE);
+	if (pagelen < 0)
+		return (int) pagelen;
+	zeropage = calloc(pagelen, 1);
+	if (!zeropage)
+		return -ENOMEM;
+
+	while (len > written) {
+		do {
+			retlen = write(fd, zeropage,
+				min_t(size_t, pagelen, len - written));
+			/*
+			 * Fix: retry on EINTR by checking the signed
+			 * ssize_t result against 0, rather than against
+			 * -1UL, which forced an implicit signed/unsigned
+			 * conversion of retlen.
+			 */
+		} while (retlen < 0 && errno == EINTR);
+		if (retlen < 0) {
+			ret = (int) retlen;
+			goto error;
+		}
+		/* Account for short writes and keep going. */
+		written += retlen;
+	}
+	ret = 0;
+error:
+	free(zeropage);
+	return ret;
+}
+
+/*
+ * Allocate a zeroed object table able to track up to max_nb_obj shm
+ * objects (flexible array member sized accordingly).
+ * Returns NULL on allocation failure.
+ */
+struct lttng_counter_shm_object_table *lttng_counter_shm_object_table_create(size_t max_nb_obj)
+{
+	struct lttng_counter_shm_object_table *table;
+	size_t alloc_len;
+
+	alloc_len = sizeof(struct lttng_counter_shm_object_table)
+		+ max_nb_obj * sizeof(table->objects[0]);
+	table = zmalloc(alloc_len);
+	if (!table)
+		return NULL;
+	table->size = max_nb_obj;
+	return table;
+}
+
+/*
+ * Create a shm-backed object in the next free table slot on top of the
+ * caller-provided fd: the file is zero-filled up front (reserving the
+ * space avoids a later SIGBUS), truncated to size, fsync'd, then
+ * mapped shared. The object does NOT take ownership of the fd
+ * (shm_fd_ownership stays 0), and the fd is left open for the caller
+ * in every error path. Returns NULL on error.
+ */
+static
+struct lttng_counter_shm_object *_lttng_counter_shm_object_table_alloc_shm(struct lttng_counter_shm_object_table *table,
+		size_t memory_map_size,
+		int cpu_fd)
+{
+	int shmfd, ret;
+	struct lttng_counter_shm_object *obj;
+	char *memory_map;
+
+	if (cpu_fd < 0)
+		return NULL;
+	if (table->allocated_len >= table->size)
+		return NULL;
+	obj = &table->objects[table->allocated_len];
+
+	/* create shm */
+
+	shmfd = cpu_fd;
+	ret = zero_file(shmfd, memory_map_size);
+	if (ret) {
+		PERROR("zero_file");
+		goto error_zero_file;
+	}
+	ret = ftruncate(shmfd, memory_map_size);
+	if (ret) {
+		PERROR("ftruncate");
+		goto error_ftruncate;
+	}
+	/*
+	 * Also ensure the file metadata is synced with the storage by using
+	 * fsync(2).
+	 */
+	ret = fsync(shmfd);
+	if (ret) {
+		PERROR("fsync");
+		goto error_fsync;
+	}
+	obj->shm_fd_ownership = 0;
+	obj->shm_fd = shmfd;
+
+	/* memory_map: mmap */
+	memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
+		MAP_SHARED | LTTNG_MAP_POPULATE, shmfd, 0);
+	if (memory_map == MAP_FAILED) {
+		PERROR("mmap");
+		goto error_mmap;
+	}
+	obj->type = LTTNG_COUNTER_SHM_OBJECT_SHM;
+	obj->memory_map = memory_map;
+	obj->memory_map_size = memory_map_size;
+	obj->allocated_len = 0;
+	/* Publish the slot only once it is fully initialized. */
+	obj->index = table->allocated_len++;
+
+	return obj;
+
+error_mmap:
+error_fsync:
+error_ftruncate:
+error_zero_file:
+	return NULL;
+}
+
+/*
+ * Create an object in the next free table slot backed by plain zeroed
+ * heap memory rather than a shm mapping. Returns NULL when the table
+ * is full or on allocation failure.
+ */
+static
+struct lttng_counter_shm_object *_lttng_counter_shm_object_table_alloc_mem(struct lttng_counter_shm_object_table *table,
+		size_t memory_map_size)
+{
+	struct lttng_counter_shm_object *obj;
+	void *mem;
+
+	if (table->allocated_len >= table->size)
+		return NULL;
+	mem = zmalloc(memory_map_size);
+	if (!mem)
+		return NULL;
+
+	obj = &table->objects[table->allocated_len];
+	obj->shm_fd = -1;	/* no shm fd backs this object */
+	obj->shm_fd_ownership = 0;
+	obj->type = LTTNG_COUNTER_SHM_OBJECT_MEM;
+	obj->memory_map = mem;
+	obj->memory_map_size = memory_map_size;
+	obj->allocated_len = 0;
+	obj->index = table->allocated_len++;
+
+	return obj;
+}
+
+/*
+ * libnuma prints errors on the console even for numa_available().
+ * Work-around this limitation by using get_mempolicy() directly to
+ * check whether the kernel supports mempolicy.
+ */
+#ifdef HAVE_LIBNUMA
+/* Returns true when both the kernel and libnuma report NUMA support. */
+static bool lttng_is_numa_available(void)
+{
+	int ret;
+
+	/*
+	 * get_mempolicy() fails with ENOSYS when the kernel lacks
+	 * mempolicy support; probing it first avoids the console errors
+	 * libnuma would print from numa_available().
+	 */
+	ret = get_mempolicy(NULL, NULL, 0, NULL, 0);
+	if (ret && errno == ENOSYS) {
+		return false;
+	}
+	return numa_available() > 0;
+}
+#endif
+
+/*
+ * Allocate a shm object from the table, either shm-backed
+ * (LTTNG_COUNTER_SHM_OBJECT_SHM) or plain-memory backed
+ * (LTTNG_COUNTER_SHM_OBJECT_MEM), preferring NUMA-local memory for the
+ * target cpu when libnuma is available. The caller's NUMA memory
+ * policy is restored before returning. Returns NULL on error.
+ */
+struct lttng_counter_shm_object *lttng_counter_shm_object_table_alloc(struct lttng_counter_shm_object_table *table,
+		size_t memory_map_size,
+		enum lttng_counter_shm_object_type type,
+		int cpu_fd,
+		int cpu)
+{
+	struct lttng_counter_shm_object *shm_object;
+#ifdef HAVE_LIBNUMA
+	int oldnode = 0, node;
+	bool numa_avail;
+
+	numa_avail = lttng_is_numa_available();
+	if (numa_avail) {
+		/* Prefer memory from the NUMA node of the target cpu. */
+		oldnode = numa_preferred();
+		if (cpu >= 0) {
+			node = numa_node_of_cpu(cpu);
+			if (node >= 0)
+				numa_set_preferred(node);
+		}
+		if (cpu < 0 || node < 0)
+			numa_set_localalloc();
+	}
+#endif /* HAVE_LIBNUMA */
+	switch (type) {
+	case LTTNG_COUNTER_SHM_OBJECT_SHM:
+		shm_object = _lttng_counter_shm_object_table_alloc_shm(table, memory_map_size,
+				cpu_fd);
+		break;
+	case LTTNG_COUNTER_SHM_OBJECT_MEM:
+		shm_object = _lttng_counter_shm_object_table_alloc_mem(table, memory_map_size);
+		break;
+	default:
+		/*
+		 * Fix: do not fall through with shm_object uninitialized
+		 * (undefined behavior) when assertions are compiled out
+		 * with NDEBUG.
+		 */
+		assert(0);
+		shm_object = NULL;
+		break;
+	}
+#ifdef HAVE_LIBNUMA
+	/* Restore the caller's NUMA memory policy. */
+	if (numa_avail)
+		numa_set_preferred(oldnode);
+#endif /* HAVE_LIBNUMA */
+	return shm_object;
+}
+
+/*
+ * Map a pre-existing shm fd into the next free table slot. On success
+ * the object takes ownership of shm_fd (closed at destroy time) and
+ * the whole mapping is marked as allocated. Returns NULL when the
+ * table is full or when mmap fails; on mmap failure the slot is not
+ * consumed -- NOTE(review): the caller appears to keep ownership of
+ * the fd in that case, confirm against callers.
+ */
+struct lttng_counter_shm_object *lttng_counter_shm_object_table_append_shm(struct lttng_counter_shm_object_table *table,
+		int shm_fd,
+		size_t memory_map_size)
+{
+	struct lttng_counter_shm_object *obj;
+	char *memory_map;
+
+	if (table->allocated_len >= table->size)
+		return NULL;
+
+	obj = &table->objects[table->allocated_len];
+
+	obj->shm_fd = shm_fd;
+	obj->shm_fd_ownership = 1;
+
+	/* memory_map: mmap */
+	memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
+		MAP_SHARED | LTTNG_MAP_POPULATE, shm_fd, 0);
+	if (memory_map == MAP_FAILED) {
+		PERROR("mmap");
+		goto error_mmap;
+	}
+	obj->type = LTTNG_COUNTER_SHM_OBJECT_SHM;
+	obj->memory_map = memory_map;
+	obj->memory_map_size = memory_map_size;
+	/* The entire mapping is considered allocated. */
+	obj->allocated_len = memory_map_size;
+	obj->index = table->allocated_len++;
+
+	return obj;
+
+error_mmap:
+	return NULL;
+}
+
+/*
+ * Register caller-provided memory as the next table object.
+ * Passing ownership of mem to object (freed at destroy time).
+ * The whole area is marked as allocated. Returns NULL when the table
+ * is full.
+ */
+struct lttng_counter_shm_object *lttng_counter_shm_object_table_append_mem(struct lttng_counter_shm_object_table *table,
+		void *mem, size_t memory_map_size)
+{
+	struct lttng_counter_shm_object *obj;
+
+	if (table->allocated_len >= table->size)
+		return NULL;
+	obj = &table->objects[table->allocated_len];
+
+	obj->shm_fd = -1;
+	obj->shm_fd_ownership = 0;
+
+	obj->type = LTTNG_COUNTER_SHM_OBJECT_MEM;
+	obj->memory_map = mem;
+	obj->memory_map_size = memory_map_size;
+	obj->allocated_len = memory_map_size;
+	obj->index = table->allocated_len++;
+
+	return obj;
+	/* Fix: removed unreachable "return NULL;" after the return above. */
+}
+
+/*
+ * Tear down one shm object: unmap shm-backed objects and, when the
+ * object owns its fd, close it -- removing it from the ust fd tracker
+ * when called from the application (consumer == 0) rather than the
+ * consumer daemon. Memory-backed objects are simply freed.
+ */
+static
+void lttng_counter_shmp_object_destroy(struct lttng_counter_shm_object *obj, int consumer)
+{
+	switch (obj->type) {
+	case LTTNG_COUNTER_SHM_OBJECT_SHM:
+	{
+		int ret;
+
+		ret = munmap(obj->memory_map, obj->memory_map_size);
+		if (ret) {
+			PERROR("munmap");	/* Fix: message typo "umnmap". */
+			assert(0);
+		}
+
+		if (obj->shm_fd_ownership) {
+			/* Delete FDs only if called from app (not consumer). */
+			if (!consumer) {
+				lttng_ust_lock_fd_tracker();
+				ret = close(obj->shm_fd);
+				if (!ret) {
+					lttng_ust_delete_fd_from_tracker(obj->shm_fd);
+				} else {
+					PERROR("close");
+					assert(0);
+				}
+				lttng_ust_unlock_fd_tracker();
+			} else {
+				ret = close(obj->shm_fd);
+				if (ret) {
+					PERROR("close");
+					assert(0);
+				}
+			}
+		}
+		break;
+	}
+	case LTTNG_COUNTER_SHM_OBJECT_MEM:
+	{
+		free(obj->memory_map);
+		break;
+	}
+	default:
+		assert(0);
+	}
+}
+
+/*
+ * Destroy every object registered in the table, then the table itself.
+ * 'consumer' is forwarded to decide whether fds are removed from the
+ * ust fd tracker (application) or simply closed (consumer daemon).
+ */
+void lttng_counter_shm_object_table_destroy(struct lttng_counter_shm_object_table *table, int consumer)
+{
+	size_t i;
+
+	/* Fix: allocated_len is a size_t; use a size_t index to avoid a
+	 * signed/unsigned comparison (and truncation for huge tables). */
+	for (i = 0; i < table->allocated_len; i++)
+		lttng_counter_shmp_object_destroy(&table->objects[i], consumer);
+	free(table);
+}
+
+/*
+ * lttng_counter_zalloc_shm - carve a len-byte chunk out of a shm object.
+ *
+ * Shared memory is already zeroed by shmget.
+ * *NOT* multithread-safe (should be protected by mutex).
+ * Returns a -1, -1 tuple on error.
+ */
+struct lttng_counter_shm_ref lttng_counter_zalloc_shm(struct lttng_counter_shm_object *obj, size_t len)
+{
+	struct lttng_counter_shm_ref ref = { -1, -1 };
+
+	if (obj->memory_map_size - obj->allocated_len < len)
+		return ref;	/* not enough room left */
+	ref.index = obj->index;
+	ref.offset = obj->allocated_len;
+	obj->allocated_len += len;
+	return ref;
+}
+
+/*
+ * Advance the allocation cursor of obj so the next
+ * lttng_counter_zalloc_shm() returns memory aligned on 'align' bytes.
+ */
+void lttng_counter_align_shm(struct lttng_counter_shm_object *obj, size_t align)
+{
+	obj->allocated_len += offset_align(obj->allocated_len, align);
+}
--- /dev/null
+#ifndef _LIBCOUNTER_SHM_H
+#define _LIBCOUNTER_SHM_H
+
+/*
+ * libcounter/shm.h
+ *
+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; only
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stddef.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <usterr-signal-safe.h>
+#include <urcu/compiler.h>
+#include "shm_types.h"
+
+/* lttng_counter_handle_create - for UST. */
+extern
+struct lttng_counter_shm_handle *lttng_counter_handle_create(void *data,
+ uint64_t memory_map_size, int wakeup_fd);
+/* lttng_counter_handle_add_cpu - for UST. */
+extern
+int lttng_counter_handle_add_cpu(struct lttng_counter_shm_handle *handle,
+ int shm_fd, uint32_t cpu_nr,
+ uint64_t memory_map_size);
+unsigned int lttng_counter_handle_get_nr_cpus(struct lttng_counter_shm_handle *handle);
+
+/*
+ * Pointer dereferencing. We don't trust the shm_ref, so we validate
+ * both the index and offset with known boundaries.
+ *
+ * "shmp" and "shmp_index" guarantee that it's safe to use the pointer
+ * target type, even in the occurrence of shm_ref modification by an
+ * untrusted process having write access to the shm_ref. We return a
+ * NULL pointer if the ranges are invalid.
+ */
+static inline
+char *_lttng_counter_shmp_offset(struct lttng_counter_shm_object_table *table,
+		struct lttng_counter_shm_ref *ref,
+		size_t idx, size_t elem_size)
+{
+	struct lttng_counter_shm_object *obj;
+	size_t objindex, ref_offset;
+
+	/* Validate the untrusted object index against the table bounds. */
+	objindex = (size_t) ref->index;
+	if (caa_unlikely(objindex >= table->allocated_len))
+		return NULL;
+	obj = &table->objects[objindex];
+	/* Untrusted offset, plus the requested element displacement. */
+	ref_offset = (size_t) ref->offset;
+	ref_offset += idx * elem_size;
+	/* Check if part of the element returned would exceed the limits. */
+	if (caa_unlikely(ref_offset + elem_size > obj->memory_map_size))
+		return NULL;
+	return &obj->memory_map[ref_offset];
+}
+
+#define lttng_counter_shmp_index(handle, ref, index) \
+ ({ \
+ __typeof__((ref)._type) ____ptr_ret; \
+ ____ptr_ret = (__typeof__(____ptr_ret)) _lttng_counter_shmp_offset((handle)->table, &(ref)._ref, index, sizeof(*____ptr_ret)); \
+ ____ptr_ret; \
+ })
+
+#define lttng_counter_shmp(handle, ref) lttng_counter_shmp_index(handle, ref, 0)
+
+/* Store a (index, offset) shm reference; plain struct copy. */
+static inline
+void _lttng_counter_set_shmp(struct lttng_counter_shm_ref *ref, struct lttng_counter_shm_ref src)
+{
+	*ref = src;
+}
+
+#define lttng_counter_set_shmp(ref, src) _lttng_counter_set_shmp(&(ref)._ref, src)
+
+struct lttng_counter_shm_object_table *lttng_counter_shm_object_table_create(size_t max_nb_obj);
+struct lttng_counter_shm_object *lttng_counter_shm_object_table_alloc(struct lttng_counter_shm_object_table *table,
+ size_t memory_map_size,
+ enum lttng_counter_shm_object_type type,
+ const int cpu_fd,
+ int cpu);
+struct lttng_counter_shm_object *lttng_counter_shm_object_table_append_shm(struct lttng_counter_shm_object_table *table,
+ int shm_fd, size_t memory_map_size);
+/* mem ownership is passed to lttng_counter_shm_object_table_append_mem(). */
+struct lttng_counter_shm_object *lttng_counter_shm_object_table_append_mem(struct lttng_counter_shm_object_table *table,
+ void *mem, size_t memory_map_size);
+void lttng_counter_shm_object_table_destroy(struct lttng_counter_shm_object_table *table, int consumer);
+
+/*
+ * lttng_counter_zalloc_shm - allocate memory within a shm object.
+ *
+ * Shared memory is already zeroed by shmget.
+ * *NOT* multithread-safe (should be protected by mutex).
+ * Returns a -1, -1 tuple on error.
+ */
+struct lttng_counter_shm_ref lttng_counter_zalloc_shm(struct lttng_counter_shm_object *obj, size_t len);
+void lttng_counter_align_shm(struct lttng_counter_shm_object *obj, size_t align);
+
+/*
+ * Return the shm fd of the object referenced by 'ref', or -EPERM when
+ * the (untrusted) index falls outside the table's bounds.
+ */
+static inline
+int lttng_counter_shm_get_shm_fd(struct lttng_counter_shm_handle *handle, struct lttng_counter_shm_ref *ref)
+{
+	struct lttng_counter_shm_object_table *table = handle->table;
+	size_t objindex = (size_t) ref->index;
+
+	if (caa_unlikely(objindex >= table->allocated_len))
+		return -EPERM;
+	return table->objects[objindex].shm_fd;
+}
+
+
+/*
+ * Store the mapping size of the object referenced by 'ref' into *size.
+ * Returns 0 on success, -EPERM when the (untrusted) index falls
+ * outside the table's bounds.
+ */
+static inline
+int lttng_counter_shm_get_shm_size(struct lttng_counter_shm_handle *handle, struct lttng_counter_shm_ref *ref,
+		uint64_t *size)
+{
+	struct lttng_counter_shm_object_table *table = handle->table;
+	size_t objindex = (size_t) ref->index;
+
+	if (caa_unlikely(objindex >= table->allocated_len))
+		return -EPERM;
+	*size = table->objects[objindex].memory_map_size;
+	return 0;
+}
+
+#endif /* _LIBCOUNTER_SHM_H */
--- /dev/null
+#ifndef _LIBCOUNTER_SHM_INTERNAL_H
+#define _LIBCOUNTER_SHM_INTERNAL_H
+
+/*
+ * libcounter/shm_internal.h
+ *
+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; only
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+struct lttng_counter_shm_ref {
+ volatile ssize_t index; /* within the object table */
+ volatile ssize_t offset; /* within the object */
+};
+
+#define DECLARE_LTTNG_COUNTER_SHMP(type, name) \
+ union { \
+ struct lttng_counter_shm_ref _ref; \
+ type *_type; \
+ } name
+
+#endif /* _LIBCOUNTER_SHM_INTERNAL_H */
--- /dev/null
+#ifndef _LIBCOUNTER_SHM_TYPES_H
+#define _LIBCOUNTER_SHM_TYPES_H
+
+/*
+ * libcounter/shm_types.h
+ *
+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; only
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdint.h>
+#include <stddef.h>
+#include <limits.h>
+#include "shm_internal.h"
+
+enum lttng_counter_shm_object_type {
+ LTTNG_COUNTER_SHM_OBJECT_SHM,
+ LTTNG_COUNTER_SHM_OBJECT_MEM,
+};
+
+/* One backing area: either a shm mapping or a plain heap allocation. */
+struct lttng_counter_shm_object {
+	enum lttng_counter_shm_object_type type;
+	size_t index;		/* within the object table */
+	int shm_fd;		/* shm fd, -1 for memory-backed objects */
+	char *memory_map;	/* base address of the mapping/allocation */
+	size_t memory_map_size;
+	uint64_t allocated_len;	/* allocation cursor within memory_map */
+	int shm_fd_ownership;	/* nonzero: shm_fd is closed at destroy time */
+};
+
+struct lttng_counter_shm_object_table {
+ size_t size;
+ size_t allocated_len;
+ struct lttng_counter_shm_object objects[];
+};
+
+struct lttng_counter_shm_handle {
+ struct lttng_counter_shm_object_table *table;
+};
+
+#endif /* _LIBCOUNTER_SHM_TYPES_H */
--- /dev/null
+/*
+ * libcounter/smp.c
+ *
+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; only
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#define _GNU_SOURCE
+#define _LGPL_SOURCE
+#include <unistd.h>
+#include <pthread.h>
+#include "smp.h"
+
+int __lttng_counter_num_possible_cpus;
+
+#if (defined(__GLIBC__) || defined( __UCLIBC__))
+/*
+ * Compute and cache the possible-CPU count from sysconf().
+ * On sysconf() failure the cached value is left unchanged (so the
+ * caller's lazy-init check will retry on next use).
+ */
+void _lttng_counter_get_num_possible_cpus(void)
+{
+ int result;
+
+ /* On Linux, when some processors are offline
+ * _SC_NPROCESSORS_CONF counts the offline
+ * processors, whereas _SC_NPROCESSORS_ONLN
+ * does not. If we used _SC_NPROCESSORS_ONLN,
+ * getcpu() could return a value greater than
+ * this sysconf, in which case the arrays
+ * indexed by processor would overflow.
+ */
+ result = sysconf(_SC_NPROCESSORS_CONF);
+ if (result == -1)
+ return;
+ __lttng_counter_num_possible_cpus = result;
+}
+
+#else
+
+/*
+ * The MUSL libc implementation of the _SC_NPROCESSORS_CONF sysconf does not
+ * return the number of configured CPUs in the system but relies on the cpu
+ * affinity mask of the current task.
+ *
+ * So instead we use a strategy similar to GLIBC's, counting the cpu
+ * directories in "/sys/devices/system/cpu" and fallback on the value from
+ * sysconf if it fails.
+ */
+
+#include <dirent.h>
+#include <limits.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+
+/* NOTE: evaluates both arguments twice; only use with side-effect-free args. */
+#define __max(a,b) ((a)>(b)?(a):(b))
+
+/*
+ * Compute and cache the possible-CPU count by counting "cpu<N>"
+ * directories under sysfs, falling back to sysconf(); the larger of
+ * the two values wins.  If both methods fail, the cached value is
+ * left unchanged.
+ */
+void _lttng_counter_get_num_possible_cpus(void)
+{
+ int result, count = 0;
+ DIR *cpudir;
+ struct dirent *entry;
+
+ cpudir = opendir("/sys/devices/system/cpu");
+ if (cpudir == NULL)
+ goto end;
+
+ /*
+ * Count the number of directories named "cpu" followed by an
+ * integer. This is the same strategy as glibc uses.
+ */
+ while ((entry = readdir(cpudir))) {
+ if (entry->d_type == DT_DIR &&
+ strncmp(entry->d_name, "cpu", 3) == 0) {
+
+ char *endptr;
+ unsigned long cpu_num;
+
+ cpu_num = strtoul(entry->d_name + 3, &endptr, 10);
+ if ((cpu_num < ULONG_MAX) && (endptr != entry->d_name + 3)
+ && (*endptr == '\0')) {
+ count++;
+ }
+ }
+ }
+
+end:
+ /*
+ * Get the sysconf value as a fallback. Keep the highest number.
+ */
+ result = __max(sysconf(_SC_NPROCESSORS_CONF), count);
+
+ /*
+ * If both methods failed, don't store the value.
+ */
+ if (result < 1)
+ return;
+ __lttng_counter_num_possible_cpus = result;
+}
+#endif
--- /dev/null
+#ifndef _LIBCOUNTER_SMP_H
+#define _LIBCOUNTER_SMP_H
+
+/*
+ * libcounter/smp.h
+ *
+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; only
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/*
+ * 4kB of per-cpu data available.
+ */
+#define LTTNG_COUNTER_PER_CPU_MEM_SIZE 4096
+
+extern int __lttng_counter_num_possible_cpus;
+extern void _lttng_counter_get_num_possible_cpus(void);
+
+/*
+ * Return the possible-CPU count, computing and caching it on first
+ * use.  NOTE(review): the lazy initialization is unsynchronized;
+ * concurrent first callers may each recompute the value, which looks
+ * benign since both store the same result — confirm no stronger
+ * guarantee is expected.
+ */
+static inline
+int lttng_counter_num_possible_cpus(void)
+{
+ if (!__lttng_counter_num_possible_cpus)
+ _lttng_counter_get_num_possible_cpus();
+ return __lttng_counter_num_possible_cpus;
+}
+
+/* Iterate "cpu" over [0, lttng_counter_num_possible_cpus()). */
+#define lttng_counter_for_each_possible_cpu(cpu) \
+ for ((cpu) = 0; (cpu) < lttng_counter_num_possible_cpus(); (cpu)++)
+
+#endif /* _LIBCOUNTER_SMP_H */
return ret;
}
+/*
+ * Receive var_len bytes of counter configuration data from the
+ * sessiond socket into a freshly allocated buffer.
+ *
+ * On success returns the received length (== var_len) and hands the
+ * buffer to the caller through *_counter_data.  On failure (oversized
+ * request, allocation failure, socket error or short read) the buffer
+ * is freed here and the negative errno value — or the zero/short read
+ * length — is returned; *_counter_data is left untouched.
+ */
+ssize_t ustcomm_recv_counter_from_sessiond(int sock,
+ void **_counter_data, uint64_t var_len)
+{
+ void *counter_data;
+ ssize_t len;
+
+ if (var_len > LTTNG_UST_COUNTER_DATA_MAX_LEN) {
+ len = -EINVAL;
+ goto error_check;
+ }
+ /* Receive variable length data */
+ counter_data = zmalloc(var_len);
+ if (!counter_data) {
+ len = -ENOMEM;
+ goto error_alloc;
+ }
+ len = ustcomm_recv_unix_sock(sock, counter_data, var_len);
+ if (len != var_len) {
+ /* Error (<0), orderly shutdown (0) or short read: propagate len. */
+ goto error_recv;
+ }
+ *_counter_data = counter_data;
+ return len;
+
+error_recv:
+ free(counter_data);
+error_alloc:
+error_check:
+ return len;
+}
+
+/*
+ * Receive the counter shm file descriptor from the sessiond socket
+ * and register it with the ust fd tracker.
+ *
+ * On success returns 0 and stores the tracker-assigned fd in *shm_fd.
+ * Returns a negative errno value on socket error, -EIO on orderly
+ * shutdown or when the fd cannot be added to the tracker (the
+ * received fd is closed in that case).
+ */
+int ustcomm_recv_counter_shm_from_sessiond(int sock,
+ int *shm_fd)
+{
+ ssize_t len;
+ int ret;
+ int fds[1];
+
+ /* receive shm fd */
+ lttng_ust_lock_fd_tracker();
+ len = ustcomm_recv_fds_unix_sock(sock, fds, 1);
+ if (len <= 0) {
+ lttng_ust_unlock_fd_tracker();
+ if (len < 0) {
+ ret = len;
+ goto error;
+ } else {
+ ret = -EIO;
+ goto error;
+ }
+ }
+
+ ret = lttng_ust_add_fd_to_tracker(fds[0]);
+ if (ret < 0) {
+ ret = close(fds[0]);
+ if (ret) {
+ PERROR("close on received shm_fd");
+ }
+ ret = -EIO;
+ lttng_ust_unlock_fd_tracker();
+ goto error;
+ }
+ *shm_fd = ret;
+ lttng_ust_unlock_fd_tracker();
+ return 0;
+
+error:
+ return ret;
+}
+
/*
* Returns 0 on success, negative error value on error.
*/
#include "../liblttng-ust/clock.h"
#include "../liblttng-ust/getenv.h"
+#include "../libcounter/shm.h"
+#include "../libcounter/smp.h"
+#include "../libcounter/counter.h"
+
/*
* Number of milliseconds to retry before failing metadata writes on
* buffer full condition. (10 seconds)
uint64_t memory_map_size;
};
+/* Maximum number of dimensions an ustctl_counter_attr can describe. */
+#define USTCTL_COUNTER_ATTR_DIMENSION_MAX 8
+/* Creation attributes of a daemon counter, kept for later introspection. */
+struct ustctl_counter_attr {
+ enum ustctl_counter_arithmetic arithmetic;
+ enum ustctl_counter_bitness bitness;
+ uint32_t nr_dimensions;
+ int64_t global_sum_step;
+ struct ustctl_counter_dimension dimensions[USTCTL_COUNTER_ATTR_DIMENSION_MAX];
+};
+
+/*
+ * Counter representation within daemon.
+ */
+struct ustctl_daemon_counter {
+ struct lib_counter *counter; /* underlying lib counter */
+ const struct lttng_counter_ops *ops; /* transport operations */
+ struct ustctl_counter_attr *attr; /* initial attributes */
+};
+
extern void lttng_ring_buffer_client_overwrite_init(void);
extern void lttng_ring_buffer_client_overwrite_rt_init(void);
extern void lttng_ring_buffer_client_discard_init(void);
extern void lttng_ring_buffer_client_discard_exit(void);
extern void lttng_ring_buffer_client_discard_rt_exit(void);
extern void lttng_ring_buffer_metadata_client_exit(void);
+extern void lttng_counter_client_percpu_32_modular_init(void);
+extern void lttng_counter_client_percpu_32_modular_exit(void);
+extern void lttng_counter_client_percpu_64_modular_init(void);
+extern void lttng_counter_client_percpu_64_modular_exit(void);
int ustctl_release_handle(int sock, int handle)
{
case LTTNG_UST_OBJECT_TYPE_EVENT_NOTIFIER_GROUP:
case LTTNG_UST_OBJECT_TYPE_EVENT_NOTIFIER:
break;
+ case LTTNG_UST_OBJECT_TYPE_COUNTER:
+ free(data->u.counter.data);
+ data->u.counter.data = NULL;
+ break;
+ case LTTNG_UST_OBJECT_TYPE_COUNTER_GLOBAL:
+ if (data->u.counter_global.shm_fd >= 0) {
+ ret = close(data->u.counter_global.shm_fd);
+ if (ret < 0) {
+ ret = -errno;
+ return ret;
+ }
+ data->u.counter_global.shm_fd = -1;
+ }
+ break;
+ case LTTNG_UST_OBJECT_TYPE_COUNTER_CPU:
+ if (data->u.counter_cpu.shm_fd >= 0) {
+ ret = close(data->u.counter_cpu.shm_fd);
+ if (ret < 0) {
+ ret = -errno;
+ return ret;
+ }
+ data->u.counter_cpu.shm_fd = -1;
+ }
+ break;
default:
assert(0);
}
goto error_type;
}
+ case LTTNG_UST_OBJECT_TYPE_COUNTER:
+ {
+ obj->u.counter.data = zmalloc(obj->size);
+ if (!obj->u.counter.data) {
+ ret = -ENOMEM;
+ goto error_type;
+ }
+ memcpy(obj->u.counter.data, src->u.counter.data, obj->size);
+ break;
+ }
+
+ case LTTNG_UST_OBJECT_TYPE_COUNTER_GLOBAL:
+ {
+ if (src->u.counter_global.shm_fd >= 0) {
+ obj->u.counter_global.shm_fd =
+ dup(src->u.counter_global.shm_fd);
+ if (obj->u.counter_global.shm_fd < 0) {
+ ret = errno;
+ goto error_type;
+ }
+ }
+ break;
+ }
+
+ case LTTNG_UST_OBJECT_TYPE_COUNTER_CPU:
+ {
+ obj->u.counter_cpu.cpu_nr = src->u.counter_cpu.cpu_nr;
+ if (src->u.counter_cpu.shm_fd >= 0) {
+ obj->u.counter_cpu.shm_fd =
+ dup(src->u.counter_cpu.shm_fd);
+ if (obj->u.counter_cpu.shm_fd < 0) {
+ ret = errno;
+ goto error_type;
+ }
+ }
+ break;
+ }
+
default:
ret = -EINVAL;
goto error_type;
return 0;
}
+/* counter operations */
+
+/* Return the number of CPUs per-cpu counters are allocated for. */
+int ustctl_get_nr_cpu_per_counter(void)
+{
+ return lttng_counter_num_possible_cpus();
+}
+
+/*
+ * Create a daemon-side counter.
+ *
+ * Only per-cpu allocation is currently supported, with modular or
+ * saturation arithmetic in 32- or 64-bit flavors; the matching
+ * counter transport is looked up by name.  The initial attributes
+ * are saved in counter->attr for later introspection.
+ *
+ * Returns NULL on invalid arguments, unknown transport or allocation
+ * failure.  On success the returned counter is owned by the caller
+ * and must be disposed of with ustctl_destroy_counter().
+ */
+struct ustctl_daemon_counter *
+ ustctl_create_counter(size_t nr_dimensions,
+ const struct ustctl_counter_dimension *dimensions,
+ int64_t global_sum_step,
+ int global_counter_fd,
+ int nr_counter_cpu_fds,
+ const int *counter_cpu_fds,
+ enum ustctl_counter_bitness bitness,
+ enum ustctl_counter_arithmetic arithmetic,
+ uint32_t alloc_flags)
+{
+ const char *transport_name;
+ struct ustctl_daemon_counter *counter;
+ struct lttng_counter_transport *transport;
+ struct lttng_counter_dimension ust_dim[LTTNG_COUNTER_DIMENSION_MAX];
+ size_t i;
+
+ if (nr_dimensions > LTTNG_COUNTER_DIMENSION_MAX)
+ return NULL;
+ /* Currently, only per-cpu allocation is supported. */
+ switch (alloc_flags) {
+ case USTCTL_COUNTER_ALLOC_PER_CPU:
+ break;
+
+ case USTCTL_COUNTER_ALLOC_PER_CPU | USTCTL_COUNTER_ALLOC_GLOBAL:
+ case USTCTL_COUNTER_ALLOC_GLOBAL:
+ default:
+ return NULL;
+ }
+ /* Map (bitness, arithmetic) to a registered transport name. */
+ switch (bitness) {
+ case USTCTL_COUNTER_BITNESS_32:
+ switch (arithmetic) {
+ case USTCTL_COUNTER_ARITHMETIC_MODULAR:
+ transport_name = "counter-per-cpu-32-modular";
+ break;
+ case USTCTL_COUNTER_ARITHMETIC_SATURATION:
+ transport_name = "counter-per-cpu-32-saturation";
+ break;
+ default:
+ return NULL;
+ }
+ break;
+ case USTCTL_COUNTER_BITNESS_64:
+ switch (arithmetic) {
+ case USTCTL_COUNTER_ARITHMETIC_MODULAR:
+ transport_name = "counter-per-cpu-64-modular";
+ break;
+ case USTCTL_COUNTER_ARITHMETIC_SATURATION:
+ transport_name = "counter-per-cpu-64-saturation";
+ break;
+ default:
+ return NULL;
+ }
+ break;
+ default:
+ return NULL;
+ }
+
+ transport = lttng_counter_transport_find(transport_name);
+ if (!transport) {
+ DBG("LTTng transport %s not found\n",
+ transport_name);
+ return NULL;
+ }
+
+ counter = zmalloc(sizeof(*counter));
+ if (!counter)
+ return NULL;
+ counter->attr = zmalloc(sizeof(*counter->attr));
+ if (!counter->attr)
+ goto free_counter;
+ counter->attr->bitness = bitness;
+ counter->attr->arithmetic = arithmetic;
+ counter->attr->nr_dimensions = nr_dimensions;
+ counter->attr->global_sum_step = global_sum_step;
+ for (i = 0; i < nr_dimensions; i++)
+ counter->attr->dimensions[i] = dimensions[i];
+
+ /* Convert the public dimension layout to the lib counter one. */
+ for (i = 0; i < nr_dimensions; i++) {
+ ust_dim[i].size = dimensions[i].size;
+ ust_dim[i].underflow_index = dimensions[i].underflow_index;
+ ust_dim[i].overflow_index = dimensions[i].overflow_index;
+ ust_dim[i].has_underflow = dimensions[i].has_underflow;
+ ust_dim[i].has_overflow = dimensions[i].has_overflow;
+ }
+ counter->counter = transport->ops.counter_create(nr_dimensions,
+ ust_dim, global_sum_step, global_counter_fd,
+ nr_counter_cpu_fds, counter_cpu_fds, true);
+ if (!counter->counter)
+ goto free_attr;
+ counter->ops = &transport->ops;
+ return counter;
+
+free_attr:
+ free(counter->attr);
+free_counter:
+ free(counter);
+ return NULL;
+}
+
+/*
+ * Build the serialized counter configuration object
+ * (LTTNG_UST_OBJECT_TYPE_COUNTER) describing "counter", to be sent to
+ * the application with ustctl_send_counter_data_to_ust().
+ *
+ * On success returns 0 and hands the object to the caller through
+ * *_counter_data.  Returns -EINVAL on unknown arithmetic/bitness,
+ * -ENOMEM on allocation failure.
+ */
+int ustctl_create_counter_data(struct ustctl_daemon_counter *counter,
+ struct lttng_ust_object_data **_counter_data)
+{
+ struct lttng_ust_object_data *counter_data;
+ struct lttng_ust_counter_conf counter_conf;
+ size_t i;
+ int ret;
+
+ /* Translate daemon-side attributes into the wire configuration. */
+ switch (counter->attr->arithmetic) {
+ case USTCTL_COUNTER_ARITHMETIC_MODULAR:
+ counter_conf.arithmetic = LTTNG_UST_COUNTER_ARITHMETIC_MODULAR;
+ break;
+ case USTCTL_COUNTER_ARITHMETIC_SATURATION:
+ counter_conf.arithmetic = LTTNG_UST_COUNTER_ARITHMETIC_SATURATION;
+ break;
+ default:
+ return -EINVAL;
+ }
+ switch (counter->attr->bitness) {
+ case USTCTL_COUNTER_BITNESS_32:
+ counter_conf.bitness = LTTNG_UST_COUNTER_BITNESS_32BITS;
+ break;
+ case USTCTL_COUNTER_BITNESS_64:
+ counter_conf.bitness = LTTNG_UST_COUNTER_BITNESS_64BITS;
+ break;
+ default:
+ return -EINVAL;
+ }
+ counter_conf.number_dimensions = counter->attr->nr_dimensions;
+ counter_conf.global_sum_step = counter->attr->global_sum_step;
+ for (i = 0; i < counter->attr->nr_dimensions; i++) {
+ counter_conf.dimensions[i].size = counter->attr->dimensions[i].size;
+ counter_conf.dimensions[i].underflow_index = counter->attr->dimensions[i].underflow_index;
+ counter_conf.dimensions[i].overflow_index = counter->attr->dimensions[i].overflow_index;
+ counter_conf.dimensions[i].has_underflow = counter->attr->dimensions[i].has_underflow;
+ counter_conf.dimensions[i].has_overflow = counter->attr->dimensions[i].has_overflow;
+ }
+
+ counter_data = zmalloc(sizeof(*counter_data));
+ if (!counter_data) {
+ ret = -ENOMEM;
+ goto error_alloc;
+ }
+ counter_data->type = LTTNG_UST_OBJECT_TYPE_COUNTER;
+ counter_data->handle = -1;
+
+ counter_data->size = sizeof(counter_conf);
+ counter_data->u.counter.data = zmalloc(sizeof(counter_conf));
+ if (!counter_data->u.counter.data) {
+ ret = -ENOMEM;
+ goto error_alloc_data;
+ }
+
+ memcpy(counter_data->u.counter.data, &counter_conf, sizeof(counter_conf));
+ *_counter_data = counter_data;
+
+ return 0;
+
+error_alloc_data:
+ free(counter_data);
+error_alloc:
+ return ret;
+}
+
+/*
+ * Wrap the counter's global area shm fd and length into an object of
+ * type LTTNG_UST_OBJECT_TYPE_COUNTER_GLOBAL.
+ *
+ * Returns 0 on success (object handed to the caller through
+ * *_counter_global_data), -EINVAL if the global shm cannot be
+ * obtained, -ENOMEM on allocation failure.
+ */
+int ustctl_create_counter_global_data(struct ustctl_daemon_counter *counter,
+ struct lttng_ust_object_data **_counter_global_data)
+{
+ struct lttng_ust_object_data *counter_global_data;
+ int ret, fd;
+ size_t len;
+
+ if (lttng_counter_get_global_shm(counter->counter, &fd, &len))
+ return -EINVAL;
+ counter_global_data = zmalloc(sizeof(*counter_global_data));
+ if (!counter_global_data) {
+ ret = -ENOMEM;
+ goto error_alloc;
+ }
+ counter_global_data->type = LTTNG_UST_OBJECT_TYPE_COUNTER_GLOBAL;
+ counter_global_data->handle = -1;
+ counter_global_data->size = len;
+ counter_global_data->u.counter_global.shm_fd = fd;
+ *_counter_global_data = counter_global_data;
+ return 0;
+
+error_alloc:
+ return ret;
+}
+
+/*
+ * Wrap one per-cpu counter shm fd and length into an object of type
+ * LTTNG_UST_OBJECT_TYPE_COUNTER_CPU for the given cpu number.
+ *
+ * Returns 0 on success (object handed to the caller through
+ * *_counter_cpu_data), -EINVAL if the per-cpu shm cannot be obtained,
+ * -ENOMEM on allocation failure.
+ */
+int ustctl_create_counter_cpu_data(struct ustctl_daemon_counter *counter, int cpu,
+ struct lttng_ust_object_data **_counter_cpu_data)
+{
+ struct lttng_ust_object_data *counter_cpu_data;
+ int ret, fd;
+ size_t len;
+
+ if (lttng_counter_get_cpu_shm(counter->counter, cpu, &fd, &len))
+ return -EINVAL;
+ counter_cpu_data = zmalloc(sizeof(*counter_cpu_data));
+ if (!counter_cpu_data) {
+ ret = -ENOMEM;
+ goto error_alloc;
+ }
+ counter_cpu_data->type = LTTNG_UST_OBJECT_TYPE_COUNTER_CPU;
+ counter_cpu_data->handle = -1;
+ counter_cpu_data->size = len;
+ counter_cpu_data->u.counter_cpu.shm_fd = fd;
+ counter_cpu_data->u.counter_cpu.cpu_nr = cpu;
+ *_counter_cpu_data = counter_cpu_data;
+ return 0;
+
+error_alloc:
+ return ret;
+}
+
+/* Destroy a daemon counter and free its saved attributes. */
+void ustctl_destroy_counter(struct ustctl_daemon_counter *counter)
+{
+ counter->ops->counter_destroy(counter->counter);
+ free(counter->attr);
+ free(counter);
+}
+
+/*
+ * Send the counter configuration object to the application: an
+ * LTTNG_UST_COUNTER command header followed by the serialized
+ * configuration payload.  On success, records the object handle
+ * assigned by the application in counter_data->handle.
+ *
+ * Returns 0 on success, a negative errno value on error (-EIO on
+ * short write).
+ */
+int ustctl_send_counter_data_to_ust(int sock, int parent_handle,
+ struct lttng_ust_object_data *counter_data)
+{
+ struct ustcomm_ust_msg lum;
+ struct ustcomm_ust_reply lur;
+ int ret;
+ size_t size;
+ ssize_t len;
+
+ if (!counter_data)
+ return -EINVAL;
+
+ size = counter_data->size;
+ memset(&lum, 0, sizeof(lum));
+ lum.handle = parent_handle;
+ lum.cmd = LTTNG_UST_COUNTER;
+ lum.u.counter.len = size;
+ ret = ustcomm_send_app_msg(sock, &lum);
+ if (ret)
+ return ret;
+
+ /* Send counter data */
+ len = ustcomm_send_unix_sock(sock, counter_data->u.counter.data, size);
+ if (len != size) {
+ if (len < 0)
+ return len;
+ else
+ return -EIO;
+ }
+
+ ret = ustcomm_recv_app_reply(sock, &lur, lum.handle, lum.cmd);
+ if (!ret) {
+ counter_data->handle = lur.ret_val;
+ }
+ return ret;
+}
+
+/*
+ * Send the counter global shm object to the application: an
+ * LTTNG_UST_COUNTER_GLOBAL command header followed by the shm file
+ * descriptor (passed through the fd-passing helper).  On success,
+ * records the object handle assigned by the application in
+ * counter_global_data->handle.
+ *
+ * Returns 0 on success, a negative errno value on error (-EIO on
+ * short write).
+ */
+int ustctl_send_counter_global_data_to_ust(int sock,
+ struct lttng_ust_object_data *counter_data,
+ struct lttng_ust_object_data *counter_global_data)
+{
+ struct ustcomm_ust_msg lum;
+ struct ustcomm_ust_reply lur;
+ int ret, shm_fd[1];
+ size_t size;
+ ssize_t len;
+
+ if (!counter_data || !counter_global_data)
+ return -EINVAL;
+
+ size = counter_global_data->size;
+ memset(&lum, 0, sizeof(lum));
+ lum.handle = counter_data->handle; /* parent handle */
+ lum.cmd = LTTNG_UST_COUNTER_GLOBAL;
+ lum.u.counter_global.len = size;
+ ret = ustcomm_send_app_msg(sock, &lum);
+ if (ret)
+ return ret;
+
+ shm_fd[0] = counter_global_data->u.counter_global.shm_fd;
+ len = ustcomm_send_fds_unix_sock(sock, shm_fd, 1);
+ if (len <= 0) {
+ if (len < 0)
+ return len;
+ else
+ return -EIO;
+ }
+
+ ret = ustcomm_recv_app_reply(sock, &lur, lum.handle, lum.cmd);
+ if (!ret) {
+ counter_global_data->handle = lur.ret_val;
+ }
+ return ret;
+}
+
+/*
+ * Send one per-cpu counter shm object to the application: an
+ * LTTNG_UST_COUNTER_CPU command header (carrying the cpu number)
+ * followed by the per-cpu shm file descriptor.  On success, records
+ * the object handle assigned by the application in
+ * counter_cpu_data->handle.
+ *
+ * Returns 0 on success, a negative errno value on error (-EIO on
+ * short write).
+ */
+int ustctl_send_counter_cpu_data_to_ust(int sock,
+ struct lttng_ust_object_data *counter_data,
+ struct lttng_ust_object_data *counter_cpu_data)
+{
+ struct ustcomm_ust_msg lum;
+ struct ustcomm_ust_reply lur;
+ int ret, shm_fd[1];
+ size_t size;
+ ssize_t len;
+
+ if (!counter_data || !counter_cpu_data)
+ return -EINVAL;
+
+ size = counter_cpu_data->size;
+ memset(&lum, 0, sizeof(lum));
+ lum.handle = counter_data->handle; /* parent handle */
+ lum.cmd = LTTNG_UST_COUNTER_CPU;
+ lum.u.counter_cpu.len = size;
+ lum.u.counter_cpu.cpu_nr = counter_cpu_data->u.counter_cpu.cpu_nr;
+ ret = ustcomm_send_app_msg(sock, &lum);
+ if (ret)
+ return ret;
+
+ /*
+ * Read the fd from the counter_cpu union member (this object is
+ * of type LTTNG_UST_OBJECT_TYPE_COUNTER_CPU), not counter_global
+ * as the copy-paste from the global variant did.
+ */
+ shm_fd[0] = counter_cpu_data->u.counter_cpu.shm_fd;
+ len = ustcomm_send_fds_unix_sock(sock, shm_fd, 1);
+ if (len <= 0) {
+ if (len < 0)
+ return len;
+ else
+ return -EIO;
+ }
+
+ ret = ustcomm_recv_app_reply(sock, &lur, lum.handle, lum.cmd);
+ if (!ret) {
+ counter_cpu_data->handle = lur.ret_val;
+ }
+ return ret;
+}
+
+/*
+ * Read the value (and overflow/underflow flags) at dimension_indexes
+ * for the given cpu.  Semantics are delegated to the transport ops.
+ */
+int ustctl_counter_read(struct ustctl_daemon_counter *counter,
+ const size_t *dimension_indexes,
+ int cpu, int64_t *value,
+ bool *overflow, bool *underflow)
+{
+ return counter->ops->counter_read(counter->counter, dimension_indexes, cpu,
+ value, overflow, underflow);
+}
+
+/*
+ * Aggregate the value at dimension_indexes across all CPUs.
+ * Semantics are delegated to the transport ops.
+ */
+int ustctl_counter_aggregate(struct ustctl_daemon_counter *counter,
+ const size_t *dimension_indexes,
+ int64_t *value,
+ bool *overflow, bool *underflow)
+{
+ return counter->ops->counter_aggregate(counter->counter, dimension_indexes,
+ value, overflow, underflow);
+}
+
+/* Reset the counter value at dimension_indexes (all CPUs). */
+int ustctl_counter_clear(struct ustctl_daemon_counter *counter,
+ const size_t *dimension_indexes)
+{
+ return counter->ops->counter_clear(counter->counter, dimension_indexes);
+}
+
static __attribute__((constructor))
void ustctl_init(void)
{
lttng_ring_buffer_client_overwrite_rt_init();
lttng_ring_buffer_client_discard_init();
lttng_ring_buffer_client_discard_rt_init();
+ lttng_counter_client_percpu_32_modular_init();
+ lttng_counter_client_percpu_64_modular_init();
lib_ringbuffer_signal_init();
}
lttng_ring_buffer_client_overwrite_rt_exit();
lttng_ring_buffer_client_overwrite_exit();
lttng_ring_buffer_metadata_client_exit();
+ lttng_counter_client_percpu_32_modular_exit();
+ lttng_counter_client_percpu_64_modular_exit();
}
lttng-ring-buffer-client-overwrite-rt.c \
lttng-ring-buffer-metadata-client.h \
lttng-ring-buffer-metadata-client.c \
+ lttng-counter-client-percpu-32-modular.c \
+ lttng-counter-client-percpu-64-modular.c \
lttng-clock.c lttng-getcpu.c
liblttng_ust_la_SOURCES =
liblttng_ust_la_LDFLAGS = -no-undefined -version-info $(LTTNG_UST_LIBRARY_VERSION)
liblttng_ust_support_la_LIBADD = \
- $(top_builddir)/libringbuffer/libringbuffer.la
+ $(top_builddir)/libringbuffer/libringbuffer.la \
+ $(top_builddir)/libcounter/libcounter.la
liblttng_ust_la_LIBADD = \
-lrt \
--- /dev/null
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-counter-client-percpu-32-modular.c
+ *
+ * LTTng lib counter client. Per-cpu 32-bit counters in modular
+ * arithmetic.
+ *
+ * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <lttng/ust-events.h>
+#include "../libcounter/counter.h"
+#include "../libcounter/counter-api.h"
+
+/* Client configuration: per-cpu allocation and sync, 32-bit modular. */
+static const struct lib_counter_config client_config = {
+ .alloc = COUNTER_ALLOC_PER_CPU,
+ .sync = COUNTER_SYNC_PER_CPU,
+ .arithmetic = COUNTER_ARITHMETIC_MODULAR,
+ .counter_size = COUNTER_SIZE_32_BIT,
+};
+
+/*
+ * Create a counter for this client configuration.  Modular arithmetic
+ * has no underflow/overflow notion, so dimensions requesting either
+ * are rejected (NULL return), as is an excessive dimension count.
+ */
+static struct lib_counter *counter_create(size_t nr_dimensions,
+ const struct lttng_counter_dimension *dimensions,
+ int64_t global_sum_step,
+ int global_counter_fd,
+ int nr_counter_cpu_fds,
+ const int *counter_cpu_fds,
+ bool is_daemon)
+{
+ size_t max_nr_elem[LTTNG_COUNTER_DIMENSION_MAX], i;
+
+ if (nr_dimensions > LTTNG_COUNTER_DIMENSION_MAX)
+ return NULL;
+ for (i = 0; i < nr_dimensions; i++) {
+ if (dimensions[i].has_underflow || dimensions[i].has_overflow)
+ return NULL;
+ max_nr_elem[i] = dimensions[i].size;
+ }
+ return lttng_counter_create(&client_config, nr_dimensions, max_nr_elem,
+ global_sum_step, global_counter_fd, nr_counter_cpu_fds,
+ counter_cpu_fds, is_daemon);
+}
+
+/* Thin wrappers binding the generic counter API to client_config. */
+static void counter_destroy(struct lib_counter *counter)
+{
+ lttng_counter_destroy(counter);
+}
+
+static int counter_add(struct lib_counter *counter, const size_t *dimension_indexes, int64_t v)
+{
+ return lttng_counter_add(&client_config, counter, dimension_indexes, v);
+}
+
+static int counter_read(struct lib_counter *counter, const size_t *dimension_indexes, int cpu,
+ int64_t *value, bool *overflow, bool *underflow)
+{
+ return lttng_counter_read(&client_config, counter, dimension_indexes, cpu, value,
+ overflow, underflow);
+}
+
+static int counter_aggregate(struct lib_counter *counter, const size_t *dimension_indexes,
+ int64_t *value, bool *overflow, bool *underflow)
+{
+ return lttng_counter_aggregate(&client_config, counter, dimension_indexes, value,
+ overflow, underflow);
+}
+
+static int counter_clear(struct lib_counter *counter, const size_t *dimension_indexes)
+{
+ return lttng_counter_clear(&client_config, counter, dimension_indexes);
+}
+
+/* Transport descriptor looked up by name at counter creation time. */
+static struct lttng_counter_transport lttng_counter_transport = {
+ .name = "counter-per-cpu-32-modular",
+ .ops = {
+ .counter_create = counter_create,
+ .counter_destroy = counter_destroy,
+ .counter_add = counter_add,
+ .counter_read = counter_read,
+ .counter_aggregate = counter_aggregate,
+ .counter_clear = counter_clear,
+ },
+ .client_config = &client_config,
+};
+
+/* Register this transport (called from the library constructor path). */
+void lttng_counter_client_percpu_32_modular_init(void)
+{
+ lttng_counter_transport_register(&lttng_counter_transport);
+}
+
+/* Unregister this transport (called from the library teardown path). */
+void lttng_counter_client_percpu_32_modular_exit(void)
+{
+ lttng_counter_transport_unregister(&lttng_counter_transport);
+}
--- /dev/null
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-counter-client-percpu-64-modular.c
+ *
+ * LTTng lib counter client. Per-cpu 64-bit counters in modular
+ * arithmetic.
+ *
+ * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <lttng/ust-events.h>
+#include "../libcounter/counter.h"
+#include "../libcounter/counter-api.h"
+
+/* Client configuration: per-cpu allocation and sync, 64-bit modular. */
+static const struct lib_counter_config client_config = {
+ .alloc = COUNTER_ALLOC_PER_CPU,
+ .sync = COUNTER_SYNC_PER_CPU,
+ .arithmetic = COUNTER_ARITHMETIC_MODULAR,
+ .counter_size = COUNTER_SIZE_64_BIT,
+};
+
+/*
+ * Create a counter for this client configuration.  Modular arithmetic
+ * has no underflow/overflow notion, so dimensions requesting either
+ * are rejected (NULL return), as is an excessive dimension count.
+ */
+static struct lib_counter *counter_create(size_t nr_dimensions,
+ const struct lttng_counter_dimension *dimensions,
+ int64_t global_sum_step,
+ int global_counter_fd,
+ int nr_counter_cpu_fds,
+ const int *counter_cpu_fds,
+ bool is_daemon)
+{
+ size_t max_nr_elem[LTTNG_COUNTER_DIMENSION_MAX], i;
+
+ if (nr_dimensions > LTTNG_COUNTER_DIMENSION_MAX)
+ return NULL;
+ for (i = 0; i < nr_dimensions; i++) {
+ if (dimensions[i].has_underflow || dimensions[i].has_overflow)
+ return NULL;
+ max_nr_elem[i] = dimensions[i].size;
+ }
+ return lttng_counter_create(&client_config, nr_dimensions, max_nr_elem,
+ global_sum_step, global_counter_fd, nr_counter_cpu_fds,
+ counter_cpu_fds, is_daemon);
+}
+
+/* Thin wrappers binding the generic counter API to client_config. */
+static void counter_destroy(struct lib_counter *counter)
+{
+ lttng_counter_destroy(counter);
+}
+
+static int counter_add(struct lib_counter *counter, const size_t *dimension_indexes, int64_t v)
+{
+ return lttng_counter_add(&client_config, counter, dimension_indexes, v);
+}
+
+static int counter_read(struct lib_counter *counter, const size_t *dimension_indexes, int cpu,
+ int64_t *value, bool *overflow, bool *underflow)
+{
+ return lttng_counter_read(&client_config, counter, dimension_indexes, cpu, value,
+ overflow, underflow);
+}
+
+static int counter_aggregate(struct lib_counter *counter, const size_t *dimension_indexes,
+ int64_t *value, bool *overflow, bool *underflow)
+{
+ return lttng_counter_aggregate(&client_config, counter, dimension_indexes, value,
+ overflow, underflow);
+}
+
+static int counter_clear(struct lib_counter *counter, const size_t *dimension_indexes)
+{
+ return lttng_counter_clear(&client_config, counter, dimension_indexes);
+}
+
+/* Transport descriptor looked up by name at counter creation time. */
+static struct lttng_counter_transport lttng_counter_transport = {
+ .name = "counter-per-cpu-64-modular",
+ .ops = {
+ .counter_create = counter_create,
+ .counter_destroy = counter_destroy,
+ .counter_add = counter_add,
+ .counter_read = counter_read,
+ .counter_aggregate = counter_aggregate,
+ .counter_clear = counter_clear,
+ },
+ .client_config = &client_config,
+};
+
+/* Register this transport (called from the library constructor path). */
+void lttng_counter_client_percpu_64_modular_init(void)
+{
+ lttng_counter_transport_register(&lttng_counter_transport);
+}
+
+/* Unregister this transport (called from the library teardown path). */
+void lttng_counter_client_percpu_64_modular_exit(void)
+{
+ lttng_counter_transport_unregister(&lttng_counter_transport);
+}
#include "ust-events-internal.h"
#include "wait.h"
#include "../libringbuffer/shm.h"
+#include "../libcounter/counter.h"
#include "jhash.h"
#include <lttng/ust-abi.h>
return session;
}
+/*
+ * Create an application-side counter using the transport of the given
+ * name (used below for the event_notifier group error counter).
+ *
+ * Returns NULL on unknown transport, allocation failure or counter
+ * creation failure; the new counter otherwise (to be destroyed with
+ * lttng_ust_counter_destroy()).
+ */
+struct lttng_counter *lttng_ust_counter_create(
+ const char *counter_transport_name,
+ size_t number_dimensions, const struct lttng_counter_dimension *dimensions)
+{
+ struct lttng_counter_transport *counter_transport = NULL;
+ struct lttng_counter *counter = NULL;
+
+ counter_transport = lttng_counter_transport_find(counter_transport_name);
+ if (!counter_transport)
+ goto notransport;
+ counter = zmalloc(sizeof(struct lttng_counter));
+ if (!counter)
+ goto nomem;
+
+ counter->ops = &counter_transport->ops;
+ counter->transport = counter_transport;
+
+ /* global_sum_step 0, no pre-existing fds, not a daemon counter. */
+ counter->counter = counter->ops->counter_create(
+ number_dimensions, dimensions, 0,
+ -1, 0, NULL, false);
+ if (!counter->counter) {
+ goto create_error;
+ }
+
+ return counter;
+
+create_error:
+ free(counter);
+nomem:
+notransport:
+ return NULL;
+}
+
+/* Destroy a counter created with lttng_ust_counter_create(). */
+static
+void lttng_ust_counter_destroy(struct lttng_counter *counter)
+{
+ counter->ops->counter_destroy(counter->counter);
+ free(counter);
+}
+
struct lttng_event_notifier_group *lttng_event_notifier_group_create(void)
{
struct lttng_event_notifier_group *event_notifier_group;
&event_notifier_group->event_notifiers_head, node)
_lttng_event_notifier_destroy(notifier);
- /* Close the notification fd to the listener of event notifiers. */
+ if (event_notifier_group->error_counter)
+ lttng_ust_counter_destroy(event_notifier_group->error_counter);
+
+ /* Close the notification fd to the listener of event_notifiers. */
lttng_ust_lock_fd_tracker();
close_ret = close(event_notifier_group->notification_fd);
#include "../libringbuffer/frontend_types.h"
#include "../libringbuffer/shm.h"
+#include "../libcounter/counter.h"
#include "lttng-tracer.h"
#include "string-utils.h"
#include "ust-events-internal.h"
return lttng_session_disable(session);
case LTTNG_UST_SESSION_STATEDUMP:
return lttng_session_statedump(session);
+ case LTTNG_UST_COUNTER:
+ case LTTNG_UST_COUNTER_GLOBAL:
+ case LTTNG_UST_COUNTER_CPU:
+ /* Not implemented yet. */
+ return -EINVAL;
default:
return -EINVAL;
}
}
}
+/**
+ * lttng_event_notifier_group_error_counter_cmd - lttng event_notifier group error counter object command
+ *
+ * @objd: the object descriptor
+ * @cmd: the command
+ * @arg: command arg
+ * @uargs: UST arguments (internal)
+ * @owner: objd owner
+ *
+ * This descriptor implements lttng commands:
+ * LTTNG_UST_COUNTER_GLOBAL
+ * Unimplemented for now: always returns -EINVAL.
+ * LTTNG_UST_COUNTER_CPU
+ * Attach the received per-cpu shm fd to the counter.
+ * Return negative error code on error, 0 on success.
+ */
+static
+long lttng_event_notifier_group_error_counter_cmd(int objd, unsigned int cmd, unsigned long arg,
+ union ust_args *uargs, void *owner)
+{
+ struct lttng_counter *counter = objd_private(objd);
+
+ switch (cmd) {
+ case LTTNG_UST_COUNTER_GLOBAL:
+ return -EINVAL; /* Unimplemented. */
+ case LTTNG_UST_COUNTER_CPU:
+ {
+ struct lttng_ust_counter_cpu *counter_cpu =
+ (struct lttng_ust_counter_cpu *)arg;
+ return lttng_counter_set_cpu_shm(counter->counter,
+ counter_cpu->cpu_nr, uargs->counter_shm.shm_fd);
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * Release the error counter object descriptor: drop the reference it
+ * holds on its owning event_notifier group.  The counter itself is
+ * destroyed when the group is torn down.
+ *
+ * NOTE(review): this function appears to be referenced only by the
+ * ops table below — consider making it static unless a header
+ * declares it.
+ */
+int lttng_release_event_notifier_group_error_counter(int objd)
+{
+ struct lttng_counter *counter = objd_private(objd);
+
+ if (counter) {
+ return lttng_ust_objd_unref(counter->event_notifier_group->objd, 0);
+ } else {
+ return -EINVAL;
+ }
+}
+
+static const struct lttng_ust_objd_ops lttng_event_notifier_group_error_counter_ops = {
+ .release = lttng_release_event_notifier_group_error_counter,
+ .cmd = lttng_event_notifier_group_error_counter_cmd,
+};
+
+/*
+ * Create the error counter for an event_notifier group.
+ *
+ * Only a single, one-dimensional, modular-arithmetic counter (32- or
+ * 64-bit) is accepted.  Allocates an object descriptor for the
+ * counter, attaches it to the group (which owns it) and takes a
+ * reference on the group so the group outlives the counter object.
+ *
+ * Returns the new counter objd on success, a negative error code on
+ * error (-EBUSY if the group already has an error counter).
+ */
+static
+int lttng_ust_event_notifier_group_create_error_counter(int event_notifier_group_objd, void *owner,
+ struct lttng_ust_counter_conf *error_counter_conf)
+{
+ const char *counter_transport_name;
+ struct lttng_event_notifier_group *event_notifier_group =
+ objd_private(event_notifier_group_objd);
+ struct lttng_counter *counter;
+ int counter_objd, ret;
+ struct lttng_counter_dimension dimensions[1];
+ size_t counter_len;
+
+ if (event_notifier_group->error_counter)
+ return -EBUSY;
+
+ if (error_counter_conf->arithmetic != LTTNG_UST_COUNTER_ARITHMETIC_MODULAR)
+ return -EINVAL;
+
+ if (error_counter_conf->number_dimensions != 1)
+ return -EINVAL;
+
+ switch (error_counter_conf->bitness) {
+ case LTTNG_UST_COUNTER_BITNESS_64BITS:
+ counter_transport_name = "counter-per-cpu-64-modular";
+ break;
+ case LTTNG_UST_COUNTER_BITNESS_32BITS:
+ counter_transport_name = "counter-per-cpu-32-modular";
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ counter_objd = objd_alloc(NULL, &lttng_event_notifier_group_error_counter_ops, owner,
+ "event_notifier group error counter");
+ if (counter_objd < 0) {
+ ret = counter_objd;
+ goto objd_error;
+ }
+
+ /* Single dimension, no underflow/overflow indexes. */
+ counter_len = error_counter_conf->dimensions[0].size;
+ dimensions[0].size = counter_len;
+ dimensions[0].underflow_index = 0;
+ dimensions[0].overflow_index = 0;
+ dimensions[0].has_underflow = 0;
+ dimensions[0].has_overflow = 0;
+
+ counter = lttng_ust_counter_create(counter_transport_name, 1, dimensions);
+ if (!counter) {
+ ret = -EINVAL;
+ goto create_error;
+ }
+
+ event_notifier_group->error_counter = counter;
+ event_notifier_group->error_counter_len = counter_len;
+
+ counter->objd = counter_objd;
+ counter->event_notifier_group = event_notifier_group; /* owner */
+
+ objd_set_private(counter_objd, counter);
+ /* The error counter holds a reference on the event_notifier group. */
+ objd_ref(event_notifier_group->objd);
+
+ return counter_objd;
+
+create_error:
+ {
+ int err;
+
+ err = lttng_ust_objd_unref(counter_objd, 1);
+ assert(!err);
+ }
+objd_error:
+ return ret;
+}
+
static
long lttng_event_notifier_group_cmd(int objd, unsigned int cmd, unsigned long arg,
union ust_args *uargs, void *owner)
LTTNG_ENABLER_FORMAT_EVENT);
}
}
+ case LTTNG_UST_COUNTER:
+ {
+ struct lttng_ust_counter_conf *counter_conf =
+ (struct lttng_ust_counter_conf *) uargs->counter.counter_data;
+ return lttng_ust_event_notifier_group_create_error_counter(
+ objd, owner, counter_conf);
+ }
default:
return -EINVAL;
}
/* Event notifier group commands */
[ LTTNG_UST_EVENT_NOTIFIER_CREATE ] = "Create event notifier",
+
+ /* Session and event notifier group commands */
+ [ LTTNG_UST_COUNTER ] = "Create Counter",
+
+ /* Counter commands */
+ [ LTTNG_UST_COUNTER_GLOBAL ] = "Create Counter Global",
+ [ LTTNG_UST_COUNTER_CPU ] = "Create Counter CPU",
};
static const char *str_timeout;
extern void lttng_ring_buffer_client_discard_exit(void);
extern void lttng_ring_buffer_client_discard_rt_exit(void);
extern void lttng_ring_buffer_metadata_client_exit(void);
+extern void lttng_counter_client_percpu_32_modular_init(void);
+extern void lttng_counter_client_percpu_32_modular_exit(void);
+extern void lttng_counter_client_percpu_64_modular_init(void);
+extern void lttng_counter_client_percpu_64_modular_exit(void);
static char *get_map_shm(struct sock_info *sock_info);
ret = -ENOSYS;
}
break;
+ case LTTNG_UST_COUNTER:
+ {
+ void *counter_data;
+
+ len = ustcomm_recv_counter_from_sessiond(sock,
+ &counter_data, lum->u.counter.len);
+ switch (len) {
+ case 0: /* orderly shutdown */
+ ret = 0;
+ goto error;
+ default:
+ if (len == lum->u.counter.len) {
+ DBG("counter data received");
+ break;
+ } else if (len < 0) {
+ DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
+ if (len == -ECONNRESET) {
+ ERR("%s remote end closed connection", sock_info->name);
+ ret = len;
+ goto error;
+ }
+ ret = len;
+ goto error;
+ } else {
+ DBG("incorrect counter data message size: %zd", len);
+ ret = -EINVAL;
+ goto error;
+ }
+ }
+ args.counter.counter_data = counter_data;
+ if (ops->cmd)
+ ret = ops->cmd(lum->handle, lum->cmd,
+ (unsigned long) &lum->u,
+ &args, sock_info);
+ else
+ ret = -ENOSYS;
+ break;
+ }
+ case LTTNG_UST_COUNTER_GLOBAL:
+ {
+ /* Receive shm_fd */
+ ret = ustcomm_recv_counter_shm_from_sessiond(sock,
+ &args.counter_shm.shm_fd);
+ if (ret) {
+ goto error;
+ }
+
+ if (ops->cmd)
+ ret = ops->cmd(lum->handle, lum->cmd,
+ (unsigned long) &lum->u,
+ &args, sock_info);
+ else
+ ret = -ENOSYS;
+ break;
+ }
+ case LTTNG_UST_COUNTER_CPU:
+ {
+ /* Receive shm_fd */
+ ret = ustcomm_recv_counter_shm_from_sessiond(sock,
+ &args.counter_shm.shm_fd);
+ if (ret) {
+ goto error;
+ }
+
+ if (ops->cmd)
+ ret = ops->cmd(lum->handle, lum->cmd,
+ (unsigned long) &lum->u,
+ &args, sock_info);
+ else
+ ret = -ENOSYS;
+ break;
+ }
+
default:
if (ops->cmd)
ret = ops->cmd(lum->handle, lum->cmd,
lttng_ring_buffer_client_overwrite_rt_init();
lttng_ring_buffer_client_discard_init();
lttng_ring_buffer_client_discard_rt_init();
+ lttng_counter_client_percpu_32_modular_init();
+ lttng_counter_client_percpu_64_modular_init();
lttng_perf_counter_init();
/*
* Invoke ust malloc wrapper init before starting other threads.
lttng_ring_buffer_client_overwrite_rt_exit();
lttng_ring_buffer_client_overwrite_exit();
lttng_ring_buffer_metadata_client_exit();
+ lttng_counter_client_percpu_32_modular_exit();
+ lttng_counter_client_percpu_64_modular_exit();
lttng_ust_statedump_destroy();
exit_tracepoint();
if (!exiting) {
#include "jhash.h"
static CDS_LIST_HEAD(lttng_transport_list);
+static CDS_LIST_HEAD(lttng_counter_transport_list);
struct lttng_transport *lttng_transport_find(const char *name)
{
return NULL;
}
+/*
+ * Look up a registered counter transport by name.
+ *
+ * @name: NUL-terminated transport name to match exactly.
+ *
+ * Returns the matching transport, or NULL if none is registered under
+ * that name. Walks the global lttng_counter_transport_list; callers are
+ * expected to hold ust_lock, consistent with the register/unregister
+ * functions operating on the same list.
+ */
+struct lttng_counter_transport *lttng_counter_transport_find(const char *name)
+{
+	struct lttng_counter_transport *transport;
+
+	/* Fix mangled address-of: iterate the global counter transport list. */
+	cds_list_for_each_entry(transport, &lttng_counter_transport_list, node) {
+		if (!strcmp(transport->name, name))
+			return transport;
+	}
+	return NULL;
+}
+
/**
* lttng_transport_register - LTT transport registration
* @transport: transport structure
cds_list_del(&transport->node);
}
+/**
+ * lttng_counter_transport_register - LTTng counter transport registration
+ * @transport: transport structure
+ *
+ * Registers a counter transport which can be used as output to extract
+ * the data out of LTTng. Called with ust_lock held.
+ */
+void lttng_counter_transport_register(struct lttng_counter_transport *transport)
+{
+	/* Fix mangled address-of: append to the global counter transport list. */
+	cds_list_add_tail(&transport->node, &lttng_counter_transport_list);
+}
+
+/**
+ * lttng_counter_transport_unregister - LTTng counter transport unregistration
+ * @transport: transport structure
+ * Called with ust_lock held.
+ */
+void lttng_counter_transport_unregister(struct lttng_counter_transport *transport)
+{
+	/* Unlink from the global counter transport list; no memory is freed here. */
+	cds_list_del(&transport->node);
+}
+
/*
* Needed by comm layer.
*/