From: Mathieu Desnoyers Date: Fri, 4 Sep 2020 15:29:47 +0000 (-0400) Subject: Implement libcounter X-Git-Tag: v2.13.0-rc1~431 X-Git-Url: https://git.lttng.org./?a=commitdiff_plain;h=ebabbf580131acd1fe246c4d31fc5c044d36a038;p=lttng-ust.git Implement libcounter Signed-off-by: Mathieu Desnoyers Change-Id: I7bc2707e8bdea712addc84d90329486cda1a24e1 --- diff --git a/Makefile.am b/Makefile.am index a13c6e31..b6b7fb6d 100644 --- a/Makefile.am +++ b/Makefile.am @@ -1,6 +1,7 @@ ACLOCAL_AMFLAGS = -I m4 SUBDIRS = . include snprintf libringbuffer liblttng-ust-comm \ + libcounter \ libmsgpack \ liblttng-ust \ liblttng-ust-ctl \ diff --git a/configure.ac b/configure.ac index 032bb731..e5ac3200 100644 --- a/configure.ac +++ b/configure.ac @@ -504,6 +504,7 @@ AC_CONFIG_FILES([ include/Makefile include/lttng/ust-version.h snprintf/Makefile + libcounter/Makefile libmsgpack/Makefile libringbuffer/Makefile liblttng-ust-comm/Makefile diff --git a/include/Makefile.am b/include/Makefile.am index 277e4e69..23a165ae 100644 --- a/include/Makefile.am +++ b/include/Makefile.am @@ -25,7 +25,9 @@ nobase_include_HEADERS = \ lttng/lttng-ust-tracelog.h \ lttng/ust-clock.h \ lttng/ust-getcpu.h \ - lttng/ust-elf.h + lttng/ust-elf.h \ + lttng/counter-config.h \ + lttng/bitmap.h # note: usterr-signal-safe.h, core.h and share.h need namespace cleanup. diff --git a/include/lttng/bitmap.h b/include/lttng/bitmap.h new file mode 100644 index 00000000..fb57ff41 --- /dev/null +++ b/include/lttng/bitmap.h @@ -0,0 +1,70 @@ +/* + * lttng/bitmap.h + * + * LTTng Bitmap API + * + * Copyright (C) 2020 Mathieu Desnoyers + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef _LTTNG_BITMAP_H +#define _LTTNG_BITMAP_H + +#include +#include +#include +#include + +static inline void lttng_bitmap_index(unsigned int index, unsigned int *word, + unsigned int *bit) +{ + *word = index / CAA_BITS_PER_LONG; + *bit = index % CAA_BITS_PER_LONG; +} + +static inline void lttng_bitmap_set_bit(unsigned int index, unsigned long *p) +{ + unsigned int word, bit; + unsigned long val; + + lttng_bitmap_index(index, &word, &bit); + val = 1U << bit; + uatomic_or(p + word, val); +} + +static inline void lttng_bitmap_clear_bit(unsigned int index, unsigned long *p) +{ + unsigned int word, bit; + unsigned long val; + + lttng_bitmap_index(index, &word, &bit); + val = ~(1U << bit); + uatomic_and(p + word, val); +} + +static inline bool lttng_bitmap_test_bit(unsigned int index, unsigned long *p) +{ + unsigned int word, bit; + + lttng_bitmap_index(index, &word, &bit); + return (CMM_LOAD_SHARED(p[word]) >> bit) & 0x1; +} + +#endif /* _LTTNG_BITMAP_H */ diff --git a/include/lttng/counter-config.h b/include/lttng/counter-config.h new file mode 100644 index 00000000..ebe15365 --- /dev/null +++ b/include/lttng/counter-config.h @@ -0,0 +1,57 @@ +/* + * lttng/counter-config.h + * + * LTTng Counters Configuration + * + * Copyright (C) 2020 Mathieu Desnoyers + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
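For illustration, a minimal usage sketch of the lttng/bitmap.h helpers above (not part of this patch; the array name and NR_SLOTS size are hypothetical, and the caller must size the bitmap for the indexes it touches):

/* Illustrative sketch only: hypothetical caller-owned bitmap. */
#include <lttng/bitmap.h>

#define NR_SLOTS 128

static unsigned long slot_bitmap[(NR_SLOTS + CAA_BITS_PER_LONG - 1) / CAA_BITS_PER_LONG];

static void mark_slot(unsigned int idx)
{
	if (!lttng_bitmap_test_bit(idx, slot_bitmap))	/* CMM_LOAD_SHARED read */
		lttng_bitmap_set_bit(idx, slot_bitmap);	/* atomic OR of the bit */
}

static void release_slot(unsigned int idx)
{
	lttng_bitmap_clear_bit(idx, slot_bitmap);	/* atomic AND with ~bit */
}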
+ */ + +#ifndef _LTTNG_COUNTER_CONFIG_H +#define _LTTNG_COUNTER_CONFIG_H + +#include + +enum lib_counter_config_alloc { + COUNTER_ALLOC_PER_CPU = (1 << 0), + COUNTER_ALLOC_GLOBAL = (1 << 1), +}; + +enum lib_counter_config_sync { + COUNTER_SYNC_PER_CPU, + COUNTER_SYNC_GLOBAL, +}; + +struct lib_counter_config { + uint32_t alloc; /* enum lib_counter_config_alloc flags */ + enum lib_counter_config_sync sync; + enum { + COUNTER_ARITHMETIC_MODULAR, + COUNTER_ARITHMETIC_SATURATE, /* TODO */ + } arithmetic; + enum { + COUNTER_SIZE_8_BIT = 1, + COUNTER_SIZE_16_BIT = 2, + COUNTER_SIZE_32_BIT = 4, + COUNTER_SIZE_64_BIT = 8, + } counter_size; +}; + +#endif /* _LTTNG_COUNTER_CONFIG_H */ diff --git a/include/lttng/ust-abi.h b/include/lttng/ust-abi.h index 1c80c144..6a6144f7 100644 --- a/include/lttng/ust-abi.h +++ b/include/lttng/ust-abi.h @@ -103,6 +103,40 @@ struct lttng_ust_stream { */ } LTTNG_PACKED; + +enum lttng_ust_counter_arithmetic { + LTTNG_UST_COUNTER_ARITHMETIC_MODULAR = 0, + LTTNG_UST_COUNTER_ARITHMETIC_SATURATION = 1, +}; + +enum lttng_ust_counter_bitness { + LTTNG_UST_COUNTER_BITNESS_32BITS = 4, + LTTNG_UST_COUNTER_BITNESS_64BITS = 8, +}; + +struct lttng_ust_counter_dimension { + uint64_t size; + uint64_t underflow_index; + uint64_t overflow_index; + uint8_t has_underflow; + uint8_t has_overflow; +} LTTNG_PACKED; + +#define LTTNG_UST_COUNTER_DIMENSION_MAX 8 +struct lttng_ust_counter_conf { + uint32_t arithmetic; /* enum lttng_ust_counter_arithmetic */ + uint32_t bitness; /* enum lttng_ust_counter_bitness */ + uint32_t number_dimensions; + int64_t global_sum_step; + struct lttng_ust_counter_dimension dimensions[LTTNG_UST_COUNTER_DIMENSION_MAX]; +} LTTNG_PACKED; + +struct lttng_ust_counter_value { + uint32_t number_dimensions; + uint64_t dimension_indexes[LTTNG_UST_COUNTER_DIMENSION_MAX]; + int64_t value; +} LTTNG_PACKED; + #define LTTNG_UST_EVENT_PADDING1 8 #define LTTNG_UST_EVENT_PADDING2 (LTTNG_UST_SYM_NAME_LEN + 32) struct lttng_ust_event { @@ -123,6 +157,7 @@ struct lttng_ust_event { #define LTTNG_UST_EVENT_NOTIFIER_PADDING1 16 struct lttng_ust_event_notifier { struct lttng_ust_event event; + uint64_t error_counter_index; char padding[LTTNG_UST_EVENT_NOTIFIER_PADDING1]; } LTTNG_PACKED; @@ -133,6 +168,27 @@ struct lttng_ust_event_notifier_notification { char padding[LTTNG_EVENT_NOTIFIER_NOTIFICATION_PADDING]; } LTTNG_PACKED; +#define LTTNG_UST_COUNTER_PADDING1 (LTTNG_UST_SYM_NAME_LEN + 32) +#define LTTNG_UST_COUNTER_DATA_MAX_LEN 4096U +struct lttng_ust_counter { + uint64_t len; + char padding[LTTNG_UST_COUNTER_PADDING1]; + char data[]; /* variable sized data */ +} LTTNG_PACKED; + +#define LTTNG_UST_COUNTER_GLOBAL_PADDING1 (LTTNG_UST_SYM_NAME_LEN + 32) +struct lttng_ust_counter_global { + uint64_t len; /* shm len */ + char padding[LTTNG_UST_COUNTER_GLOBAL_PADDING1]; +} LTTNG_PACKED; + +#define LTTNG_UST_COUNTER_CPU_PADDING1 (LTTNG_UST_SYM_NAME_LEN + 32) +struct lttng_ust_counter_cpu { + uint64_t len; /* shm len */ + uint32_t cpu_nr; + char padding[LTTNG_UST_COUNTER_CPU_PADDING1]; +} LTTNG_PACKED; + enum lttng_ust_field_type { LTTNG_UST_FIELD_OTHER = 0, LTTNG_UST_FIELD_INTEGER = 1, @@ -233,6 +289,9 @@ enum lttng_ust_object_type { LTTNG_UST_OBJECT_TYPE_CONTEXT = 3, LTTNG_UST_OBJECT_TYPE_EVENT_NOTIFIER_GROUP = 4, LTTNG_UST_OBJECT_TYPE_EVENT_NOTIFIER = 5, + LTTNG_UST_OBJECT_TYPE_COUNTER = 6, + LTTNG_UST_OBJECT_TYPE_COUNTER_GLOBAL = 7, + LTTNG_UST_OBJECT_TYPE_COUNTER_CPU = 8, }; #define LTTNG_UST_OBJECT_DATA_PADDING1 32 @@ -254,6 +313,16 @@ struct lttng_ust_object_data { int wakeup_fd; 
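To make the counter configuration concrete, here is a hedged sketch (not taken from this patch) of a client configuration for per-CPU 64-bit modular counters, built from the types declared in lttng/counter-config.h:

/* Illustrative sketch only: hypothetical client configuration. */
#include <lttng/counter-config.h>

static const struct lib_counter_config client_config = {
	.alloc = COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL,	/* allocate both layouts */
	.sync = COUNTER_SYNC_PER_CPU,				/* hot-path updates stay per-CPU */
	.arithmetic = COUNTER_ARITHMETIC_MODULAR,
	.counter_size = COUNTER_SIZE_64_BIT,
};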
uint32_t stream_nr; } stream; + struct { + void *data; + } counter; + struct { + int shm_fd; + } counter_global; + struct { + int shm_fd; + uint32_t cpu_nr; + } counter_cpu; char padding2[LTTNG_UST_OBJECT_DATA_PADDING2]; } u; } LTTNG_PACKED; @@ -354,6 +423,16 @@ struct lttng_ust_event_exclusion { _UST_CMDW(0xB0, struct lttng_ust_event_notifier) #define LTTNG_UST_CAPTURE _UST_CMD(0xB1) +/* Session and event notifier group commands */ +#define LTTNG_UST_COUNTER \ + _UST_CMDW(0xC0, struct lttng_ust_counter) + +/* Counter commands */ +#define LTTNG_UST_COUNTER_GLOBAL \ + _UST_CMDW(0xD0, struct lttng_ust_counter_global) +#define LTTNG_UST_COUNTER_CPU \ + _UST_CMDW(0xD1, struct lttng_ust_counter_cpu) + #define LTTNG_UST_ROOT_HANDLE 0 struct lttng_ust_obj; @@ -376,6 +455,12 @@ union ust_args { struct { int event_notifier_notif_fd; } event_notifier_handle; + struct { + void *counter_data; + } counter; + struct { + int shm_fd; + } counter_shm; }; struct lttng_ust_objd_ops { diff --git a/include/lttng/ust-ctl.h b/include/lttng/ust-ctl.h index 35b73342..2c15a54e 100644 --- a/include/lttng/ust-ctl.h +++ b/include/lttng/ust-ctl.h @@ -22,6 +22,7 @@ #include #include #include +#include #include #include @@ -591,4 +592,81 @@ int ustctl_reply_register_channel(int sock, enum ustctl_channel_header header_type, int ret_code); /* return code. 0 ok, negative error */ +/* + * Counter API. + */ + +enum ustctl_counter_bitness { + USTCTL_COUNTER_BITNESS_32 = 4, + USTCTL_COUNTER_BITNESS_64 = 8, +}; + +enum ustctl_counter_arithmetic { + USTCTL_COUNTER_ARITHMETIC_MODULAR = 0, + USTCTL_COUNTER_ARITHMETIC_SATURATION = 1, +}; + +/* Used as alloc flags. */ +enum ustctl_counter_alloc { + USTCTL_COUNTER_ALLOC_PER_CPU = (1 << 0), + USTCTL_COUNTER_ALLOC_GLOBAL = (1 << 1), +}; + +struct ustctl_daemon_counter; + +int ustctl_get_nr_cpu_per_counter(void); + +struct ustctl_counter_dimension { + uint64_t size; + uint64_t underflow_index; + uint64_t overflow_index; + uint8_t has_underflow; + uint8_t has_overflow; +}; + +struct ustctl_daemon_counter * + ustctl_create_counter(size_t nr_dimensions, + const struct ustctl_counter_dimension *dimensions, + int64_t global_sum_step, + int global_counter_fd, + int nr_counter_cpu_fds, + const int *counter_cpu_fds, + enum ustctl_counter_bitness bitness, + enum ustctl_counter_arithmetic arithmetic, + uint32_t alloc_flags); + +int ustctl_create_counter_data(struct ustctl_daemon_counter *counter, + struct lttng_ust_object_data **counter_data); + +int ustctl_create_counter_global_data(struct ustctl_daemon_counter *counter, + struct lttng_ust_object_data **counter_global_data); +int ustctl_create_counter_cpu_data(struct ustctl_daemon_counter *counter, int cpu, + struct lttng_ust_object_data **counter_cpu_data); + +/* + * Each counter data and counter cpu data created need to be destroyed + * before calling ustctl_destroy_counter(). 
+ */ +void ustctl_destroy_counter(struct ustctl_daemon_counter *counter); + +int ustctl_send_counter_data_to_ust(int sock, int parent_handle, + struct lttng_ust_object_data *counter_data); +int ustctl_send_counter_global_data_to_ust(int sock, + struct lttng_ust_object_data *counter_data, + struct lttng_ust_object_data *counter_global_data); +int ustctl_send_counter_cpu_data_to_ust(int sock, + struct lttng_ust_object_data *counter_data, + struct lttng_ust_object_data *counter_cpu_data); + +int ustctl_counter_read(struct ustctl_daemon_counter *counter, + const size_t *dimension_indexes, + int cpu, int64_t *value, + bool *overflow, bool *underflow); +int ustctl_counter_aggregate(struct ustctl_daemon_counter *counter, + const size_t *dimension_indexes, + int64_t *value, + bool *overflow, bool *underflow); +int ustctl_counter_clear(struct ustctl_daemon_counter *counter, + const size_t *dimension_indexes); + #endif /* _LTTNG_UST_CTL_H */ diff --git a/include/lttng/ust-events.h b/include/lttng/ust-events.h index 44b8d4c4..721b0aed 100644 --- a/include/lttng/ust-events.h +++ b/include/lttng/ust-events.h @@ -64,6 +64,7 @@ struct lttng_session; struct lttng_ust_lib_ring_buffer_ctx; struct lttng_ust_context_app; struct lttng_event_field; +struct lttng_event_notifier_group; /* * Data structures used by tracepoint event declarations, and by the @@ -616,6 +617,36 @@ struct lttng_channel { int tstate:1; /* Transient enable state */ }; +#define LTTNG_COUNTER_DIMENSION_MAX 8 + +struct lttng_counter_dimension { + uint64_t size; + uint64_t underflow_index; + uint64_t overflow_index; + uint8_t has_underflow; + uint8_t has_overflow; +}; + +struct lttng_counter_ops { + struct lib_counter *(*counter_create)(size_t nr_dimensions, + const struct lttng_counter_dimension *dimensions, + int64_t global_sum_step, + int global_counter_fd, + int nr_counter_cpu_fds, + const int *counter_cpu_fds, + bool is_daemon); + void (*counter_destroy)(struct lib_counter *counter); + int (*counter_add)(struct lib_counter *counter, + const size_t *dimension_indexes, int64_t v); + int (*counter_read)(struct lib_counter *counter, + const size_t *dimension_indexes, int cpu, + int64_t *value, bool *overflow, bool *underflow); + int (*counter_aggregate)(struct lib_counter *counter, + const size_t *dimension_indexes, int64_t *value, + bool *overflow, bool *underflow); + int (*counter_clear)(struct lib_counter *counter, const size_t *dimension_indexes); +}; + #define LTTNG_UST_STACK_CTX_PADDING 32 struct lttng_stack_ctx { struct lttng_event *event; @@ -677,6 +708,14 @@ struct lttng_session { struct lttng_ctx *ctx; /* contexts for filters. */ }; +struct lttng_counter { + int objd; + struct lttng_event_notifier_group *event_notifier_group; /* owner */ + struct lttng_counter_transport *transport; + struct lib_counter *counter; + struct lttng_counter_ops *ops; +}; + struct lttng_event_notifier_group { int objd; void *owner; @@ -686,6 +725,9 @@ struct lttng_event_notifier_group { struct cds_list_head event_notifiers_head; /* list of event_notifiers */ struct lttng_ust_event_notifier_ht event_notifiers_ht; /* hashtable of event_notifiers */ struct lttng_ctx *ctx; /* contexts for filters. 
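Putting the ustctl counter API above together, a hedged daemon-side sketch (error handling trimmed; the shm file descriptors, their count and the dimension size are assumptions supplied by the caller, not values from this patch):

/* Illustrative sketch only: one-dimension per-CPU + global counter on the
 * daemon side; aggregate one bucket, then destroy the counter. */
#include <stdio.h>
#include <stdbool.h>
#include <inttypes.h>
#include <lttng/ust-ctl.h>

static int report_first_bucket(int global_fd, int nr_cpu_fds, const int *cpu_fds)
{
	struct ustctl_counter_dimension dimensions[] = {
		{ .size = 128, .has_underflow = 0, .has_overflow = 0 },
	};
	const size_t index[] = { 0 };
	struct ustctl_daemon_counter *counter;
	int64_t value;
	bool overflow, underflow;
	int ret;

	counter = ustctl_create_counter(1, dimensions, 0, global_fd,
			nr_cpu_fds, cpu_fds,
			USTCTL_COUNTER_BITNESS_64,
			USTCTL_COUNTER_ARITHMETIC_MODULAR,
			USTCTL_COUNTER_ALLOC_PER_CPU | USTCTL_COUNTER_ALLOC_GLOBAL);
	if (!counter)
		return -1;
	ret = ustctl_counter_aggregate(counter, index, &value, &overflow, &underflow);
	if (!ret)
		printf("bucket[0] = %" PRId64 " (overflow=%d, underflow=%d)\n",
				value, (int) overflow, (int) underflow);
	ustctl_destroy_counter(counter);
	return ret;
}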
*/ + + struct lttng_counter *error_counter; + size_t error_counter_len; }; struct lttng_transport { @@ -695,6 +737,13 @@ struct lttng_transport { const struct lttng_ust_lib_ring_buffer_config *client_config; }; +struct lttng_counter_transport { + char *name; + struct cds_list_head node; + struct lttng_counter_ops ops; + const struct lib_counter_config *client_config; +}; + struct lttng_session *lttng_session_create(void); int lttng_session_enable(struct lttng_session *session); int lttng_session_disable(struct lttng_session *session); @@ -724,6 +773,13 @@ int lttng_attach_context(struct lttng_ust_context *context_param, void lttng_transport_register(struct lttng_transport *transport); void lttng_transport_unregister(struct lttng_transport *transport); +void lttng_counter_transport_register(struct lttng_counter_transport *transport); +void lttng_counter_transport_unregister(struct lttng_counter_transport *transport); + +struct lttng_counter *lttng_ust_counter_create( + const char *counter_transport_name, + size_t number_dimensions, const struct lttng_counter_dimension *dimensions); + void synchronize_trace(void); int lttng_probe_register(struct lttng_probe_desc *desc); @@ -810,6 +866,7 @@ extern const struct lttng_ust_client_lib_ring_buffer_client_cb *lttng_client_cal extern const struct lttng_ust_client_lib_ring_buffer_client_cb *lttng_client_callbacks_overwrite; struct lttng_transport *lttng_transport_find(const char *name); +struct lttng_counter_transport *lttng_counter_transport_find(const char *name); int lttng_probes_get_event_list(struct lttng_ust_tracepoint_list *list); void lttng_probes_prune_event_list(struct lttng_ust_tracepoint_list *list); diff --git a/include/ust-comm.h b/include/ust-comm.h index fbaef544..49a55d21 100644 --- a/include/ust-comm.h +++ b/include/ust-comm.h @@ -108,6 +108,9 @@ struct ustcomm_ust_msg { uint32_t reloc_offset; uint64_t seqnum; } LTTNG_PACKED capture; + struct lttng_ust_counter counter; + struct lttng_ust_counter_global counter_global; + struct lttng_ust_counter_cpu counter_cpu; char padding[USTCOMM_MSG_PADDING2]; } u; } LTTNG_PACKED; @@ -228,6 +231,11 @@ int ustcomm_recv_stream_from_sessiond(int sock, ssize_t ustcomm_recv_event_notifier_notif_fd_from_sessiond(int sock, int *event_notifier_notif_fd); +ssize_t ustcomm_recv_counter_from_sessiond(int sock, + void **counter_data, uint64_t len); +int ustcomm_recv_counter_shm_from_sessiond(int sock, + int *shm_fd); + /* * Returns 0 on success, negative error value on error. * Returns -EPIPE or -ECONNRESET if other end has hung up. 
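For orientation, a hedged sketch of how a counter transport could be wired into the new lttng_counter_transport registration hooks declared above; the client_counter_* callbacks, client_config and the transport name are placeholders, not symbols introduced by this patch:

/* Illustrative sketch only: hypothetical client callbacks and configuration. */
static struct lttng_counter_transport lttng_counter_transport_client = {
	.name = "counter-per-cpu-64-modular",
	.ops = {
		.counter_create = client_counter_create,
		.counter_destroy = client_counter_destroy,
		.counter_add = client_counter_add,
		.counter_read = client_counter_read,
		.counter_aggregate = client_counter_aggregate,
		.counter_clear = client_counter_clear,
	},
	.client_config = &client_config,
};

void lttng_ust_counter_client_init(void)
{
	lttng_counter_transport_register(&lttng_counter_transport_client);
}

void lttng_ust_counter_client_exit(void)
{
	lttng_counter_transport_unregister(&lttng_counter_transport_client);
}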
diff --git a/libcounter/Makefile.am b/libcounter/Makefile.am new file mode 100644 index 00000000..9d52b74c --- /dev/null +++ b/libcounter/Makefile.am @@ -0,0 +1,18 @@ +AM_CPPFLAGS = -I$(top_srcdir)/include -I$(top_builddir)/include +AM_CFLAGS += -fno-strict-aliasing + +noinst_LTLIBRARIES = libcounter.la + +libcounter_la_SOURCES = \ + counter.c smp.c smp.h shm.c shm.h shm_internal.h shm_types.h \ + counter-api.h counter.h counter-internal.h counter-types.h + +libcounter_la_LIBADD = \ + -lpthread \ + -lrt + +if HAVE_LIBNUMA +libcounter_la_LIBADD += -lnuma +endif + +libcounter_la_CFLAGS = -DUST_COMPONENT="libcounter" $(AM_CFLAGS) diff --git a/libcounter/counter-api.h b/libcounter/counter-api.h new file mode 100644 index 00000000..0a7c0ade --- /dev/null +++ b/libcounter/counter-api.h @@ -0,0 +1,296 @@ +/* + * counter/counter-api.h + * + * LTTng Counters API, requiring counter/config.h + * + * Copyright (C) 2020 Mathieu Desnoyers + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef _LTTNG_COUNTER_API_H +#define _LTTNG_COUNTER_API_H + +#include +#include +#include "counter.h" +#include "counter-internal.h" +#include +#include +#include +#include "../libringbuffer/getcpu.h" + +/* + * Using unsigned arithmetic because overflow is defined. 
+ */ +static inline int __lttng_counter_add(const struct lib_counter_config *config, + enum lib_counter_config_alloc alloc, + enum lib_counter_config_sync sync, + struct lib_counter *counter, + const size_t *dimension_indexes, int64_t v, + int64_t *remainder) +{ + size_t index; + bool overflow = false, underflow = false; + struct lib_counter_layout *layout; + int64_t move_sum = 0; + + if (caa_unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes))) + return -EOVERFLOW; + index = lttng_counter_get_index(config, counter, dimension_indexes); + + switch (alloc) { + case COUNTER_ALLOC_PER_CPU: + layout = &counter->percpu_counters[lttng_ust_get_cpu()]; + break; + case COUNTER_ALLOC_GLOBAL: + layout = &counter->global_counters; + break; + default: + return -EINVAL; + } + if (caa_unlikely(!layout->counters)) + return -ENODEV; + + switch (config->counter_size) { + case COUNTER_SIZE_8_BIT: + { + int8_t *int_p = (int8_t *) layout->counters + index; + int8_t old, n, res; + int8_t global_sum_step = counter->global_sum_step.s8; + + res = *int_p; + switch (sync) { + case COUNTER_SYNC_PER_CPU: + { + do { + move_sum = 0; + old = res; + n = (int8_t) ((uint8_t) old + (uint8_t) v); + if (caa_unlikely(n > (int8_t) global_sum_step)) + move_sum = (int8_t) global_sum_step / 2; + else if (caa_unlikely(n < -(int8_t) global_sum_step)) + move_sum = -((int8_t) global_sum_step / 2); + n -= move_sum; + res = uatomic_cmpxchg(int_p, old, n); + } while (old != res); + break; + } + case COUNTER_SYNC_GLOBAL: + { + do { + old = res; + n = (int8_t) ((uint8_t) old + (uint8_t) v); + res = uatomic_cmpxchg(int_p, old, n); + } while (old != res); + break; + } + } + if (v > 0 && (v >= UINT8_MAX || n < old)) + overflow = true; + else if (v < 0 && (v <= -UINT8_MAX || n > old)) + underflow = true; + break; + } + case COUNTER_SIZE_16_BIT: + { + int16_t *int_p = (int16_t *) layout->counters + index; + int16_t old, n, res; + int16_t global_sum_step = counter->global_sum_step.s16; + + res = *int_p; + switch (sync) { + case COUNTER_SYNC_PER_CPU: + { + do { + move_sum = 0; + old = res; + n = (int16_t) ((uint16_t) old + (uint16_t) v); + if (caa_unlikely(n > (int16_t) global_sum_step)) + move_sum = (int16_t) global_sum_step / 2; + else if (caa_unlikely(n < -(int16_t) global_sum_step)) + move_sum = -((int16_t) global_sum_step / 2); + n -= move_sum; + res = uatomic_cmpxchg(int_p, old, n); + } while (old != res); + break; + } + case COUNTER_SYNC_GLOBAL: + { + do { + old = res; + n = (int16_t) ((uint16_t) old + (uint16_t) v); + res = uatomic_cmpxchg(int_p, old, n); + } while (old != res); + break; + } + } + if (v > 0 && (v >= UINT16_MAX || n < old)) + overflow = true; + else if (v < 0 && (v <= -UINT16_MAX || n > old)) + underflow = true; + break; + } + case COUNTER_SIZE_32_BIT: + { + int32_t *int_p = (int32_t *) layout->counters + index; + int32_t old, n, res; + int32_t global_sum_step = counter->global_sum_step.s32; + + res = *int_p; + switch (sync) { + case COUNTER_SYNC_PER_CPU: + { + do { + move_sum = 0; + old = res; + n = (int32_t) ((uint32_t) old + (uint32_t) v); + if (caa_unlikely(n > (int32_t) global_sum_step)) + move_sum = (int32_t) global_sum_step / 2; + else if (caa_unlikely(n < -(int32_t) global_sum_step)) + move_sum = -((int32_t) global_sum_step / 2); + n -= move_sum; + res = uatomic_cmpxchg(int_p, old, n); + } while (old != res); + break; + } + case COUNTER_SYNC_GLOBAL: + { + do { + old = res; + n = (int32_t) ((uint32_t) old + (uint32_t) v); + res = uatomic_cmpxchg(int_p, old, n); + } while (old != res); + break; 
+ } + } + if (v > 0 && (v >= UINT32_MAX || n < old)) + overflow = true; + else if (v < 0 && (v <= -UINT32_MAX || n > old)) + underflow = true; + break; + } +#if CAA_BITS_PER_LONG == 64 + case COUNTER_SIZE_64_BIT: + { + int64_t *int_p = (int64_t *) layout->counters + index; + int64_t old, n, res; + int64_t global_sum_step = counter->global_sum_step.s64; + + res = *int_p; + switch (sync) { + case COUNTER_SYNC_PER_CPU: + { + do { + move_sum = 0; + old = res; + n = (int64_t) ((uint64_t) old + (uint64_t) v); + if (caa_unlikely(n > (int64_t) global_sum_step)) + move_sum = (int64_t) global_sum_step / 2; + else if (caa_unlikely(n < -(int64_t) global_sum_step)) + move_sum = -((int64_t) global_sum_step / 2); + n -= move_sum; + res = uatomic_cmpxchg(int_p, old, n); + } while (old != res); + break; + } + case COUNTER_SYNC_GLOBAL: + { + do { + old = res; + n = (int64_t) ((uint64_t) old + (uint64_t) v); + res = uatomic_cmpxchg(int_p, old, n); + } while (old != res); + break; + } + } + if (v > 0 && n < old) + overflow = true; + else if (v < 0 && n > old) + underflow = true; + break; + } +#endif + default: + return -EINVAL; + } + if (caa_unlikely(overflow && !lttng_bitmap_test_bit(index, layout->overflow_bitmap))) + lttng_bitmap_set_bit(index, layout->overflow_bitmap); + else if (caa_unlikely(underflow && !lttng_bitmap_test_bit(index, layout->underflow_bitmap))) + lttng_bitmap_set_bit(index, layout->underflow_bitmap); + if (remainder) + *remainder = move_sum; + return 0; +} + +static inline int __lttng_counter_add_percpu(const struct lib_counter_config *config, + struct lib_counter *counter, + const size_t *dimension_indexes, int64_t v) +{ + int64_t move_sum; + int ret; + + ret = __lttng_counter_add(config, COUNTER_ALLOC_PER_CPU, config->sync, + counter, dimension_indexes, v, &move_sum); + if (caa_unlikely(ret)) + return ret; + if (caa_unlikely(move_sum)) + return __lttng_counter_add(config, COUNTER_ALLOC_GLOBAL, COUNTER_SYNC_GLOBAL, + counter, dimension_indexes, move_sum, NULL); + return 0; +} + +static inline int __lttng_counter_add_global(const struct lib_counter_config *config, + struct lib_counter *counter, + const size_t *dimension_indexes, int64_t v) +{ + return __lttng_counter_add(config, COUNTER_ALLOC_GLOBAL, config->sync, counter, + dimension_indexes, v, NULL); +} + +static inline int lttng_counter_add(const struct lib_counter_config *config, + struct lib_counter *counter, + const size_t *dimension_indexes, int64_t v) +{ + switch (config->alloc) { + case COUNTER_ALLOC_PER_CPU: /* Fallthrough */ + case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL: + return __lttng_counter_add_percpu(config, counter, dimension_indexes, v); + case COUNTER_ALLOC_GLOBAL: + return __lttng_counter_add_global(config, counter, dimension_indexes, v); + default: + return -EINVAL; + } +} + +static inline int lttng_counter_inc(const struct lib_counter_config *config, + struct lib_counter *counter, + const size_t *dimension_indexes) +{ + return lttng_counter_add(config, counter, dimension_indexes, 1); +} + +static inline int lttng_counter_dec(const struct lib_counter_config *config, + struct lib_counter *counter, + const size_t *dimension_indexes) +{ + return lttng_counter_add(config, counter, dimension_indexes, -1); +} + +#endif /* _LTTNG_COUNTER_API_H */ diff --git a/libcounter/counter-internal.h b/libcounter/counter-internal.h new file mode 100644 index 00000000..38cb089b --- /dev/null +++ b/libcounter/counter-internal.h @@ -0,0 +1,65 @@ +/* + * counter/counter-internal.h + * + * LTTng Counters Internal Header + * + * 
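To make the per-CPU/global split implemented by __lttng_counter_add() above concrete, a short worked example with hypothetical values (8-bit counters, global_sum_step = 64, per-CPU sync):

	old = 40, v = 30
	n = old + v = 70; 70 > 64, so move_sum = 64 / 2 = 32
	n -= move_sum; the per-CPU slot is stored back as 38 via cmpxchg
	__lttng_counter_add_percpu() then forwards move_sum = 32 to the
	global layout with COUNTER_SYNC_GLOBAL

so the per-CPU value stays bounded around the sum step while the global counter accumulates the amount moved out of the CPU-local slots.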
Copyright (C) 2020 Mathieu Desnoyers + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef _LTTNG_COUNTER_INTERNAL_H +#define _LTTNG_COUNTER_INTERNAL_H + +#include +#include +#include +#include "counter-types.h" + +static inline int lttng_counter_validate_indexes(const struct lib_counter_config *config, + struct lib_counter *counter, + const size_t *dimension_indexes) +{ + size_t nr_dimensions = counter->nr_dimensions, i; + + for (i = 0; i < nr_dimensions; i++) { + if (caa_unlikely(dimension_indexes[i] >= counter->dimensions[i].max_nr_elem)) + return -EOVERFLOW; + } + return 0; +} + + +static inline size_t lttng_counter_get_index(const struct lib_counter_config *config, + struct lib_counter *counter, + const size_t *dimension_indexes) +{ + size_t nr_dimensions = counter->nr_dimensions, i; + size_t index = 0; + + for (i = 0; i < nr_dimensions; i++) { + struct lib_counter_dimension *dimension = &counter->dimensions[i]; + const size_t *dimension_index = &dimension_indexes[i]; + + index += *dimension_index * dimension->stride; + } + return index; +} + +#endif /* _LTTNG_COUNTER_INTERNAL_H */ diff --git a/libcounter/counter-types.h b/libcounter/counter-types.h new file mode 100644 index 00000000..b193fb3a --- /dev/null +++ b/libcounter/counter-types.h @@ -0,0 +1,93 @@ +/* + * counter/counter-types.h + * + * LTTng Counters Types + * + * Copyright (C) 2020 Mathieu Desnoyers + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef _LTTNG_COUNTER_TYPES_H +#define _LTTNG_COUNTER_TYPES_H + +#include +#include +#include +#include +#include +#include +#include "shm_types.h" + +struct lib_counter_dimension { + /* + * Max. number of indexable elements. + */ + size_t max_nr_elem; + /* + * The stride for a dimension is the multiplication factor which + * should be applied to its index to take into account other + * dimensions nested inside. + */ + size_t stride; +}; + +struct lib_counter_layout { + void *counters; + unsigned long *overflow_bitmap; + unsigned long *underflow_bitmap; + int shm_fd; + size_t shm_len; + struct lttng_counter_shm_handle handle; +}; + +enum lib_counter_arithmetic { + LIB_COUNTER_ARITHMETIC_MODULAR, + LIB_COUNTER_ARITHMETIC_SATURATE, +}; + +struct lib_counter { + size_t nr_dimensions; + int64_t allocated_elem; + struct lib_counter_dimension *dimensions; + enum lib_counter_arithmetic arithmetic; + union { + struct { + int32_t max, min; + } limits_32_bit; + struct { + int64_t max, min; + } limits_64_bit; + } saturation; + union { + int8_t s8; + int16_t s16; + int32_t s32; + int64_t s64; + } global_sum_step; /* 0 if unused */ + struct lib_counter_config config; + + struct lib_counter_layout global_counters; + struct lib_counter_layout *percpu_counters; + + bool is_daemon; + struct lttng_counter_shm_object_table *object_table; +}; + +#endif /* _LTTNG_COUNTER_TYPES_H */ diff --git a/libcounter/counter.c b/libcounter/counter.c new file mode 100644 index 00000000..4010d5dd --- /dev/null +++ b/libcounter/counter.c @@ -0,0 +1,564 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only) + * + * counter.c + * + * Copyright (C) 2020 Mathieu Desnoyers + */ + +#define _GNU_SOURCE +#include +#include "counter.h" +#include "counter-internal.h" +#include +#include +#include +#include +#include +#include +#include "smp.h" +#include "shm.h" + +static size_t lttng_counter_get_dimension_nr_elements(struct lib_counter_dimension *dimension) +{ + return dimension->max_nr_elem; +} + +static int lttng_counter_init_stride(const struct lib_counter_config *config, + struct lib_counter *counter) +{ + size_t nr_dimensions = counter->nr_dimensions; + size_t stride = 1; + ssize_t i; + + for (i = nr_dimensions - 1; i >= 0; i--) { + struct lib_counter_dimension *dimension = &counter->dimensions[i]; + size_t nr_elem; + + nr_elem = lttng_counter_get_dimension_nr_elements(dimension); + dimension->stride = stride; + /* nr_elem should be minimum 1 for each dimension. */ + if (!nr_elem) + return -EINVAL; + stride *= nr_elem; + if (stride > SIZE_MAX / nr_elem) + return -EINVAL; + } + return 0; +} + +static int lttng_counter_layout_init(struct lib_counter *counter, int cpu, int shm_fd) +{ + struct lib_counter_layout *layout; + size_t counter_size; + size_t nr_elem = counter->allocated_elem; + size_t shm_length = 0, counters_offset, overflow_offset, underflow_offset; + struct lttng_counter_shm_object *shm_object; + + if (shm_fd < 0) + return 0; /* Skip, will be populated later. 
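As a concrete illustration of the dimension layout (sizes are hypothetical): for two dimensions with max_nr_elem = { 4, 8 }, lttng_counter_init_stride() walks the dimensions innermost-first, so dimension 1 gets stride 1 and dimension 0 gets stride 8, for 4 * 8 = 32 allocated elements in total; lttng_counter_get_index() then maps the indexes { 2, 5 } to the flat slot 2 * 8 + 5 * 1 = 21.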
*/ + + if (cpu == -1) + layout = &counter->global_counters; + else + layout = &counter->percpu_counters[cpu]; + switch (counter->config.counter_size) { + case COUNTER_SIZE_8_BIT: + case COUNTER_SIZE_16_BIT: + case COUNTER_SIZE_32_BIT: + case COUNTER_SIZE_64_BIT: + counter_size = (size_t) counter->config.counter_size; + break; + default: + return -EINVAL; + } + layout->shm_fd = shm_fd; + counters_offset = shm_length; + shm_length += counter_size * nr_elem; + overflow_offset = shm_length; + shm_length += ALIGN(nr_elem, 8) / 8; + underflow_offset = shm_length; + shm_length += ALIGN(nr_elem, 8) / 8; + layout->shm_len = shm_length; + if (counter->is_daemon) { + /* Allocate and clear shared memory. */ + shm_object = lttng_counter_shm_object_table_alloc(counter->object_table, + shm_length, LTTNG_COUNTER_SHM_OBJECT_SHM, shm_fd, cpu); + if (!shm_object) + return -ENOMEM; + } else { + /* Map pre-existing shared memory. */ + shm_object = lttng_counter_shm_object_table_append_shm(counter->object_table, + shm_fd, shm_length); + if (!shm_object) + return -ENOMEM; + } + layout->counters = shm_object->memory_map + counters_offset; + layout->overflow_bitmap = (unsigned long *)(shm_object->memory_map + overflow_offset); + layout->underflow_bitmap = (unsigned long *)(shm_object->memory_map + underflow_offset); + return 0; +} + +int lttng_counter_set_global_shm(struct lib_counter *counter, int fd) +{ + struct lib_counter_config *config = &counter->config; + struct lib_counter_layout *layout; + + if (!(config->alloc & COUNTER_ALLOC_GLOBAL)) + return -EINVAL; + layout = &counter->global_counters; + if (layout->shm_fd >= 0) + return -EBUSY; + return lttng_counter_layout_init(counter, -1, fd); +} + +int lttng_counter_set_cpu_shm(struct lib_counter *counter, int cpu, int fd) +{ + struct lib_counter_config *config = &counter->config; + struct lib_counter_layout *layout; + + if (cpu < 0 || cpu >= lttng_counter_num_possible_cpus()) + return -EINVAL; + + if (!(config->alloc & COUNTER_ALLOC_PER_CPU)) + return -EINVAL; + layout = &counter->percpu_counters[cpu]; + if (layout->shm_fd >= 0) + return -EBUSY; + return lttng_counter_layout_init(counter, cpu, fd); +} + +static +int lttng_counter_set_global_sum_step(struct lib_counter *counter, + int64_t global_sum_step) +{ + if (global_sum_step < 0) + return -EINVAL; + + switch (counter->config.counter_size) { + case COUNTER_SIZE_8_BIT: + if (global_sum_step > INT8_MAX) + return -EINVAL; + counter->global_sum_step.s8 = (int8_t) global_sum_step; + break; + case COUNTER_SIZE_16_BIT: + if (global_sum_step > INT16_MAX) + return -EINVAL; + counter->global_sum_step.s16 = (int16_t) global_sum_step; + break; + case COUNTER_SIZE_32_BIT: + if (global_sum_step > INT32_MAX) + return -EINVAL; + counter->global_sum_step.s32 = (int32_t) global_sum_step; + break; + case COUNTER_SIZE_64_BIT: + counter->global_sum_step.s64 = global_sum_step; + break; + default: + return -EINVAL; + } + + return 0; +} + +static +int validate_args(const struct lib_counter_config *config, + size_t nr_dimensions, + const size_t *max_nr_elem, + int64_t global_sum_step, + int global_counter_fd, + int nr_counter_cpu_fds, + const int *counter_cpu_fds) +{ + int nr_cpus = lttng_counter_num_possible_cpus(); + + if (CAA_BITS_PER_LONG != 64 && config->counter_size == COUNTER_SIZE_64_BIT) { + WARN_ON_ONCE(1); + return -1; + } + if (!max_nr_elem) + return -1; + /* + * global sum step is only useful with allocating both per-cpu + * and global counters. 
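For scale, the per-layout shared memory mapping computed by lttng_counter_layout_init() above works out as follows for hypothetical parameters (COUNTER_SIZE_64_BIT, 32 allocated elements): 8 * 32 = 256 bytes of counters at offset 0, the overflow bitmap (ALIGN(32, 8) / 8 = 4 bytes) at offset 256, the underflow bitmap at offset 260, for a total shm_len of 264 bytes.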
+ */ + if (global_sum_step && (!(config->alloc & COUNTER_ALLOC_GLOBAL) || + !(config->alloc & COUNTER_ALLOC_PER_CPU))) + return -1; + if (!(config->alloc & COUNTER_ALLOC_GLOBAL) && global_counter_fd >= 0) + return -1; + if (!(config->alloc & COUNTER_ALLOC_PER_CPU) && counter_cpu_fds) + return -1; + if (!(config->alloc & COUNTER_ALLOC_PER_CPU) && counter_cpu_fds >= 0) + return -1; + if (counter_cpu_fds && nr_cpus != nr_counter_cpu_fds) + return -1; + return 0; +} + +struct lib_counter *lttng_counter_create(const struct lib_counter_config *config, + size_t nr_dimensions, + const size_t *max_nr_elem, + int64_t global_sum_step, + int global_counter_fd, + int nr_counter_cpu_fds, + const int *counter_cpu_fds, + bool is_daemon) +{ + struct lib_counter *counter; + size_t dimension, nr_elem = 1; + int cpu, ret; + int nr_handles = 0; + int nr_cpus = lttng_counter_num_possible_cpus(); + + if (validate_args(config, nr_dimensions, max_nr_elem, + global_sum_step, global_counter_fd, nr_counter_cpu_fds, + counter_cpu_fds)) + return NULL; + counter = zmalloc(sizeof(struct lib_counter)); + if (!counter) + return NULL; + counter->global_counters.shm_fd = -1; + counter->config = *config; + counter->is_daemon = is_daemon; + if (lttng_counter_set_global_sum_step(counter, global_sum_step)) + goto error_sum_step; + counter->nr_dimensions = nr_dimensions; + counter->dimensions = zmalloc(nr_dimensions * sizeof(*counter->dimensions)); + if (!counter->dimensions) + goto error_dimensions; + for (dimension = 0; dimension < nr_dimensions; dimension++) + counter->dimensions[dimension].max_nr_elem = max_nr_elem[dimension]; + if (config->alloc & COUNTER_ALLOC_PER_CPU) { + counter->percpu_counters = zmalloc(sizeof(struct lib_counter_layout) * nr_cpus); + if (!counter->percpu_counters) + goto error_alloc_percpu; + lttng_counter_for_each_possible_cpu(cpu) + counter->percpu_counters[cpu].shm_fd = -1; + } + + if (lttng_counter_init_stride(config, counter)) + goto error_init_stride; + //TODO saturation values. + for (dimension = 0; dimension < counter->nr_dimensions; dimension++) + nr_elem *= lttng_counter_get_dimension_nr_elements(&counter->dimensions[dimension]); + counter->allocated_elem = nr_elem; + + if (config->alloc & COUNTER_ALLOC_GLOBAL) + nr_handles++; + if (config->alloc & COUNTER_ALLOC_PER_CPU) + nr_handles += nr_cpus; + /* Allocate table for global and per-cpu counters. 
*/ + counter->object_table = lttng_counter_shm_object_table_create(nr_handles); + if (!counter->object_table) + goto error_alloc_object_table; + + if (config->alloc & COUNTER_ALLOC_GLOBAL) { + ret = lttng_counter_layout_init(counter, -1, global_counter_fd); /* global */ + if (ret) + goto layout_init_error; + } + if ((config->alloc & COUNTER_ALLOC_PER_CPU) && counter_cpu_fds) { + lttng_counter_for_each_possible_cpu(cpu) { + ret = lttng_counter_layout_init(counter, cpu, counter_cpu_fds[cpu]); + if (ret) + goto layout_init_error; + } + } + return counter; + +layout_init_error: + lttng_counter_shm_object_table_destroy(counter->object_table, is_daemon); +error_alloc_object_table: +error_init_stride: + free(counter->percpu_counters); +error_alloc_percpu: + free(counter->dimensions); +error_dimensions: +error_sum_step: + free(counter); + return NULL; +} + +void lttng_counter_destroy(struct lib_counter *counter) +{ + struct lib_counter_config *config = &counter->config; + + if (config->alloc & COUNTER_ALLOC_PER_CPU) + free(counter->percpu_counters); + lttng_counter_shm_object_table_destroy(counter->object_table, counter->is_daemon); + free(counter->dimensions); + free(counter); +} + +int lttng_counter_get_global_shm(struct lib_counter *counter, int *fd, size_t *len) +{ + int shm_fd; + + shm_fd = counter->global_counters.shm_fd; + if (shm_fd < 0) + return -1; + *fd = shm_fd; + *len = counter->global_counters.shm_len; + return 0; +} + +int lttng_counter_get_cpu_shm(struct lib_counter *counter, int cpu, int *fd, size_t *len) +{ + struct lib_counter_layout *layout; + int shm_fd; + + if (cpu >= lttng_counter_num_possible_cpus()) + return -1; + layout = &counter->percpu_counters[cpu]; + shm_fd = layout->shm_fd; + if (shm_fd < 0) + return -1; + *fd = shm_fd; + *len = layout->shm_len; + return 0; +} + +int lttng_counter_read(const struct lib_counter_config *config, + struct lib_counter *counter, + const size_t *dimension_indexes, + int cpu, int64_t *value, bool *overflow, + bool *underflow) +{ + size_t index; + struct lib_counter_layout *layout; + + if (caa_unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes))) + return -EOVERFLOW; + index = lttng_counter_get_index(config, counter, dimension_indexes); + + switch (config->alloc) { + case COUNTER_ALLOC_PER_CPU: + if (cpu < 0 || cpu >= lttng_counter_num_possible_cpus()) + return -EINVAL; + layout = &counter->percpu_counters[cpu]; + break; + case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL: + if (cpu >= 0) { + if (cpu >= lttng_counter_num_possible_cpus()) + return -EINVAL; + layout = &counter->percpu_counters[cpu]; + } else { + layout = &counter->global_counters; + } + break; + case COUNTER_ALLOC_GLOBAL: + if (cpu >= 0) + return -EINVAL; + layout = &counter->global_counters; + break; + default: + return -EINVAL; + } + if (caa_unlikely(!layout->counters)) + return -ENODEV; + + switch (config->counter_size) { + case COUNTER_SIZE_8_BIT: + { + int8_t *int_p = (int8_t *) layout->counters + index; + *value = (int64_t) CMM_LOAD_SHARED(*int_p); + break; + } + case COUNTER_SIZE_16_BIT: + { + int16_t *int_p = (int16_t *) layout->counters + index; + *value = (int64_t) CMM_LOAD_SHARED(*int_p); + break; + } + case COUNTER_SIZE_32_BIT: + { + int32_t *int_p = (int32_t *) layout->counters + index; + *value = (int64_t) CMM_LOAD_SHARED(*int_p); + break; + } +#if CAA_BITS_PER_LONG == 64 + case COUNTER_SIZE_64_BIT: + { + int64_t *int_p = (int64_t *) layout->counters + index; + *value = CMM_LOAD_SHARED(*int_p); + break; + } +#endif + default: + return 
-EINVAL; + } + *overflow = lttng_bitmap_test_bit(index, layout->overflow_bitmap); + *underflow = lttng_bitmap_test_bit(index, layout->underflow_bitmap); + return 0; +} + +int lttng_counter_aggregate(const struct lib_counter_config *config, + struct lib_counter *counter, + const size_t *dimension_indexes, + int64_t *value, bool *overflow, + bool *underflow) +{ + int cpu, ret; + int64_t v, sum = 0; + bool of, uf; + + *overflow = false; + *underflow = false; + + switch (config->alloc) { + case COUNTER_ALLOC_GLOBAL: /* Fallthrough */ + case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL: + /* Read global counter. */ + ret = lttng_counter_read(config, counter, dimension_indexes, + -1, &v, &of, &uf); + if (ret < 0) + return ret; + sum += v; + *overflow |= of; + *underflow |= uf; + break; + case COUNTER_ALLOC_PER_CPU: + break; + default: + return -EINVAL; + } + + switch (config->alloc) { + case COUNTER_ALLOC_GLOBAL: + break; + case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL: /* Fallthrough */ + case COUNTER_ALLOC_PER_CPU: + lttng_counter_for_each_possible_cpu(cpu) { + int64_t old = sum; + + ret = lttng_counter_read(config, counter, dimension_indexes, + cpu, &v, &of, &uf); + if (ret < 0) + return ret; + *overflow |= of; + *underflow |= uf; + /* Overflow is defined on unsigned types. */ + sum = (int64_t) ((uint64_t) old + (uint64_t) v); + if (v > 0 && sum < old) + *overflow = true; + else if (v < 0 && sum > old) + *underflow = true; + } + break; + default: + return -EINVAL; + } + *value = sum; + return 0; +} + +static +int lttng_counter_clear_cpu(const struct lib_counter_config *config, + struct lib_counter *counter, + const size_t *dimension_indexes, + int cpu) +{ + size_t index; + struct lib_counter_layout *layout; + + if (caa_unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes))) + return -EOVERFLOW; + index = lttng_counter_get_index(config, counter, dimension_indexes); + + switch (config->alloc) { + case COUNTER_ALLOC_PER_CPU: + if (cpu < 0 || cpu >= lttng_counter_num_possible_cpus()) + return -EINVAL; + layout = &counter->percpu_counters[cpu]; + break; + case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL: + if (cpu >= 0) { + if (cpu >= lttng_counter_num_possible_cpus()) + return -EINVAL; + layout = &counter->percpu_counters[cpu]; + } else { + layout = &counter->global_counters; + } + break; + case COUNTER_ALLOC_GLOBAL: + if (cpu >= 0) + return -EINVAL; + layout = &counter->global_counters; + break; + default: + return -EINVAL; + } + if (caa_unlikely(!layout->counters)) + return -ENODEV; + + switch (config->counter_size) { + case COUNTER_SIZE_8_BIT: + { + int8_t *int_p = (int8_t *) layout->counters + index; + CMM_STORE_SHARED(*int_p, 0); + break; + } + case COUNTER_SIZE_16_BIT: + { + int16_t *int_p = (int16_t *) layout->counters + index; + CMM_STORE_SHARED(*int_p, 0); + break; + } + case COUNTER_SIZE_32_BIT: + { + int32_t *int_p = (int32_t *) layout->counters + index; + CMM_STORE_SHARED(*int_p, 0); + break; + } +#if CAA_BITS_PER_LONG == 64 + case COUNTER_SIZE_64_BIT: + { + int64_t *int_p = (int64_t *) layout->counters + index; + CMM_STORE_SHARED(*int_p, 0); + break; + } +#endif + default: + return -EINVAL; + } + lttng_bitmap_clear_bit(index, layout->overflow_bitmap); + lttng_bitmap_clear_bit(index, layout->underflow_bitmap); + return 0; +} + +int lttng_counter_clear(const struct lib_counter_config *config, + struct lib_counter *counter, + const size_t *dimension_indexes) +{ + int cpu, ret; + + switch (config->alloc) { + case COUNTER_ALLOC_PER_CPU: + break; + case 
COUNTER_ALLOC_GLOBAL: /* Fallthrough */ + case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL: + /* Clear global counter. */ + ret = lttng_counter_clear_cpu(config, counter, dimension_indexes, -1); + if (ret < 0) + return ret; + break; + default: + return -EINVAL; + } + + switch (config->alloc) { + case COUNTER_ALLOC_PER_CPU: /* Fallthrough */ + case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL: + lttng_counter_for_each_possible_cpu(cpu) { + ret = lttng_counter_clear_cpu(config, counter, dimension_indexes, cpu); + if (ret < 0) + return ret; + } + break; + case COUNTER_ALLOC_GLOBAL: + break; + default: + return -EINVAL; + } + return 0; +} diff --git a/libcounter/counter.h b/libcounter/counter.h new file mode 100644 index 00000000..54f83009 --- /dev/null +++ b/libcounter/counter.h @@ -0,0 +1,65 @@ +/* + * lttng/counter.h + * + * LTTng Counters API + * + * Copyright (C) 2020 Mathieu Desnoyers + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef _LTTNG_COUNTER_H +#define _LTTNG_COUNTER_H + +#include +#include +#include "counter-types.h" + +/* max_nr_elem is for each dimension. 
*/ +struct lib_counter *lttng_counter_create(const struct lib_counter_config *config, + size_t nr_dimensions, + const size_t *max_nr_elem, + int64_t global_sum_step, + int global_counter_fd, + int nr_counter_cpu_fds, + const int *counter_cpu_fds, + bool is_daemon); +void lttng_counter_destroy(struct lib_counter *counter); + +int lttng_counter_set_global_shm(struct lib_counter *counter, int fd); +int lttng_counter_set_cpu_shm(struct lib_counter *counter, int cpu, int fd); + +int lttng_counter_get_global_shm(struct lib_counter *counter, int *fd, size_t *len); +int lttng_counter_get_cpu_shm(struct lib_counter *counter, int cpu, int *fd, size_t *len); + +int lttng_counter_read(const struct lib_counter_config *config, + struct lib_counter *counter, + const size_t *dimension_indexes, + int cpu, int64_t *value, + bool *overflow, bool *underflow); +int lttng_counter_aggregate(const struct lib_counter_config *config, + struct lib_counter *counter, + const size_t *dimension_indexes, + int64_t *value, + bool *overflow, bool *underflow); +int lttng_counter_clear(const struct lib_counter_config *config, + struct lib_counter *counter, + const size_t *dimension_indexes); + +#endif /* _LTTNG_COUNTER_H */ diff --git a/libcounter/shm.c b/libcounter/shm.c new file mode 100644 index 00000000..a2e1f819 --- /dev/null +++ b/libcounter/shm.c @@ -0,0 +1,387 @@ +/* + * libcounter/shm.c + * + * Copyright (C) 2005-2012 Mathieu Desnoyers + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; only + * version 2.1 of the License. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#define _LGPL_SOURCE +#include +#include "shm.h" +#include +#include +#include +#include +#include /* For mode constants */ +#include /* For O_* constants */ +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef HAVE_LIBNUMA +#include +#include +#endif +#include +#include +#include "../libringbuffer/mmap.h" + +/* + * Ensure we have the required amount of space available by writing 0 + * into the entire buffer. Not doing so can trigger SIGBUS when going + * beyond the available shm space. 
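Putting the counter.h declarations above together, a hedged tracer-side sketch in which the shared memory file descriptors are assumed to arrive from the session daemon and client_config is the hypothetical configuration sketched earlier:

/* Illustrative sketch only: create the counter first, populate its layouts
 * from received shm fds, then update it on the hot path. */
#include <stddef.h>
#include "counter.h"
#include "counter-api.h"

static struct lib_counter *setup_counter(int global_shm_fd, int nr_cpus,
		const int *cpu_shm_fds)
{
	const size_t max_nr_elem[] = { 128 };
	struct lib_counter *counter;
	int cpu;

	counter = lttng_counter_create(&client_config, 1, max_nr_elem,
			0, -1, 0, NULL, false);	/* layouts populated below */
	if (!counter)
		return NULL;
	if (lttng_counter_set_global_shm(counter, global_shm_fd))
		goto error;
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		if (lttng_counter_set_cpu_shm(counter, cpu, cpu_shm_fds[cpu]))
			goto error;
	}
	return counter;
error:
	lttng_counter_destroy(counter);
	return NULL;
}

static void hit(struct lib_counter *counter)
{
	const size_t index[] = { 7 };	/* hypothetical bucket */

	(void) lttng_counter_add(&client_config, counter, index, 1);
}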
+ */ +static +int zero_file(int fd, size_t len) +{ + ssize_t retlen; + size_t written = 0; + char *zeropage; + long pagelen; + int ret; + + pagelen = sysconf(_SC_PAGESIZE); + if (pagelen < 0) + return (int) pagelen; + zeropage = calloc(pagelen, 1); + if (!zeropage) + return -ENOMEM; + + while (len > written) { + do { + retlen = write(fd, zeropage, + min_t(size_t, pagelen, len - written)); + } while (retlen == -1UL && errno == EINTR); + if (retlen < 0) { + ret = (int) retlen; + goto error; + } + written += retlen; + } + ret = 0; +error: + free(zeropage); + return ret; +} + +struct lttng_counter_shm_object_table *lttng_counter_shm_object_table_create(size_t max_nb_obj) +{ + struct lttng_counter_shm_object_table *table; + + table = zmalloc(sizeof(struct lttng_counter_shm_object_table) + + max_nb_obj * sizeof(table->objects[0])); + if (!table) + return NULL; + table->size = max_nb_obj; + return table; +} + +static +struct lttng_counter_shm_object *_lttng_counter_shm_object_table_alloc_shm(struct lttng_counter_shm_object_table *table, + size_t memory_map_size, + int cpu_fd) +{ + int shmfd, ret; + struct lttng_counter_shm_object *obj; + char *memory_map; + + if (cpu_fd < 0) + return NULL; + if (table->allocated_len >= table->size) + return NULL; + obj = &table->objects[table->allocated_len]; + + /* create shm */ + + shmfd = cpu_fd; + ret = zero_file(shmfd, memory_map_size); + if (ret) { + PERROR("zero_file"); + goto error_zero_file; + } + ret = ftruncate(shmfd, memory_map_size); + if (ret) { + PERROR("ftruncate"); + goto error_ftruncate; + } + /* + * Also ensure the file metadata is synced with the storage by using + * fsync(2). + */ + ret = fsync(shmfd); + if (ret) { + PERROR("fsync"); + goto error_fsync; + } + obj->shm_fd_ownership = 0; + obj->shm_fd = shmfd; + + /* memory_map: mmap */ + memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE, + MAP_SHARED | LTTNG_MAP_POPULATE, shmfd, 0); + if (memory_map == MAP_FAILED) { + PERROR("mmap"); + goto error_mmap; + } + obj->type = LTTNG_COUNTER_SHM_OBJECT_SHM; + obj->memory_map = memory_map; + obj->memory_map_size = memory_map_size; + obj->allocated_len = 0; + obj->index = table->allocated_len++; + + return obj; + +error_mmap: +error_fsync: +error_ftruncate: +error_zero_file: + return NULL; +} + +static +struct lttng_counter_shm_object *_lttng_counter_shm_object_table_alloc_mem(struct lttng_counter_shm_object_table *table, + size_t memory_map_size) +{ + struct lttng_counter_shm_object *obj; + void *memory_map; + + if (table->allocated_len >= table->size) + return NULL; + obj = &table->objects[table->allocated_len]; + + memory_map = zmalloc(memory_map_size); + if (!memory_map) + goto alloc_error; + + /* no shm_fd */ + obj->shm_fd = -1; + obj->shm_fd_ownership = 0; + + obj->type = LTTNG_COUNTER_SHM_OBJECT_MEM; + obj->memory_map = memory_map; + obj->memory_map_size = memory_map_size; + obj->allocated_len = 0; + obj->index = table->allocated_len++; + + return obj; + +alloc_error: + return NULL; +} + +/* + * libnuma prints errors on the console even for numa_available(). + * Work-around this limitation by using get_mempolicy() directly to + * check whether the kernel supports mempolicy. 
+ */ +#ifdef HAVE_LIBNUMA +static bool lttng_is_numa_available(void) +{ + int ret; + + ret = get_mempolicy(NULL, NULL, 0, NULL, 0); + if (ret && errno == ENOSYS) { + return false; + } + return numa_available() > 0; +} +#endif + +struct lttng_counter_shm_object *lttng_counter_shm_object_table_alloc(struct lttng_counter_shm_object_table *table, + size_t memory_map_size, + enum lttng_counter_shm_object_type type, + int cpu_fd, + int cpu) +{ + struct lttng_counter_shm_object *shm_object; +#ifdef HAVE_LIBNUMA + int oldnode = 0, node; + bool numa_avail; + + numa_avail = lttng_is_numa_available(); + if (numa_avail) { + oldnode = numa_preferred(); + if (cpu >= 0) { + node = numa_node_of_cpu(cpu); + if (node >= 0) + numa_set_preferred(node); + } + if (cpu < 0 || node < 0) + numa_set_localalloc(); + } +#endif /* HAVE_LIBNUMA */ + switch (type) { + case LTTNG_COUNTER_SHM_OBJECT_SHM: + shm_object = _lttng_counter_shm_object_table_alloc_shm(table, memory_map_size, + cpu_fd); + break; + case LTTNG_COUNTER_SHM_OBJECT_MEM: + shm_object = _lttng_counter_shm_object_table_alloc_mem(table, memory_map_size); + break; + default: + assert(0); + } +#ifdef HAVE_LIBNUMA + if (numa_avail) + numa_set_preferred(oldnode); +#endif /* HAVE_LIBNUMA */ + return shm_object; +} + +struct lttng_counter_shm_object *lttng_counter_shm_object_table_append_shm(struct lttng_counter_shm_object_table *table, + int shm_fd, + size_t memory_map_size) +{ + struct lttng_counter_shm_object *obj; + char *memory_map; + + if (table->allocated_len >= table->size) + return NULL; + + obj = &table->objects[table->allocated_len]; + + obj->shm_fd = shm_fd; + obj->shm_fd_ownership = 1; + + /* memory_map: mmap */ + memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE, + MAP_SHARED | LTTNG_MAP_POPULATE, shm_fd, 0); + if (memory_map == MAP_FAILED) { + PERROR("mmap"); + goto error_mmap; + } + obj->type = LTTNG_COUNTER_SHM_OBJECT_SHM; + obj->memory_map = memory_map; + obj->memory_map_size = memory_map_size; + obj->allocated_len = memory_map_size; + obj->index = table->allocated_len++; + + return obj; + +error_mmap: + return NULL; +} + +/* + * Passing ownership of mem to object. + */ +struct lttng_counter_shm_object *lttng_counter_shm_object_table_append_mem(struct lttng_counter_shm_object_table *table, + void *mem, size_t memory_map_size) +{ + struct lttng_counter_shm_object *obj; + + if (table->allocated_len >= table->size) + return NULL; + obj = &table->objects[table->allocated_len]; + + obj->shm_fd = -1; + obj->shm_fd_ownership = 0; + + obj->type = LTTNG_COUNTER_SHM_OBJECT_MEM; + obj->memory_map = mem; + obj->memory_map_size = memory_map_size; + obj->allocated_len = memory_map_size; + obj->index = table->allocated_len++; + + return obj; + + return NULL; +} + +static +void lttng_counter_shmp_object_destroy(struct lttng_counter_shm_object *obj, int consumer) +{ + switch (obj->type) { + case LTTNG_COUNTER_SHM_OBJECT_SHM: + { + int ret; + + ret = munmap(obj->memory_map, obj->memory_map_size); + if (ret) { + PERROR("umnmap"); + assert(0); + } + + if (obj->shm_fd_ownership) { + /* Delete FDs only if called from app (not consumer). 
*/ + if (!consumer) { + lttng_ust_lock_fd_tracker(); + ret = close(obj->shm_fd); + if (!ret) { + lttng_ust_delete_fd_from_tracker(obj->shm_fd); + } else { + PERROR("close"); + assert(0); + } + lttng_ust_unlock_fd_tracker(); + } else { + ret = close(obj->shm_fd); + if (ret) { + PERROR("close"); + assert(0); + } + } + } + break; + } + case LTTNG_COUNTER_SHM_OBJECT_MEM: + { + free(obj->memory_map); + break; + } + default: + assert(0); + } +} + +void lttng_counter_shm_object_table_destroy(struct lttng_counter_shm_object_table *table, int consumer) +{ + int i; + + for (i = 0; i < table->allocated_len; i++) + lttng_counter_shmp_object_destroy(&table->objects[i], consumer); + free(table); +} + +/* + * lttng_counter_zalloc_shm - allocate memory within a shm object. + * + * Shared memory is already zeroed by shmget. + * *NOT* multithread-safe (should be protected by mutex). + * Returns a -1, -1 tuple on error. + */ +struct lttng_counter_shm_ref lttng_counter_zalloc_shm(struct lttng_counter_shm_object *obj, size_t len) +{ + struct lttng_counter_shm_ref ref; + struct lttng_counter_shm_ref shm_ref_error = { -1, -1 }; + + if (obj->memory_map_size - obj->allocated_len < len) + return shm_ref_error; + ref.index = obj->index; + ref.offset = obj->allocated_len; + obj->allocated_len += len; + return ref; +} + +void lttng_counter_align_shm(struct lttng_counter_shm_object *obj, size_t align) +{ + size_t offset_len = offset_align(obj->allocated_len, align); + obj->allocated_len += offset_len; +} diff --git a/libcounter/shm.h b/libcounter/shm.h new file mode 100644 index 00000000..2c6e0c7b --- /dev/null +++ b/libcounter/shm.h @@ -0,0 +1,142 @@ +#ifndef _LIBCOUNTER_SHM_H +#define _LIBCOUNTER_SHM_H + +/* + * libcounter/shm.h + * + * Copyright (C) 2011-2012 Mathieu Desnoyers + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; only + * version 2.1 of the License. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include +#include +#include "shm_types.h" + +/* lttng_counter_handle_create - for UST. */ +extern +struct lttng_counter_shm_handle *lttng_counter_handle_create(void *data, + uint64_t memory_map_size, int wakeup_fd); +/* lttng_counter_handle_add_cpu - for UST. */ +extern +int lttng_counter_handle_add_cpu(struct lttng_counter_shm_handle *handle, + int shm_fd, uint32_t cpu_nr, + uint64_t memory_map_size); +unsigned int lttng_counter_handle_get_nr_cpus(struct lttng_counter_shm_handle *handle); + +/* + * Pointer dereferencing. We don't trust the shm_ref, so we validate + * both the index and offset with known boundaries. + * + * "shmp" and "shmp_index" guarantee that it's safe to use the pointer + * target type, even in the occurrence of shm_ref modification by an + * untrusted process having write access to the shm_ref. We return a + * NULL pointer if the ranges are invalid. 
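+ *
+ * Illustrative use (hypothetical layout and field names, not part of this
+ * change), reading element "i" of an array declared with
+ * DECLARE_LTTNG_COUNTER_SHMP(int64_t, values):
+ *
+ *   int64_t *p;
+ *
+ *   p = lttng_counter_shmp_index(handle, layout->values, i);
+ *   if (!p)
+ *           return -EINVAL;   (shm_ref index/offset out of bounds)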
+ */ +static inline +char *_lttng_counter_shmp_offset(struct lttng_counter_shm_object_table *table, + struct lttng_counter_shm_ref *ref, + size_t idx, size_t elem_size) +{ + struct lttng_counter_shm_object *obj; + size_t objindex, ref_offset; + + objindex = (size_t) ref->index; + if (caa_unlikely(objindex >= table->allocated_len)) + return NULL; + obj = &table->objects[objindex]; + ref_offset = (size_t) ref->offset; + ref_offset += idx * elem_size; + /* Check if part of the element returned would exceed the limits. */ + if (caa_unlikely(ref_offset + elem_size > obj->memory_map_size)) + return NULL; + return &obj->memory_map[ref_offset]; +} + +#define lttng_counter_shmp_index(handle, ref, index) \ + ({ \ + __typeof__((ref)._type) ____ptr_ret; \ + ____ptr_ret = (__typeof__(____ptr_ret)) _lttng_counter_shmp_offset((handle)->table, &(ref)._ref, index, sizeof(*____ptr_ret)); \ + ____ptr_ret; \ + }) + +#define lttng_counter_shmp(handle, ref) lttng_counter_shmp_index(handle, ref, 0) + +static inline +void _lttng_counter_set_shmp(struct lttng_counter_shm_ref *ref, struct lttng_counter_shm_ref src) +{ + *ref = src; +} + +#define lttng_counter_set_shmp(ref, src) _lttng_counter_set_shmp(&(ref)._ref, src) + +struct lttng_counter_shm_object_table *lttng_counter_shm_object_table_create(size_t max_nb_obj); +struct lttng_counter_shm_object *lttng_counter_shm_object_table_alloc(struct lttng_counter_shm_object_table *table, + size_t memory_map_size, + enum lttng_counter_shm_object_type type, + const int cpu_fd, + int cpu); +struct lttng_counter_shm_object *lttng_counter_shm_object_table_append_shm(struct lttng_counter_shm_object_table *table, + int shm_fd, size_t memory_map_size); +/* mem ownership is passed to lttng_counter_shm_object_table_append_mem(). */ +struct lttng_counter_shm_object *lttng_counter_shm_object_table_append_mem(struct lttng_counter_shm_object_table *table, + void *mem, size_t memory_map_size); +void lttng_counter_shm_object_table_destroy(struct lttng_counter_shm_object_table *table, int consumer); + +/* + * lttng_counter_zalloc_shm - allocate memory within a shm object. + * + * Shared memory is already zeroed by shmget. + * *NOT* multithread-safe (should be protected by mutex). + * Returns a -1, -1 tuple on error. 
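+ *
+ * Illustrative sketch (not part of this change), carving a zeroed,
+ * naturally aligned array of uint64_t out of an object previously
+ * returned by lttng_counter_shm_object_table_alloc():
+ *
+ *   struct lttng_counter_shm_ref ref;
+ *
+ *   lttng_counter_align_shm(obj, __alignof__(uint64_t));
+ *   ref = lttng_counter_zalloc_shm(obj, nr_elem * sizeof(uint64_t));
+ *   if (ref.index == -1 && ref.offset == -1)
+ *           return -ENOMEM;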
+ */ +struct lttng_counter_shm_ref lttng_counter_zalloc_shm(struct lttng_counter_shm_object *obj, size_t len); +void lttng_counter_align_shm(struct lttng_counter_shm_object *obj, size_t align); + +static inline +int lttng_counter_shm_get_shm_fd(struct lttng_counter_shm_handle *handle, struct lttng_counter_shm_ref *ref) +{ + struct lttng_counter_shm_object_table *table = handle->table; + struct lttng_counter_shm_object *obj; + size_t index; + + index = (size_t) ref->index; + if (caa_unlikely(index >= table->allocated_len)) + return -EPERM; + obj = &table->objects[index]; + return obj->shm_fd; +} + + +static inline +int lttng_counter_shm_get_shm_size(struct lttng_counter_shm_handle *handle, struct lttng_counter_shm_ref *ref, + uint64_t *size) +{ + struct lttng_counter_shm_object_table *table = handle->table; + struct lttng_counter_shm_object *obj; + size_t index; + + index = (size_t) ref->index; + if (caa_unlikely(index >= table->allocated_len)) + return -EPERM; + obj = &table->objects[index]; + *size = obj->memory_map_size; + return 0; +} + +#endif /* _LIBCOUNTER_SHM_H */ diff --git a/libcounter/shm_internal.h b/libcounter/shm_internal.h new file mode 100644 index 00000000..dcc3aab6 --- /dev/null +++ b/libcounter/shm_internal.h @@ -0,0 +1,35 @@ +#ifndef _LIBCOUNTER_SHM_INTERNAL_H +#define _LIBCOUNTER_SHM_INTERNAL_H + +/* + * libcounter/shm_internal.h + * + * Copyright (C) 2011-2012 Mathieu Desnoyers + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; only + * version 2.1 of the License. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +struct lttng_counter_shm_ref { + volatile ssize_t index; /* within the object table */ + volatile ssize_t offset; /* within the object */ +}; + +#define DECLARE_LTTNG_COUNTER_SHMP(type, name) \ + union { \ + struct lttng_counter_shm_ref _ref; \ + type *_type; \ + } name + +#endif /* _LIBCOUNTER_SHM_INTERNAL_H */ diff --git a/libcounter/shm_types.h b/libcounter/shm_types.h new file mode 100644 index 00000000..2086a832 --- /dev/null +++ b/libcounter/shm_types.h @@ -0,0 +1,54 @@ +#ifndef _LIBCOUNTER_SHM_TYPES_H +#define _LIBCOUNTER_SHM_TYPES_H + +/* + * libcounter/shm_types.h + * + * Copyright (C) 2011-2012 Mathieu Desnoyers + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; only + * version 2.1 of the License. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include "shm_internal.h" + +enum lttng_counter_shm_object_type { + LTTNG_COUNTER_SHM_OBJECT_SHM, + LTTNG_COUNTER_SHM_OBJECT_MEM, +}; + +struct lttng_counter_shm_object { + enum lttng_counter_shm_object_type type; + size_t index; /* within the object table */ + int shm_fd; /* shm fd */ + char *memory_map; + size_t memory_map_size; + uint64_t allocated_len; + int shm_fd_ownership; +}; + +struct lttng_counter_shm_object_table { + size_t size; + size_t allocated_len; + struct lttng_counter_shm_object objects[]; +}; + +struct lttng_counter_shm_handle { + struct lttng_counter_shm_object_table *table; +}; + +#endif /* _LIBCOUNTER_SHM_TYPES_H */ diff --git a/libcounter/smp.c b/libcounter/smp.c new file mode 100644 index 00000000..22ad98ab --- /dev/null +++ b/libcounter/smp.c @@ -0,0 +1,111 @@ +/* + * libcounter/smp.c + * + * Copyright (C) 2011-2012 Mathieu Desnoyers + * Copyright (C) 2019 Michael Jeanson + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; only + * version 2.1 of the License. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#define _GNU_SOURCE +#define _LGPL_SOURCE +#include +#include +#include "smp.h" + +int __lttng_counter_num_possible_cpus; + +#if (defined(__GLIBC__) || defined( __UCLIBC__)) +void _lttng_counter_get_num_possible_cpus(void) +{ + int result; + + /* On Linux, when some processors are offline + * _SC_NPROCESSORS_CONF counts the offline + * processors, whereas _SC_NPROCESSORS_ONLN + * does not. If we used _SC_NPROCESSORS_ONLN, + * getcpu() could return a value greater than + * this sysconf, in which case the arrays + * indexed by processor would overflow. + */ + result = sysconf(_SC_NPROCESSORS_CONF); + if (result == -1) + return; + __lttng_counter_num_possible_cpus = result; +} + +#else + +/* + * The MUSL libc implementation of the _SC_NPROCESSORS_CONF sysconf does not + * return the number of configured CPUs in the system but relies on the cpu + * affinity mask of the current task. + * + * So instead we use a strategy similar to GLIBC's, counting the cpu + * directories in "/sys/devices/system/cpu" and fallback on the value from + * sysconf if it fails. + */ + +#include +#include +#include +#include +#include + +#define __max(a,b) ((a)>(b)?(a):(b)) + +void _lttng_counter_get_num_possible_cpus(void) +{ + int result, count = 0; + DIR *cpudir; + struct dirent *entry; + + cpudir = opendir("/sys/devices/system/cpu"); + if (cpudir == NULL) + goto end; + + /* + * Count the number of directories named "cpu" followed by and + * integer. This is the same strategy as glibc uses. 
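+	 * For example, "cpu0" and "cpu15" are counted, whereas entries such
+	 * as "cpufreq" or "cpuidle" are rejected because the characters
+	 * following "cpu" do not form a complete number.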
+ */ + while ((entry = readdir(cpudir))) { + if (entry->d_type == DT_DIR && + strncmp(entry->d_name, "cpu", 3) == 0) { + + char *endptr; + unsigned long cpu_num; + + cpu_num = strtoul(entry->d_name + 3, &endptr, 10); + if ((cpu_num < ULONG_MAX) && (endptr != entry->d_name + 3) + && (*endptr == '\0')) { + count++; + } + } + } + +end: + /* + * Get the sysconf value as a fallback. Keep the highest number. + */ + result = __max(sysconf(_SC_NPROCESSORS_CONF), count); + + /* + * If both methods failed, don't store the value. + */ + if (result < 1) + return; + __lttng_counter_num_possible_cpus = result; +} +#endif diff --git a/libcounter/smp.h b/libcounter/smp.h new file mode 100644 index 00000000..00ca7a03 --- /dev/null +++ b/libcounter/smp.h @@ -0,0 +1,43 @@ +#ifndef _LIBCOUNTER_SMP_H +#define _LIBCOUNTER_SMP_H + +/* + * libcounter/smp.h + * + * Copyright (C) 2011-2012 Mathieu Desnoyers + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; only + * version 2.1 of the License. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/* + * 4kB of per-cpu data available. + */ +#define LTTNG_COUNTER_PER_CPU_MEM_SIZE 4096 + +extern int __lttng_counter_num_possible_cpus; +extern void _lttng_counter_get_num_possible_cpus(void); + +static inline +int lttng_counter_num_possible_cpus(void) +{ + if (!__lttng_counter_num_possible_cpus) + _lttng_counter_get_num_possible_cpus(); + return __lttng_counter_num_possible_cpus; +} + +#define lttng_counter_for_each_possible_cpu(cpu) \ + for ((cpu) = 0; (cpu) < lttng_counter_num_possible_cpus(); (cpu)++) + +#endif /* _LIBCOUNTER_SMP_H */ diff --git a/liblttng-ust-comm/lttng-ust-comm.c b/liblttng-ust-comm/lttng-ust-comm.c index 9786abe7..5322702c 100644 --- a/liblttng-ust-comm/lttng-ust-comm.c +++ b/liblttng-ust-comm/lttng-ust-comm.c @@ -765,6 +765,75 @@ error: return ret; } +ssize_t ustcomm_recv_counter_from_sessiond(int sock, + void **_counter_data, uint64_t var_len) +{ + void *counter_data; + ssize_t len; + + if (var_len > LTTNG_UST_COUNTER_DATA_MAX_LEN) { + len = -EINVAL; + goto error_check; + } + /* Receive variable length data */ + counter_data = zmalloc(var_len); + if (!counter_data) { + len = -ENOMEM; + goto error_alloc; + } + len = ustcomm_recv_unix_sock(sock, counter_data, var_len); + if (len != var_len) { + goto error_recv; + } + *_counter_data = counter_data; + return len; + +error_recv: + free(counter_data); +error_alloc: +error_check: + return len; +} + +int ustcomm_recv_counter_shm_from_sessiond(int sock, + int *shm_fd) +{ + ssize_t len; + int ret; + int fds[1]; + + /* recv shm fd fd */ + lttng_ust_lock_fd_tracker(); + len = ustcomm_recv_fds_unix_sock(sock, fds, 1); + if (len <= 0) { + lttng_ust_unlock_fd_tracker(); + if (len < 0) { + ret = len; + goto error; + } else { + ret = -EIO; + goto error; + } + } + + ret = lttng_ust_add_fd_to_tracker(fds[0]); + if (ret < 0) { + ret = close(fds[0]); + if (ret) { + PERROR("close on received shm_fd"); + } + ret = -EIO; + 
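+		/*
+		 * The received fd was already closed above; release the fd
+		 * tracker lock before reporting the error to the caller.
+		 */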
lttng_ust_unlock_fd_tracker(); + goto error; + } + *shm_fd = ret; + lttng_ust_unlock_fd_tracker(); + return 0; + +error: + return ret; +} + /* * Returns 0 on success, negative error value on error. */ diff --git a/liblttng-ust-ctl/ustctl.c b/liblttng-ust-ctl/ustctl.c index b2fa8258..650e7290 100644 --- a/liblttng-ust-ctl/ustctl.c +++ b/liblttng-ust-ctl/ustctl.c @@ -37,6 +37,10 @@ #include "../liblttng-ust/clock.h" #include "../liblttng-ust/getenv.h" +#include "../libcounter/shm.h" +#include "../libcounter/smp.h" +#include "../libcounter/counter.h" + /* * Number of milliseconds to retry before failing metadata writes on * buffer full condition. (10 seconds) @@ -67,6 +71,24 @@ struct ustctl_consumer_stream { uint64_t memory_map_size; }; +#define USTCTL_COUNTER_ATTR_DIMENSION_MAX 8 +struct ustctl_counter_attr { + enum ustctl_counter_arithmetic arithmetic; + enum ustctl_counter_bitness bitness; + uint32_t nr_dimensions; + int64_t global_sum_step; + struct ustctl_counter_dimension dimensions[USTCTL_COUNTER_ATTR_DIMENSION_MAX]; +}; + +/* + * Counter representation within daemon. + */ +struct ustctl_daemon_counter { + struct lib_counter *counter; + const struct lttng_counter_ops *ops; + struct ustctl_counter_attr *attr; /* initial attributes */ +}; + extern void lttng_ring_buffer_client_overwrite_init(void); extern void lttng_ring_buffer_client_overwrite_rt_init(void); extern void lttng_ring_buffer_client_discard_init(void); @@ -77,6 +99,10 @@ extern void lttng_ring_buffer_client_overwrite_rt_exit(void); extern void lttng_ring_buffer_client_discard_exit(void); extern void lttng_ring_buffer_client_discard_rt_exit(void); extern void lttng_ring_buffer_metadata_client_exit(void); +extern void lttng_counter_client_percpu_32_modular_init(void); +extern void lttng_counter_client_percpu_32_modular_exit(void); +extern void lttng_counter_client_percpu_64_modular_init(void); +extern void lttng_counter_client_percpu_64_modular_exit(void); int ustctl_release_handle(int sock, int handle) { @@ -138,6 +164,30 @@ int ustctl_release_object(int sock, struct lttng_ust_object_data *data) case LTTNG_UST_OBJECT_TYPE_EVENT_NOTIFIER_GROUP: case LTTNG_UST_OBJECT_TYPE_EVENT_NOTIFIER: break; + case LTTNG_UST_OBJECT_TYPE_COUNTER: + free(data->u.counter.data); + data->u.counter.data = NULL; + break; + case LTTNG_UST_OBJECT_TYPE_COUNTER_GLOBAL: + if (data->u.counter_global.shm_fd >= 0) { + ret = close(data->u.counter_global.shm_fd); + if (ret < 0) { + ret = -errno; + return ret; + } + data->u.counter_global.shm_fd = -1; + } + break; + case LTTNG_UST_OBJECT_TYPE_COUNTER_CPU: + if (data->u.counter_cpu.shm_fd >= 0) { + ret = close(data->u.counter_cpu.shm_fd); + if (ret < 0) { + ret = -errno; + return ret; + } + data->u.counter_cpu.shm_fd = -1; + } + break; default: assert(0); } @@ -1107,6 +1157,44 @@ int ustctl_duplicate_ust_object_data(struct lttng_ust_object_data **dest, goto error_type; } + case LTTNG_UST_OBJECT_TYPE_COUNTER: + { + obj->u.counter.data = zmalloc(obj->size); + if (!obj->u.counter.data) { + ret = -ENOMEM; + goto error_type; + } + memcpy(obj->u.counter.data, src->u.counter.data, obj->size); + break; + } + + case LTTNG_UST_OBJECT_TYPE_COUNTER_GLOBAL: + { + if (src->u.counter_global.shm_fd >= 0) { + obj->u.counter_global.shm_fd = + dup(src->u.counter_global.shm_fd); + if (obj->u.counter_global.shm_fd < 0) { + ret = errno; + goto error_type; + } + } + break; + } + + case LTTNG_UST_OBJECT_TYPE_COUNTER_CPU: + { + obj->u.counter_cpu.cpu_nr = src->u.counter_cpu.cpu_nr; + if (src->u.counter_cpu.shm_fd >= 0) { + 
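+			/* Duplicate the per-cpu shm fd so the copy owns its own descriptor. */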
obj->u.counter_cpu.shm_fd = + dup(src->u.counter_cpu.shm_fd); + if (obj->u.counter_cpu.shm_fd < 0) { + ret = errno; + goto error_type; + } + } + break; + } + default: ret = -EINVAL; goto error_type; @@ -2363,6 +2451,373 @@ int ustctl_regenerate_statedump(int sock, int handle) return 0; } +/* counter operations */ + +int ustctl_get_nr_cpu_per_counter(void) +{ + return lttng_counter_num_possible_cpus(); +} + +struct ustctl_daemon_counter * + ustctl_create_counter(size_t nr_dimensions, + const struct ustctl_counter_dimension *dimensions, + int64_t global_sum_step, + int global_counter_fd, + int nr_counter_cpu_fds, + const int *counter_cpu_fds, + enum ustctl_counter_bitness bitness, + enum ustctl_counter_arithmetic arithmetic, + uint32_t alloc_flags) +{ + const char *transport_name; + struct ustctl_daemon_counter *counter; + struct lttng_counter_transport *transport; + struct lttng_counter_dimension ust_dim[LTTNG_COUNTER_DIMENSION_MAX]; + size_t i; + + if (nr_dimensions > LTTNG_COUNTER_DIMENSION_MAX) + return NULL; + /* Currently, only per-cpu allocation is supported. */ + switch (alloc_flags) { + case USTCTL_COUNTER_ALLOC_PER_CPU: + break; + + case USTCTL_COUNTER_ALLOC_PER_CPU | USTCTL_COUNTER_ALLOC_GLOBAL: + case USTCTL_COUNTER_ALLOC_GLOBAL: + default: + return NULL; + } + switch (bitness) { + case USTCTL_COUNTER_BITNESS_32: + switch (arithmetic) { + case USTCTL_COUNTER_ARITHMETIC_MODULAR: + transport_name = "counter-per-cpu-32-modular"; + break; + case USTCTL_COUNTER_ARITHMETIC_SATURATION: + transport_name = "counter-per-cpu-32-saturation"; + break; + default: + return NULL; + } + break; + case USTCTL_COUNTER_BITNESS_64: + switch (arithmetic) { + case USTCTL_COUNTER_ARITHMETIC_MODULAR: + transport_name = "counter-per-cpu-64-modular"; + break; + case USTCTL_COUNTER_ARITHMETIC_SATURATION: + transport_name = "counter-per-cpu-64-saturation"; + break; + default: + return NULL; + } + break; + default: + return NULL; + } + + transport = lttng_counter_transport_find(transport_name); + if (!transport) { + DBG("LTTng transport %s not found\n", + transport_name); + return NULL; + } + + counter = zmalloc(sizeof(*counter)); + if (!counter) + return NULL; + counter->attr = zmalloc(sizeof(*counter->attr)); + if (!counter->attr) + goto free_counter; + counter->attr->bitness = bitness; + counter->attr->arithmetic = arithmetic; + counter->attr->nr_dimensions = nr_dimensions; + counter->attr->global_sum_step = global_sum_step; + for (i = 0; i < nr_dimensions; i++) + counter->attr->dimensions[i] = dimensions[i]; + + for (i = 0; i < nr_dimensions; i++) { + ust_dim[i].size = dimensions[i].size; + ust_dim[i].underflow_index = dimensions[i].underflow_index; + ust_dim[i].overflow_index = dimensions[i].overflow_index; + ust_dim[i].has_underflow = dimensions[i].has_underflow; + ust_dim[i].has_overflow = dimensions[i].has_overflow; + } + counter->counter = transport->ops.counter_create(nr_dimensions, + ust_dim, global_sum_step, global_counter_fd, + nr_counter_cpu_fds, counter_cpu_fds, true); + if (!counter->counter) + goto free_attr; + counter->ops = &transport->ops; + return counter; + +free_attr: + free(counter->attr); +free_counter: + free(counter); + return NULL; +} + +int ustctl_create_counter_data(struct ustctl_daemon_counter *counter, + struct lttng_ust_object_data **_counter_data) +{ + struct lttng_ust_object_data *counter_data; + struct lttng_ust_counter_conf counter_conf; + size_t i; + int ret; + + switch (counter->attr->arithmetic) { + case USTCTL_COUNTER_ARITHMETIC_MODULAR: + counter_conf.arithmetic = 
LTTNG_UST_COUNTER_ARITHMETIC_MODULAR; + break; + case USTCTL_COUNTER_ARITHMETIC_SATURATION: + counter_conf.arithmetic = LTTNG_UST_COUNTER_ARITHMETIC_SATURATION; + break; + default: + return -EINVAL; + } + switch (counter->attr->bitness) { + case USTCTL_COUNTER_BITNESS_32: + counter_conf.bitness = LTTNG_UST_COUNTER_BITNESS_32BITS; + break; + case USTCTL_COUNTER_BITNESS_64: + counter_conf.bitness = LTTNG_UST_COUNTER_BITNESS_64BITS; + break; + default: + return -EINVAL; + } + counter_conf.number_dimensions = counter->attr->nr_dimensions; + counter_conf.global_sum_step = counter->attr->global_sum_step; + for (i = 0; i < counter->attr->nr_dimensions; i++) { + counter_conf.dimensions[i].size = counter->attr->dimensions[i].size; + counter_conf.dimensions[i].underflow_index = counter->attr->dimensions[i].underflow_index; + counter_conf.dimensions[i].overflow_index = counter->attr->dimensions[i].overflow_index; + counter_conf.dimensions[i].has_underflow = counter->attr->dimensions[i].has_underflow; + counter_conf.dimensions[i].has_overflow = counter->attr->dimensions[i].has_overflow; + } + + counter_data = zmalloc(sizeof(*counter_data)); + if (!counter_data) { + ret = -ENOMEM; + goto error_alloc; + } + counter_data->type = LTTNG_UST_OBJECT_TYPE_COUNTER; + counter_data->handle = -1; + + counter_data->size = sizeof(counter_conf); + counter_data->u.counter.data = zmalloc(sizeof(counter_conf)); + if (!counter_data->u.counter.data) { + ret = -ENOMEM; + goto error_alloc_data; + } + + memcpy(counter_data->u.counter.data, &counter_conf, sizeof(counter_conf)); + *_counter_data = counter_data; + + return 0; + +error_alloc_data: + free(counter_data); +error_alloc: + return ret; +} + +int ustctl_create_counter_global_data(struct ustctl_daemon_counter *counter, + struct lttng_ust_object_data **_counter_global_data) +{ + struct lttng_ust_object_data *counter_global_data; + int ret, fd; + size_t len; + + if (lttng_counter_get_global_shm(counter->counter, &fd, &len)) + return -EINVAL; + counter_global_data = zmalloc(sizeof(*counter_global_data)); + if (!counter_global_data) { + ret = -ENOMEM; + goto error_alloc; + } + counter_global_data->type = LTTNG_UST_OBJECT_TYPE_COUNTER_GLOBAL; + counter_global_data->handle = -1; + counter_global_data->size = len; + counter_global_data->u.counter_global.shm_fd = fd; + *_counter_global_data = counter_global_data; + return 0; + +error_alloc: + return ret; +} + +int ustctl_create_counter_cpu_data(struct ustctl_daemon_counter *counter, int cpu, + struct lttng_ust_object_data **_counter_cpu_data) +{ + struct lttng_ust_object_data *counter_cpu_data; + int ret, fd; + size_t len; + + if (lttng_counter_get_cpu_shm(counter->counter, cpu, &fd, &len)) + return -EINVAL; + counter_cpu_data = zmalloc(sizeof(*counter_cpu_data)); + if (!counter_cpu_data) { + ret = -ENOMEM; + goto error_alloc; + } + counter_cpu_data->type = LTTNG_UST_OBJECT_TYPE_COUNTER_CPU; + counter_cpu_data->handle = -1; + counter_cpu_data->size = len; + counter_cpu_data->u.counter_cpu.shm_fd = fd; + counter_cpu_data->u.counter_cpu.cpu_nr = cpu; + *_counter_cpu_data = counter_cpu_data; + return 0; + +error_alloc: + return ret; +} + +void ustctl_destroy_counter(struct ustctl_daemon_counter *counter) +{ + counter->ops->counter_destroy(counter->counter); + free(counter->attr); + free(counter); +} + +int ustctl_send_counter_data_to_ust(int sock, int parent_handle, + struct lttng_ust_object_data *counter_data) +{ + struct ustcomm_ust_msg lum; + struct ustcomm_ust_reply lur; + int ret; + size_t size; + ssize_t len; + + if 
(!counter_data) + return -EINVAL; + + size = counter_data->size; + memset(&lum, 0, sizeof(lum)); + lum.handle = parent_handle; + lum.cmd = LTTNG_UST_COUNTER; + lum.u.counter.len = size; + ret = ustcomm_send_app_msg(sock, &lum); + if (ret) + return ret; + + /* Send counter data */ + len = ustcomm_send_unix_sock(sock, counter_data->u.counter.data, size); + if (len != size) { + if (len < 0) + return len; + else + return -EIO; + } + + ret = ustcomm_recv_app_reply(sock, &lur, lum.handle, lum.cmd); + if (!ret) { + counter_data->handle = lur.ret_val; + } + return ret; +} + +int ustctl_send_counter_global_data_to_ust(int sock, + struct lttng_ust_object_data *counter_data, + struct lttng_ust_object_data *counter_global_data) +{ + struct ustcomm_ust_msg lum; + struct ustcomm_ust_reply lur; + int ret, shm_fd[1]; + size_t size; + ssize_t len; + + if (!counter_data || !counter_global_data) + return -EINVAL; + + size = counter_global_data->size; + memset(&lum, 0, sizeof(lum)); + lum.handle = counter_data->handle; /* parent handle */ + lum.cmd = LTTNG_UST_COUNTER_GLOBAL; + lum.u.counter_global.len = size; + ret = ustcomm_send_app_msg(sock, &lum); + if (ret) + return ret; + + shm_fd[0] = counter_global_data->u.counter_global.shm_fd; + len = ustcomm_send_fds_unix_sock(sock, shm_fd, 1); + if (len <= 0) { + if (len < 0) + return len; + else + return -EIO; + } + + ret = ustcomm_recv_app_reply(sock, &lur, lum.handle, lum.cmd); + if (!ret) { + counter_global_data->handle = lur.ret_val; + } + return ret; +} + +int ustctl_send_counter_cpu_data_to_ust(int sock, + struct lttng_ust_object_data *counter_data, + struct lttng_ust_object_data *counter_cpu_data) +{ + struct ustcomm_ust_msg lum; + struct ustcomm_ust_reply lur; + int ret, shm_fd[1]; + size_t size; + ssize_t len; + + if (!counter_data || !counter_cpu_data) + return -EINVAL; + + size = counter_cpu_data->size; + memset(&lum, 0, sizeof(lum)); + lum.handle = counter_data->handle; /* parent handle */ + lum.cmd = LTTNG_UST_COUNTER_CPU; + lum.u.counter_cpu.len = size; + lum.u.counter_cpu.cpu_nr = counter_cpu_data->u.counter_cpu.cpu_nr; + ret = ustcomm_send_app_msg(sock, &lum); + if (ret) + return ret; + + shm_fd[0] = counter_cpu_data->u.counter_global.shm_fd; + len = ustcomm_send_fds_unix_sock(sock, shm_fd, 1); + if (len <= 0) { + if (len < 0) + return len; + else + return -EIO; + } + + ret = ustcomm_recv_app_reply(sock, &lur, lum.handle, lum.cmd); + if (!ret) { + counter_cpu_data->handle = lur.ret_val; + } + return ret; +} + +int ustctl_counter_read(struct ustctl_daemon_counter *counter, + const size_t *dimension_indexes, + int cpu, int64_t *value, + bool *overflow, bool *underflow) +{ + return counter->ops->counter_read(counter->counter, dimension_indexes, cpu, + value, overflow, underflow); +} + +int ustctl_counter_aggregate(struct ustctl_daemon_counter *counter, + const size_t *dimension_indexes, + int64_t *value, + bool *overflow, bool *underflow) +{ + return counter->ops->counter_aggregate(counter->counter, dimension_indexes, + value, overflow, underflow); +} + +int ustctl_counter_clear(struct ustctl_daemon_counter *counter, + const size_t *dimension_indexes) +{ + return counter->ops->counter_clear(counter->counter, dimension_indexes); +} + static __attribute__((constructor)) void ustctl_init(void) { @@ -2374,6 +2829,8 @@ void ustctl_init(void) lttng_ring_buffer_client_overwrite_rt_init(); lttng_ring_buffer_client_discard_init(); lttng_ring_buffer_client_discard_rt_init(); + lttng_counter_client_percpu_32_modular_init(); + 
lttng_counter_client_percpu_64_modular_init(); lib_ringbuffer_signal_init(); } @@ -2385,4 +2842,6 @@ void ustctl_exit(void) lttng_ring_buffer_client_overwrite_rt_exit(); lttng_ring_buffer_client_overwrite_exit(); lttng_ring_buffer_metadata_client_exit(); + lttng_counter_client_percpu_32_modular_exit(); + lttng_counter_client_percpu_64_modular_exit(); } diff --git a/liblttng-ust/Makefile.am b/liblttng-ust/Makefile.am index 98fb2153..fab09251 100644 --- a/liblttng-ust/Makefile.am +++ b/liblttng-ust/Makefile.am @@ -102,6 +102,8 @@ liblttng_ust_support_la_SOURCES = \ lttng-ring-buffer-client-overwrite-rt.c \ lttng-ring-buffer-metadata-client.h \ lttng-ring-buffer-metadata-client.c \ + lttng-counter-client-percpu-32-modular.c \ + lttng-counter-client-percpu-64-modular.c \ lttng-clock.c lttng-getcpu.c liblttng_ust_la_SOURCES = @@ -109,7 +111,8 @@ liblttng_ust_la_SOURCES = liblttng_ust_la_LDFLAGS = -no-undefined -version-info $(LTTNG_UST_LIBRARY_VERSION) liblttng_ust_support_la_LIBADD = \ - $(top_builddir)/libringbuffer/libringbuffer.la + $(top_builddir)/libringbuffer/libringbuffer.la \ + $(top_builddir)/libcounter/libcounter.la liblttng_ust_la_LIBADD = \ -lrt \ diff --git a/liblttng-ust/lttng-counter-client-percpu-32-modular.c b/liblttng-ust/lttng-counter-client-percpu-32-modular.c new file mode 100644 index 00000000..86fa1164 --- /dev/null +++ b/liblttng-ust/lttng-counter-client-percpu-32-modular.c @@ -0,0 +1,94 @@ +/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only) + * + * lttng-counter-client-percpu-32-modular.c + * + * LTTng lib counter client. Per-cpu 32-bit counters in modular + * arithmetic. + * + * Copyright (C) 2020 Mathieu Desnoyers + */ + +#include +#include "../libcounter/counter.h" +#include "../libcounter/counter-api.h" + +static const struct lib_counter_config client_config = { + .alloc = COUNTER_ALLOC_PER_CPU, + .sync = COUNTER_SYNC_PER_CPU, + .arithmetic = COUNTER_ARITHMETIC_MODULAR, + .counter_size = COUNTER_SIZE_32_BIT, +}; + +static struct lib_counter *counter_create(size_t nr_dimensions, + const struct lttng_counter_dimension *dimensions, + int64_t global_sum_step, + int global_counter_fd, + int nr_counter_cpu_fds, + const int *counter_cpu_fds, + bool is_daemon) +{ + size_t max_nr_elem[LTTNG_COUNTER_DIMENSION_MAX], i; + + if (nr_dimensions > LTTNG_COUNTER_DIMENSION_MAX) + return NULL; + for (i = 0; i < nr_dimensions; i++) { + if (dimensions[i].has_underflow || dimensions[i].has_overflow) + return NULL; + max_nr_elem[i] = dimensions[i].size; + } + return lttng_counter_create(&client_config, nr_dimensions, max_nr_elem, + global_sum_step, global_counter_fd, nr_counter_cpu_fds, + counter_cpu_fds, is_daemon); +} + +static void counter_destroy(struct lib_counter *counter) +{ + lttng_counter_destroy(counter); +} + +static int counter_add(struct lib_counter *counter, const size_t *dimension_indexes, int64_t v) +{ + return lttng_counter_add(&client_config, counter, dimension_indexes, v); +} + +static int counter_read(struct lib_counter *counter, const size_t *dimension_indexes, int cpu, + int64_t *value, bool *overflow, bool *underflow) +{ + return lttng_counter_read(&client_config, counter, dimension_indexes, cpu, value, + overflow, underflow); +} + +static int counter_aggregate(struct lib_counter *counter, const size_t *dimension_indexes, + int64_t *value, bool *overflow, bool *underflow) +{ + return lttng_counter_aggregate(&client_config, counter, dimension_indexes, value, + overflow, underflow); +} + +static int counter_clear(struct lib_counter *counter, const size_t 
*dimension_indexes) +{ + return lttng_counter_clear(&client_config, counter, dimension_indexes); +} + +static struct lttng_counter_transport lttng_counter_transport = { + .name = "counter-per-cpu-32-modular", + .ops = { + .counter_create = counter_create, + .counter_destroy = counter_destroy, + .counter_add = counter_add, + .counter_read = counter_read, + .counter_aggregate = counter_aggregate, + .counter_clear = counter_clear, + }, + .client_config = &client_config, +}; + +void lttng_counter_client_percpu_32_modular_init(void) +{ + lttng_counter_transport_register(<tng_counter_transport); +} + +void lttng_counter_client_percpu_32_modular_exit(void) +{ + lttng_counter_transport_unregister(<tng_counter_transport); +} diff --git a/liblttng-ust/lttng-counter-client-percpu-64-modular.c b/liblttng-ust/lttng-counter-client-percpu-64-modular.c new file mode 100644 index 00000000..c3851946 --- /dev/null +++ b/liblttng-ust/lttng-counter-client-percpu-64-modular.c @@ -0,0 +1,94 @@ +/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only) + * + * lttng-counter-client-percpu-64-modular.c + * + * LTTng lib counter client. Per-cpu 64-bit counters in modular + * arithmetic. + * + * Copyright (C) 2020 Mathieu Desnoyers + */ + +#include +#include "../libcounter/counter.h" +#include "../libcounter/counter-api.h" + +static const struct lib_counter_config client_config = { + .alloc = COUNTER_ALLOC_PER_CPU, + .sync = COUNTER_SYNC_PER_CPU, + .arithmetic = COUNTER_ARITHMETIC_MODULAR, + .counter_size = COUNTER_SIZE_64_BIT, +}; + +static struct lib_counter *counter_create(size_t nr_dimensions, + const struct lttng_counter_dimension *dimensions, + int64_t global_sum_step, + int global_counter_fd, + int nr_counter_cpu_fds, + const int *counter_cpu_fds, + bool is_daemon) +{ + size_t max_nr_elem[LTTNG_COUNTER_DIMENSION_MAX], i; + + if (nr_dimensions > LTTNG_COUNTER_DIMENSION_MAX) + return NULL; + for (i = 0; i < nr_dimensions; i++) { + if (dimensions[i].has_underflow || dimensions[i].has_overflow) + return NULL; + max_nr_elem[i] = dimensions[i].size; + } + return lttng_counter_create(&client_config, nr_dimensions, max_nr_elem, + global_sum_step, global_counter_fd, nr_counter_cpu_fds, + counter_cpu_fds, is_daemon); +} + +static void counter_destroy(struct lib_counter *counter) +{ + lttng_counter_destroy(counter); +} + +static int counter_add(struct lib_counter *counter, const size_t *dimension_indexes, int64_t v) +{ + return lttng_counter_add(&client_config, counter, dimension_indexes, v); +} + +static int counter_read(struct lib_counter *counter, const size_t *dimension_indexes, int cpu, + int64_t *value, bool *overflow, bool *underflow) +{ + return lttng_counter_read(&client_config, counter, dimension_indexes, cpu, value, + overflow, underflow); +} + +static int counter_aggregate(struct lib_counter *counter, const size_t *dimension_indexes, + int64_t *value, bool *overflow, bool *underflow) +{ + return lttng_counter_aggregate(&client_config, counter, dimension_indexes, value, + overflow, underflow); +} + +static int counter_clear(struct lib_counter *counter, const size_t *dimension_indexes) +{ + return lttng_counter_clear(&client_config, counter, dimension_indexes); +} + +static struct lttng_counter_transport lttng_counter_transport = { + .name = "counter-per-cpu-64-modular", + .ops = { + .counter_create = counter_create, + .counter_destroy = counter_destroy, + .counter_add = counter_add, + .counter_read = counter_read, + .counter_aggregate = counter_aggregate, + .counter_clear = counter_clear, + }, + 
.client_config = &client_config, +}; + +void lttng_counter_client_percpu_64_modular_init(void) +{ + lttng_counter_transport_register(<tng_counter_transport); +} + +void lttng_counter_client_percpu_64_modular_exit(void) +{ + lttng_counter_transport_unregister(<tng_counter_transport); +} diff --git a/liblttng-ust/lttng-events.c b/liblttng-ust/lttng-events.c index 66ec7b2c..6a089122 100644 --- a/liblttng-ust/lttng-events.c +++ b/liblttng-ust/lttng-events.c @@ -67,6 +67,7 @@ #include "ust-events-internal.h" #include "wait.h" #include "../libringbuffer/shm.h" +#include "../libcounter/counter.h" #include "jhash.h" #include @@ -171,6 +172,46 @@ struct lttng_session *lttng_session_create(void) return session; } +struct lttng_counter *lttng_ust_counter_create( + const char *counter_transport_name, + size_t number_dimensions, const struct lttng_counter_dimension *dimensions) +{ + struct lttng_counter_transport *counter_transport = NULL; + struct lttng_counter *counter = NULL; + + counter_transport = lttng_counter_transport_find(counter_transport_name); + if (!counter_transport) + goto notransport; + counter = zmalloc(sizeof(struct lttng_counter)); + if (!counter) + goto nomem; + + counter->ops = &counter_transport->ops; + counter->transport = counter_transport; + + counter->counter = counter->ops->counter_create( + number_dimensions, dimensions, 0, + -1, 0, NULL, false); + if (!counter->counter) { + goto create_error; + } + + return counter; + +create_error: + free(counter); +nomem: +notransport: + return NULL; +} + +static +void lttng_ust_counter_destroy(struct lttng_counter *counter) +{ + counter->ops->counter_destroy(counter->counter); + free(counter); +} + struct lttng_event_notifier_group *lttng_event_notifier_group_create(void) { struct lttng_event_notifier_group *event_notifier_group; @@ -352,7 +393,10 @@ void lttng_event_notifier_group_destroy( &event_notifier_group->event_notifiers_head, node) _lttng_event_notifier_destroy(notifier); - /* Close the notification fd to the listener of event notifiers. */ + if (event_notifier_group->error_counter) + lttng_ust_counter_destroy(event_notifier_group->error_counter); + + /* Close the notification fd to the listener of event_notifiers. */ lttng_ust_lock_fd_tracker(); close_ret = close(event_notifier_group->notification_fd); diff --git a/liblttng-ust/lttng-ust-abi.c b/liblttng-ust/lttng-ust-abi.c index 76fd7ea0..0d2058a3 100644 --- a/liblttng-ust/lttng-ust-abi.c +++ b/liblttng-ust/lttng-ust-abi.c @@ -56,6 +56,7 @@ #include "../libringbuffer/frontend_types.h" #include "../libringbuffer/shm.h" +#include "../libcounter/counter.h" #include "lttng-tracer.h" #include "string-utils.h" #include "ust-events-internal.h" @@ -637,6 +638,11 @@ long lttng_session_cmd(int objd, unsigned int cmd, unsigned long arg, return lttng_session_disable(session); case LTTNG_UST_SESSION_STATEDUMP: return lttng_session_statedump(session); + case LTTNG_UST_COUNTER: + case LTTNG_UST_COUNTER_GLOBAL: + case LTTNG_UST_COUNTER_CPU: + /* Not implemented yet. */ + return -EINVAL; default: return -EINVAL; } @@ -735,6 +741,133 @@ long lttng_event_notifier_enabler_cmd(int objd, unsigned int cmd, unsigned long } } +/** + * lttng_event_notifier_group_error_counter_cmd - lttng event_notifier group error counter object command + * + * @obj: the object + * @cmd: the command + * @arg: command arg + * @uargs: UST arguments (internal) + * @owner: objd owner + * + * This descriptor implements lttng commands: + * LTTNG_UST_COUNTER_GLOBAL + * Return negative error code on error, 0 on success. 
+ * LTTNG_UST_COUNTER_CPU + * Return negative error code on error, 0 on success. + */ +static +long lttng_event_notifier_group_error_counter_cmd(int objd, unsigned int cmd, unsigned long arg, + union ust_args *uargs, void *owner) +{ + struct lttng_counter *counter = objd_private(objd); + + switch (cmd) { + case LTTNG_UST_COUNTER_GLOBAL: + return -EINVAL; /* Unimplemented. */ + case LTTNG_UST_COUNTER_CPU: + { + struct lttng_ust_counter_cpu *counter_cpu = + (struct lttng_ust_counter_cpu *)arg; + return lttng_counter_set_cpu_shm(counter->counter, + counter_cpu->cpu_nr, uargs->counter_shm.shm_fd); + } + default: + return -EINVAL; + } +} + +int lttng_release_event_notifier_group_error_counter(int objd) +{ + struct lttng_counter *counter = objd_private(objd); + + if (counter) { + return lttng_ust_objd_unref(counter->event_notifier_group->objd, 0); + } else { + return -EINVAL; + } +} + +static const struct lttng_ust_objd_ops lttng_event_notifier_group_error_counter_ops = { + .release = lttng_release_event_notifier_group_error_counter, + .cmd = lttng_event_notifier_group_error_counter_cmd, +}; + +static +int lttng_ust_event_notifier_group_create_error_counter(int event_notifier_group_objd, void *owner, + struct lttng_ust_counter_conf *error_counter_conf) +{ + const char *counter_transport_name; + struct lttng_event_notifier_group *event_notifier_group = + objd_private(event_notifier_group_objd); + struct lttng_counter *counter; + int counter_objd, ret; + struct lttng_counter_dimension dimensions[1]; + size_t counter_len; + + if (event_notifier_group->error_counter) + return -EBUSY; + + if (error_counter_conf->arithmetic != LTTNG_UST_COUNTER_ARITHMETIC_MODULAR) + return -EINVAL; + + if (error_counter_conf->number_dimensions != 1) + return -EINVAL; + + switch (error_counter_conf->bitness) { + case LTTNG_UST_COUNTER_BITNESS_64BITS: + counter_transport_name = "counter-per-cpu-64-modular"; + break; + case LTTNG_UST_COUNTER_BITNESS_32BITS: + counter_transport_name = "counter-per-cpu-32-modular"; + break; + default: + return -EINVAL; + } + + counter_objd = objd_alloc(NULL, <tng_event_notifier_group_error_counter_ops, owner, + "event_notifier group error counter"); + if (counter_objd < 0) { + ret = counter_objd; + goto objd_error; + } + + counter_len = error_counter_conf->dimensions[0].size; + dimensions[0].size = counter_len; + dimensions[0].underflow_index = 0; + dimensions[0].overflow_index = 0; + dimensions[0].has_underflow = 0; + dimensions[0].has_overflow = 0; + + counter = lttng_ust_counter_create(counter_transport_name, 1, dimensions); + if (!counter) { + ret = -EINVAL; + goto create_error; + } + + event_notifier_group->error_counter = counter; + event_notifier_group->error_counter_len = counter_len; + + counter->objd = counter_objd; + counter->event_notifier_group = event_notifier_group; /* owner */ + + objd_set_private(counter_objd, counter); + /* The error counter holds a reference on the event_notifier group. 
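+	 * The reference is put back in
+	 * lttng_release_event_notifier_group_error_counter(), so the group
+	 * stays alive for as long as its error counter object descriptor does.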
*/ + objd_ref(event_notifier_group->objd); + + return counter_objd; + +create_error: + { + int err; + + err = lttng_ust_objd_unref(counter_objd, 1); + assert(!err); + } +objd_error: + return ret; +} + static long lttng_event_notifier_group_cmd(int objd, unsigned int cmd, unsigned long arg, union ust_args *uargs, void *owner) @@ -758,6 +891,13 @@ long lttng_event_notifier_group_cmd(int objd, unsigned int cmd, unsigned long ar LTTNG_ENABLER_FORMAT_EVENT); } } + case LTTNG_UST_COUNTER: + { + struct lttng_ust_counter_conf *counter_conf = + (struct lttng_ust_counter_conf *) uargs->counter.counter_data; + return lttng_ust_event_notifier_group_create_error_counter( + objd, owner, counter_conf); + } default: return -EINVAL; } diff --git a/liblttng-ust/lttng-ust-comm.c b/liblttng-ust/lttng-ust-comm.c index de03407b..4b13571d 100644 --- a/liblttng-ust/lttng-ust-comm.c +++ b/liblttng-ust/lttng-ust-comm.c @@ -350,6 +350,13 @@ static const char *cmd_name_mapping[] = { /* Event notifier group commands */ [ LTTNG_UST_EVENT_NOTIFIER_CREATE ] = "Create event notifier", + + /* Session and event notifier group commands */ + [ LTTNG_UST_COUNTER ] = "Create Counter", + + /* Counter commands */ + [ LTTNG_UST_COUNTER_GLOBAL ] = "Create Counter Global", + [ LTTNG_UST_COUNTER_CPU ] = "Create Counter CPU", }; static const char *str_timeout; @@ -365,6 +372,10 @@ extern void lttng_ring_buffer_client_overwrite_rt_exit(void); extern void lttng_ring_buffer_client_discard_exit(void); extern void lttng_ring_buffer_client_discard_rt_exit(void); extern void lttng_ring_buffer_metadata_client_exit(void); +extern void lttng_counter_client_percpu_32_modular_init(void); +extern void lttng_counter_client_percpu_32_modular_exit(void); +extern void lttng_counter_client_percpu_64_modular_init(void); +extern void lttng_counter_client_percpu_64_modular_exit(void); static char *get_map_shm(struct sock_info *sock_info); @@ -1133,6 +1144,79 @@ int handle_message(struct sock_info *sock_info, ret = -ENOSYS; } break; + case LTTNG_UST_COUNTER: + { + void *counter_data; + + len = ustcomm_recv_counter_from_sessiond(sock, + &counter_data, lum->u.counter.len); + switch (len) { + case 0: /* orderly shutdown */ + ret = 0; + goto error; + default: + if (len == lum->u.counter.len) { + DBG("counter data received"); + break; + } else if (len < 0) { + DBG("Receive failed from lttng-sessiond with errno %d", (int) -len); + if (len == -ECONNRESET) { + ERR("%s remote end closed connection", sock_info->name); + ret = len; + goto error; + } + ret = len; + goto error; + } else { + DBG("incorrect counter data message size: %zd", len); + ret = -EINVAL; + goto error; + } + } + args.counter.counter_data = counter_data; + if (ops->cmd) + ret = ops->cmd(lum->handle, lum->cmd, + (unsigned long) &lum->u, + &args, sock_info); + else + ret = -ENOSYS; + break; + } + case LTTNG_UST_COUNTER_GLOBAL: + { + /* Receive shm_fd */ + ret = ustcomm_recv_counter_shm_from_sessiond(sock, + &args.counter_shm.shm_fd); + if (ret) { + goto error; + } + + if (ops->cmd) + ret = ops->cmd(lum->handle, lum->cmd, + (unsigned long) &lum->u, + &args, sock_info); + else + ret = -ENOSYS; + break; + } + case LTTNG_UST_COUNTER_CPU: + { + /* Receive shm_fd */ + ret = ustcomm_recv_counter_shm_from_sessiond(sock, + &args.counter_shm.shm_fd); + if (ret) { + goto error; + } + + if (ops->cmd) + ret = ops->cmd(lum->handle, lum->cmd, + (unsigned long) &lum->u, + &args, sock_info); + else + ret = -ENOSYS; + break; + } + default: if (ops->cmd) ret = ops->cmd(lum->handle, lum->cmd, @@ -1958,6 +2042,8 @@ void 
__attribute__((constructor)) lttng_ust_init(void) lttng_ring_buffer_client_overwrite_rt_init(); lttng_ring_buffer_client_discard_init(); lttng_ring_buffer_client_discard_rt_init(); + lttng_counter_client_percpu_32_modular_init(); + lttng_counter_client_percpu_64_modular_init(); lttng_perf_counter_init(); /* * Invoke ust malloc wrapper init before starting other threads. @@ -2103,6 +2189,8 @@ void lttng_ust_cleanup(int exiting) lttng_ring_buffer_client_overwrite_rt_exit(); lttng_ring_buffer_client_overwrite_exit(); lttng_ring_buffer_metadata_client_exit(); + lttng_counter_client_percpu_32_modular_exit(); + lttng_counter_client_percpu_64_modular_exit(); lttng_ust_statedump_destroy(); exit_tracepoint(); if (!exiting) { diff --git a/liblttng-ust/ust-core.c b/liblttng-ust/ust-core.c index abea7bbd..e8dff983 100644 --- a/liblttng-ust/ust-core.c +++ b/liblttng-ust/ust-core.c @@ -28,6 +28,7 @@ #include "jhash.h" static CDS_LIST_HEAD(lttng_transport_list); +static CDS_LIST_HEAD(lttng_counter_transport_list); struct lttng_transport *lttng_transport_find(const char *name) { @@ -40,6 +41,17 @@ struct lttng_transport *lttng_transport_find(const char *name) return NULL; } +struct lttng_counter_transport *lttng_counter_transport_find(const char *name) +{ + struct lttng_counter_transport *transport; + + cds_list_for_each_entry(transport, <tng_counter_transport_list, node) { + if (!strcmp(transport->name, name)) + return transport; + } + return NULL; +} + /** * lttng_transport_register - LTT transport registration * @transport: transport structure @@ -62,6 +74,28 @@ void lttng_transport_unregister(struct lttng_transport *transport) cds_list_del(&transport->node); } +/** + * lttng_counter_transport_register - LTTng counter transport registration + * @transport: transport structure + * + * Registers a counter transport which can be used as output to extract + * the data out of LTTng. Called with ust_lock held. + */ +void lttng_counter_transport_register(struct lttng_counter_transport *transport) +{ + cds_list_add_tail(&transport->node, <tng_counter_transport_list); +} + +/** + * lttng_counter_transport_unregister - LTTng counter transport unregistration + * @transport: transport structure + * Called with ust_lock held. + */ +void lttng_counter_transport_unregister(struct lttng_counter_transport *transport) +{ + cds_list_del(&transport->node); +} + /* * Needed by comm layer. */
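The following is an illustrative sketch (not part of this change) of how a session daemon could drive the new ustctl counter API end to end: create a per-cpu 64-bit modular counter, then ship its configuration and per-cpu shared-memory files to an application. The sock, group_handle and cpu_fds variables are hypothetical and assumed to be set up by the caller (one pre-opened shm fd per possible CPU, using the public lttng/ust-ctl.h declarations); error cleanup is abridged.

#include <stddef.h>
#include <lttng/ust-ctl.h>

static int setup_error_counter(int sock, int group_handle, const int *cpu_fds)
{
	struct ustctl_counter_dimension dimensions[1] = {
		[0] = { .size = 128, },	/* single dimension of 128 slots */
	};
	struct lttng_ust_object_data *counter_data, *cpu_data;
	struct ustctl_daemon_counter *counter;
	int nr_cpus = ustctl_get_nr_cpu_per_counter();
	int cpu, ret;

	/* Only per-cpu allocation is currently supported. */
	counter = ustctl_create_counter(1, dimensions, 0, -1,
			nr_cpus, cpu_fds,
			USTCTL_COUNTER_BITNESS_64,
			USTCTL_COUNTER_ARITHMETIC_MODULAR,
			USTCTL_COUNTER_ALLOC_PER_CPU);
	if (!counter)
		return -1;

	/* Serialize the counter configuration and send it to the app. */
	ret = ustctl_create_counter_data(counter, &counter_data);
	if (ret)
		return ret;
	ret = ustctl_send_counter_data_to_ust(sock, group_handle, counter_data);
	if (ret)
		return ret;

	/* Hand each per-cpu shm fd over to the application. */
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		ret = ustctl_create_counter_cpu_data(counter, cpu, &cpu_data);
		if (ret)
			return ret;
		ret = ustctl_send_counter_cpu_data_to_ust(sock, counter_data,
				cpu_data);
		if (ret)
			return ret;
	}
	return 0;
}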